text stringlengths 0 1.05M | meta dict |
|---|---|
__all__ = ['Evaluable']
class Evaluable(object):
    """
    A mixin class for types inherited from SymPy that may carry nested
    unevaluated arguments.

    This mixin is used to implement lazy evaluation of expressions.
    """

    @classmethod
    def _evaluate_maybe_nested(cls, maybe_evaluable):
        # An Evaluable knows how to evaluate itself.
        if isinstance(maybe_evaluable, Evaluable):
            return maybe_evaluable.evaluate
        try:
            if not maybe_evaluable.args:
                return maybe_evaluable
            # Not an Evaluable itself, but Evaluables may hide within `args`:
            # recurse and rebuild only if anything actually changed.
            evaluated = [Evaluable._evaluate_maybe_nested(a)
                         for a in maybe_evaluable.args]
            changed = any(a is not b
                          for a, b in zip(evaluated, maybe_evaluable.args))
            try:
                return maybe_evaluable.func(*evaluated, evaluate=changed)
            except TypeError:
                # Not all objects support the `evaluate` kwarg
                return maybe_evaluable.func(*evaluated)
        except AttributeError:
            # No `args` to be visited
            return maybe_evaluable

    @property
    def args(self):
        # Leaf by default: no nested arguments.
        return ()

    @property
    def func(self):
        # Mirrors SymPy's convention: `func(*args)` rebuilds the object.
        return type(self)

    def _evaluate_args(self):
        return [Evaluable._evaluate_maybe_nested(a) for a in self.args]

    @property
    def evaluate(self):
        """Return a new object from the evaluation of ``self``."""
        evaluated = self._evaluate_args()
        changed = any(a is not b for a, b in zip(evaluated, self.args))
        return self.func(*evaluated, evaluate=changed)
| {
"repo_name": "opesci/devito",
"path": "devito/types/lazy.py",
"copies": "1",
"size": "1611",
"license": "mit",
"hash": 1564296845042596600,
"line_mean": 31.8775510204,
"line_max": 90,
"alpha_frac": 0.5896958411,
"autogenerated": false,
"ratio": 4.184415584415585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006961463436595591,
"num_lines": 49
} |
__all__ = ['EventEmitter', 'on', 'once']
import types
class ListenerWrapper(object):
    """Wrap a listener callable, remembering whether it fires only once."""

    def __init__(self, listener, is_once=False):
        self.listener = listener
        self.is_once = is_once

    def __call__(self, *args, **kwargs):
        self.listener(*args, **kwargs)

    def __eq__(self, other):
        # A wrapper equals another wrapper around the same listener, and it
        # also equals the bare listener itself, so EventEmitter.remove()
        # works whether callers pass the wrapper or the original callable.
        if isinstance(other, ListenerWrapper):
            return other.listener == self.listener
        if callable(other):
            # Generalized from `isinstance(other, types.FunctionType)`:
            # bound methods, partials and callable objects now compare
            # too, which the FunctionType check silently excluded.
            return other == self.listener
        return False

    def __hash__(self):
        # Defining __eq__ alone would make instances unhashable; hash by
        # the wrapped listener, consistent with __eq__.
        return hash(self.listener)
class EventEmitter(object):
    """Node-style event emitter: register listeners with ``on``/``once``
    and invoke them with ``emit``."""

    def __init__(self):
        # event name -> list of ListenerWrapper, in registration order
        self._events = {}

    def on(self, event, listener):
        """Register `listener` for every emission of `event`."""
        self._on(event, ListenerWrapper(listener))

    def once(self, event, listener):
        """Register `listener` to fire at most once for `event`."""
        self._on(event, ListenerWrapper(listener, is_once=True))

    def _on(self, event, listener_wrapper):
        # setdefault replaces the non-idiomatic `if not event in ...` check.
        self._events.setdefault(event, []).append(listener_wrapper)

    def emit(self, event, *args, **kwargs):
        """Call every listener registered for `event` with the given args."""
        if event in self._events:
            # 'once' may delete items while iterating over listeners -> we use a copy
            for listener in self._events[event][:]:
                if listener.is_once:
                    # Removed before the call, so a re-entrant emit from
                    # inside the listener cannot trigger it again.
                    self.remove(event, listener)
                listener(*args, **kwargs)

    def remove(self, event, listener):
        """Remove the first registration matching `listener`, if present."""
        if event in self._events:
            listeners = self._events[event]
            if listener in listeners:
                listeners.remove(listener)

    def remove_all(self, event):
        """Drop every listener registered for `event`."""
        if event in self._events:
            self._events[event] = []

    def count(self, event):
        """Return the number of listeners currently registered for `event`."""
        return len(self._events.get(event, []))
def on(emitter, event):
    """Decorator registering the decorated function as a listener of
    `event` on `emitter`; the function is returned unchanged."""
    def register(func):
        emitter.on(event, func)
        return func
    return register
def once(emitter, event):
    """Decorator registering the decorated function as a one-shot listener
    of `event` on `emitter`; the function is returned unchanged."""
    def register(func):
        emitter.once(event, func)
        return func
    return register
| {
"repo_name": "etissieres/PyEventEmitter",
"path": "event_emitter/events.py",
"copies": "1",
"size": "1995",
"license": "mit",
"hash": 1080353870556972800,
"line_mean": 26.3287671233,
"line_max": 85,
"alpha_frac": 0.5839598997,
"autogenerated": false,
"ratio": 4.071428571428571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5155388471128571,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Event','EventDispatcher']
class Event(object):
    """A simple multicast event: every handler registered via ``handle``
    is invoked when the event is fired."""

    def __init__(self):
        self.handlers = list()

    def handle(self, handler):
        """Register `handler`; returns self so ``event += handler`` works."""
        self.handlers.append(handler)
        return self

    def unhandle(self, handler):
        """Remove `handler`; raises ValueError if it was never registered."""
        try:
            self.handlers.remove(handler)
        except ValueError:
            # Was a bare `except:` -- only a missing handler should be
            # translated here, not unrelated errors (KeyboardInterrupt,
            # SystemExit, ...), which a bare except would swallow.
            raise ValueError("Handler is not handling this event, so cannot unhandle it.")
        return self

    def fire(self, *args, **kargs):
        """Invoke every handler with the given arguments, in order."""
        for handler in self.handlers:
            handler(*args, **kargs)

    def getHandlerCount(self):
        return len(self.handlers)

    # Operator sugar: ev += h, ev -= h, ev(...), len(ev)
    __iadd__ = handle
    __isub__ = unhandle
    __call__ = fire
    __len__ = getHandlerCount
class EventDispatcher(object):
    """Creates an :class:`Event` attribute for every name in ``__events__``."""

    __events__ = []

    def __init__(self):
        for name in self.__events__:
            self.add_event(name)

    def add_event(self, event_name):
        """Attach a fresh Event as attribute `event_name`.

        If the instance already has an attribute with that name, it is
        registered as the first handler of the new event.
        """
        event = Event()
        if hasattr(self, event_name):
            event += getattr(self, event_name)
        setattr(self, event_name, event)
| {
"repo_name": "rocktavious/pyul",
"path": "pyul/coreUtils/event.py",
"copies": "1",
"size": "1049",
"license": "mit",
"hash": 174686829210697540,
"line_mean": 22.8409090909,
"line_max": 90,
"alpha_frac": 0.5672068637,
"autogenerated": false,
"ratio": 3.988593155893536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5055800019593536,
"avg_score": null,
"num_lines": null
} |
__all__ = ("Event", "EventManager", "VERSION")
VERSION = ("0", "7")
class Event(list):
    """An event: a list of handlers invoked in order when fired.

    Firing stops early if a handler raises StopIteration.  All list
    methods are supported.

    Event.eventmanager => The EventManager for event.
    Event.name => The name for the event
    """

    def __init__(self, *args):
        super(Event, self).__init__(args)
        self.eventmanager = None
        self.name = None

    def clear(self):
        """Clears list of handlers."""
        del self[:]

    def add_handler(self, handler):
        """Add `handler` after verifying it is callable.
        Use Event.append() to bypass the check."""
        if not callable(handler):
            raise TypeError("'%s' is not callable." % handler)
        self.append(handler)

    def remove_handler(self, handler):
        """Remove `handler`.  Same as Event.remove()."""
        self.remove(handler)

    def fire(self, *args, **kwargs):
        """Execute every handler with the given args and kwargs.
        Calling the event directly (``Event(...)``) is equivalent."""
        if self.eventmanager:
            # Propagate to the manager's global `got_event` first
            # (only when this event is attached to a manager).
            self.eventmanager.got_event(self.name, *args, **kwargs)
        for handler in self:
            try:
                handler(*args, **kwargs)
            except StopIteration:
                # A handler may veto the remaining handlers.
                break

    def __call__(self, *args, **kwargs):
        self.fire(*args, **kwargs)
class EventManager(dict):
    """Object for managing events, basically acts like a dict.

    EventManager.got_event => An event that will be fired whenever another
                              event is fired, with the fired events name,
                              and the arguments it was called with.
    Handlers to got_event should at least accept 1 arg (name).
    Example:
    EventManager.test_event() # Will fire test_event()
    # This will also cause EventManager.got_event to fire, like so:
    EventManager.got_event("test_event", *args, **kwargs)
    # args and kwargs are the arguments given to test_event originally."""
    def __init__(self, *args, **kwargs):
        super(EventManager, self).__init__(*args, **kwargs)
        # NOTE: attribute assignment is routed through __setattr__ below,
        # so `got_event` is actually stored as self["got_event"] and passes
        # through __setitem__ (which tags it with a name and this manager);
        # the two lines after immediately override those values.
        self.got_event = Event()  # Setup out global event, this will
        # fire every time an event is fired.
        self.got_event.name = "GLOBAL"
        self.got_event.eventmanager = None  # To stop looping forever.
    def __setitem__(self, key, value):
        # Events stored in the manager are tagged with their key and a
        # back-reference, so Event.fire() can notify got_event.
        if isinstance(value, Event):  # If it is an event:
            value.name = key  # Set it's name.
            value.eventmanager = self  # Set it's eventmanager
        super(EventManager, self).__setitem__(key, value)
    def __getattr__(self, name):  # So we can use '.'
        # Only called when normal attribute lookup fails; missing keys
        # therefore raise KeyError (not AttributeError).
        return self[name]
    def __setattr__(self, name, value):  # So we can use '.'
        self[name] = value
    def apply(self, events):
        """
        EventManager.apply(events) -> Takes an object with methods, and applies
        them to EventManager.
        Example:
        class TestEvents(object):
            @staticmethod
            def test_method():
                pass
        e = TestEvents()
        em = EventManager()
        em.apply(e)
        # em now has an event called test_method,
        # and e.test_method as handler."""
        for method in dir(events):
            # Skip attributes
            if not callable(getattr(events, method)):
                continue
            # Skip "trash" functions
            if method.startswith("_"):
                continue
            if not hasattr(self, method):  # Didn't have such an event already
                self[method] = Event()  # So we create it
            self[method].add_handler(getattr(events, method))
| {
"repo_name": "dkkline/EventManager",
"path": "EventManager/EventManager.py",
"copies": "1",
"size": "4211",
"license": "mit",
"hash": -2693124640173185500,
"line_mean": 34.6173913043,
"line_max": 79,
"alpha_frac": 0.5732605082,
"autogenerated": false,
"ratio": 4.427970557308097,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5501231065508096,
"avg_score": null,
"num_lines": null
} |
__all__ = ['eventhandler', 'send_event', '_connect_event']
import asyncio
import functools
import inspect
from .asyncio_loop import loop
from .log import logger
_registered_events = {} # dict of all registered events
# @eventhandler decorator
def eventhandler(event):
    """Decorator that registers the decorated function as a handler of
    `event` and returns a transparent wrapper around it."""
    def eventhandler_decorator(func):
        # Removed leftover debug code:
        #   if inspect.ismethod(func): print('2')
        _connect_event(event, func)
        module = inspect.getmodule(func)
        # Lazy %-style args: the message is only built if DEBUG is enabled.
        logger.debug("event '%s' connected to %s in module %s",
                     event, func, module)

        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return func_wrapper
    return eventhandler_decorator
def send_event(server, event, data=None):
    """Dispatch `event` to all registered handlers.

    Coroutine handlers are scheduled on the event loop and receive
    `server` as first argument; plain handlers are called synchronously.
    Handler errors are logged and reported via the server, never raised.
    """
    if event not in _registered_events:
        return
    for handler in _registered_events[event]:
        try:
            if asyncio.iscoroutinefunction(handler):
                # `is not None` (was truthiness): falsy payloads such as
                # 0, '' or {} must still be delivered to the handler.
                if data is not None:
                    task = loop.create_task(handler(server, data))
                else:
                    task = loop.create_task(handler(server))
                task.add_done_callback(server.handle_done_task)
            else:
                if data is not None:
                    handler(data)
                else:
                    handler()
        except Exception as exp:
            # TODO delete fault event
            logger.error(str(exp))
            server.chat_send_error(str(exp))
def _connect_event(event, function):
    """Register `function` as a handler of `event`; duplicates raise."""
    handlers = _registered_events.setdefault(event, [])
    if function in handlers:
        raise Exception('function already registered')
    handlers.append(function)
| {
"repo_name": "juergenz/pie",
"path": "src/pie/events.py",
"copies": "1",
"size": "1959",
"license": "mit",
"hash": 6447212452673710000,
"line_mean": 26.9857142857,
"line_max": 106,
"alpha_frac": 0.5461970393,
"autogenerated": false,
"ratio": 4.277292576419214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5323489615719214,
"avg_score": null,
"num_lines": null
} |
__all__ = ['EventKeys']
class EventKeys(object):
    """Defines event keys for TFSnippet.

    Each constant is the string key of an event; the parenthesized prefix
    in each comment names the component(s) that fire it.
    """
    # (TrainLoop) Enter the train loop.
    ENTER_LOOP = 'enter_loop'
    # (TrainLoop) Exit the train loop.
    EXIT_LOOP = 'exit_loop'
    # (TrainLoop) When metrics (except time metrics) have been collected.
    METRICS_COLLECTED = 'metrics_collected'
    # (TrainLoop) When time metrics have been collected.
    TIME_METRICS_COLLECTED = 'time_metrics_collected'
    # (TrainLoop) When metric statistics have been printed.
    METRIC_STATS_PRINTED = 'metric_stats_printed'
    # (TrainLoop) When time metric statistics have been printed.
    TIME_METRIC_STATS_PRINTED = 'time_metric_stats_printed'
    # (TrainLoop) When TensorFlow summary has been added.
    SUMMARY_ADDED = 'summary_added'
    # (TrainLoop, Trainer) Before executing an epoch.
    BEFORE_EPOCH = 'before_epoch'
    # (Trainer) Run evaluation after an epoch.
    EPOCH_EVALUATION = 'epoch_evaluation'
    # (Trainer) Anneal after an epoch.
    EPOCH_ANNEALING = 'epoch_annealing'
    # (Trainer) Log after an epoch.
    EPOCH_LOGGING = 'epoch_logging'
    # (TrainLoop, Trainer) After executing an epoch.
    AFTER_EPOCH = 'after_epoch'
    # (TrainLoop, Trainer) Before executing a step.
    BEFORE_STEP = 'before_step'
    # (Trainer) Run evaluation after a step.
    STEP_EVALUATION = 'step_evaluation'
    # (Trainer) Anneal after a step.
    STEP_ANNEALING = 'step_annealing'
    # (Trainer) Log after a step.
    STEP_LOGGING = 'step_logging'
    # (TrainLoop, Trainer) After executing a step.
    AFTER_STEP = 'after_step'
    # (Trainer, Evaluator) Before execution.
    BEFORE_EXECUTION = 'before_execution'
    # (Evaluator) After execution.
    AFTER_EXECUTION = 'after_execution'
| {
"repo_name": "korepwx/tfsnippet",
"path": "tfsnippet/scaffold/event_keys.py",
"copies": "1",
"size": "1776",
"license": "mit",
"hash": -4137169072059834000,
"line_mean": 28.1147540984,
"line_max": 73,
"alpha_frac": 0.6745495495,
"autogenerated": false,
"ratio": 3.3893129770992365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9563862526599236,
"avg_score": 0,
"num_lines": 61
} |
__all__ = ('EVENT_SCHEDULER_START', 'EVENT_SCHEDULER_SHUTDOWN',
           'EVENT_SCHEDULER_JOB_ADDED', 'EVENT_SCHEDULER_JOB_REMOVED',
           'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED',
           'EVENT_ALL', 'SchedulerEvent', 'JobEvent')

# Event type codes: distinct powers of two so listeners can subscribe to
# any combination via a bitmask.
EVENT_SCHEDULER_START = 1        # The scheduler was started
EVENT_SCHEDULER_SHUTDOWN = 2     # The scheduler was shut down
EVENT_SCHEDULER_JOB_ADDED = 4    # A job was added to a job store
EVENT_SCHEDULER_JOB_REMOVED = 8  # A job was removed from a job store
EVENT_JOB_EXECUTED = 16          # A job was executed successfully
EVENT_JOB_ERROR = 32             # A job raised an exception during execution
EVENT_JOB_MISSED = 64            # A job's execution was missed

# Mask matching every event type defined above.
EVENT_ALL = (EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN |
             EVENT_SCHEDULER_JOB_ADDED | EVENT_SCHEDULER_JOB_REMOVED |
             EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)
class SchedulerEvent(object):
    """An event concerning the scheduler itself.

    :var code: the type code of this event
    :var job: the job involved, or ``None`` for pure scheduler events
    """

    def __init__(self, code, job=None):
        # Plain data holder; listeners inspect these attributes directly.
        self.code = code
        self.job = job
class JobEvent(SchedulerEvent):
    """An event that concerns the execution of individual jobs.

    :var job: the job instance in question
    :var scheduled_run_time: the time when the job was scheduled to be run
    :var retval: the return value of the successfully executed job
    :var exception: the exception raised by the job
    :var traceback: the traceback object associated with the exception
    """

    def __init__(self, code, job, scheduled_run_time, retval=None,
                 exception=None, traceback=None):
        # Delegate the common `code` handling to the base class.
        super(JobEvent, self).__init__(code)
        self.job = job
        self.scheduled_run_time = scheduled_run_time
        self.retval = retval
        self.exception = exception
        self.traceback = traceback
| {
"repo_name": "ecdpalma/napscheduler",
"path": "napscheduler/events.py",
"copies": "1",
"size": "1921",
"license": "mit",
"hash": 1900687860288430300,
"line_mean": 40.7608695652,
"line_max": 77,
"alpha_frac": 0.6574700677,
"autogenerated": false,
"ratio": 3.7156673114119925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9869740639981558,
"avg_score": 0.0006793478260869565,
"num_lines": 46
} |
__all__ = ['EventSource']
class EventSource(object):
    """
    An object that may trigger events.

    Designed to either be inherited from, or held as a member::

        def event_handler(**kwargs):
            print('event triggered: args {}, kwargs {}'.format(args, kwargs))

        # use alone
        class SomeObject(EventSource):
            def func(self, **kwargs):
                self.fire('some_event', **kwargs)

        obj = SomeObject()
        obj.on('some_event', event_handler)

        # use as a member
        class SomeObject(object):
            def __init__(self):
                self.events = EventSource()
            def func(self, **kwargs):
                self.events.fire('some_event', **kwargs)

        obj = SomeObject()
        obj.events.on('some_event', event_handler)
    """

    def __init__(self, allowed_event_keys=None):
        """
        Construct a new :class:`EventSource`.

        Args:
            allowed_event_keys (Iterable[str]): The allowed event keys
                in :meth:`on` and :meth:`fire`.  If not specified, all
                names are allowed.
        """
        if allowed_event_keys is not None:
            # Keep only keys whose string form is non-empty.  NOTE(review):
            # like the original `filter(str, ...)`, the keys themselves are
            # NOT converted to str here -- non-str keys would never match
            # the str()-normalized lookups below; confirm intent upstream.
            allowed_event_keys = tuple(
                k for k in allowed_event_keys if str(k))
        self._event_handlers_map = {}  # type: dict[str, list]
        self._allowed_event_keys = allowed_event_keys

    def _check_allowed(self, event_key):
        # Raise KeyError when a whitelist exists and `event_key` is absent.
        if self._allowed_event_keys is not None and \
                event_key not in self._allowed_event_keys:
            raise KeyError('`event_key` is not allowed: {}'.format(event_key))

    def on(self, event_key, handler):
        """
        Register a new event handler.

        Args:
            event_key (str): The event key.
            handler ((*args, **kwargs) -> any): The event handler.

        Raises:
            KeyError: If `event_key` is not allowed.
        """
        event_key = str(event_key)
        self._check_allowed(event_key)
        self._event_handlers_map.setdefault(event_key, []).append(handler)

    def off(self, event_key, handler):
        """
        De-register an event handler.

        Args:
            event_key (str): The event key.
            handler ((*args, **kwargs) -> any): The event handler.

        Raises:
            ValueError: If `handler` is not a registered event handler of
                the specified event `event_key`.
        """
        event_key = str(event_key)
        try:
            self._event_handlers_map[event_key].remove(handler)
        except (KeyError, ValueError):
            # Missing event key and missing handler report identically.
            raise ValueError('`handler` is not a registered event handler of '
                             'event `{}`: {}'.format(event_key, handler))

    def _fire(self, event_key, args, kwargs, reverse=False):
        # Shared implementation of fire()/reverse_fire().
        event_key = str(event_key)
        self._check_allowed(event_key)
        handlers = self._event_handlers_map.get(event_key)
        if handlers:
            ordered = reversed(handlers) if reverse else handlers
            for handler in ordered:
                handler(*args, **kwargs)

    def fire(self, event_key, *args, **kwargs):
        """
        Fire an event, calling handlers in registration order.

        Args:
            event_key (str): The event key.
            *args: Arguments to be passed to the event handler.
            \\**kwargs: Named arguments to be passed to the event handler.

        Raises:
            KeyError: If `event_key` is not allowed.
        """
        return self._fire(event_key, args, kwargs, reverse=False)

    def reverse_fire(self, event_key, *args, **kwargs):
        """
        Fire an event, calling handlers in reversed registration order.

        Args:
            event_key (str): The event key.
            *args: Arguments to be passed to the event handler.
            \\**kwargs: Named arguments to be passed to the event handler.

        Raises:
            KeyError: If `event_key` is not allowed.
        """
        return self._fire(event_key, args, kwargs, reverse=True)

    def clear_event_handlers(self, event_key=None):
        """
        Clear event handlers.

        Args:
            event_key (str or None): If specified, clear the handlers of
                this event only; otherwise clear all event handlers.
        """
        if event_key is None:
            self._event_handlers_map.clear()
        else:
            self._event_handlers_map.pop(str(event_key), None)
| {
"repo_name": "korepwx/tfsnippet",
"path": "tfsnippet/utils/events.py",
"copies": "1",
"size": "4673",
"license": "mit",
"hash": -5804295705750285000,
"line_mean": 32.8623188406,
"line_max": 79,
"alpha_frac": 0.5566017548,
"autogenerated": false,
"ratio": 4.194793536804309,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5251395291604309,
"avg_score": null,
"num_lines": null
} |
"""All events related tests."""
import pytest
import transaction
from mock import MagicMock
from pyramid import testing
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from sqlalchemy.orm.exc import NoResultFound
from pytest_pyramid import factories
from velruse import AuthenticationComplete
from pyramid_fullauth.models import User
from pyramid_fullauth.views.social import SocialLoginViews
from pyramid_fullauth.events import (
BeforeLogIn,
AfterLogIn,
AlreadyLoggedIn,
BeforeEmailChange,
AfterEmailChange,
AfterEmailChangeActivation,
BeforeReset,
AfterResetRequest,
AfterReset,
AfterSocialRegister,
AfterSocialLogIn,
SocialAccountAlreadyConnected,
AfterActivate,
BeforeRegister,
AfterRegister,
)
from tests.tools import authenticate, is_user_logged, DEFAULT_USER
from tests.views.conftest import mock_translate
EVENT_PATH = "/event?event={0.__name__}"
EVENT_URL = "http://localhost" + EVENT_PATH
def unregister_subscriber(config, event):
    """Unregister the first subscriber found for the given event class."""
    # pylint:disable=protected-access
    subscribers = config.registry.adapters._subscribers[1]
    # Iterate over a snapshot: deleting from a dict while iterating its
    # live keys() view raises RuntimeError on the next step (the original
    # only survived because of the immediate break).
    for key in list(subscribers.keys()):
        if key.implementedBy(event):
            del subscribers[key]
            break
# Shared pyramid config fixture used by all event-subscriber fixtures below:
# a login-enabled app with short auth-ticket lifetimes and strict password
# rules, including the test views defined in this module.
evented_config = factories.pyramid_config(
    settings={  # pylint:disable=invalid-name
        "env": "login",
        "fullauth.authtkt.timeout": 2,
        "fullauth.authtkt.reissue_time": 0.2,
        "fullauth.register.password.require": True,
        "fullauth.register.password.length_min": 8,
        "fullauth.register.password.confirm": True,
        "pyramid.includes": [
            "pyramid_tm",
            "pyramid_fullauth",
            "tests.tools.include_views",
            "tests.views.test_events.include_views",
        ],
    }
)
def include_views(config):
    """Configure pyramid to include the test /event route and its views."""
    config.add_route("event", "/event")
    config.scan("tests.views.test_events")
@view_config(route_name="event", renderer="json")
def event_view(request):
    """Return exactly received value."""
    # BUG FIX: was the set literal {"event", request.GET.get("event")},
    # which the JSON renderer cannot serialize; a dict was intended.
    return {"event": request.GET.get("event")}
def redirect_to_secret(event):
    """Redirect to the event page, passing the event class name as the
    `event` query parameter."""
    target = event.request.route_path(
        "event", _query=(("event", event.__class__.__name__),))
    raise HTTPFound(target)
def raise_attribute_error(event):
    """Raise AttributeError named after the event class when the event
    carries a (truthy) user; do nothing otherwise."""
    if not event.user:
        return
    raise AttributeError(type(event).__name__)
@pytest.fixture
def alreadyloggedin_config(evented_config):  # pylint:disable=redefined-outer-name
    """Add AlreadyLoggedIn event subscriber that redirects to event page."""
    evented_config.add_subscriber(redirect_to_secret, AlreadyLoggedIn)
    evented_config.commit()
    yield evented_config
    # Teardown: drop the subscriber so other fixtures reusing
    # `evented_config` start from a clean registry.
    unregister_subscriber(evented_config, AlreadyLoggedIn)
alreadyloggedin_app = factories.pyramid_app("alreadyloggedin_config") # pylint:disable=invalid-name
@pytest.mark.usefixtures("active_user")
def test_default_login_redirect_from_event(
    alreadyloggedin_app,
):  # pylint:disable=redefined-outer-name
    """After successful login, access to login page should result in redirect."""
    authenticate(alreadyloggedin_app)
    # The AlreadyLoggedIn subscriber turns the second /login visit into a
    # redirect to the /event page carrying the event name.
    res = alreadyloggedin_app.get("/login", status=302)
    assert res.location == EVENT_URL.format(AlreadyLoggedIn)
@pytest.fixture
def beforelogin_config(evented_config):  # pylint:disable=redefined-outer-name
    """Add BeforeLogIn event that raises AttributeError with event class name."""
    evented_config.add_subscriber(raise_attribute_error, BeforeLogIn)
    evented_config.commit()
    yield evented_config
    # Teardown: remove the subscriber after the test finishes.
    unregister_subscriber(evented_config, BeforeLogIn)
beforelogin_app = factories.pyramid_app("beforelogin_config") # pylint:disable=invalid-name
@pytest.mark.usefixtures("active_user")
def test_error_beforelogin(beforelogin_app):  # pylint:disable=redefined-outer-name
    """Test errors from BeforeLogIn event."""
    res = beforelogin_app.get("/login")
    post_data = {
        "email": DEFAULT_USER["email"],
        "password": DEFAULT_USER["password"],
        "csrf_token": res.form["csrf_token"].value,
    }
    res = beforelogin_app.post("/login", post_data, xhr=True)
    # The subscriber raised AttributeError("BeforeLogIn"); the login view
    # reports it as a failed login with the exception message.
    assert res.json["status"] is False
    assert res.json["msg"] == "BeforeLogIn"
@pytest.fixture
def afterlogin_config(evented_config):  # pylint:disable=redefined-outer-name
    """Add AfterLogIn event subscriber that redirects to event page."""
    evented_config.add_subscriber(redirect_to_secret, AfterLogIn)
    evented_config.commit()
    yield evented_config
    # Teardown: remove the subscriber after the test finishes.
    unregister_subscriber(evented_config, AfterLogIn)
afterlogin_app = factories.pyramid_app("afterlogin_config") # pylint:disable=invalid-name
@pytest.mark.usefixtures("active_user")
def test_login_redirect(afterlogin_app):  # pylint:disable=redefined-outer-name
    """Log in and test redirect from AfterLogIn."""
    assert is_user_logged(afterlogin_app) is False
    res = authenticate(afterlogin_app)
    # The redirect raised by the subscriber must not abort the login itself.
    assert res.location == EVENT_URL.format(AfterLogIn)
    assert is_user_logged(afterlogin_app) is True
@pytest.fixture
def afterloginerror_config(evented_config):  # pylint:disable=redefined-outer-name
    """Add AfterLogIn event subscriber that raises AttributeError."""
    evented_config.add_subscriber(raise_attribute_error, AfterLogIn)
    evented_config.commit()
    yield evented_config
    # Teardown: remove the subscriber after the test finishes.
    unregister_subscriber(evented_config, AfterLogIn)
afterloginerror_app = factories.pyramid_app("afterloginerror_config") # pylint:disable=invalid-name
@pytest.mark.usefixtures("active_user")
def test_error_afterlogin(afterloginerror_app):  # pylint:disable=redefined-outer-name
    """Test errors from BeforeLogIn event."""
    res = afterloginerror_app.get("/login")
    post_data = {
        "email": DEFAULT_USER["email"],
        "password": DEFAULT_USER["password"],
        "csrf_token": res.form["csrf_token"].value,
    }
    res = afterloginerror_app.post("/login", post_data, xhr=True)
    assert res.json["status"] is False
    assert res.json["msg"] == "AfterLogIn"
    # An error raised in AfterLogIn must abort the login entirely.
    assert is_user_logged(afterloginerror_app) is False
@pytest.fixture
def beforeemailchange_config(evented_config):  # pylint:disable=redefined-outer-name
    """Add BeforeEmailChange event subscriber that raises AttributeError."""
    evented_config.add_subscriber(raise_attribute_error, BeforeEmailChange)
    evented_config.commit()
    yield evented_config
    # Teardown: remove the subscriber after the test finishes.
    unregister_subscriber(evented_config, BeforeEmailChange)
beforeemailchange_app = factories.pyramid_app("beforeemailchange_config") # pylint:disable=invalid-name
@pytest.mark.usefixtures("active_user")
def test_beforeemailchange_error(
    beforeemailchange_app,
):  # pylint:disable=redefined-outer-name
    """Raise AttributeError from BeforeEmailChange event."""
    app = beforeemailchange_app
    authenticate(app)
    new_email = "email@email.com"
    # GET first to obtain a valid CSRF token for the XHR POST below.
    res = app.get("/email/change")
    res = app.post(
        "/email/change",
        {"csrf_token": res.form["csrf_token"].value, "email": new_email},
        xhr=True,
    )
    assert res.json["status"] is False
    assert res.json["msg"] == "BeforeEmailChange"
@pytest.fixture
def afteremailchange_config(evented_config):  # pylint:disable=redefined-outer-name
    """Add AfterEmailChange, AfterEmailChangeActivation event subscriber that redirects to event page."""
    evented_config.add_subscriber(redirect_to_secret, AfterEmailChange)
    evented_config.add_subscriber(redirect_to_secret, AfterEmailChangeActivation)
    evented_config.commit()
    yield evented_config
    # Teardown: remove both subscribers after the test finishes.
    unregister_subscriber(evented_config, AfterEmailChange)
    unregister_subscriber(evented_config, AfterEmailChangeActivation)
afteremailchange_app = factories.pyramid_app("afteremailchange_config") # pylint:disable=invalid-name
@pytest.mark.usefixtures("active_user")
def test_afteremailchange(db_session, afteremailchange_app):  # pylint:disable=redefined-outer-name
    """Redirect after successful email change request."""
    app = afteremailchange_app
    authenticate(app)
    email = DEFAULT_USER["email"]
    new_email = "email@email.com"
    user = db_session.query(User).filter(User.email == email).one()
    res = app.get("/email/change")
    form = res.form
    form["email"] = new_email
    res = form.submit()
    assert res.location == EVENT_URL.format(AfterEmailChange)
    transaction.commit()
    # Email change is two-phase: the new address is stored as `new_email`
    # with an activation key; the active address is untouched until then.
    user = db_session.query(User).filter(User.email == email).one()
    assert user.new_email == new_email
    assert user.email == email
    assert user.email_change_key is not None
@pytest.mark.usefixtures("active_user")
def test_afteremailchange_xhr(db_session, afteremailchange_app):  # pylint:disable=redefined-outer-name
    """Change email with valid data."""
    app = afteremailchange_app
    authenticate(app)
    email = DEFAULT_USER["email"]
    new_email = "email@email.com"
    user = db_session.query(User).filter(User.email == email).one()
    res = app.get("/email/change")
    res = app.post(
        "/email/change",
        {"csrf_token": res.form["csrf_token"].value, "email": new_email},
        xhr=True,
    )
    # XHR requests get a JSON payload instead of an HTTP redirect.
    assert res.json["status"] is True
    assert res.json["url"] == EVENT_PATH.format(AfterEmailChange)
    transaction.commit()
    # Two-phase change: new address pending, active address untouched.
    user = db_session.query(User).filter(User.email == email).one()
    assert user.new_email == new_email
    assert user.email == email
    assert user.email_change_key is not None
@pytest.mark.usefixtures("active_user")
def test_afteremailchangeactivation(db_session, afteremailchange_app):  # pylint:disable=redefined-outer-name
    """Confirm email change view with redirect from AfterEmailChangeActivation."""
    app = afteremailchange_app
    # login user
    authenticate(app)
    email = DEFAULT_USER["email"]
    user = db_session.query(User).filter(User.email == email).one()
    new_email = "email2@email.com"
    user.set_new_email(new_email)
    transaction.commit()
    user = db_session.merge(user)
    # Visiting the activation link completes the two-phase change.
    res = app.get("/email/change/" + user.email_change_key)
    assert res.status_code == 302
    assert res.location == EVENT_URL.format(AfterEmailChangeActivation)
    with pytest.raises(NoResultFound):
        # there is no user with old email
        db_session.query(User).filter(User.email == email).one()
    user = db_session.query(User).filter(User.email == new_email).one()
    assert not user.email_change_key
@pytest.fixture
def afterreset_config(evented_config):  # pylint:disable=redefined-outer-name
    """Add AfterReset, AfterResetRequest event subscriber that redirects to event page."""
    evented_config.add_subscriber(redirect_to_secret, AfterResetRequest)
    evented_config.add_subscriber(redirect_to_secret, AfterReset)
    evented_config.commit()
    yield evented_config
    # Teardown: remove both subscribers after the test finishes.
    unregister_subscriber(evented_config, AfterResetRequest)
    unregister_subscriber(evented_config, AfterReset)
afterreset_app = factories.pyramid_app("afterreset_config") # pylint:disable=invalid-name
def test_afterresetrequest(user, db_session, afterreset_app):  # pylint:disable=redefined-outer-name
    """Successful password reset request with redirect from AfterResetRequest."""
    user = db_session.merge(user)
    assert user.reset_key is None
    res = afterreset_app.get("/password/reset")
    res.form["email"] = user.email
    res = res.form.submit()
    assert res.location == EVENT_URL.format(AfterResetRequest)
    transaction.commit()
    # The request must have generated a reset key for the user.
    user = db_session.query(User).filter(User.email == user.email).one()
    assert user.reset_key is not None
def test_afterreset(user, db_session, afterreset_app):  # pylint:disable=redefined-outer-name
    """Actually change password with redirect from AfterReset."""
    user = db_session.merge(user)
    user.set_reset()
    transaction.commit()
    user = db_session.merge(user)
    res = afterreset_app.get("/password/reset/" + user.reset_key)
    res.form["password"] = "YouShallPass"
    res.form["confirm_password"] = "YouShallPass"
    res = res.form.submit()
    assert res.location == EVENT_URL.format(AfterReset)
    # Reset completed: key consumed and new password active.
    user = db_session.query(User).filter(User.email == user.email).one()
    assert user.reset_key is None
    assert user.check_password("YouShallPass") is True
@pytest.fixture
def beforereset_config(evented_config):  # pylint:disable=redefined-outer-name
    """Add BeforeReset event subscriber that raises AttributeError."""
    evented_config.add_subscriber(raise_attribute_error, BeforeReset)
    evented_config.commit()
    yield evented_config
    # Teardown: remove the subscriber after the test finishes.
    unregister_subscriber(evented_config, BeforeReset)
beforereset_app = factories.pyramid_app("beforereset_config") # pylint:disable=invalid-name
def test_beforereset(user, db_session, beforereset_app):  # pylint:disable=redefined-outer-name
    """Error thrown from BeforeReset event."""
    user = db_session.merge(user)
    user.set_reset()
    transaction.commit()
    user = db_session.merge(user)
    res = beforereset_app.get("/password/reset/" + user.reset_key)
    res.form["password"] = "YouShallPass"
    res.form["confirm_password"] = "YouShallPass"
    res = res.form.submit()
    # The subscriber's AttributeError message is rendered on the page.
    assert "Error! BeforeReset" in res.body.decode("unicode_escape")
@pytest.fixture
def aftersocialregister_config(evented_config):  # pylint:disable=redefined-outer-name
    """Add AfterSocialRegister event subscriber that redirects to event page."""
    evented_config.add_subscriber(redirect_to_secret, AfterSocialRegister)
    evented_config.commit()
    yield evented_config
    # Teardown: remove the subscriber after the test finishes.
    unregister_subscriber(evented_config, AfterSocialRegister)
aftersocialregister_app = factories.pyramid_app("aftersocialregister_config") # pylint:disable=invalid-name
@pytest.mark.usefixtures("aftersocialregister_app")
def test_aftersocialregister(aftersocialregister_config, db_session):  # pylint:disable=redefined-outer-name
    """Register fresh user and logs him in and check response if redirect from AfterSocialRegister."""
    # Velruse-style profile payload as delivered by the facebook provider.
    profile = {
        "accounts": [{"domain": "facebook.com", "userid": "2343"}],
        "displayName": "teddy",
        "verifiedEmail": "we@po.pl",
        "preferredUsername": "teddy",
        "emails": [{"value": "aasd@bwwqwe.pl"}],
        "name": "ted",
    }
    credentials = {"oauthAccessToken": "7897048593434"}
    provider_name = "facebook"
    provider_type = "facebook"
    # Drive the view directly with a DummyRequest instead of going
    # through the full webtest app (no real OAuth round-trip).
    request = testing.DummyRequest()
    request.user = None
    request.registry = aftersocialregister_config.registry
    request.remote_addr = "127.0.0.123"
    request.context = AuthenticationComplete(profile, credentials, provider_name, provider_type)
    request.login_perform = MagicMock(name="login_perform")
    request.login_perform.return_value = {"status": True}
    view = SocialLoginViews(request)
    out = view()
    assert out.location == EVENT_PATH.format(AfterSocialRegister)
    transaction.commit()
    # read first new account
    user = db_session.query(User).one()
    assert user.is_active
    assert user.provider_id("facebook") == profile["accounts"][0]["userid"]
@pytest.fixture
def aftersociallogin_config(evented_config): # pylint:disable=redefined-outer-name
    """Add AfterSocialLogIn event subscriber that redirects to event page."""
    evented_config.add_subscriber(redirect_to_secret, AfterSocialLogIn)
    evented_config.commit()
    yield evented_config
    # Teardown: detach the subscriber so later tests get a clean registry.
    unregister_subscriber(evented_config, AfterSocialLogIn)


# WebTest application wired to the fixture above.
aftersociallogin_app = factories.pyramid_app("aftersociallogin_config") # pylint:disable=invalid-name
@pytest.mark.usefixtures("aftersociallogin_app")
def test_aftersociallogin(aftersociallogin_config, db_session): # pylint:disable=redefined-outer-name
    """Register fresh user, log them in, and check the redirect from AfterSocialLogIn."""
    # Minimal velruse-style profile payload for a facebook account.
    profile = {
        "accounts": [{"domain": "facebook.com", "userid": "2343"}],
        "displayName": "teddy",
        "verifiedEmail": "we@po.pl",
        "preferredUsername": "teddy",
        "emails": [{"value": "aasd@bwwqwe.pl"}],
        "name": "ted",
    }
    credentials = {"oauthAccessToken": "7897048593434"}
    provider_name = "facebook"
    provider_type = "facebook"
    request = testing.DummyRequest()
    request.user = None
    request.registry = aftersociallogin_config.registry
    request.remote_addr = "127.0.0.123"
    request.context = AuthenticationComplete(profile, credentials, provider_name, provider_type)

    # Pass through the redirect location that the view hands to login_perform.
    def login_perform(*_, **kwargs):
        return HTTPFound(location=kwargs["location"])
    request.login_perform = login_perform
    view = SocialLoginViews(request)
    out = view()
    assert out.location == EVENT_PATH.format(AfterSocialLogIn)
    transaction.commit()
    # read first new account
    user = db_session.query(User).one()
    assert user.is_active
    assert user.provider_id("facebook") == profile["accounts"][0]["userid"]
@pytest.fixture
def alreadyconnected_config(evented_config): # pylint:disable=redefined-outer-name
    """Add SocialAccountAlreadyConnected event subscriber that redirects to event page."""
    evented_config.add_subscriber(redirect_to_secret, SocialAccountAlreadyConnected)
    evented_config.commit()
    yield evented_config
    # Teardown: detach the subscriber so later tests get a clean registry.
    unregister_subscriber(evented_config, SocialAccountAlreadyConnected)


# WebTest application wired to the fixture above.
alreadyconnected_app = factories.pyramid_app("alreadyconnected_config") # pylint:disable=invalid-name
@pytest.mark.usefixtures("alreadyconnected_app")
def test_alreadyconnected(alreadyconnected_config, facebook_user, db_session): # pylint:disable=redefined-outer-name
    """
    Try to connect facebook account to logged in user used by other users facebook account.

    Check redirect from SocialAccountAlreadyConnected.
    """
    # this user will be logged and trying to connect facebook's user account.
    fresh_user = User(
        email="new@user.pl",
        password="somepassword",
        address_ip="127.0.0.1",
    )
    db_session.add(fresh_user)
    transaction.commit()
    user = db_session.merge(facebook_user)
    fresh_user = db_session.merge(fresh_user)
    # mock request
    # The profile reuses facebook_user's provider id, so the connect attempt
    # collides with the existing link.
    profile = {
        "accounts": [
            {
                "domain": "facebook.com",
                "userid": user.provider_id("facebook"),
            }
        ],
        "displayName": "teddy",
        "preferredUsername": "teddy",
        "emails": [{"value": "aasd@basd.pl"}],
        "name": "ted",
    }
    credentials = {"oauthAccessToken": "7897048593434"}
    provider_name = "facebook"
    provider_type = "facebook"
    request = testing.DummyRequest()
    request.user = fresh_user
    request.registry = alreadyconnected_config.registry
    request.remote_addr = "127.0.0.123"
    request.context = AuthenticationComplete(profile, credentials, provider_name, provider_type)
    request._ = mock_translate
    request.login_perform = MagicMock(name="login_perform")
    request.login_perform.return_value = {"status": True}
    # call!
    view = SocialLoginViews(request)
    out = view()
    assert out.location == EVENT_PATH.format(SocialAccountAlreadyConnected)
    transaction.begin()
    # The colliding provider account must NOT have been linked to fresh_user.
    fresh_user = db_session.merge(fresh_user)
    assert fresh_user.provider_id("facebook") is None
@pytest.fixture
def afteractivate_config(evented_config): # pylint:disable=redefined-outer-name
    """Add AfterActivate event subscriber that redirects to event page."""
    evented_config.add_subscriber(redirect_to_secret, AfterActivate)
    evented_config.commit()
    yield evented_config
    # Teardown: detach the subscriber so later tests get a clean registry.
    unregister_subscriber(evented_config, AfterActivate)


# WebTest application wired to the fixture above.
afteractivate_app = factories.pyramid_app("afteractivate_config") # pylint:disable=invalid-name
def test_afteractivate(user, db_session, afteractivate_app): # pylint:disable=redefined-outer-name
    """Activate user and check redirect through AfterActivate."""
    user = db_session.merge(user)
    res = afteractivate_app.get("/register/activate/" + user.activate_key)
    assert res.location == EVENT_URL.format(AfterActivate)
    transaction.commit()
    # Activation consumes the key and flips the account to active.
    user = db_session.query(User).filter(User.email == user.email).one()
    assert not user.activate_key
    assert user.is_active
    assert user.activated_at
    # The freshly activated user can now log in.
    authenticate(afteractivate_app)
    assert is_user_logged(afteractivate_app) is True
@pytest.fixture
def beforeregister_config(evented_config): # pylint:disable=redefined-outer-name
    """Add BeforeRegister event subscriber that raises AttributeError."""
    evented_config.add_subscriber(raise_attribute_error, BeforeRegister)
    evented_config.commit()
    yield evented_config
    # Teardown: detach the subscriber so later tests get a clean registry.
    unregister_subscriber(evented_config, BeforeRegister)


# WebTest application wired to the fixture above.
beforeregister_app = factories.pyramid_app("beforeregister_config") # pylint:disable=invalid-name
def test_beforeregister(db_session, beforeregister_app): # pylint:disable=redefined-outer-name
    """Register user and check error catching from BeforeRegister event."""
    assert db_session.query(User).count() == 0
    # First GET fetches the form (and its CSRF token) for the XHR POST below.
    res = beforeregister_app.get("/register")
    res = beforeregister_app.post(
        "/register",
        {
            "csrf_token": res.form["csrf_token"].value,
            "email": "test@test.co.uk",
            "password": "passmeplease",
            "confirm_password": "passmeplease",
        },
        extra_environ={"REMOTE_ADDR": "0.0.0.0"},
        xhr=True,
    )
    # The AttributeError from the subscriber is reported as a JSON form error.
    assert res.json["errors"]["msg"] == "BeforeRegister"
@pytest.fixture
def afterregister_config(evented_config): # pylint:disable=redefined-outer-name
    """Add AfterRegister event subscriber that redirects to event page."""
    evented_config.add_subscriber(redirect_to_secret, AfterRegister)
    evented_config.commit()
    yield evented_config
    # Teardown: detach the subscriber so later tests get a clean registry.
    unregister_subscriber(evented_config, AfterRegister)


# WebTest application wired to the fixture above.
# NOTE(review): "afteraregister_app" looks misspelled, but the name is
# referenced by the test below - renaming would touch both; left as-is.
afteraregister_app = factories.pyramid_app("afterregister_config") # pylint:disable=invalid-name
def test_afterregister(db_session, afteraregister_app): # pylint:disable=redefined-outer-name
    """Register user and check the redirect issued by the AfterRegister subscriber."""
    assert db_session.query(User).count() == 0
    email = "test@test.co.uk"
    password = "passmeplease"
    res = afteraregister_app.get("/register")
    res.form["email"] = email
    res.form["password"] = password
    res.form["confirm_password"] = password
    res = res.form.submit(extra_environ={"REMOTE_ADDR": "0.0.0.0"})
    assert res.location == EVENT_URL.format(AfterRegister)
    transaction.commit()
    user = db_session.query(User).filter(User.email == email).one()
    # User should not have active account at this moment
    # NOTE(review): `is not None` is always true for a stored boolean flag;
    # this probably meant `assert not user.is_active` - TODO confirm.
    assert user.is_active is not None
    assert user.check_password(password)
| {
"repo_name": "fizyk/pyramid_fullauth",
"path": "tests/views/test_events.py",
"copies": "1",
"size": "22756",
"license": "mit",
"hash": -668314288264065400,
"line_mean": 34.7798742138,
"line_max": 117,
"alpha_frac": 0.7083846019,
"autogenerated": false,
"ratio": 3.7098141506358004,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49181987525358006,
"avg_score": null,
"num_lines": null
} |
__all__ = ['ewsum', 'ewsum_back', 'softmax_back', 'rectify_back']
import os
import math
import numpy
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
from pycuda.compiler import SourceModule
from .matrix import matrix_addition
from .utils import gpu_func
from .enums import MAX_BLOCK_SIZE, CUR_DIR, CACHE_DIR
# Compile the CUDA kernels once at import time; compiled modules are cached
# on disk in CACHE_DIR so subsequent imports skip nvcc.
mod = SourceModule(open(os.path.join(CUR_DIR, 'kernel/ewsum.cu')).read(), cache_dir=CACHE_DIR)
ewsum_kernel = mod.get_function('ewsum_kernel')
ewsum_sum_kernel = mod.get_function('ewsum_sum_kernel')
ewsum_back_kernel = mod.get_function('ewsum_back_kernel')
mod2 = SourceModule(open(os.path.join(CUR_DIR, 'kernel/softmax.cu')).read(), cache_dir=CACHE_DIR)
softmax_back_kernel = mod2.get_function('softmax_back_kernel')
mod3 = SourceModule(open(os.path.join(CUR_DIR, 'kernel/rectify.cu')).read(), cache_dir=CACHE_DIR)
rectify_back_kernel = mod3.get_function('rectify_back_kernel')
@gpu_func
def ewsum(d_a, d_w):
    """Weighted sum of groups along the first axis of ``d_a``.

    ``d_a``'s first axis is treated as ``num_w`` interleaved groups that are
    weighted by ``d_w`` and then summed, producing an output whose first axis
    is ``width // num_w``.

    YORI NOTES
    This method is faster than CPU if num_w is large, and non_width is small:
        When num_w is large, the for loop is small
        When non_width is large, there are more threads necessary
    """
    width = d_a.shape[0]
    total_dim = d_a.size
    num_w = d_w.shape[0]
    d_tmp_out = gpuarray.zeros_like(d_a)
    # One thread per element, capped at the device block size.
    thread_size = min(d_a.size, MAX_BLOCK_SIZE)
    block_size = max(int(math.ceil(d_a.size / float(thread_size))), 1)
    ewsum_kernel(d_a, d_w, d_tmp_out,
        numpy.int32(num_w), numpy.int32(width), numpy.int32(total_dim),
        block=(thread_size,1,1), grid=(block_size,1,1))
    # TODO: There HAS to be a better way to do this
    # Floor division: under Python 3, `/` yields a float and gpuarray.zeros
    # would be handed an invalid (float) shape.
    x = width // num_w
    d_out = gpuarray.zeros((x,) + d_a.shape[1:], numpy.float32)
    thread_size = min(d_out.size, MAX_BLOCK_SIZE)
    block_size = max(int(math.ceil(d_out.size / float(thread_size))), 1)
    ewsum_sum_kernel(d_tmp_out, d_out,
        numpy.int32(num_w), numpy.int32(width), numpy.int32(total_dim),
        block=(thread_size,1,1), grid=(block_size,1,1))
    return d_out
@gpu_func
def ewsum_back(d_error, d_w):
    """Backward pass of ewsum: expand ``d_error`` across each weight group.

    The output's first axis is num_w times the error's first axis.
    """
    d_out = gpuarray.zeros((d_w.shape[0]*d_error.shape[0],) + d_error.shape[1:], dtype=d_error.dtype)
    err_width = d_error.shape[0]
    width = d_out.shape[0]
    total_dim = d_out.size
    num_w = d_w.shape[0]
    # One thread per output element, capped at the device block size.
    thread_size = min(d_out.size, MAX_BLOCK_SIZE)
    block_size = max(int(math.ceil(d_out.size / float(thread_size))), 1)
    ewsum_back_kernel(d_error, d_w, d_out,
        numpy.int32(num_w), numpy.int32(err_width), numpy.int32(width), numpy.int32(total_dim),
        block=(thread_size,1,1), grid=(block_size,1,1))
    return d_out
@gpu_func
def softmax_back(d_a, d_error, s):
    """Backward pass of softmax.

    ``s`` is a scalar passed to the kernel as float32 - presumably the
    summed error term of the softmax Jacobian; TODO confirm against
    kernel/softmax.cu.
    """
    d_out = gpuarray.zeros_like(d_a)
    thread_size = min(d_out.size, MAX_BLOCK_SIZE)
    block_size = max(int(math.ceil(d_out.size / float(thread_size))), 1)
    softmax_back_kernel(d_a, d_error, d_out, numpy.float32(s), numpy.int32(d_out.size),
        block=(thread_size,1,1), grid=(block_size,1,1))
    return d_out
@gpu_func
def rectify_back(d_a, d_error, inplace=False):
    """Backward pass of the rectifier; with ``inplace=True`` the result
    overwrites ``d_a`` instead of allocating a new array."""
    if inplace:
        d_out = d_a
    else:
        d_out = gpuarray.zeros_like(d_a)
    thread_size = min(d_out.size, MAX_BLOCK_SIZE)
    block_size = max(int(math.ceil(d_out.size / float(thread_size))), 1)
    rectify_back_kernel(d_a, d_error, d_out, numpy.int32(d_out.size),
        block=(thread_size,1,1), grid=(block_size,1,1))
    return d_out
| {
"repo_name": "Captricity/sciguppy",
"path": "sciguppy/misc.py",
"copies": "1",
"size": "3415",
"license": "mit",
"hash": 9149195806412461000,
"line_mean": 36.9444444444,
"line_max": 101,
"alpha_frac": 0.6483162518,
"autogenerated": false,
"ratio": 2.697472353870458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3845788605670458,
"avg_score": null,
"num_lines": null
} |
__all__ = ['example']
# Don't look below, you wont understand this Python code :) I don't.
from js2py.pyjs import *
# setting scope
# NOTE: auto-generated by Js2Py; prefer regenerating over hand-editing.
var = Scope( JS_BUILTINS )
set_global_object(var)
# Code follows:
var.registers([u'sayHello', u'someVariable', u'Rectangle', u'$nonPyName'])


# Hoisted JS function: sayHello(name) -> console.log('Hello, ' + name + '!')
@Js
def PyJsHoisted_sayHello_(name, this, arguments, var=var):
    var = Scope({u'this':this, u'name':name, u'arguments':arguments}, var)
    var.registers([u'name'])
    var.get(u'console').callprop(u'log', ((Js(u'Hello, ')+var.get(u'name'))+Js(u'!')))
PyJsHoisted_sayHello_.func_name = u'sayHello'
var.put(u'sayHello', PyJsHoisted_sayHello_)
# Hoisted JS constructor: function Rectangle(w, h) { this.w = w; this.h = h; }
@Js
def PyJsHoisted_Rectangle_(w, h, this, arguments, var=var):
    var = Scope({u'this':this, u'h':h, u'arguments':arguments, u'w':w}, var)
    var.registers([u'h', u'w'])
    var.get(u"this").put(u'w', var.get(u'w'))
    var.get(u"this").put(u'h', var.get(u'h'))
PyJsHoisted_Rectangle_.func_name = u'Rectangle'
var.put(u'Rectangle', PyJsHoisted_Rectangle_)


# Hoisted JS function whose name ($nonPyName) is not a valid Python identifier.
@Js
def PyJsHoistedNonPyName(this, arguments, var=var):
    var = Scope({u'this':this, u'arguments':arguments}, var)
    var.registers([])
    var.get(u'console').callprop(u'log', Js(u'Non-Py names like $ can be used too!'))
PyJsHoistedNonPyName.func_name = u'$nonPyName'
var.put(u'$nonPyName', PyJsHoistedNonPyName)
# JS object literal: someVariable = {a: 1, b: 2}
PyJs_Object_0_ = Js({u'a':Js(1.0),u'b':Js(2.0)})
var.put(u'someVariable', PyJs_Object_0_)
pass
pass
pass
# Rectangle.prototype = { getArea: function () { return this.w * this.h; } }
@Js
def PyJs_anonymous_2_(this, arguments, var=var):
    var = Scope({u'this':this, u'arguments':arguments}, var)
    var.registers([])
    return (var.get(u"this").get(u'w')*var.get(u"this").get(u'h'))
PyJs_anonymous_2_._set_name(u'anonymous')
PyJs_Object_1_ = Js({u'getArea':PyJs_anonymous_2_})
var.get(u'Rectangle').put(u'prototype', PyJs_Object_1_)
# x = new Rectangle(10, 10)
var.put(u'x', var.get(u'Rectangle').create(Js(10.0), Js(10.0)))
# Add lib to the module scope
example = var.to_python() | {
"repo_name": "PiotrDabkowski/Js2Py",
"path": "examples/example.py",
"copies": "1",
"size": "1875",
"license": "mit",
"hash": -2208926157842314200,
"line_mean": 35.7843137255,
"line_max": 86,
"alpha_frac": 0.6688,
"autogenerated": false,
"ratio": 2.5614754098360657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3730275409836066,
"avg_score": null,
"num_lines": null
} |
"""All exceptions are available directly from `mal_scraper.x`"""
import logging
from enum import Enum, unique
logger = logging.getLogger(__name__)
class MalScraperError(Exception):
    """Base class for every exception raised by this library."""
class RequestError(MalScraperError):
    """Raised when the HTTP request itself fails.

    Args:
        code (RequestError.Code): Error code.
        message (str): Human readable string describing the problem.

    Attributes:
        code (.RequestError.Code): Error code.
        message (str): Human readable string describing the problem.
    """

    @unique
    class Code(Enum):
        does_not_exist = 'NOEXIST'  # Anime or User does not exist
        forbidden = 'FORBIDDEN'  # Access is forbidden by the user

    def __init__(self, code, message):
        # Reject anything that is not a declared Code member.
        valid_codes = set(self.Code.__members__.values())
        if code not in valid_codes:
            raise RuntimeError('Invalid RequestError %s' % code)
        super().__init__(message)
        self.code = code
        self.message = message
class ParseError(MalScraperError):
    """Raised when a component of the HTML could not be parsed/processed.

    The tag names the "component" under consideration, to help determine
    where the error comes from.

    Args:
        message (str): Human readable string describing the problem.
        tag (str, optional): Which part of the page does this pertain to.

    Attributes:
        message (str): Human readable string describing the problem.
        tag (str): Which part of the page does this pertain to.
    """

    def __init__(self, message, tag=None):
        super().__init__(message)
        self.message = message
        # Normalise a missing tag to the empty string.
        self.tag = tag or ''

    def specify_tag(self, tag):
        """Attach the tag after construction."""
        self.tag = tag
# --- Internal Exceptions ---
class MissingTagError(ParseError):
    """Raised when the tag is absent from the soup/web-page.

    Internal - callers should catch :class:`.ParseError` instead.
    """

    def __init__(self, tag=None):
        super().__init__('Missing from soup/web-page', tag)
| {
"repo_name": "QasimK/mal-scraper",
"path": "src/mal_scraper/exceptions.py",
"copies": "1",
"size": "2043",
"license": "mit",
"hash": 1393112096072415000,
"line_mean": 26.6081081081,
"line_max": 74,
"alpha_frac": 0.6392559961,
"autogenerated": false,
"ratio": 4.283018867924528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5422274864024528,
"avg_score": null,
"num_lines": null
} |
"""All exceptions for Simple Salesforce"""
class SalesforceError(Exception):
    """Base class for all Salesforce API exceptions."""

    # Template rendered by __str__; subclasses override it.
    message = u'Unknown error occurred for {url}. Response content: {content}'

    def __init__(self, url, status, resource_name, content):
        """Record the details of the failing request.

        Args:
            url: Salesforce URL that was called.
            status: Status code of the error response.
            resource_name: Name of the Salesforce resource being queried.
            content: Content of the response.
        """
        # TODO exceptions don't seem to be using parent constructors at all.
        # this should be fixed.
        # pylint: disable=super-init-not-called
        self.url = url
        self.status = status
        self.resource_name = resource_name
        self.content = content

    def __str__(self):
        return self.message.format(url=self.url, content=self.content)

    def __unicode__(self):
        # Python 2 compatibility shim; delegates to __str__.
        return str(self)
class SalesforceMoreThanOneRecord(SalesforceError):
    """Raised for error code 300.

    Returned when an external ID exists in more than one record; the
    response body contains the list of matching records.
    """

    message = u"More than one record for {url}. Response content: {content}"
class SalesforceMalformedRequest(SalesforceError):
    """Raised for error code 400.

    The request couldn't be understood, usually because the JSON or XML
    body contains an error.
    """

    message = u"Malformed request {url}. Response content: {content}"
class SalesforceExpiredSession(SalesforceError):
    """Raised for error code 401.

    The session ID or OAuth token used has expired or is invalid; the
    response body contains the message and errorCode.
    """

    message = u"Expired session for {url}. Response content: {content}"
class SalesforceRefusedRequest(SalesforceError):
    """Raised for error code 403.

    The request has been refused; verify that the logged-in user has the
    appropriate permissions.
    """

    message = u"Request refused for {url}. Response content: {content}"
class SalesforceResourceNotFound(SalesforceError):
    """Raised for error code 404.

    The requested resource couldn't be found; check the URI for errors and
    verify that there are no sharing issues.
    """

    message = u'Resource {name} Not Found. Response content: {content}'

    def __str__(self):
        # Unlike the base class, this message is keyed on the resource name.
        return self.message.format(name=self.resource_name,
                                   content=self.content)
class SalesforceAuthenticationFailed(SalesforceError):
    """Raised to indicate that authentication with Salesforce failed."""

    def __init__(self, code, message):
        # TODO exceptions don't seem to be using parent constructors at all.
        # this should be fixed.
        # pylint: disable=super-init-not-called
        self.code = code
        self.message = message

    def __str__(self):
        return u'{code}: {message}'.format(code=self.code,
                                           message=self.message)
class SalesforceGeneralError(SalesforceError):
    """Catch-all for Salesforce errors without a dedicated class."""

    message = u'Error Code {status}. Response content: {content}'

    def __str__(self):
        return self.message.format(status=self.status, content=self.content)
| {
"repo_name": "kawamon/hue",
"path": "desktop/core/ext-py/simple-salesforce-0.74.2/simple_salesforce/exceptions.py",
"copies": "3",
"size": "3406",
"license": "apache-2.0",
"hash": 4161865901540369400,
"line_mean": 31.4380952381,
"line_max": 78,
"alpha_frac": 0.6559013506,
"autogenerated": false,
"ratio": 4.394838709677419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003757944183476098,
"num_lines": 105
} |
"""All exceptions for speedcurve library."""
class SpeedCurveError(Exception):
    """The base exception class.

    Attributes:
        response: The HTTP response object that triggered the error.
        code (int): HTTP status code of the response.
        errors (list): Parsed ``errors`` payload when the body is JSON.
        msg (str): Raw body fallback when the payload is not JSON.
    """

    def __init__(self, response):
        super(SpeedCurveError, self).__init__(response)
        self.response = response
        self.code = response.status_code
        self.errors = []
        # Always define msg so callers never hit AttributeError
        # (previously it only existed on the non-JSON path).
        self.msg = ''
        try:
            error = response.json()
            self.errors = error.get('errors')
        except (ValueError, AttributeError):
            # ValueError: body is not valid JSON (requests raises it from
            # .json()); AttributeError: JSON body is not a dict.
            # Previously a bare except: - narrowed so real bugs surface.
            self.msg = response.content or '[No Message]'
class AuthenticationFailed(SpeedCurveError):
    """Raised for HTTP 401 responses (see error_classes)."""
    pass
class BadRequest(SpeedCurveError):
    """Raised for HTTP 400 responses (see error_classes)."""
    pass
class InternalServerError(SpeedCurveError):
    """Raised for HTTP 500 responses (see error_classes)."""
    pass
class ForbiddenError(SpeedCurveError):
    """Raised for HTTP 403 responses (see error_classes)."""
    pass
class NotFoundError(SpeedCurveError):
    """Raised for HTTP 404 responses (see error_classes)."""
    pass
class ServiceUnavailable(SpeedCurveError):
    """Raised for HTTP 503 responses (see error_classes)."""
    pass
# Maps HTTP status codes to the exception class raised by get_error_for().
error_classes = {
    400: BadRequest,
    401: AuthenticationFailed,
    403: ForbiddenError,
    404: NotFoundError,
    500: InternalServerError,
    503: ServiceUnavailable
}
def get_error_for(response):
    """Return an initialized exception matching *response*'s status code.

    Falls back to the generic SpeedCurveError for status codes without a
    dedicated class (previously a miss returned None and calling it raised
    TypeError).
    """
    status_code = response.status_code
    error_class = error_classes.get(status_code, SpeedCurveError)
    return error_class(response)
| {
"repo_name": "itsmemattchung/speedcurve.py",
"path": "speedcurve/exceptions.py",
"copies": "1",
"size": "1160",
"license": "mit",
"hash": -4325208963408728000,
"line_mean": 19,
"line_max": 57,
"alpha_frac": 0.6672413793,
"autogenerated": false,
"ratio": 4.218181818181818,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010775862068965517,
"num_lines": 58
} |
"""All exceptions used in the Cookiecutter code base are defined here."""
class CookiecutterException(Exception):
    """Root of the Cookiecutter exception hierarchy.

    All Cookiecutter-specific exceptions should subclass this class.
    """
class NonTemplatedInputDirException(CookiecutterException):
    """Raised when a project's input dir is not templated.

    The input directory's name must contain a string that renders to
    something else, so that input_dir != output_dir.
    """
class UnknownTemplateDirException(CookiecutterException):
    """Raised when the project template directory is ambiguous.

    Occurs when Cookiecutter cannot determine which directory is the
    project template, e.g. more than one dir appears to be a template dir.
    """
# unused locally
class MissingProjectDir(CookiecutterException):
    """Raised when the generated project directory is missing.

    Occurs during cleanup, when remove_repo() can't find a generated
    project directory inside of a repo.
    """
# unused locally
class ConfigDoesNotExistException(CookiecutterException):
    """Raised when no config file exists at the given path.

    Occurs when get_config() is passed a path to a config file, but no
    file is found at that path.
    """
class InvalidConfiguration(CookiecutterException):
    """Raised for an invalid global configuration file.

    Occurs when the file is not valid YAML or is badly constructed.
    """
class UnknownRepoType(CookiecutterException):
    """Raised when a repo's type cannot be determined."""
class VCSNotInstalled(CookiecutterException):
    """Raised when the version control system (git or hg) is not installed."""
class ContextDecodingException(CookiecutterException):
    """Raised when a project's JSON context file cannot be decoded."""
class OutputDirExistsException(CookiecutterException):
    """Raised when the project's output directory already exists."""
class InvalidModeException(CookiecutterException):
    """Raised for incompatible modes.

    Occurs when cookiecutter is called with both `no_input==True` and
    `replay==True` at the same time.
    """
class FailedHookException(CookiecutterException):
    """Raised when a hook script fails."""
class UndefinedVariableInTemplate(CookiecutterException):
    """Raised when a template uses a variable not defined in the context.

    Attributes:
        message (str): Human readable summary.
        error: The underlying template error (must expose ``.message``).
        context: The rendering context in use.
    """

    def __init__(self, message, error, context):
        """Store the failure details for later reporting."""
        self.message = message
        self.error = error
        self.context = context

    def __str__(self):
        """Text representation of UndefinedVariableInTemplate."""
        return "{}. Error message: {}. Context: {}".format(
            self.message, self.error.message, self.context)
class UnknownExtension(CookiecutterException):
    """Raised when an environment is unable to import a required extension."""
class RepositoryNotFound(CookiecutterException):
    """Raised when the specified cookiecutter repository doesn't exist."""
class RepositoryCloneFailed(CookiecutterException):
    """Raised when a cookiecutter template can't be cloned."""
class InvalidZipRepository(CookiecutterException):
    """Raised when the specified cookiecutter repository isn't a valid
    Zip archive."""
| {
"repo_name": "audreyr/cookiecutter",
"path": "cookiecutter/exceptions.py",
"copies": "2",
"size": "3902",
"license": "bsd-3-clause",
"hash": 7685118689428905000,
"line_mean": 22.9386503067,
"line_max": 76,
"alpha_frac": 0.696053306,
"autogenerated": false,
"ratio": 4.449258836944128,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 163
} |
__all__ = ['EXECUTABLE', 'DIALOUGE_PIECES', 'ANNABEL_LEE']
import os
import sys
import shutil
# First two stanzas of Poe's "Annabel Lee"; DIALOUGE_PIECES below slices
# its lines, so the literal's exact bytes matter.
ANNABEL_LEE = '''It was many and many a year ago,
In a kingdom by the sea,
That a maiden there lived whom you may know
By the name of ANNABEL LEE;--
And this maiden she lived with no other thought
Than to love and be loved by me.
She was a child and I was a child,
In this kingdom by the sea,
But we loved with a love that was more than love--
I and my Annabel Lee--
With a love that the winged seraphs of heaven
Coveted her and me.'''
# Absolute path to the executable.py that ships alongside this module.
_executable_dirpath = os.path.dirname(os.path.abspath(__file__))
_executable_filepath = os.path.join(_executable_dirpath, 'executable.py')
# running in Python 2 (original comments had the 2/3 labels swapped):
# shutil.which() is unavailable, so fall back to plain `python`.
if sys.version_info[0] < 3:
    PYTHON_COMMAND = 'python'
# running in Python 3: prefer the most specific python3 binary on PATH.
else:
    # there is `python3` command in bash
    if shutil.which('python3') is not None:
        PYTHON_COMMAND = 'python3'
    elif shutil.which('python3.5') is not None:
        PYTHON_COMMAND = 'python3.5'
    elif shutil.which('python3.4') is not None:
        PYTHON_COMMAND = 'python3.4'
    else:
        PYTHON_COMMAND = 'python'
EXECUTABLE = PYTHON_COMMAND + ' ' + _executable_filepath
# Scripted dialogue fixtures built from ANNABEL_LEE.
# NOTE(review): "DIALOUGE" is misspelled, but the name is exported via
# __all__ and so is part of the public API - left as-is.
DIALOUGE_PIECES = {
    'output_all': [{
        'type': 'output',
        'name': 'poem line',
        'value': line
    } for line in ANNABEL_LEE.splitlines()],
    'output4': {
        'type': 'output',
        'name': 'poem line',
        'value': ANNABEL_LEE.splitlines()[4]
    },
    'output4_upper': {
        'type': 'output',
        'name': 'poem line',
        'value': ANNABEL_LEE.splitlines()[4].upper()
    },
    'output4_prefix': {
        'type': 'output',
        'name': 'poem line prefix',
        'value': ANNABEL_LEE.splitlines()[4][0]
    },
    'output4_middle': {
        'type': 'output',
        'name': 'poem line',
        'value': ANNABEL_LEE.splitlines()[4][1:-1]
    },
    'output4_suffix': {
        'type': 'output',
        'name': 'poem line suffix',
        'value': ANNABEL_LEE.splitlines()[4][-1]
    },
    'input_comment': {
        'type': 'input',
        'name': 'comment',
        'value': 'A really nice poem!'
    },
    'output_comment': {
        'type': 'output',
        'name': 'comment',
        'value': 'A really nice poem!'
    },
    'output_poet':
    {
        'type': 'output',
        'name': 'poet',
        'value': 'Edgar Allan Poe'
    }
}
| {
"repo_name": "shlomihod/scenario",
"path": "scenario/tests/consts.py",
"copies": "1",
"size": "2390",
"license": "mit",
"hash": 3194287369291795000,
"line_mean": 23.8958333333,
"line_max": 73,
"alpha_frac": 0.5715481172,
"autogenerated": false,
"ratio": 3.1488801054018447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9220428222601844,
"avg_score": 0,
"num_lines": 96
} |
__all__ = ["ExecuteAlertCommand", \
"ExecuteAcceptAlert"]
from browser.web_view_impl import WebViewImpl
from browser.status import *
from base.log import VLOG
def ExecuteAlertCommand(alert_command, session, params, value):
    """Prepare the session's web view, then run an alert-handling command.

    Bails out with the failing Status at the first error; tolerates
    kUnexpectedAlertOpen from the navigation wait (a dialog being open is
    exactly the state alert commands operate on).
    """
    web_view = WebViewImpl("fake_id", 0, None)
    # update web_view
    status = session.GetTargetWindow(web_view)
    if status.IsError():
        return status
    status = web_view.ConnectIfNecessary()
    if status.IsError():
        return status
    status = web_view.HandleReceivedEvents()
    if status.IsError():
        return status
    status = web_view.WaitForPendingNavigations(session.GetCurrentFrameId(), session.page_load_timeout, True)
    if status.IsError() and status.Code() != kUnexpectedAlertOpen:
        return status
    alert_command.Update([session, web_view, params, value])
    return alert_command.Run(session, web_view, params, value)
def ExecuteAcceptAlert(session, web_view, params, value):
    """Accept the open dialog, supplying any queued prompt text."""
    status = web_view.GetJavaScriptDialogManager().HandleDialog(True, session.prompt_text)
    # Prompt text is single-use; clear it once the dialog has been handled.
    session.prompt_text = ""
    return status
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "command/alert_commands.py",
"copies": "1",
"size": "1060",
"license": "bsd-3-clause",
"hash": -1648255248518075100,
"line_mean": 30.1764705882,
"line_max": 107,
"alpha_frac": 0.7367924528,
"autogenerated": false,
"ratio": 3.6805555555555554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.969346210861805,
"avg_score": 0.0447771799475013,
"num_lines": 34
} |
__all__ = ["ExecuteAsync"]
import re
import subprocess
import threading
from status import *
""" since python' subprocess module does not support manual
timeout setting. This class binds the wanted commands and post
the task to another thread which can be under control in timeout
setting calling thread.join(timeout) """
class ExecuteAsync(object):
    """Run a shell command on a worker thread with a hard timeout.

    Python 2's subprocess module has no timeout support, so the command is
    bound to a worker thread which the constructor joins with a timeout.
    The command runs immediately on construction; call GetResponse() for
    the (Status, stdout) result.
    """

    def __init__(self, cmd="", timeout=3):
        self.cmd = cmd
        self.timeout = timeout
        self.process = None
        self.stdout = ""
        self.stderr = ""
        self.is_timeout = False
        # Execute immediately; results are collected via GetResponse().
        self.Run()

    def Task(self):
        """Worker-thread body: spawn the command and capture its output."""
        self.process = subprocess.Popen(
            self.cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (self.stdout, self.stderr) = self.process.communicate()
        return

    def Run(self):
        """Start the worker and wait up to self.timeout seconds for it."""
        thread = threading.Thread(target=self.Task)
        thread.start()
        thread.join(self.timeout)
        if thread.is_alive():
            self.is_timeout = True
            # Guard the race where the timeout fires before the worker has
            # assigned self.process (previously an AttributeError on None).
            if self.process is not None:
                self.process.terminate()
            thread.join()
        return

    def GetResponse(self):
        """Return (Status, stdout) describing the command outcome."""
        # handle timeout error
        if self.is_timeout:
            msg = "%s command timed out after %s seconds" \
                % (self.cmd, str(self.timeout))
            return (Status(kTimeout, msg), "")
        # handle command execute shell-like error,
        # e.g. an unrecognized or misspelled command
        if self.stderr:
            return (Status(kUnknownError, "Failed to run %s command" % self.cmd), "")
        # handle adb execute error reported on stdout
        matchObj = re.search(r'error', self.stdout, re.I)
        if matchObj:
            return (Status(kUnknownError,
                "Failed to run %s command, detailed message: %s"
                % (self.cmd, self.stdout)), "")
        return (Status(kOk), self.stdout)
| {
"repo_name": "weibohit/tools",
"path": "utility/execute_async.py",
"copies": "1",
"size": "1723",
"license": "mit",
"hash": -6586615137757989000,
"line_mean": 28.7068965517,
"line_max": 79,
"alpha_frac": 0.6546720836,
"autogenerated": false,
"ratio": 3.7868131868131867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9874918289584023,
"avg_score": 0.013313396165832748,
"num_lines": 58
} |
__all__ = ["ExecuteElementCommand", \
"ExecuteFindChildElement", \
"ExecuteFindChildElements", \
"ExecuteGetElementText", \
"ExecuteGetElementTagName", \
"ExecuteIsElementSelected", \
"ExecuteIsElementEnabled", \
"ExecuteIsElementDisplayed", \
"ExecuteGetElementLocation", \
"ExecuteGetElementSize", \
"ExecuteClearElement", \
"ExecuteGetElementAttribute", \
"ExecuteGetElementValue", \
"ExecuteGetElementValueOfCSSProperty", \
"ExecuteElementEquals", \
"ExecuteSubmitElement", \
"ExecuteGetElementLocationOnceScrolledIntoView",
"ExecuteClickElement", \
"ExecuteSendKeysToElement"]
import time
from element_util import *
from browser.js import *
from browser.status import *
from browser.web_view_impl import WebViewImpl
from browser.ui_events import *
from third_party.atoms import *
from misc.basic_types import WebPoint
from misc.util import SendKeysOnWindow
def ExecuteElementCommand(command, element_id, session, params, value):
  """Run an element-scoped |command| against the session's target window.

  Ensures the target web view is connected and has no open JS dialog,
  waits out pending navigations before and after running |command|, and
  retries once from the top frame if the current subframe vanished.
  """
  web_view = WebViewImpl("fake_id", 0, None)
  # update web_view
  status = session.GetTargetWindow(web_view)
  if status.IsError():
    return status
  status = web_view.ConnectIfNecessary()
  if status.IsError():
    return status
  status = web_view.HandleReceivedEvents()
  if status.IsError():
    return status
  if web_view.GetJavaScriptDialogManager().IsDialogOpen():
    return Status(kUnexpectedAlertOpen)
  nav_status = Status(kOk)
  # At most two attempts: the second happens only when the first failed
  # with kNoSuchExecutionContext (the subframe no longer exists).
  for attempt in range(2):
    if attempt == 1:
      if status.Code() == kNoSuchExecutionContext:
        # Switch to main frame and retry command if subframe no longer exists.
        session.SwitchToTopFrame()
      else:
        break
    nav_status = web_view.WaitForPendingNavigations(session.GetCurrentFrameId(), session.page_load_timeout, True)
    if nav_status.IsError():
      return nav_status
    command.Update([session, web_view, element_id, params, value])
    status = command.Run()
  nav_status = web_view.WaitForPendingNavigations(session.GetCurrentFrameId(), session.page_load_timeout, True)
  # Report a post-command navigation failure unless it is just an open alert.
  if status.IsOk() and nav_status.IsError() and nav_status.Code() != kUnexpectedAlertOpen:
    return nav_status
  if status.Code() == kUnexpectedAlertOpen:
    return Status(kOk)
  return status
def ExecuteFindChildElement(session, web_view, element_id, params, value):
  """Find the first child of |element_id| matching the locator in |params|."""
  return FindElement(50, True, element_id, session, web_view, params, value)
def ExecuteFindChildElements(session, web_view, element_id, params, value):
  """Find every child of |element_id| matching the locator in |params|."""
  return FindElement(50, False, element_id, session, web_view, params, value)
def ExecuteGetElementText(session, web_view, element_id, params, value):
  """Store the element's visible text into |value| via the GET_TEXT atom."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), GET_TEXT, target, value)
def ExecuteGetElementTagName(session, web_view, element_id, params, value):
  """Store the element's lower-cased tag name into |value|."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), "function(elem) { return elem.tagName.toLowerCase(); }", target, value)
def ExecuteIsElementSelected(session, web_view, element_id, params, value):
  """Store whether the element is selected into |value| (IS_SELECTED atom)."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), IS_SELECTED, target, value)
def ExecuteIsElementEnabled(session, web_view, element_id, params, value):
  """Store whether the element is enabled into |value| (IS_ENABLED atom)."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), IS_ENABLED, target, value)
def ExecuteIsElementDisplayed(session, web_view, element_id, params, value):
  """Store whether the element is displayed into |value| (IS_DISPLAYED atom)."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), IS_DISPLAYED, target, value)
def ExecuteGetElementLocation(session, web_view, element_id, params, value):
  """Store the element's location into |value| (GET_LOCATION atom)."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), GET_LOCATION, target, value)
def ExecuteGetElementSize(session, web_view, element_id, params, value):
  """Store the element's size into |value| (GET_SIZE atom)."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), GET_SIZE, target, value)
def ExecuteClearElement(session, web_view, element_id, params, value):
  """Clear the element via the CLEAR atom; the script result is discarded."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), CLEAR, target, {})
def ExecuteGetElementAttribute(session, web_view, element_id, params, value):
  """Fetch the attribute named in params['extra_url_params'] into |value|."""
  # The attribute name arrives via the request URL rather than the body,
  # so it is stashed under the deliberately non-standard key
  # 'extra_url_params' to avoid clashing with spec-defined labels.
  attr_name = params.get("extra_url_params")
  if type(attr_name) != str:
    return Status(kUnknownError, "missing 'name'")
  return GetElementAttribute(session, web_view, element_id, attr_name, value)
def ExecuteGetElementValue(session, web_view, element_id, params, value):
  """Store the element's 'value' property into |value|."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), \
      "function(elem) { return elem['value'] }", target, value)
def ExecuteGetElementValueOfCSSProperty(session, web_view, element_id, params, value):
  """Resolve the computed CSS property named in params['extra_url_params']."""
  # The property name arrives via the request URL, hence the
  # non-standard 'extra_url_params' key (see ExecuteGetElementAttribute).
  prop = params.get("extra_url_params")
  if type(prop) != str:
    return Status(kUnknownError, "missing 'propertyName'")
  (status, prop_value) = GetElementEffectiveStyle(session.GetCurrentFrameId(), web_view, element_id, prop)
  if status.IsError():
    return status
  value.clear()
  value.update({"value": prop_value})
  return Status(kOk)
def ExecuteElementEquals(session, web_view, element_id, params, value):
  """Report whether |element_id| equals the id in params['extra_url_params']."""
  # The other element's id arrives via the request URL, hence the
  # non-standard 'extra_url_params' key.
  other = params.get("extra_url_params")
  if type(other) != str:
    return Status(kUnknownError, "'other' must be a string")
  value.clear()
  value.update({"value": (element_id == other)})
  return Status(kOk)
def ExecuteSubmitElement(session, web_view, element_id, params, value):
  """Submit the form owning the element via the SUBMIT atom."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), SUBMIT, target, value)
def ExecuteGetElementLocationOnceScrolledIntoView(session, web_view, element_id, params, value):
  """Scroll the element into view, then store its on-screen point in |value|."""
  point = WebPoint()
  status = ScrollElementIntoView(session, web_view, element_id, point)
  if status.IsError():
    return status
  value.clear()
  value.update({"value": CreateValueFrom(point)})
  return Status(kOk)
def ExecuteTouchSingleTapAtom(session, web_view, element_id, params, value):
  """Single-tap the element using the TOUCH_SINGLE_TAP javascript atom."""
  target = [CreateElement(element_id)]
  return web_view.CallFunction(session.GetCurrentFrameId(), TOUCH_SINGLE_TAP, target, value)
def ExecuteClickElement(session, web_view, element_id, params, value):
  """Click the element.

  <option> elements are toggled/selected through the option helpers;
  everything else receives a synthesized move/press/release mouse
  sequence at the element's clickable point.
  """
  (status, tag_name) = GetElementTagName(session, web_view, element_id)
  if status.IsError():
    return status
  if tag_name == "option":
    (status, is_toggleable) = IsOptionElementTogglable(session, web_view, element_id)
    if status.IsError():
      return status
    if is_toggleable:
      return ToggleOptionElement(session, web_view, element_id);
    else:
      return SetOptionElementSelected(session, web_view, element_id, True)
  else:
    location = WebPoint()
    status = GetElementClickableLocation(session, web_view, element_id, location)
    if status.IsError():
      return status
    # Synthesize a full click: move, press, release (order matters).
    events = []
    events.append(MouseEvent(kMovedMouseEventType, kNoneMouseButton, \
        location.x, location.y, session.sticky_modifiers, 0))
    events.append(MouseEvent(kPressedMouseEventType, kLeftMouseButton, \
        location.x, location.y, session.sticky_modifiers, 1))
    events.append(MouseEvent(kReleasedMouseEventType, kLeftMouseButton, \
        location.x, location.y, session.sticky_modifiers, 1))
    status = web_view.DispatchMouseEvents(events, session.GetCurrentFrameId())
    if status.IsOk():
      # Remember where the pointer ended up for later relative moves.
      session.mouse_position.Update(location)
    return status
####### remain test api #####################
def SendKeysToElement(session, web_view, element_id, key_list):
  """Type |key_list| into the element.

  Polls (every 0.1s, up to session.implicit_wait) until the element is
  displayed or already focused, verifies it is enabled, focuses it if
  needed, then forwards the keystrokes to the window.
  """
  is_displayed = False
  is_focused = False
  start_time = time.time()
  while True:
    (status, is_displayed) = IsElementDisplayed(session, web_view, element_id, True)
    if status.IsError():
      return status
    if is_displayed:
      break
    (status, is_focused) = IsElementFocused(session, web_view, element_id)
    if status.IsError():
      return status
    if is_focused:
      break
    if ((time.time() - start_time) >= session.implicit_wait):
      return Status(kElementNotVisible);
    time.sleep(0.1)
  is_enabled = False
  (status, is_enabled) = IsElementEnabled(session, web_view, element_id)
  if status.IsError():
    return status
  if not is_enabled:
    return Status(kInvalidElementState)
  if not is_focused:
    # Focus the element before typing so the keys land in it.
    args = []
    args.append(CreateElement(element_id))
    result = {}
    status = web_view.CallFunction(session.GetCurrentFrameId(), kFocusScript, args, result)
    if status.IsError():
      return status
  # Sticky modifiers (shift/ctrl/...) persist across calls via the session.
  (status, session.sticky_modifiers) = SendKeysOnWindow(web_view, key_list, True, session.sticky_modifiers)
  return status
def ExecuteHoverOverElement(session, web_view, element_id, params, value):
  """Move the mouse pointer over the element's clickable point."""
  point = WebPoint()
  status = GetElementClickableLocation(session, web_view, element_id, point)
  if status.IsError():
    return status
  hover = [MouseEvent(kMovedMouseEventType, kNoneMouseButton, point.x, point.y, session.sticky_modifiers, 0)]
  status = web_view.DispatchMouseEvents(hover, session.GetCurrentFrameId())
  if status.IsOk():
    session.mouse_position.Update(point)
  return status
def ExecuteTouchSingleTap(session, web_view, element_id, params, value):
  """Single-tap the element using native touch events when supported."""
  # Fall back to javascript atom for pre-m30 Xwalk.
  # NOTE(review): 1576 presumably is the first build with native touch
  # event support -- confirm against the Xwalk release history.
  if session.xwalk.GetBuildNo() < 1576:
    return ExecuteTouchSingleTapAtom(session, web_view, element_id, params, value)
  location = WebPoint()
  status = GetElementClickableLocation(session, web_view, element_id, location)
  if status.IsError():
    return status
  # A tap is a touch-start immediately followed by a touch-end.
  events = []
  events.append(TouchEvent(kTouchStart, location.x, location.y))
  events.append(TouchEvent(kTouchEnd, location.x, location.y))
  return web_view.DispatchTouchEvents(events)
def ExecuteSendKeysToElement(session, web_view, element_id, params, value):
  """Send params['value'] (a list of key strings) to the element.

  For <input type="file"> elements the keys are interpreted as one or
  more file paths (newline-delimited); for everything else they are
  typed via SendKeysToElement.
  """
  key_list = params.get("value")
  if type(key_list) != list:
    return Status(kUnknownError, "'value' must be a list")
  (status, is_input) = IsElementAttributeEqualToIgnoreCase(\
      session, web_view, element_id, "tagName", "input")
  if status.IsError():
    return status
  (status, is_file) = IsElementAttributeEqualToIgnoreCase(\
      session, web_view, element_id, "type", "file")
  if status.IsError():
    return status
  if is_input and is_file:
    # Compress array into a single string.
    paths_string = ""
    for path_part in key_list:
      if type(path_part) != str:
        return Status(kUnknownError, "'value' is invalid")
      paths_string += path_part
    # Separate the string into separate paths, delimited by '\n'.
    paths = paths_string.split("\n")
    (status, multiple) = IsElementAttributeEqualToIgnoreCase(\
        session, web_view, element_id, "multiple", "true")
    if status.IsError():
      return status
    # BUG FIX: previously any non-empty path list was rejected for
    # single-file inputs ("len(paths)"); only *more than one* path is
    # an error for an input without the 'multiple' attribute.
    if not multiple and len(paths) > 1:
      return Status(kUnknownError, "the element can not hold multiple files")
    element = CreateElement(element_id)
    return web_view.SetFileInputFiles(session.GetCurrentFrameId(), element, paths)
  else:
    return SendKeysToElement(session, web_view, element_id, key_list)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "command/element_commands.py",
"copies": "1",
"size": "12082",
"license": "bsd-3-clause",
"hash": 7907071839350697000,
"line_mean": 37.6006389776,
"line_max": 129,
"alpha_frac": 0.7151133918,
"autogenerated": false,
"ratio": 3.697062423500612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49121758153006123,
"avg_score": null,
"num_lines": null
} |
__all__ = ["ExecuteGetSessionCapabilities", \
"ExecuteImplicitlyWait", \
"ExecuteSetTimeout", \
"ExecuteSetScriptTimeout", \
"ExecuteGetCurrentWindowHandle", \
"ExecuteIsLoading", \
"ExecuteGetBrowserOrientation", \
"ExecuteGetLocation", \
"ExecuteGetAppCacheStatus", \
"ExecuteGetWindowHandles", \
"ExecuteClose", \
"ExecuteSwitchToWindow"]
from browser.status import *
from browser.web_view_impl import WebViewImpl
from base.log import VLOG
from misc.basic_types import WebPoint
kWindowHandlePrefix = "CDwindow-"
def _WebViewIdToWindowHandle(web_view_id):
return kWindowHandlePrefix + web_view_id
# return bool and web_view_id<string>
def _WindowHandleToWebViewId(window_handle):
if kWindowHandlePrefix in window_handle:
return (True, window_handle[len(kWindowHandlePrefix):])
return (False, "")
def ExecuteSessionCommand(command, session, params, value):
  """Run a session-scoped |command| and post-process its error status.

  A page crash turns the error into "session deleted..." (and quits the
  browser unless detached); a kDisconnected error caused by the window
  having been closed is downgraded to success.
  """
  command.Update([session, params, value])
  status = command.Run()
  if status.IsError() and session.xwalk:
    if not session.quit and session.xwalk.HasCrashedWebView():
      session.quit = True
      message = "session deleted because of page crash"
      if not session.detach:
        quit_status = session.xwalk.Quit()
        if quit_status.IsError():
          message += ", but failed to kill browser:" + quit_status.Message()
      status = Status(kUnknownError, message)
    elif status.Code() == kDisconnected:
      # Some commands, like clicking a button or link which closes the window,
      # may result in a kDisconnected error code.
      web_view_ids = []
      status_tmp = session.xwalk.GetWebViewIds(web_view_ids)
      if status_tmp.IsError() and status_tmp.Code() != kXwalkNotReachable:
        status.AddDetails("failed to check if window was closed: " + status_tmp.Message())
      elif session.window not in web_view_ids:
        status = Status(kOk)
  if status.IsError():
    # Append browser version info to ease debugging from logs.
    status.AddDetails("Session info: xwalk=" + session.xwalk.GetVersion())
  return status
def ExecuteGetSessionCapabilities(session, params, value):
  """Copy the session's capabilities dictionary into |value|."""
  value.clear()
  value.update(session.capabilities)
  return Status(kOk)
def ExecuteImplicitlyWait(session, params, value):
  """Set the session's implicit wait from params['ms'] (non-negative float)."""
  wait_ms = params.get("ms", -1.0)
  if type(wait_ms) != float or wait_ms < 0.0:
    return Status(kUnknownError, "'ms' must be a non-negative number")
  session.implicit_wait = int(wait_ms)
  return Status(kOk)
def ExecuteSetTimeout(session, params, value):
  """Set one of the session timeouts: 'implicit', 'script' or 'page load'."""
  raw_ms = params.get("ms")
  if type(raw_ms) != float:
    return Status(kUnknownError, "'ms' must be a double")
  kind = params.get("type")
  if type(kind) != str:
    return Status(kUnknownError, "'type' must be a string")
  timeout = int(raw_ms)
  # TODO(wyh): implicit and script timeout should be cleared
  # if negative timeout is specified.
  if kind == "implicit":
    session.implicit_wait = timeout
  elif kind == "script":
    session.script_timeout = timeout
  elif kind == "page load":
    if timeout < 0:
      session.page_load_timeout = session.kDefaultPageLoadTimeout
    else:
      session.page_load_timeout = timeout
  else:
    return Status(kUnknownError, "unknown type of timeout:" + kind)
  return Status(kOk)
def ExecuteSetScriptTimeout(session, params, value):
  """Set the async-script timeout from params['ms'] (non-negative float)."""
  timeout_ms = params.get("ms", -1.0)
  if type(timeout_ms) != float or timeout_ms < 0.0:
    return Status(kUnknownError, "'ms' must be a non-negative number")
  session.script_timeout = int(timeout_ms)
  return Status(kOk)
def ExecuteGetCurrentWindowHandle(session, params, value):
  """Store the handle of the session's current target window in |value|.

  Fails with the GetTargetWindow error if the window no longer exists.
  (Removed an unused local list left over from a copy/paste.)
  """
  web_view = WebViewImpl("fake", 0, None)
  status = session.GetTargetWindow(web_view)
  if status.IsError():
    return status
  value.clear()
  value.update({"value": _WebViewIdToWindowHandle(session.window)})
  return Status(kOk)
def ExecuteIsLoading(session, params, value):
  """Store whether the current frame has a pending navigation in |value|."""
  web_view = WebViewImpl("fake", 0, None)
  status = session.GetTargetWindow(web_view)
  if status.IsError():
    return status
  status = web_view.ConnectIfNecessary()
  if status.IsError():
    return status
  (status, pending) = web_view.IsPendingNavigation(session.GetCurrentFrameId())
  if status.IsError():
    return status
  value.clear()
  value.update({"value": pending})
  return Status(kOk)
def ExecuteGetBrowserOrientation(session, params, value):
  """Store the browser's screen orientation type string in |value|."""
  web_view = WebViewImpl("fake", 0, None)
  status = session.GetTargetWindow(web_view)
  if status.IsError():
    return status
  status = web_view.ConnectIfNecessary()
  if status.IsError():
    return status
  kGetBrowserOrientationScript = "function() { return window.screen.orientation;}"
  result = {}
  status = web_view.CallFunction(session.GetCurrentFrameId(), kGetBrowserOrientationScript, [], result)
  if status.IsError():
    return status
  orientation = result["value"].get("type")
  if type(orientation) != str:
    # BUG FIX: 'status(...)' tried to call the Status *instance*
    # (a TypeError at runtime); construct a new Status instead.
    return Status(kUnknownError, "Failed acquire current browser's orientation")
  value.clear()
  value.update({"value": orientation})
  return Status(kOk)
def ExecuteGetLocation(session, params, value):
  """Return the geoposition previously set via the override, or fail."""
  if not session.overridden_geoposition:
    return Status(kUnknownError, "Location must be set before it can be retrieved")
  geo = session.overridden_geoposition
  location = {
      "latitude": geo.latitude,
      "longitude": geo.longitude,
      "accuracy": geo.accuracy,
      # Set a dummy altitude to make WebDriver clients happy.
      # https://code.google.com/p/chromedriver/issues/detail?id=281
      "altitude": 0.0,
  }
  value.clear()
  value.update(location)
  return Status(kOk)
def ExecuteGetAppCacheStatus(session, params, value):
  """Store window.applicationCache.status (an int) in |value|."""
  web_view = WebViewImpl("fake", 0, None)
  status = session.GetTargetWindow(web_view)
  if status.IsError():
    return status
  status = web_view.ConnectIfNecessary()
  if status.IsError():
    return status
  kGetAppCacheStatus = "function() { return window.applicationCache.status;}"
  result = {}
  status = web_view.CallFunction(session.GetCurrentFrameId(), kGetAppCacheStatus, [], result)
  if status.IsError():
    return status
  cache_status = result["value"]
  if type(cache_status) != int:
    # BUG FIX: 'status(...)' tried to call the Status *instance*
    # (a TypeError at runtime); construct a new Status instead.
    return Status(kUnknownError, "Failed acquire current application cache status")
  value.clear()
  value.update({"value": cache_status})
  return Status(kOk)
def ExecuteGetWindowHandles(session, params, value):
  """Store the list of handles for every open web view in |value|."""
  web_view_ids = []
  status = session.xwalk.GetWebViewIds(web_view_ids)
  if status.IsError():
    return status
  handles = [_WebViewIdToWindowHandle(wid) for wid in web_view_ids]
  value.clear()
  value.update({"value": handles})
  return Status(kOk)
def ExecuteClose(session, params, value):
  """Close the current window; quit the browser if it was the last one."""
  web_view_ids = []
  status = session.xwalk.GetWebViewIds(web_view_ids)
  if status.IsError():
    return status
  # Remember (before closing) whether this was the only open window.
  is_last_web_view = (len(web_view_ids) == 1)
  web_view_ids = []
  web_view = WebViewImpl("fake", 0, None)
  status = session.GetTargetWindow(web_view)
  if status.IsError():
    return status
  status = session.xwalk.CloseWebView(web_view.GetId())
  if status.IsError():
    return status
  status = session.xwalk.GetWebViewIds(web_view_ids)
  # NOTE(review): the '== 1' test presumably accounts for a residual
  # placeholder view after the close -- confirm against Xwalk behavior.
  if ((status.Code() == kXwalkNotReachable and is_last_web_view) or \
      (status.IsOk() and len(web_view_ids) == 1)):
    # when the last web_view_id is "WebViewImpl("fake", 0, None)"
    # If no window is open, close is the equivalent of calling "quit".
    session.quit = True;
    return session.xwalk.Quit()
  return status;
def ExecuteSwitchToWindow(session, params, value):
  """Switch the session to the window named by params['name'].

  |name| may be a window handle ("CDwindow-<id>") or a window's
  javascript window.name.  Re-applies any geolocation override to the
  newly targeted view, then resets frame and mouse state.
  """
  name = params.get("name")
  if type(name) != str:
    return Status(kUnknownError, "'name' must be a nonempty string")
  web_view_ids = []
  status = session.xwalk.GetWebViewIds(web_view_ids)
  if status.IsError():
    return status
  web_view_id = ""
  found = False
  (flag, web_view_id) = _WindowHandleToWebViewId(name)
  if flag and web_view_id in web_view_ids:
    # Check if any web_view matches |web_view_id|.
    found = True
  else:
    # Check if any of the tab window names match |name|.
    kGetWindowNameScript = "function() { return window.name; }"
    for it in web_view_ids:
      result = {}
      web_view = WebViewImpl("fake", 0, None)
      status = session.xwalk.GetWebViewById(it, web_view)
      if status.IsError():
        return status
      status = web_view.ConnectIfNecessary()
      if status.IsError():
        return status
      status = web_view.CallFunction("", kGetWindowNameScript, [], result)
      if status.IsError():
        return status
      window_name = result["value"]
      if type(window_name) != str:
        return Status(kUnknownError, "failed to get window name")
      if window_name == name:
        web_view_id = it
        found = True
        break
  if not found:
    return Status(kNoSuchWindow)
  if session.overridden_geoposition:
    # Carry the session's geolocation override over to the new window.
    web_view = WebViewImpl("fake", 0, None)
    status = session.xwalk.GetWebViewById(web_view_id, web_view)
    if status.IsError():
      return status
    status = web_view.ConnectIfNecessary()
    if status.IsError():
      return status
    status = web_view.OverrideGeolocation(session.overridden_geoposition)
    if status.IsError():
      return status
  session.window = web_view_id
  session.SwitchToTopFrame()
  session.mouse_position = WebPoint(0, 0)
  return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "command/session_commands.py",
"copies": "1",
"size": "9351",
"license": "bsd-3-clause",
"hash": 6061691009510177000,
"line_mean": 32.1595744681,
"line_max": 103,
"alpha_frac": 0.6909421452,
"autogenerated": false,
"ratio": 3.5923933922397233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9636722853952846,
"avg_score": 0.029322536697375404,
"num_lines": 282
} |
__all__ = ["ExecuteWindowCommand", \
"ExecuteGetTitle", \
"ExecuteRefresh", \
"ExecuteGetCurrentUrl", \
"ExecuteGetPageSource", \
"ExecuteIsBrowserOnline", \
"ExecuteGet", \
"ExecuteGoBack", \
"ExecuteGoForward", \
"ExecuteFindElement", \
"ExecuteFindElements", \
"ExecuteExecuteScript", \
"ExecuteExecuteAsyncScript", \
"ExecuteScreenshot", \
"ExecuteGetWindowSize", \
"ExecuteGetWindowPosition", \
"ExecuteGetCookies", \
"ExecuteAddCookie", \
"ExecuteDeleteCookie", \
"ExecuteDeleteAllCookies", \
"ExecuteSwitchToFrame"]
from browser.status import *
from browser.js import *
from browser.web_view_impl import WebViewImpl
from base.log import VLOG
from command.element_util import FindElement
from command.init_session_commands import GenerateId
class Cookie(object):
  """Mutable value object describing a single browser cookie."""

  # Field names, in constructor order; also drives Update().
  _FIELDS = ("name", "value", "domain", "path", "expiry", "secure", "session")

  def __init__(self, name, value, domain, path, expiry, secure, session):
    self.name = name
    self.value = value
    self.domain = domain
    self.path = path
    self.expiry = expiry
    self.secure = secure
    self.session = session

  def Update(self, other):
    """Copy every field of |other| onto this cookie."""
    for field in self._FIELDS:
      setattr(self, field, getattr(other, field))
def _CreateDictionaryFrom(cookie):
dictionary = {}
dictionary["name"] = cookie.name
dictionary["value"] = cookie.value
if cookie.domain:
dictionary["domain"] = cookie.domain
if cookie.path:
dictionary["path"] = cookie.path
if not cookie.session:
dictionary["expiry"] = cookie.expiry
dictionary["secure"] = cookie.secure
return dictionary
def _GetVisibleCookies(web_view, cookies):
  """Fill |cookies| (in place) with Cookie objects read via DevTools."""
  raw_cookies = []
  status = web_view.GetCookies(raw_cookies)
  if status.IsError():
    return status
  parsed = []
  for entry in raw_cookies:
    if type(entry) != dict:
      return Status(kUnknownError, "DevTools returns a non-dictionary cookie")
    parsed.append(Cookie(entry.get("name", ""),
                         entry.get("value", ""),
                         entry.get("domain", ""),
                         entry.get("path", ""),
                         # DevTools reports expiry in milliseconds;
                         # store seconds.
                         entry.get("expires", 0) / 1000.0,
                         entry.get("secure", False),
                         entry.get("session", False)))
  cookies[:] = parsed
  return Status(kOk)
# return status and url<string>
def _GetUrl(web_view, frame):
  """Return (status, url) for |frame| by evaluating document.URL."""
  result = {}
  status = web_view.CallFunction(frame, "function() { return document.URL; }", [], result)
  if status.IsError():
    return (status, "")
  if type(result["value"]) != str:
    return (Status(kUnknownError, "javascript failed to return the url"), "")
  return (Status(kOk), result["value"])
def ExecuteWindowCommand(command, session, params, value):
  """Run a window-scoped |command| against the session's target window.

  Mirrors ExecuteElementCommand: connects the target view, refuses to
  run under an open JS dialog, waits for pending navigations before and
  after the command, and retries once from the top frame when the
  current subframe disappeared.
  """
  web_view = WebViewImpl("fake_id", 0, None)
  # update web_view
  status = session.GetTargetWindow(web_view)
  if status.IsError():
    return status
  status = web_view.ConnectIfNecessary()
  if status.IsError():
    return status
  status = web_view.HandleReceivedEvents()
  if status.IsError():
    return status
  if web_view.GetJavaScriptDialogManager().IsDialogOpen():
    return Status(kUnexpectedAlertOpen)
  nav_status = Status(kOk)
  # At most two attempts: the second happens only when the first failed
  # with kNoSuchExecutionContext (the subframe no longer exists).
  for attempt in range(2):
    if attempt == 1:
      if status.Code() == kNoSuchExecutionContext:
        # Switch to main frame and retry command if subframe no longer exists.
        session.SwitchToTopFrame()
      else:
        break
    nav_status = web_view.WaitForPendingNavigations(session.GetCurrentFrameId(), session.page_load_timeout, True)
    if nav_status.IsError():
      return nav_status
    command.Update([session, web_view, params, value])
    status = command.Run()
  nav_status = web_view.WaitForPendingNavigations(session.GetCurrentFrameId(), session.page_load_timeout, True)
  # Report a post-command navigation failure unless it is just an open alert.
  if status.IsOk() and nav_status.IsError() and nav_status.Code() != kUnexpectedAlertOpen:
    return nav_status
  if status.Code() == kUnexpectedAlertOpen:
    return Status(kOk)
  return status
def ExecuteGetTitle(session, web_view, params, value):
  """Store the page title (or its URL when untitled) into |value|."""
  kGetTitleScript = "function() {"\
      " if (document.title)"\
      " return document.title;"\
      " else"\
      " return document.URL;"\
      "}"
  return web_view.CallFunction("", kGetTitleScript, [], value)
def ExecuteRefresh(session, web_view, params, value):
  """Reload the current page."""
  return web_view.Reload()
def ExecuteGetCurrentUrl(session, web_view, params, value):
  """Store the current frame's URL into |value|.

  BUG FIX: value.update(url) passed a plain string to dict.update,
  which raises ValueError at runtime; wrap it as {"value": url} like
  every other command in this module.
  """
  (status, url) = _GetUrl(web_view, session.GetCurrentFrameId())
  if status.IsError():
    return status
  value.clear()
  value.update({"value": url})
  return Status(kOk)
def ExecuteGetPageSource(session, web_view, params, value):
  """Serialize the whole document (XMLSerializer) into |value|."""
  kGetPageSource = \
      "function() {"\
      " return new XMLSerializer().serializeToString(document);"\
      "}"
  return web_view.CallFunction(session.GetCurrentFrameId(), kGetPageSource, [], value)
def ExecuteIsBrowserOnline(session, web_view, params, value):
  """Store navigator.onLine for the current frame into |value|."""
  return web_view.EvaluateScript(session.GetCurrentFrameId(), "navigator.onLine", value)
def ExecuteGet(session, web_view, params, value):
  """Navigate the web view to params['url']."""
  target_url = params.get("url", None)
  if type(target_url) != str:
    return Status(kUnknownError, "'url' must be a string")
  return web_view.Load(target_url)
def ExecuteGoBack(session, web_view, params, value):
  """Navigate one step back in the window history."""
  return web_view.EvaluateScript("", "window.history.back();", value)
def ExecuteGoForward(session, web_view, params, value):
  """Navigate one step forward in the window history."""
  return web_view.EvaluateScript("", "window.history.forward();", value)
def ExecuteFindElement(session, web_view, params, value):
  """Find the first element in the page matching the locator in |params|."""
  return FindElement(50, True, "", session, web_view, params, value)
def ExecuteFindElements(session, web_view, params, value):
  """Find every element in the page matching the locator in |params|."""
  return FindElement(50, False, "", session, web_view, params, value)
def ExecuteExecuteScript(session, web_view, params, value):
  """Run params['script'] with params['args'] in the current frame.

  The magic script ':takeHeapSnapshot' is a reserved debugging command.
  """
  script = params.get("script")
  if type(script) != str:
    return Status(kUnknownError, "'script' must be a string")
  if script == ":takeHeapSnapshot":
    # TODO: port web_view->TakeHeapSnapshot(value).
    # BUG FIX: this branch used to fall through and implicitly return
    # None, crashing callers that expect a Status; report the missing
    # feature explicitly instead.
    return Status(kUnknownError, "takeHeapSnapshot is not implemented")
  args = params.get("args")
  if type(args) != list:
    return Status(kUnknownError, "'args' must be a list")
  return web_view.CallFunction(session.GetCurrentFrameId(), "function(){" + script + "}", args, value)
def ExecuteExecuteAsyncScript(session, web_view, params, value):
  """Run params['script'] asynchronously, bounded by the script timeout."""
  script = params.get("script")
  if type(script) != str:
    return Status(kUnknownError, "'script' must be a string")
  script_args = params.get("args")
  if type(script_args) != list:
    return Status(kUnknownError, "'args' must be a list")
  return web_view.CallUserAsyncFunction(session.GetCurrentFrameId(), "function(){" + script + "}", script_args, session.script_timeout, value)
def ExecuteScreenshot(session, web_view, params, value):
  """Capture a screenshot of the target view into |value|.

  BUG FIX: the status from ActivateWebView was silently overwritten;
  an activation failure is now reported instead of producing a
  screenshot of whatever view happened to be frontmost.
  """
  status = session.xwalk.ActivateWebView(web_view.GetId())
  if status.IsError():
    return status
  (status, screenshot) = web_view.CaptureScreenshot()
  if status.IsError():
    return status
  value.clear()
  value.update({"value": screenshot})
  return Status(kOk)
def ExecuteGetWindowSize(session, web_view, params, value):
  """Store the screen width/height into |value|."""
  kExecuteGetWindowSizeScript = \
      "function() {"\
      " var size = {'height': 0, 'width': 0};"\
      " size.height = window.screen.height;"\
      " size.width = window.screen.width;"\
      " return size;"\
      "}"
  result = {}
  status = web_view.CallFunction(session.GetCurrentFrameId(), \
      kExecuteGetWindowSizeScript, [], result)
  if status.IsError():
    return status
  value.clear()
  value.update(result)
  return Status(kOk)
def ExecuteGetWindowPosition(session, web_view, params, value):
  """Store the window's screen position (x, y) into |value|."""
  kGetWindowPositionScript = \
      "function() {"\
      " var position = {'x': 0, 'y': 0};"\
      " position.x = window.screenX;"\
      " position.y = window.screenY;"\
      " return position;"\
      "}"
  result = {}
  status = web_view.CallFunction(session.GetCurrentFrameId(), \
      kGetWindowPositionScript, [], result)
  if status.IsError():
    return status
  value.clear()
  value.update(result)
  return Status(kOk)
def ExecuteGetCookies(session, web_view, params, value):
  """Store every cookie visible to the current page into |value|."""
  cookies = []
  status = _GetVisibleCookies(web_view, cookies)
  if status.IsError():
    return status
  serialized = [_CreateDictionaryFrom(c) for c in cookies]
  value.clear()
  value.update({"value": serialized})
  return Status(kOk)
def ExecuteAddCookie(session, web_view, params, value):
  """Add the cookie dictionary in params['cookie'] to the current page."""
  cookie = params.get("cookie")
  if type(cookie) != dict:
    return Status(kUnknownError, "missing 'cookie'")
  return web_view.CallFunction(session.GetCurrentFrameId(), kAddCookieScript, [cookie], {})
def ExecuteDeleteCookie(session, web_view, params, value):
  """Delete the cookie named params['name'] for the current page's URL."""
  cookie_name = params.get("name")
  if type(cookie_name) != str:
    return Status(kUnknownError, "missing 'name'")
  (status, url) = _GetUrl(web_view, session.GetCurrentFrameId())
  if status.IsError():
    return status
  return web_view.DeleteCookie(cookie_name, url)
def ExecuteDeleteAllCookies(session, web_view, params, value):
  """Delete every cookie visible to the current page."""
  cookies = []
  status = _GetVisibleCookies(web_view, cookies)
  if status.IsError():
    return status
  if cookies:
    (status, url) = _GetUrl(web_view, session.GetCurrentFrameId())
    if status.IsError():
      return status
    for cookie in cookies:
      status = web_view.DeleteCookie(cookie.name, url)
      if status.IsError():
        return status
  return Status(kOk)
def ExecuteSwitchToFrame(session, web_view, params, value):
  """Switch the session's frame context according to params['id'].

  |id| may be None (top frame), an element dictionary, a frame
  name/id string, or a zero-based index.  The located frame element is
  tagged with a generated 'cd_frame_id_' attribute for later lookup.

  BUG FIX: replaced dict.has_key() (removed in Python 3) with the 'in'
  operator, and '== None' with the idiomatic 'is None'.
  """
  if "id" not in params:
    return Status(kUnknownError, "missing 'id'")
  id_value = params["id"]
  if id_value is None:
    session.SwitchToTopFrame()
    return Status(kOk)
  script = ""
  args = []
  if type(id_value) == dict:
    script = "function(elem) { return elem; }"
    args.append(id_value)
  else:
    script = \
        "function(xpath) {"\
        " return document.evaluate(xpath, document, null, "\
        " XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;"\
        "}"
    xpath = "(/html/body//iframe|/html/frameset/frame)"
    if type(id_value) == str:
      xpath += '[@name="%s" or @id="%s"]' % (id_value, id_value)
    elif type(id_value) == int:
      # XPath positions are 1-based.
      xpath += "[%d]" % (id_value + 1)
    else:
      return Status(kUnknownError, "invalid 'id'")
    args.append(xpath)
  (status, frame) = web_view.GetFrameByFunction(session.GetCurrentFrameId(), script, args)
  if status.IsError():
    return status
  result = {}
  status = web_view.CallFunction(session.GetCurrentFrameId(), script, args, result)
  if status.IsError():
    return status
  if type(result) != dict:
    return Status(kUnknownError, "fail to locate the sub frame element")
  xwalk_driver_id = GenerateId()
  kSetFrameIdentifier = \
      "function(frame, id) {"\
      " frame.setAttribute('cd_frame_id_', id);"\
      "}"
  new_args = []
  new_args.append(result)
  new_args.append(xwalk_driver_id)
  result = {}
  status = web_view.CallFunction(session.GetCurrentFrameId(), kSetFrameIdentifier, new_args, result)
  if status.IsError():
    return status
  session.SwitchToSubFrame(frame, xwalk_driver_id)
  return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "command/window_commands.py",
"copies": "1",
"size": "11605",
"license": "bsd-3-clause",
"hash": -3803343867028671000,
"line_mean": 30.9696969697,
"line_max": 135,
"alpha_frac": 0.6611805256,
"autogenerated": false,
"ratio": 3.6333750782717598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.965447177215428,
"avg_score": 0.02801676634349588,
"num_lines": 363
} |
__all__ = ['expit', 'expit_back', 'exp']
import os
import math
import numpy
import pycuda.gpuarray as gpuarray
import pycuda.cumath as cumath
import pycuda.autoinit
from pycuda.compiler import SourceModule
from .utils import gpu_func
from .enums import MathModes, MAX_BLOCK_SIZE, CUR_DIR, CACHE_DIR
mod = SourceModule(open(os.path.join(CUR_DIR, 'kernel/expit.cu')).read(), cache_dir=CACHE_DIR)
expit_kernel = mod.get_function('expit_kernel')
expit_fast_kernel = mod.get_function('expit_fast_kernel')
expit_back_kernel = mod.get_function('expit_back_kernel')
exp_fast_kernel = mod.get_function('exp_fast_kernel')
@gpu_func
def exp(d_a, mode=MathModes.ACC):
    """Element-wise exponential of a GPU array.

    ACC mode defers to pycuda.cumath.exp; FAST mode launches the custom
    exp_fast_kernel over a 1-D grid sized to cover d_a.
    """
    if mode == MathModes.ACC:
        return cumath.exp(d_a)
    d_out = gpuarray.zeros_like(d_a)
    threads = min(d_a.size, MAX_BLOCK_SIZE)
    blocks = max(int(math.ceil(d_a.size / float(threads))), 1)
    exp_fast_kernel(d_a, d_out, numpy.int32(d_a.size),
                    block=(threads,1,1), grid=(blocks,1,1))
    return d_out
@gpu_func
def expit(d_a, mode=MathModes.ACC):
    """Implements the expit function (aka sigmoid)

    expit(x) = 1 / (1 + exp(-x))

    Launches either the accurate or the fast CUDA kernel, depending on
    |mode|, over a 1-D grid sized to cover d_a.
    """
    d_out = gpuarray.zeros_like(d_a)
    threads = min(d_a.size, MAX_BLOCK_SIZE)
    blocks = max(int(math.ceil(d_a.size / float(threads))), 1)
    kernel = expit_fast_kernel if mode == MathModes.FAST else expit_kernel
    kernel(d_a, d_out, numpy.int32(d_a.size),
           block=(threads,1,1), grid=(blocks,1,1))
    return d_out
@gpu_func
def expit_back(d_a, d_error):
    """Implments the following function

    out = in * (1 - in) * error

    i.e. the backward pass of expit, launched over a 1-D grid.
    """
    d_out = gpuarray.zeros_like(d_a)
    threads = min(d_a.size, MAX_BLOCK_SIZE)
    blocks = max(int(math.ceil(d_a.size / float(threads))), 1)
    expit_back_kernel(d_a, d_error, d_out, numpy.int32(d_a.size),
                      block=(threads,1,1), grid=(blocks,1,1))
    return d_out
| {
"repo_name": "Captricity/sciguppy",
"path": "sciguppy/expit.py",
"copies": "1",
"size": "1925",
"license": "mit",
"hash": 3034321986296887000,
"line_mean": 32.7719298246,
"line_max": 94,
"alpha_frac": 0.6571428571,
"autogenerated": false,
"ratio": 2.718926553672316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8826900435703064,
"avg_score": 0.009833795013850417,
"num_lines": 57
} |
__all__ = ['extract_ssi', 'extract_ssi_to_file',
'extract_eta', 'extract_eta_to_file',
'extract_Q_channel', 'extract_Q_down',
'extract_overland_volume', 'extract_overland_volume_to_file']
from datetime import timedelta
from configparser import SafeConfigParser
import h5py
import numpy as np
import numpy.ma as ma
# gzip compression flag
# Passed as ``compression=`` to every h5py.require_dataset call below; an
# integer selects gzip at that level (0-9).
comp = 6
def extract_Q_down(control_fname):
    """Extract combined soil and overland out flow rates.

    Read a PyTOPKAPI simulation file and return the combined overland
    and soil store outflows in a Numpy array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file, with the
        full path relative to the current directory.

    Returns
    -------
    Qdown : Numpy array
        Simulated outflow rates from the overland and soil store of
        each cell.

    """
    config = SafeConfigParser()
    config.read(control_fname)

    sim_store = h5py.File(config.get('output_files', 'file_out'), 'r')
    flows = sim_store['/Q_down'][...]
    sim_store.close()

    return flows
def extract_Q_channel(control_fname):
    """Extract channel flow rates from a PyTOPKAPI simulation file.

    Read a PyTOPKAPI simulation file and return the simulated channel
    flows in a Numpy masked array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file. The name
        should contain the full path relative to the current
        directory.

    Returns
    -------
    Qc : Numpy masked array
        A Numpy masked array containing the simulated flow rates for
        channel cells.

    """
    config = SafeConfigParser()
    config.read(control_fname)

    param_fname = config.get('input_files', 'file_cell_param')
    sim_fname = config.get('output_files', 'file_out')

    params = np.loadtxt(param_fname)

    tkpi_file = h5py.File(sim_fname, 'r')
    Qc = tkpi_file['/Channel/Qc_out'][...]
    tkpi_file.close()

    # Column 3 of the parameter file is the channel flag (1 == channel cell);
    # mask every non-channel cell for every timestep.  Use the builtin ``int``
    # dtype: the ``np.int`` alias was deprecated in NumPy 1.20 and removed in
    # 1.24.
    cond = params[:, 3]*np.ones(Qc.shape, dtype=int) != 1
    Qc = np.ma.masked_where(cond, Qc)

    return Qc
def extract_overland_volume(control_fname):
    """Extract the volumes in the overland stores.

    Read a PyTOPKAPI simulation file and return the overland store
    volumes in a Numpy array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file, relative
        to the current directory (or the root of the file system).

    Returns
    -------
    Vo : Numpy array
        Simulated storage volume in the overland store of each cell.

    """
    config = SafeConfigParser()
    config.read(control_fname)

    sim_store = h5py.File(config.get('output_files', 'file_out'), 'r')
    volumes = sim_store['/Overland/V_o'][...]
    sim_store.close()

    return volumes
def extract_overland_volume_to_file(sim_fname, param_fname,
                                    result_fname, start_dt, timestep):
    """Extract the volumes in the overland stores to a file.

    Read a TOPKAPI simulation file and it's associated parameter file
    and extract the overland store volumes for each timestep. Store
    the results in a new HDF5 file, grouped by date and containing
    datasets of latitude, longitude and storage volume.

    Parameters
    ----------
    sim_fname : string
        The name of a PyTOPKAPI simulation file. This should include
        the full or relative path.
    param_fname : string
        The name of a parameter file describing the catchment. This
        should include the full or relative path.
    result_fname : string
        The name of an HDF5 file to store the output. This should
        include the full or relative path.
    start_dt : datetime.datetime
        The starting date and time of the simulated results in
        `sim_fname`.
    timestep : int
        The length of each model time-step in seconds.

    Returns
    -------
    Nothing

    """
    params = np.loadtxt(param_fname)
    x = params[:, 1]
    y = params[:, 2]
    soil_depth = params[:, 8]

    # Cells with zero soil depth are channel/invalid cells; mask them out of
    # the coordinate arrays so only active cells are written.
    soil_depth = ma.masked_values(soil_depth, 0.0)
    x = ma.array(x, mask=soil_depth.mask).compressed()
    y = ma.array(y, mask=soil_depth.mask).compressed()

    # Read the whole volume array, then close the simulation file - it is not
    # needed again.  (A redundant second close() at the end of the function
    # has been removed.)
    tkpi_file = h5py.File(sim_fname, 'r')
    overland_vol = tkpi_file['/Overland/V_o'][...]
    tkpi_file.close()

    result_file = h5py.File(result_fname, 'w')
    rows, cols = overland_vol.shape

    # y
    dset = result_file.require_dataset('y', shape=y.shape,
                                       dtype=np.float32, compression=comp)
    dset[...] = y
    dset.attrs['name'] = 'y coordinate'
    dset.attrs['units'] = 'Projection dependent (Metres or Decimal degrees)'

    # x
    dset = result_file.require_dataset('x', shape=x.shape,
                                       dtype=np.float32, compression=comp)
    dset[...] = x
    dset.attrs['name'] = 'x coordinate'
    dset.attrs['units'] = 'Projection dependent (Metres or Decimal degrees)'

    # One dataset per timestep, keyed by timestamp (YYYYMMDDHH00).
    curr_dt = start_dt
    for k in range(rows):
        print(curr_dt)
        ov = ma.array(overland_vol[k], mask=soil_depth.mask).compressed()

        dset = result_file.require_dataset(curr_dt.strftime('%Y%m%d%H00'),
                                           shape=ov.shape,
                                           dtype=np.float32, compression=comp)
        dset[...] = ov
        dset.attrs['name'] = 'TOPKAPI overland store volume'
        dset.attrs['units'] = 'm^3'

        curr_dt += timedelta(seconds=timestep)

    result_file.close()
def extract_ssi(control_fname):
    """Extract SSI from a PyTOPKAPI simulation file.

    Read a PyTOPKAPI simulation file and it's associated parameter
    file and compute the Soil Saturation Index (SSI) for each model
    cell and timestep. The results are returned as a Numpy array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file, with the
        full path relative to the current directory.

    Returns
    -------
    ssi : Numpy ndarray
        A Numpy array containing the calculated SSI values.

    """
    config = SafeConfigParser()
    config.read(control_fname)

    global_param_fname = config.get('input_files', 'file_global_param')
    param_fname = config.get('input_files', 'file_cell_param')
    sim_fname = config.get('output_files', 'file_out')
    fac_L = config.getfloat('calib_params', 'fac_L')

    params = np.loadtxt(param_fname)
    glob_params = np.genfromtxt(global_param_fname, names=True)

    depth = fac_L*params[:, 8]
    porosity_range = params[:, 11] - params[:, 10]
    cell_area = glob_params['X']**2 # m^2

    # Zero-depth cells are invalid; mask them through all derived arrays.
    depth = ma.masked_values(depth, 0.0)
    porosity_range = ma.array(porosity_range, mask=depth.mask)

    # ssi = (Vs/cell_vol)*100
    # cell_vol = (theta_s - theta_r)*soil_depth*cell_area
    cell_vol = porosity_range*depth*cell_area

    tkpi_file = h5py.File(sim_fname, 'r')
    soil_vol = tkpi_file['/Soil/V_s'][...]
    tkpi_file.close()

    masked_vol = ma.array(soil_vol, mask=depth.mask)
    return (masked_vol/cell_vol)*100.0
def extract_ssi_to_file(sim_fname, param_fname,
                        result_fname, start_dt, timestep):
    """Extract percentage saturation to a file

    Read a TOPKAPI simulation file and it's associated parameter file
    and compute the SSI for each timestep. Store the results in a new
    HDF5 file, grouped by date and containing datasets of latitude,
    longitude and SSI value.

    Parameters
    ----------
    sim_fname : string
        The name of a PyTOPKAPI simulation file. This should include
        the full or relative path.
    param_fname : string
        The name of a parameter file describing the catchment. This
        should include the full or relative path.
    result_fname : string
        The name of an HDF5 file to store the output. This should
        include the full or relative path.
    start_dt : datetime.datetime
        The starting date and time of the simulated results in
        `sim_fname`.
    timestep : int
        The length of each model time-step in seconds.

    Returns
    -------
    Nothing

    """
    params = np.loadtxt(param_fname)
    x = params[:, 1]
    y = params[:, 2]
    soil_depth = params[:, 8]
    factor = params[:, 11] - params[:, 10]
    # NOTE(review): cell size is hard-coded at 1000 m here, whereas
    # extract_ssi reads it from the global parameter file - confirm this is
    # intentional for this catchment.
    cell_area = 1000.0**2 # m^2

    # Zero-depth cells are invalid; mask them through all derived arrays.
    soil_depth = ma.masked_values(soil_depth, 0.0)
    factor = ma.array(factor, mask=soil_depth.mask)
    x = ma.array(x, mask=soil_depth.mask).compressed()
    y = ma.array(y, mask=soil_depth.mask).compressed()

    div = factor*soil_depth*cell_area

    # Read the soil volumes, then close the simulation file - it is not
    # needed again.  (A redundant second close() at the end of the function
    # has been removed.)
    tkpi_file = h5py.File(sim_fname, 'r')
    soil_vol = tkpi_file['/Soil/V_s'][...]
    tkpi_file.close()

    result_file = h5py.File(result_fname, 'w')
    rows, cols = soil_vol.shape

    # y
    dset = result_file.require_dataset('y', shape=y.shape,
                                       dtype=np.float32, compression=comp)
    dset[...] = y
    dset.attrs['name'] = 'y coordinate'
    dset.attrs['units'] = 'Projection dependent (Metres or Decimal degrees)'

    # x
    dset = result_file.require_dataset('x', shape=x.shape,
                                       dtype=np.float32, compression=comp)
    dset[...] = x
    dset.attrs['name'] = 'x coordinate'
    dset.attrs['units'] = 'Projection dependent (Metres or Decimal degrees)'

    curr_dt = start_dt
    for k in range(rows):
        print(curr_dt)
        # ssi = (Vs/cell_vol)*100
        # cell_vol = (theta_s - theta_r)*soil_depth*cell_area
        sv = ma.array(soil_vol[k], mask=soil_depth.mask)
        ssi = (sv/(div))*100.0
        ssi = ssi.compressed()

        # ssi
        dset = result_file.require_dataset(curr_dt.strftime('%Y%m%d%H00'),
                                           shape=ssi.shape,
                                           dtype=np.float32, compression=comp)
        dset[...] = ssi
        dset.attrs['name'] = 'TOPKAPI soil saturation index'
        dset.attrs['units'] = '% saturation'

        curr_dt += timedelta(seconds=timestep)

    result_file.close()
def extract_eta(control_fname):
    """Extract ETa from a PyTOPKAPI simulation file.

    Read a PyTOPKAPI simulation file and it's associated parameter file
    and extract the actual evapotranspiration for each model cell and
    timestep. The results are returned as a Numpy array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file, with the
        full path relative to the current directory.

    Returns
    -------
    eta : Numpy ndarray
        A Numpy array containing the calculated ETa values.

    """
    config = SafeConfigParser()
    config.read(control_fname)

    params = np.loadtxt(config.get('input_files', 'file_cell_param'))
    # Mask out cells with zero soil depth (invalid cells).
    depth_mask = ma.masked_values(params[:, 8], 0.0).mask

    sim_store = h5py.File(config.get('output_files', 'file_out'), 'r')
    eta = sim_store['/ET_out'][...]
    sim_store.close()

    return ma.array(eta, mask=depth_mask)
def extract_eta_to_file(sim_fname, param_fname,
                        result_fname, start_dt, timestep):
    """Extract actual evapotranspiration to a file

    Read a PyTOPKAPI simulation file and it's associated parameter
    file and extract the actual evapotranspiration for each
    timestep. Store the results in a new HDF5 file, grouped by date
    and containing datasets of latitude, longitude and ETa value.

    Parameters
    ----------
    sim_fname : string
        The name of a PyTOPKAPI simulation file (full or relative path).
    param_fname : string
        The name of a parameter file describing the catchment (full or
        relative path).
    result_fname : string
        The name of an HDF5 file to store the output (full or relative
        path).
    start_dt : datetime.datetime
        The starting date and time of the simulated results in
        `sim_fname`.
    timestep : int
        The length of each model time-step in seconds.

    Returns
    -------
    Nothing

    """
    params = np.loadtxt(param_fname)

    # Mask out cells with zero soil depth (invalid cells).
    depths = ma.masked_values(params[:, 8], 0.0)
    xcoords = ma.array(params[:, 1], mask=depths.mask).compressed()
    ycoords = ma.array(params[:, 2], mask=depths.mask).compressed()

    sim_store = h5py.File(sim_fname, 'r')
    eta = sim_store['/ET_out'][...]
    sim_store.close()

    out_store = h5py.File(result_fname, 'w')
    rows, cols = eta.shape

    coord_units = 'Projection dependent (Metres or Decimal degrees)'
    for coord_name, values in (('y', ycoords), ('x', xcoords)):
        dset = out_store.require_dataset(coord_name, shape=values.shape,
                                         dtype=np.float32, compression=comp)
        dset[...] = values
        dset.attrs['name'] = '%s coordinate' % coord_name
        dset.attrs['units'] = coord_units

    # One dataset per timestep, keyed by timestamp (YYYYMMDDHH00).
    curr_dt = start_dt
    for k in range(rows):
        print(curr_dt)
        step_eta = ma.array(eta[k], mask=depths.mask).compressed()

        dset = out_store.require_dataset(curr_dt.strftime('%Y%m%d%H00'),
                                         shape=step_eta.shape,
                                         dtype=np.float32, compression=comp)
        dset[...] = step_eta
        dset.attrs['name'] = 'PyTOPKAPI actual ET'
        dset.attrs['units'] = 'mm'

        curr_dt += timedelta(seconds=timestep)

    out_store.close()
| {
"repo_name": "scottza/PyTOPKAPI",
"path": "pytopkapi/results_analysis/sim_result_tools.py",
"copies": "2",
"size": "14176",
"license": "bsd-3-clause",
"hash": -8875523243643214000,
"line_mean": 29.5517241379,
"line_max": 78,
"alpha_frac": 0.6129373589,
"autogenerated": false,
"ratio": 3.698408557265849,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5311345916165849,
"avg_score": null,
"num_lines": null
} |
__all__ = ['eye']
from ..core.numeric import zeros
def eye(N, M=None, k=0, dtype=float):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to `N`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal, and a negative value
        to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N,M)
        An array where all elements are equal to zero, except for the `k`-th
        diagonal, whose values are equal to one.

    See Also
    --------
    identity : (almost) equivalent function
    diag : diagonal 2-D array from a 1-D array specified by the user.

    Examples
    --------
    >>> np.eye(2, dtype=int)
    array([[1, 0],
           [0, 1]])
    >>> np.eye(3, k=1)
    array([[ 0.,  1.,  0.],
           [ 0.,  0.,  1.],
           [ 0.,  0.,  0.]])

    """
    if M is None:
        M = N
    result = zeros((N, M), dtype=dtype)
    # Walk the rows and set the single in-bounds element of diagonal `k`;
    # rows whose diagonal element falls outside the matrix are left zero.
    for row in range(N):
        col = row + k
        if 0 <= col < M:
            result[row, col] = 1
    return result
| {
"repo_name": "bussiere/pypyjs",
"path": "website/demo/home/rfk/repos/pypy/lib_pypy/numpypy/lib/twodim_base.py",
"copies": "2",
"size": "1334",
"license": "mit",
"hash": -4786133914286648000,
"line_mean": 23.7037037037,
"line_max": 74,
"alpha_frac": 0.5254872564,
"autogenerated": false,
"ratio": 3.5573333333333332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5082820589733333,
"avg_score": null,
"num_lines": null
} |
__all__ = ('EyesError', 'EyesIllegalArgument', 'OutOfBoundsError', 'TestFailedError', 'NewTestError',
'DiffsFoundError')
class EyesError(Exception):
    """Base exception for all Applitools Eyes errors."""
class EyesIllegalArgument(EyesError):
    """Raised when a parameter of the wrong type is passed to a function."""
class OutOfBoundsError(EyesError):
    """Raised when an element lies outside a required boundary (e.g. a
    region outside a frame, or a point outside an image)."""
class TestFailedError(Exception):
    """
    Indicates that a test did not pass (i.e., test either failed or is a new test).
    """
    def __init__(self, message, test_results=None):
        # Initialize the base Exception so ``args`` is populated; the
        # original skipped this, leaving ``e.args == ()`` which breaks
        # pickling and generic exception introspection.
        super(TestFailedError, self).__init__(message)
        self.message = message
        # TestResults object (or None) describing the failed run.
        self.test_results = test_results

    def __str__(self):
        return "%s , %s" % (self.message, self.test_results)
class NewTestError(TestFailedError):
    """Raised when the checkpoints belong to a brand-new test."""

    def __init__(self, message, test_results=None):
        super(NewTestError, self).__init__(message, test_results)
class DiffsFoundError(TestFailedError):
    """Raised when an existing test ended and differences from the
    baseline were found."""

    def __init__(self, message, test_results=None):
        super(DiffsFoundError, self).__init__(message, test_results)
| {
"repo_name": "applitools/eyes.selenium.python",
"path": "applitools/core/errors.py",
"copies": "1",
"size": "1339",
"license": "apache-2.0",
"hash": -3985664554509709000,
"line_mean": 24.75,
"line_max": 101,
"alpha_frac": 0.6482449589,
"autogenerated": false,
"ratio": 3.881159420289855,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5029404379189855,
"avg_score": null,
"num_lines": null
} |
__all__ = ('FacebookAPI', 'GraphAPI', 'FacebookClientError',
'FacebookAuthError', 'FacebookAPIError', 'GraphAPIError')
""" Requests-Facebook """
__author__ = 'Mike Helmick <mikehelmick@me.com>'
__version__ = '0.4.2'
import requests
try:
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
try:
from urllib.parse import parse_qsl
except ImportError: # Python 2
try:
from urlparse import parse_qsl
except ImportError: # Python < 2.6
from cgi import parse_qsl
try:
string_types = basestring
except NameError:
string_types = str
def _split_params_and_files(params_):
    """Partition a mapping into plain string parameters and file-like values.

    Values exposing a callable ``read`` go into ``files`` (for upload);
    string values go into ``params``; anything else is silently dropped.
    """
    params = {}
    files = {}
    for key, value in params_.items():
        if callable(getattr(value, 'read', None)):
            files[key] = value
        elif isinstance(value, string_types):
            params[key] = value
    return params, files
class FacebookClientError(Exception):
    """Base client error; the message is prefixed with the error type
    when one is supplied, and ``type``/``code`` are kept as attributes."""

    def __init__(self, message, error_type=None, error_code=None):
        self.type = error_type
        self.code = error_code
        if error_type is None:
            self.message = message
        else:
            self.message = '%s: %s' % (error_type, message)
        super(FacebookClientError, self).__init__(self.message)
class FacebookAuthError(FacebookClientError):
    """Raised when authentication with Facebook fails."""
class FacebookAPIError(FacebookClientError):
    """Raised for errors reported by the legacy Facebook API."""
class GraphAPIError(FacebookClientError):
    """Raised for errors reported by the Graph API."""
class FacebookAPI(object):
    """Helper for Facebook's OAuth 2.0 web login flow.

    Builds the login-dialog URL and exchanges the returned ``code`` for
    an access token; Graph API calls themselves are made with
    :class:`GraphAPI`.
    """
    def __init__(self, client_id=None, client_secret=None, redirect_uri=None,
                 headers=None):
        # App credentials and the redirect URI registered with Facebook.
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri

        # Use caller-supplied headers if given, otherwise a default User-agent.
        self.headers = headers or {'User-agent': 'Requests-Facebook %s' % __version__}

    def get_auth_url(self, display='popup', scope=None):
        """Return the URL of the Facebook login dialog.

        ``display`` selects the dialog style (e.g. 'popup'); ``scope``
        is a list of permission names, joined with commas in the query.
        """
        scope = scope or []

        url = 'https://www.facebook.com/dialog/oauth'
        qs = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'redirect_uri': self.redirect_uri,
            'display': display,
            'scope': ','.join(scope)
        }

        return '%s?%s' % (url, urlencode(qs))

    def get_access_token(self, code):
        """Exchange the OAuth ``code`` for an access token.

        Returns the parsed token response.  Raises FacebookAuthError when
        Facebook reports an error, FacebookClientError on unparseable or
        unexpected responses.
        """
        url = 'https://graph.facebook.com/oauth/access_token'

        qs = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'redirect_uri': self.redirect_uri,
            'code': code
        }

        response = requests.get(url, params=qs, headers=self.headers)
        status_code = response.status_code
        content = response.content

        if status_code != 200:
            # Error bodies are JSON; surface Facebook's own message if present.
            try:
                content = response.json()
            except ValueError:
                raise FacebookClientError('Unable to parse response, invalid JSON.')

            if content.get('error') is not None:
                error = content['error']
                error_type = error.get('type', '')
                error_message = error.get('message', '')
                error_code = error.get('code')
                raise FacebookAuthError(error_message, error_type=error_type, error_code=error_code)
            else:
                raise FacebookClientError('An unknown error occurred.')

        # Success path: newer API versions return JSON; older ones return a
        # query-string body (access_token=...&expires=...), handled by the
        # parse_qsl fallback.
        try:
            data = response.json()
        except ValueError:
            data = dict(parse_qsl(content))
        except AttributeError:
            raise FacebookAuthError('Unable to obtain access token.')

        return data

    def __repr__(self):
        return u'<FacebookAPI: %s>' % self.client_id
class GraphAPI(object):
    """Minimal client for Facebook's Graph API.

    Wraps GET/POST/DELETE requests against
    ``https://graph.facebook.com/<api_version>/`` using a previously
    obtained ``access_token``.
    """
    def __init__(self, access_token=None, headers=None, api_version='v2.6'):
        self.api_url = 'https://graph.facebook.com/'
        self.api_version = api_version
        if self.api_version:
            self.api_url = '%s%s/' % (self.api_url, self.api_version)

        self.access_token = access_token

        # Use caller-supplied headers if given, otherwise a default User-agent.
        self.headers = headers or {'User-agent': 'Requests-Facebook %s' % __version__}

    def get(self, endpoint, params=None):
        """Issue a GET request against ``endpoint``."""
        return self.request(endpoint, params=params)

    def post(self, endpoint, params=None, files=None):
        """Issue a POST request against ``endpoint``.

        ``files`` (mapping of field name -> file-like object) was
        previously accepted but silently dropped; it is now merged into
        ``params`` so request() can split the file-like values back out
        for upload.
        """
        if files:
            params = dict(params or {})
            params.update(files)
        return self.request(endpoint, method='POST', params=params)

    def delete(self, endpoint, params=None):
        """Issue a DELETE request against ``endpoint``."""
        return self.request(endpoint, method='DELETE', params=params)

    def request(self, endpoint, method='GET', params=None):
        """Perform an authenticated request and return the decoded JSON.

        Raises FacebookClientError on transport/parse failures and
        GraphAPIError when the API response contains an error object.
        """
        params = params or {}

        url = self.api_url + endpoint + '?access_token=' + self.access_token

        method = method.lower()
        if method not in ('get', 'post', 'delete'):
            raise FacebookClientError('Method must be of GET, POST or DELETE')

        # File-like values are uploaded via multipart; strings go in the body.
        params, files = _split_params_and_files(params)

        func = getattr(requests, method)
        try:
            if method == 'get':
                response = func(url, params=params, headers=self.headers)
            else:
                response = func(url,
                                data=params,
                                files=files,
                                headers=self.headers)
        except requests.exceptions.RequestException:
            raise FacebookClientError('An unknown error occurred.')

        try:
            content = response.json()
        except ValueError:
            raise FacebookClientError('Unable to parse response, invalid JSON.')

        if response.status_code != 200:
            if content.get('error') is not None:
                error = content['error']
                error_type = error.get('type', '')
                error_message = error.get('message', '')
                error_code = error.get('code')
                raise GraphAPIError(error_message, error_type=error_type, error_code=error_code)

        return content

    def __repr__(self):
        return u'<GraphAPI: %s>' % self.access_token
| {
"repo_name": "michaelhelmick/requests-facebook",
"path": "facebook.py",
"copies": "1",
"size": "6057",
"license": "bsd-2-clause",
"hash": -6754626745171765000,
"line_mean": 30.2216494845,
"line_max": 100,
"alpha_frac": 0.5745418524,
"autogenerated": false,
"ratio": 4.134470989761092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008700927465889109,
"num_lines": 194
} |
__all__ = ('FboFloatLayout', )
from kivy.graphics import Color, Rectangle, Canvas, ClearBuffers, ClearColor
from kivy.graphics.fbo import Fbo
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty, NumericProperty
class FboFloatLayout(FloatLayout):
    """FloatLayout that renders its children into an offscreen FBO.

    The FBO texture is drawn back to the screen through a single
    Rectangle, so the whole subtree can be faded at once via ``alpha``.
    """

    # Texture of the offscreen buffer; re-published whenever the FBO is
    # recreated on resize.
    texture = ObjectProperty(None, allownone=True)

    # Opacity applied to the rectangle that displays the FBO texture.
    alpha = NumericProperty(1)

    def __init__(self, **kwargs):
        # Build the display canvas: FBO first, then the tinted rectangle
        # that shows its texture.
        self.canvas = Canvas()
        with self.canvas:
            self.fbo = Fbo(size=self.size)
            self.fbo_color = Color(1, 1, 1, 1)
            self.fbo_rect = Rectangle()
            # Clear the FBO at the start of every frame it is rendered.
            with self.fbo:
                ClearColor(0,0,0,0)
                ClearBuffers()

        # wait that all the instructions are in the canvas to set texture
        self.texture = self.fbo.texture
        super(FboFloatLayout, self).__init__(**kwargs)

    def add_widget(self, *largs):
        # trick to attach graphics instructions to the fbo instead of canvas
        canvas = self.canvas
        self.canvas = self.fbo
        ret = super(FboFloatLayout, self).add_widget(*largs)
        self.canvas = canvas
        return ret

    def remove_widget(self, *largs):
        # Same canvas swap as add_widget so removal targets the FBO.
        canvas = self.canvas
        self.canvas = self.fbo
        super(FboFloatLayout, self).remove_widget(*largs)
        self.canvas = canvas

    def on_size(self, instance, value):
        # Resizing the FBO recreates its texture, so re-publish it.
        self.fbo.size = value
        self.texture = self.fbo.texture
        self.fbo_rect.size = value

    def on_pos(self, instance, value):
        self.fbo_rect.pos = value

    def on_texture(self, instance, value):
        self.fbo_rect.texture = value

    def on_alpha(self, instance, value):
        # Fade the displayed texture by tinting the rectangle's color.
        self.fbo_color.rgba = (1, 1, 1, value)
from kivy.app import App
from kivy.core.window import Window
from kivy.animation import Animation
from kivy.factory import Factory as F
class ScreenLayerApp(App):
    """Demo app: a button inside an FBO-backed layout, animated on press."""

    def build(self):
        f = FboFloatLayout()
        b = F.Button(text="FBO", size_hint=(None, None))
        f.add_widget(b)

        def anim_btn(*args):
            # Slide the button to the right edge of the layout.
            Animation(x=f.width-b.width).start(b)
        b.bind(on_press=anim_btn)

        #before this or calback instruction was only way...
        #so no way to avoid going into python instead of stayingin c
        #def clear_fb(*args):
        #    f.fbo.bind()
        #    f.fbo.clear_buffer()
        #    f.fbo.release()
        #Window.bind(on_draw=clear_fb)

        return f
if __name__ == "__main__":
ScreenLayerApp().run()
| {
"repo_name": "JohnHowland/kivy",
"path": "examples/canvas/clearbuffers.py",
"copies": "20",
"size": "2471",
"license": "mit",
"hash": 3496770374543944000,
"line_mean": 27.0795454545,
"line_max": 76,
"alpha_frac": 0.616349656,
"autogenerated": false,
"ratio": 3.5656565656565657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0046660549903541895,
"num_lines": 88
} |
# Trailing comma makes this a one-element tuple; the previous
# ``('FeedFilter')`` was just the bare string 'FeedFilter'.
__all__ = ('FeedFilter',)
import re
import csv
import socket
import sqlite3
import sys
from bulkwhois.cymru import BulkWhoisCymru
from async_dns import AsyncResolver
from publicsuffix import PublicSuffixList
#import tldextract
import httplib2
import tempfile
import logging
import time
class NetObjectRepo:
    """In-memory SQLite repository of IPs, domains and their links.

    IP rows may carry an ASN and country code; domain rows carry a
    country code (from their TLD).  ``domain_ips`` links each domain to
    the IPs it resolves to, so domain filtering can use IP metadata.
    All ``raise X, msg`` statements were converted to the parenthesised
    form, which is valid on both Python 2 and 3.
    """

    # Placeholder; every instance creates its own connection in __init__.
    db = None

    def __init__(self):
        self.db = sqlite3.connect(":memory:")
        # Row objects allow dict-style access, e.g. row["ip"].
        self.db.row_factory = sqlite3.Row
        # normally, we should store IPs as ints, but let's try to get away
        # with it here
        self.db.execute("CREATE TABLE ips (id integer primary key, ip text unique, \
            asn int, cc varchar)")
        self.db.execute("CREATE TABLE domains (id integer primary key, \
            domain varchar unique, cc varchar)")
        self.db.execute("CREATE TABLE domain_ips (ip_id integer, \
            domain_id integer)")

    def add(self, datatype="", data=""):
        """Store *data* as an IP or a domain, depending on *datatype*."""
        if datatype == "ip":
            self.add_ip(data)
        elif datatype == "domain":
            self.add_domain(data)
        else:
            raise TypeError("datatype must be of 'ip' or 'domain'")

    def belongs_to(self, datatype="", data="", asn_filters=None, cc_filters=None):
        """True if the stored object matches any ASN or country filter."""
        if not data:
            raise TypeError("Data cannot be empty")
        if datatype == "ip":
            return self.ip_belongs_to(data, asn_filters, cc_filters)
        elif datatype == "domain":
            return self.domain_belongs_to(data, asn_filters, cc_filters)
        else:
            raise TypeError("datatype must be 'ip' or 'domain'")

    def ip_belongs_to(self, ip, asn_filters, cc_filters):
        """True if *ip* is stored with an ASN or CC in the filter lists.

        NOTE: callers must supply at least one non-empty filter list;
        otherwise the generated SQL contains an empty parenthesis group.
        """
        query = "SELECT id FROM ips WHERE ip = ? AND ("
        params = [ip]
        if isinstance(asn_filters, list) and asn_filters:
            query = query + " asn in (" + ', '.join('?' for asn_filter in asn_filters) + ")"
            params.extend(asn_filters)
        if isinstance(cc_filters, list) and cc_filters:
            if len(params) > 1:  # has ip + 1 or more asns
                query = query + " OR "
            query = query + " cc in (" + ', '.join('?' for cc_filter in cc_filters) + ")"
            params.extend(cc_filters)
        query = query + ")"
        rows = list(self.db.execute(query, params))
        return len(rows) >= 1

    def domain_belongs_to(self, domain, asn_filters, cc_filters):
        """True if any IP linked to *domain* matches the filters, or the
        domain's own TLD country code is in *cc_filters*."""
        query = "SELECT d.id FROM domains d, ips i, domain_ips di WHERE d.domain = ? AND "
        params = [domain]
        query = query + " d.id = di.domain_id AND i.id = di.ip_id AND"
        query = query + " ("
        if isinstance(asn_filters, list) and asn_filters:
            query = query + " i.asn in (" + ', '.join('?' for asn_filter in asn_filters) + ")"
            params.extend(asn_filters)
        if isinstance(cc_filters, list) and cc_filters:
            if len(params) > 1:  # has ip + 1 or more asns
                query = query + " OR "
            query = query + " i.cc in (" + ', '.join('?' for cc_filter in cc_filters) + ")"
            params.extend(cc_filters)
        query = query + ")"
        rows = list(self.db.execute(query, params))
        return len(rows) >= 1 or self.get_domain_tld(domain) in cc_filters

    def get_ip_data(self):
        """Yield every stored IP row (sqlite3.Row)."""
        for row in self.db.execute("SELECT * FROM ips"):
            yield(row)

    def get_ip_count(self):
        """Return the number of stored IPs."""
        return self.db.execute("SELECT count(id) as ipcount from ips").fetchone()["ipcount"]

    def add_ip(self, ip):
        """Insert *ip* if new and return its row id."""
        ip_query = "SELECT id from ips WHERE ip = ?"
        if not list(self.db.execute(ip_query, [ip])):
            self.db.execute("INSERT INTO ips (ip) VALUES (?)",
                            [ip])
        return self.db.execute(ip_query, [ip]).fetchone()["id"]

    def add_ip_asn_cc(self, ip, asn, cc):
        """Set the ASN and (upper-cased) country code for *ip*."""
        self.add_ip(ip)
        self.db.execute("UPDATE ips SET asn=?, cc=? WHERE ip=?", [asn, cc.upper(), ip])

    def get_domain_data(self):
        """Yield every stored domain row (sqlite3.Row)."""
        for row in self.db.execute("SELECT * FROM domains"):
            yield(row)

    def get_domain_count(self):
        """Return the number of stored domains."""
        return self.db.execute("SELECT count(id) as domcount from domains").fetchone()["domcount"]

    def get_domain_tld(self, domain):
        """Return the stored country code for *domain*, or None if absent."""
        row = self.db.execute("SELECT * from domains WHERE domain = ?", [domain]).fetchone()
        return row and row["cc"]

    def add_domain(self, domain, cc=""):
        """Insert *domain* (with optional country code) and return its id."""
        domain_query = "SELECT id from domains WHERE domain = ?"
        if not list(self.db.execute(domain_query, [domain])):
            self.db.execute("INSERT INTO domains (domain, cc) VALUES (?, ?)",
                            [domain, cc.upper()])
        return self.db.execute(domain_query, [domain]).fetchone()["id"]

    def add_domain_cc(self, domain, cc):
        """Set the (upper-cased) country code for *domain*."""
        self.add_domain(domain)
        self.db.execute("UPDATE domains SET cc=? WHERE domain=?", [cc.upper(), domain])

    def add_domain_ips(self, domain, ips):
        """Link *domain* to each address in *ips*, inserting as needed."""
        for ip in ips:
            ip_id = self.add_ip(ip)
            domain_id = self.add_domain(domain)
            self.db.execute("INSERT INTO domain_ips (domain_id, ip_id) VALUES (?, ?)",
                            [domain_id, ip_id])

    def dump(self):
        """Log the whole database as SQL statements (debug aid)."""
        for line in self.db.iterdump():
            logging.debug(line)
class FeedFilter:
"""
Feedfilter takes in the arguments from the command line,
processes them, and passes them out to an appropriate filter.
"""
def __init__(self, **kwargs):
""" args - passed in by optparse """
self.cc_filters = []
self.asn_filters = []
self.format = None
self.has_header = False
self.infile = None
self.outfile = None
self.verbose = False
self.quiet = False
self.matchers = {}
self.repo = NetObjectRepo()
# regexs are intentionally broad - we'll run more tests later.
self.matchers["ip"] = {
"rex": "(?:\d+\.){3}\d+",
"chk_func": self._is_valid_ip,
"type": "ip",
}
self.matchers["hostname"] = {
"rex": "([a-zA-Z0-9\-\.]+\.[0-9a-zA-Z\-\.]+)(?:\d+)?",
"chk_func": self._is_valid_domain,
"type": "domain",
}
logging.info("Getting Public Suffix List")
self.psl = PublicSuffixList(self._get_psl_file())
logging.info("Got Public Suffix List")
self.parse_args(**kwargs)
    def _get_psl_file(self):
        """ returns Public Suffix List as a list of lines in the PSL """
        url = 'http://mxr.mozilla.org/mozilla-central/source/netwerk/dns/effective_tld_names.dat?raw=1'
        # Ask for (and let httplib2 cache, in the temp dir) a copy no older
        # than one week.
        headers = {'cache-control': 'max-age=%d' % (60*60*24*7)}
        http = httplib2.Http(tempfile.gettempdir())
        response, content = http.request(url, headers=headers)
        # NOTE(review): on Python 3 `content` would be bytes, so this split
        # assumes a Python 2 str body - confirm before porting.
        return content.split("\n")
def parse_args(self, infile=sys.stdin, outfile=sys.stdout, verbose=False,
verboser=False, quiet=False, has_header=False,
format=None, filter=None):
def create_stdin_temp_file():
f = tempfile.NamedTemporaryFile()
for line in sys.stdin.read():
f.write(line)
# TODO: according to docs, a second open won't work on Win
return open(f.name, "r")
self.outfile = outfile
self.verbose = verbose
self.quiet = quiet
self.has_header = has_header
level = logging.WARN
# quiet overrides everything else
if verbose:
level = logging.INFO
if verboser:
level = logging.DEBUG
if quiet:
level = logging.ERROR
logging.basicConfig(level=level, format="%(message)s")
if not infile or infile.name == "<stdin>":
self.infile = create_stdin_temp_file()
else:
self.infile = infile
for filt in filter.split(','):
for m in re.findall("^(?:AS)?(\d+)$", filt):
self.asn_filters.append(m.upper())
for m in re.findall("^[A-Za-z]+$", filt):
self.cc_filters.append(m.upper())
if len(self.asn_filters) == 0 and len(self.cc_filters) == 0:
#raise ValueError, "You need to specify at least one valid TLD or ASN filter. e.g. AS254,JP,AU"
sys.exit("You need to specify --filter with at least one valid TLD or ASN filter. e.g. AS254,JP,AU")
logging.info("Using filters: ")
if self.asn_filters:
logging.info(" ASN: %s" % (", ".join(self.asn_filters)))
if self.cc_filters:
logging.info(" Country codes: %s" % (", ".join(self.cc_filters)))
def domains_to_ips(self):
ar = AsyncResolver([domain_data["domain"] for domain_data in self.repo.get_domain_data()])
resolved = ar.resolve()
for host, ips in resolved.items():
if ips is None:
logging.debug("%s could not be resolved." % host)
else:
self.repo.add_domain_ips(host, ips)
def extract_matches(self):
self.infile.seek(0)
for linenum, line in enumerate(self.infile.readlines()):
# no need to parse a header line
if self.has_header and linenum == 0:
pass
for (match_type, match) in self.get_line_matches(line, linenum):
#self.repo.add(match_type, match)
yield(match_type, match)
def extract_and_store_matches(self):
for match_type, match in self.extract_matches():
self.repo.add(match_type, match)
def get_filtered_lines(self):
self.infile.seek(0)
for linenum, line in enumerate(self.infile.readlines()):
if self.has_header and linenum == 0:
yield(line)
else:
for match_type, match in self.get_line_matches(line, linenum):
if self.repo.belongs_to(datatype=match_type, data=match, asn_filters=self.asn_filters, cc_filters=self.cc_filters):
yield(line)
logging.debug("'%s' matches filter %s", match, match_type)
break
def output_matches(self):
for line in self.get_filtered_lines():
self.outfile.write(line)
    def get_line_matches(self, line, line_num, fetch_only_one=False):
        """Yield ``(type, value)`` tuples for every matcher that hits ``line``.

        Each matcher in ``self.matchers`` may define a regex (``rex``),
        a check function (``chk_func``), or both. With both, regex hits
        are additionally validated by the check function. If
        ``fetch_only_one`` is true, matching stops after the first
        matcher that produced at least one hit.
        """
        try:
            match = False
            for m_key, m_dict in self.matchers.items():
                if "chk_func" in m_dict and "rex" in m_dict:
                    for m in re.findall(m_dict["rex"], line):
                        if m_dict["chk_func"](m):
                            match = True
                            logging.debug("matched '%s' as %s" % (m, m_key))
                            yield((m_dict["type"], m))
                    # stop scanning further matchers once one has matched
                    if match and fetch_only_one:
                        break
                elif "chk_func" in m_dict and m_dict["chk_func"](line):
                    match = True
                    yield((m_dict["type"], line))
                elif "rex" in m_dict:
                    for m in re.findall(m_dict["rex"], line):
                        match = True
                        yield((m_dict["type"], m))
                    if match and fetch_only_one:
                        break
        except csv.Error:
            logging.warn("Error parsing line %d, skipping" % line_num)
    def _is_valid_domain(self, domain):
        """Check whether ``domain`` looks like a resolvable domain name.

        Returns None (falsy) for empty strings and for literal IP
        addresses (no need to resolve those), otherwise a bool telling
        whether the public-suffix lookup produced a dotted suffix.
        NOTE(review): callers must treat the result as truthy/falsy only,
        since it mixes None and bool.
        """
        if not str(domain):
            return None
        # don't want / need to resolve IPs
        elif self._is_valid_ip(domain):
            return None
        else:
            # using this PSL, known TLDs return at least one .
            return self.get_tld(domain).find(".") >= 0
def get_tld(self, domain):
suffix = self.psl.get_public_suffix(domain)
logging.debug("Domain fetched: %s", suffix)
return suffix
def _is_valid_ip(self, ip):
for family in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(family, ip)
except Exception:
pass
else:
return True
return False
    def add_asn_cc_info(self):
        """Annotate every stored IP with its ASN and country code.

        Performs a single bulk Team Cymru whois lookup for all IPs in
        the repo, then writes the ASN/CC pair back for each IP that the
        lookup returned.
        """
        def asn_lookup():
            # one bulk query instead of per-IP lookups
            bw = BulkWhoisCymru()
            ip_list = []
            for ip_data in self.repo.get_ip_data():
                ip_list.append(str(ip_data["ip"]))
            return bw.lookup_ips(ip_list)
        asn_info = asn_lookup()
        for ip_data in self.repo.get_ip_data():
            if ip_data["ip"] in asn_info:
                ip = ip_data["ip"]
                self.repo.add_ip_asn_cc(ip, asn=asn_info[ip]["asn"], cc=asn_info[ip]["cc"])
def add_domain_ccs(self):
for domain_data in self.repo.get_domain_data():
tld = self.get_tld(domain_data["domain"])
if tld:
self.repo.add_domain_cc(domain_data["domain"], cc=(tld.split(".")[-1]))
    def process_file(self):
        """Run the full pipeline: extract, resolve, annotate, dump.

        Steps: extract matches into the repo, resolve domains to IPs
        (only if any domains were found), bulk-look-up ASNs/CCs for IPs
        (only if any IPs exist), derive domain country codes, then dump
        the repo. Elapsed times are logged at debug level.
        """
        stime = time.time()
        logging.info("Extracting matches")
        self.extract_and_store_matches()
        logging.debug("Got matches " + str(time.time() - stime))
        if self.repo.get_domain_count() > 0:
            logging.info("Resolving " + str(self.repo.get_domain_count()) + " unique domains")
            self.domains_to_ips()
            logging.debug("Resolved IPs " + str(time.time() - stime))
        logging.info("Looking up ASNs")
        if self.repo.get_ip_count() > 0:
            self.add_asn_cc_info()
            logging.debug("Got asns " + str(time.time() - stime))
        logging.info("Getting domain CCs")
        self.add_domain_ccs()
        logging.debug("Added domain ccs " + str(time.time() - stime))
        self.repo.dump()
if __name__ == "__main__":
    feedfilter = FeedFilter({})
    if not feedfilter:
        # Bug fix: the original wrote the bare name ``exit``, which only
        # references the builtin without calling it (a no-op). Exit with
        # a non-zero status instead; ``sys`` is already imported by this
        # module (see sys.exit usage above).
        sys.exit(1)
| {
"repo_name": "csirtfoundry/netgrep",
"path": "netgrep/feedfilter.py",
"copies": "1",
"size": "13908",
"license": "mit",
"hash": -3496848507795460000,
"line_mean": 36.3870967742,
"line_max": 135,
"alpha_frac": 0.5366695427,
"autogenerated": false,
"ratio": 3.8114551932036176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9796069824847013,
"avg_score": 0.010410982211320936,
"num_lines": 372
} |
__all__ = ['fetch_arxiv', 'arxiv_re']
import re
import time
from urllib import urlopen
import xml.etree.ElementTree as ET
_url_base = 'http://export.arxiv.org/api/query?'
_prefix = '{http://www.w3.org/2005/Atom}'
_arxiv_re = re.compile(r'\d{4}\.\d{4,5}|[a-z-]+(?:\.[A-Za-z-]+)?\/\d{7}')
arxiv_re = _arxiv_re
class arxiv_entry:
    """Thin wrapper over an Atom ``entry`` element from the arXiv API.

    If the entry has no title, or its title is 'Error', the entry is
    treated as invalid and all lookups return None.
    """
    def __init__(self, entry):
        self.entry = entry
        title = entry.findtext(_prefix+'title')
        if not title or title == 'Error':
            # invalid entry: poison it so __getitem__ returns None
            self.entry = None
    def __getitem__(self, key):
        """Look up a field; 'authors', 'first_author' and 'key' are
        synthesized, any other key is read as an Atom child element."""
        if self.entry is None:
            return None
        if key == 'authors':
            return [author.findtext(_prefix+'name') \
                    for author in self.entry.iterfind(_prefix+'author')]
        if key == 'first_author':
            return self.entry.find(_prefix+'author').findtext(_prefix+'name')
        if key == 'key':
            # arXiv id extracted from the entry's URL via the module regex
            return _arxiv_re.search(self.entry.findtext(_prefix+'id')).group()
        return self.entry.findtext(_prefix+key)
    def get(self, key, default=None):
        """dict-style get: return ``default`` when the field is missing."""
        ret = self.__getitem__(key)
        return default if ret is None else ret
class fetch_arxiv:
    """Query the arXiv Atom API and expose the result entries.

    NOTE(review): Python 2 code (``urllib.urlopen``, ``iteritems``).
    """
    def __init__(self, **keywords):
        """
        search_query=cat:astro-ph*+AND+au:%s
        max_results=50
        sortBy=submittedDate
        sortOrder=descending
        id_list=[comma-delimited ids]
        """
        url = _url_base + '&'.join([k+'='+str(v) \
                for k, v in keywords.iteritems()])
        # retry up to 5 times with a 2s pause; the for/else raises only
        # when every attempt failed
        for i in range(5):
            try:
                f = urlopen(url)
            except IOError:
                time.sleep(2)
                continue
            else:
                break
        else:
            raise IOError('cannot connect to arXiv')
        self.root = ET.parse(f).getroot()
    def getentries(self):
        """Return all entries as a list of arxiv_entry (py2 map -> list)."""
        return map(arxiv_entry, self.root.iterfind(_prefix+'entry'))
    def iterentries(self):
        """Lazily yield arxiv_entry objects one at a time."""
        for entry in self.root.iterfind(_prefix+'entry'):
            yield arxiv_entry(entry)
| {
"repo_name": "yymao/slackbots",
"path": "fetch_arxiv.py",
"copies": "1",
"size": "2013",
"license": "mit",
"hash": 8636349152416474000,
"line_mean": 29.9692307692,
"line_max": 78,
"alpha_frac": 0.5434674615,
"autogenerated": false,
"ratio": 3.6868131868131866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47302806483131865,
"avg_score": null,
"num_lines": null
} |
__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
from numpy import arange
from numpy.fft.helper import fftshift, ifftshift, fftfreq
def rfftfreq(n, d=1.0):
    """DFT sample frequencies (for usage with rfft, irfft).
    The returned float array contains the frequency bins in
    cycles/unit (with zero at the start) given a window length `n` and a
    sample spacing `d`::
      f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n)   if n is even
      f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n)   if n is odd
    Parameters
    ----------
    n : int
        Window length.
    d : scalar, optional
        Sample spacing. Default is 1.
    Returns
    -------
    out : ndarray
        The array of length `n`, containing the sample frequencies.
    Examples
    --------
    >>> from scipy import fftpack
    >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
    >>> sig_fft = fftpack.rfft(sig)
    >>> n = sig_fft.size
    >>> timestep = 0.1
    >>> freq = fftpack.rfftfreq(n, d=timestep)
    >>> freq
    array([ 0.  ,  1.25,  1.25,  2.5 ,  2.5 ,  3.75,  3.75,  5.  ])
    """
    import operator
    # Generalization: accept any integral type (e.g. numpy integers),
    # not just builtin int, while keeping the original ValueError
    # contract for invalid inputs.
    try:
        n = operator.index(n)
    except TypeError:
        raise ValueError("n = %s is not valid. n must be a nonnegative integer." % n)
    if n < 0:
        raise ValueError("n = %s is not valid. n must be a nonnegative integer." % n)
    # bins 0,1,1,2,2,... scaled by 1/(n*d)
    return (arange(1, n + 1, dtype=int) // 2) / float(n * d)
| {
"repo_name": "teoliphant/scipy",
"path": "scipy/fftpack/helper.py",
"copies": "1",
"size": "1291",
"license": "bsd-3-clause",
"hash": -8170169336249610000,
"line_mean": 29.023255814,
"line_max": 86,
"alpha_frac": 0.5530596437,
"autogenerated": false,
"ratio": 2.888143176733781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39412028204337807,
"avg_score": null,
"num_lines": null
} |
__all__=['Fiddle']
from IPython.display import HTML, display, clear_output
import uuid
from jinja2 import (Template, DebugUndefined)
import lesscpy
from six import StringIO
class Fiddle(object):
    """JSFiddle-style snippet renderer for IPython notebooks.

    Bundles HTML, CSS (with LESS-nesting scoped to this snippet's div),
    and JS (loaded via require.js) into a single HTML fragment that the
    notebook displays through ``_repr_html_``.

    NOTE(review): Python 2 code (uses the ``unicode`` builtin). The
    ``extra_vars=dict()`` default is a single shared dict across calls
    (mutable default) — safe only as long as it is never mutated.
    """
    def __init__(self, html='', css='', div_css='', js='', jslibs=tuple(), csslibs=tuple(),
                 extra_vars=dict()):
        self.html = html
        self.css = css
        self.div_css = div_css
        self.js = js
        self.jslibs = jslibs
        self.csslibs = csslibs
        self.extra_vars = extra_vars
    def _repr_html_(self):
        # IPython rich-display hook
        return self._to_html_()
    def _to_html_(self):
        """Render the snippet to an HTML fragment with a unique div id."""
        # unique id so CSS/JS from multiple Fiddles don't collide
        div_id = u'i' + unicode(uuid.uuid4())
        HTML_ = Template(u"""<div id="{{div_id}}">
        {{html}}
        </div>
        """).render(div_id=div_id, html=self.html)
        # compute nested css
        div_css_wrapped = Template(
            """
            #{{div_id}} { {{div_css}} }
            """).render(div_id=div_id, div_css=self.div_css)
        # lesscpy expands the #div_id { ... } nesting into scoped rules
        div_css_expanded = lesscpy.compile(StringIO(div_css_wrapped), minify=True)
        CSS = Template(u"""<style type="text/css">
        {{css}}
        {{div_css_expanded}}
        </style>
        """).render(css=self.css, div_css_expanded=div_css_expanded)
        # JS
        JS = u"""
        <script type="text/javascript">
        {% if csslibs %}
        // load css if it's not already there: http://stackoverflow.com/a/4724676/7782
        function loadcss(url) {
            if (!$("link[href='" + url + "']").length)
                $('<link href="' + url + '" rel="stylesheet">').appendTo("head");
        }
        {% endif %}
        {% for item in csslibs %}
        loadcss('{{item}}');
        {% endfor %}
        require.config({
            paths: {
                {% for item in jslibs %}
                '{{item.0}}': "{{item.1}}",
                {% endfor %}
            }
        });
        require({{jslibs_names}}, function({{jslibs_objs}}) {
            var element = $('#{{div_id}}');
        """ + self.js + u"""
        });
        </script>
        """
        template = Template(HTML_ + CSS + JS)
        # jslibs entries are (name, url, js_object) triples
        return template.render(div_id=div_id,
                               csslibs = self.csslibs,
                               jslibs = self.jslibs,
                               jslibs_names = "[{}]".format(",".join(['"{}"'.format(jslib[0]) for jslib in self.jslibs])),
                               jslibs_objs = ",".join([jslib[2] for jslib in self.jslibs]),
                               **self.extra_vars
                               )
| {
"repo_name": "rdhyee/nbfiddle",
"path": "nbfiddle/__init__.py",
"copies": "1",
"size": "2463",
"license": "apache-2.0",
"hash": 7677391554610668000,
"line_mean": 24.9263157895,
"line_max": 113,
"alpha_frac": 0.4839626472,
"autogenerated": false,
"ratio": 3.5851528384279474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.941641246438884,
"avg_score": 0.03054060424782132,
"num_lines": 95
} |
__all__ = ['fiducial_deconvolute']
import os
import numpy as np
import numpy.ctypeslib as C
# define types
_double_ctype = C.ndpointer(np.float64, ndim=1, flags='C')
# load the c library
here = os.path.abspath(os.path.dirname(__file__))
_C_LIB = C.load_library('fiducial_deconvolute', here)
_C_LIB.convolved_fit.restype = None
_C_LIB.convolved_fit.argtypes = [_double_ctype, _double_ctype, C.ctypes.c_int, \
_double_ctype, _double_ctype, C.ctypes.c_int, C.ctypes.c_double,\
C.ctypes.c_int, C.ctypes.c_double]
def fiducial_deconvolute(af_key, af_val, smm, mf, scatter, repeat=40, sm_step=0.01):
    """Run the C-implemented convolved fit, deconvoluting ``smm`` in place.

    Parameters
    ----------
    af_key, af_val : 1-d float64 arrays
        Abundance-function keys and values.
    smm, mf : 1-d float64 arrays of equal length
        Stellar-mass-like values and mass function; ``smm`` is modified
        in place by the C routine and returned.
    scatter : float
        Scatter applied by the convolution.
    repeat : int, optional
        Number of iterations (default 40).
    sm_step : float, optional
        Step size; its absolute value is used (default 0.01).

    Raises
    ------
    ValueError
        If ``smm`` and ``mf`` differ in length.
    """
    if len(smm) != len(mf):
        # Bug fix: error message previously referred to a nonexistent
        # parameter name `smf`; the actual parameter is `smm`.
        raise ValueError('`smm` and `mf` must have the same size!')
    sm_step = np.fabs(float(sm_step))
    sm_min = min(af_key.min(), smm.min())
    if sm_min <= 0:
        # shift values positive for the C routine, undone afterwards
        offset = sm_step-sm_min
        af_key += offset
        smm += offset
    _C_LIB.convolved_fit(af_key, af_val, len(af_key), smm, mf, len(mf), float(scatter), \
            int(repeat), sm_step)
    if sm_min <= 0:
        smm -= offset
        af_key -= offset
    return smm
| {
"repo_name": "manodeep/yymao-abundancematching",
"path": "AbundanceMatching/fiducial_deconv_wrapper.py",
"copies": "1",
"size": "1091",
"license": "mit",
"hash": 3568967562265203000,
"line_mean": 33.09375,
"line_max": 89,
"alpha_frac": 0.6214482126,
"autogenerated": false,
"ratio": 2.748110831234257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8689237299095903,
"avg_score": 0.03606434894767088,
"num_lines": 32
} |
'''All fields described
'''
import lighty.validators
from .functor import BaseField, NumericField, SequenceField
def add_validator(validator, options):
    '''Append ``validator`` to the 'validators' entry of ``options``.

    Creates a one-element tuple when the key is absent, appends in place
    when the existing value is a list, and concatenates when it is a
    tuple. Returns the (mutated) options dict. Written to keep some
    field-type constructors shorter.
    '''
    if 'validators' not in options:
        options['validators'] = (validator,)
    elif isinstance(options['validators'], list):
        options['validators'].append(validator)
    else:
        options['validators'] = options['validators'] + (validator,)
    return options
class Field(BaseField):
    '''Base field class. Declares basic methods an fields for all the field
    classes and fields
    Args:
        verbose_name: human readable field name
        primary_key: is field would be a primary key
        db_index: is field a part of database index
        unique: add checking for unique value
        blank: is field can be empty on model saving
        null: it field can be None on model saving
        choices: list of values available for this field
        default: default value
        editable: check is value can be changed
        validators: list of validators used to check field value before
                    store it into database
        help_text: field description
        db_column: name of the field inside database
        db_tablespace: can be used to set additional datastorage parameter
    '''
    __slots__ = ('name', 'model', 'verbose_name', 'primary_key', 'db_index',
                 'unique', 'blank', 'null', 'choices', 'default', 'editable',
                 'validators', 'help_text', 'db_column', 'db_tablespace', )
    def __init__(self, verbose_name=None, primary_key=False, db_index=False,
                 unique=False, blank=False, null=False, choices=None,
                 default=None, editable=True, validators=(),
                 help_text="", db_column=None, db_tablespace=False):
        '''Create new Field instance
        '''
        self.null = null
        self.blank = blank
        self.choices = choices
        self.default = default
        self.editable = editable
        self.unique = unique
        self.db_column = db_column
        self.db_tablespace = db_tablespace
        self.db_index = db_index
        self.primary_key = primary_key
        self.help_text = help_text
        self.verbose_name = verbose_name
        # Add additional validator for choices if needed
        if choices is not None:
            validators += (lighty.validators.ChoicesValidator(choices),)
        # reject None unless null, or blank with a default to fall back on
        if not self.null and not (self.blank and self.default is not None):
            validators += (lighty.validators.NotNoneValidator(), )
        self.validators = validators
    def configure(self, model_name, field_name):
        """Configure field with model-depended paramenters
        Args:
            mmodel: model class
            model_name: model class name
            field_name: name of this field
        """
        self.name = field_name
        self.model = model_name
    def get_value_for_datastore(self, model):
        """Get value prepared for saving in datastore
        Args:
            model: model instance to take value from
        Raises:
            ValueError: when the value is None and the field is not
            nullable (and no blank+default fallback applies)
        """
        value = getattr(model, self.name)
        if value is None and not self.null:
            # blank fields with a default silently fall back to it
            if self.blank and self.default is not None:
                return self.default
            raise ValueError('%s does not accept None value' % str(self))
        return value
    def make_value_from_datastore(self, value):
        """Create object from value was taken from datastore
        """
        return value
    def __str__(self, model=None):
        '''Get string representation ("Model.field")
        '''
        return self.model + '.' + self.name
class FieldDescriptor(Field):
    '''Field that will be used as a descriptor - on creation creates a field
    '_field_name' to store the name of the attribute of the model class
    instance used to store the field value.
    '''
    def configure(self, model_name, field_name):
        '''Set default value
        '''
        super(FieldDescriptor, self).configure(model_name, field_name)
        # per-field backing attribute name on model instances
        self._field_name = '_field_%s_value' % field_name
    def __get__(self, instance, owner):
        '''Get field value from instance
        '''
        if instance is None:
            # accessed on the class itself: return the descriptor
            return self
        return getattr(instance, self._field_name)
    def __set__(self, instance, value):
        '''Store field value in instance
        '''
        setattr(instance, self._field_name, value)
class IntegerField(FieldDescriptor, NumericField):
    '''An integer. The admin represents this as an <input type="text"> (a
    single-line input).
    '''
    def __init__(self, **options):
        '''Create new field. Adds validator that checks is value passed integer
        '''
        options = add_validator(lighty.validators.INTEGER_VALIDATOR, options)
        super(IntegerField, self).__init__(**options)
    def __set__(self, instance, value):
        '''Set value as int
        NOTE(review): int(value) raises TypeError for None — verify that
        nullable integer fields never reach this setter with None.
        '''
        super(IntegerField, self).__set__(instance, int(value))
class PositiveIntegerField(IntegerField):
    '''Like an IntegerField, but must be positive.
    '''
    def __init__(self, **options):
        '''Create new field. Adds validator that checks value to be greater
        than 0 into the validator's list
        '''
        # MinValueValidator(1) enforces value >= 1, i.e. strictly positive
        options = add_validator(lighty.validators.MinValueValidator(1),
                                options)
        super(PositiveIntegerField, self).__init__(**options)
class AutoField(IntegerField):
    '''Auto-incrementing IntegerField used for primary keys.

    You usually do not declare one directly: when a model does not
    specify a primary key, one of these is added for it automatically
    (see "Automatic primary key fields").
    '''
class FloatField(FieldDescriptor, NumericField):
    '''A floating-point number represented in Python by a float instance.
    The admin represents this as an <input type="text"> (a single-line input).
    '''
    def __init__(self, **options):
        '''Create new field. Adds validator that checks is value passed float
        '''
        options = add_validator(lighty.validators.FLOAT_VALIDATOR, options)
        super(FloatField, self).__init__(**options)
    def __set__(self, instance, value):
        '''Coerce value to float before storing
        '''
        super(FloatField, self).__set__(instance, float(value))
class DecimalField(Field, NumericField):
    '''A fixed-precision decimal number, represented in Python by a Decimal
    instance.
    The admin represents this as an <input type="text"> (a single-line input).
    '''
    def __init__(self, max_digits=None, decimal_places=None, **options):
        '''Create new fixed-precision decimal number
        Args:
            max_digits: The maximum number of digits allowed in the number
            decimal_places: The number of decimal places to store with the
                            number
        NOTE(review): max_digits/decimal_places are stored but not
        validated here — confirm enforcement happens elsewhere.
        '''
        self.max_digits = max_digits
        self.decimal_places = decimal_places
        super(DecimalField, self).__init__(**options)
class BooleanField(FieldDescriptor):
    '''A true/false field.
    The admin represents this as a checkbox.
    '''
    def __set__(self, instance, value):
        '''Normalize value to bool before storing it.

        Strings are true only for 'True'/'true'/'TRUE'; any other
        non-bool value goes through bool().
        '''
        from ..utils import string_types
        if isinstance(value, string_types):
            value = value in ('True', 'true', 'TRUE')
        elif not isinstance(value, bool):
            value = bool(value)
        super(BooleanField, self).__set__(instance, value)
class NullBooleanField(BooleanField):
    '''Like a BooleanField, but allows NULL as one of the options. Use this
    instead of a BooleanField with null=True. The admin represents this as a
    <select> box with "Unknown", "Yes" and "No" choices.
    '''
    def __init__(self, **options):
        '''Create new BooleanField field with null=True
        '''
        options['null'] = True
        super(NullBooleanField, self).__init__(**options)
    def __set__(self, instance, value):
        '''Convert value to boolean, preserving None
        '''
        from ..utils import string_types
        if value is None:
            value = None
        elif isinstance(value, string_types):
            value = value == 'True' or value == 'true' or value == 'TRUE'
        elif not isinstance(value, bool):
            value = bool(value)
        # Deliberately super(BooleanField, ...): skips BooleanField.__set__
        # (which would coerce None via bool()) and stores the value as-is
        # through FieldDescriptor.
        super(BooleanField, self).__set__(instance, value)
class CharField(Field, SequenceField):
    '''A string field, for small- to large-sized strings.
    For large amounts of text, use TextField.
    The admin represents this as an <input type="text"> (a single-line input).
    '''
    def __init__(self, max_length=None, **options):
        '''Create new CharField with one extra required argument:
        Args:
            max_length: The maximum length (in characters) of the field;
                        defaults to 255 when not given.
        The max_length is enforced at the database level and in validation.
        '''
        # Process max_length option
        if max_length is None:
            max_length = 255
        self.max_length = max_length
        # Add MaxLengthValidator
        options = add_validator(
                lighty.validators.MaxLengthValidator(max_length), options)
        # Then create usual field
        super(CharField, self).__init__(**options)
class EmailField(CharField):
    '''A CharField that checks that the value is a valid e-mail address.
    '''
    def __init__(self, max_length=75, **options):
        '''Create CharField that checks that the value is a valid e-mail
        address. max_length defaults to 75.
        '''
        options = add_validator(lighty.validators.validate_email, options)
        super(EmailField, self).__init__(max_length, **options)
class URLField(CharField):
    '''A CharField for a URL.
    The admin represents this as an <input type="text"> (a single-line input).
    '''
    def __init__(self, verify_exists=True, max_length=200, **options):
        '''Create new CharField to store URL
        Args:
            verify_exists: If True (the default), the URL given will be
                checked for existence (i.e., the URL actually loads and doesn't
                give a 404 response).
            Note that when you're using the single-threaded development server,
            validating a URL being served by the same server will hang. This
            should not be a problem for multithreaded servers.
            max_length: Like all CharField subclasses, URLField takes the
                optional max_length, a default of 200 is used.
        '''
        options = add_validator(
                lighty.validators.URLValidator(verify_exists=verify_exists),
                options)
        super(URLField, self).__init__(max_length, **options)
class IPAddressField(Field):
    '''A field holding an IP address in string format (e.g. "192.0.2.30").
    The admin represents this as an <input type="text"> (a single-line input).
    '''
    def __init__(self, **options):
        '''Create a Field whose values are validated as IPv4 addresses.
        '''
        validated_options = add_validator(
                lighty.validators.validate_ipv4_address, options)
        super(IPAddressField, self).__init__(**validated_options)
class SlugField(CharField):
    '''Slug is a newspaper term. A slug is a short label for something,
    containing only letters, numbers, underscores or hyphens. They're generally
    used in URLs.
    Implies setting Field.db_index to True.
    It is often useful to automatically prepopulate a SlugField based on the
    value of some other value. You can do this automatically in the admin using
    prepopulated_fields.
    NOTE(review): the docstring says db_index is implied, but the code
    below forces unique=True instead — confirm which is intended.
    '''
    def __init__(self, max_length=50, **options):
        '''Create new CharField field with additional validator for slug
        Like a CharField, you can specify max_length (read the note about
        database portability and max_length in that section, too). If
        max_length is not specified, SlugField will use a default length of 50.
        '''
        # slugs are forced unique regardless of caller-supplied options
        options['unique'] = True
        options = add_validator(lighty.validators.validate_slug, options)
        super(SlugField, self).__init__(max_length, **options)
class DateField(Field, NumericField):
    '''A date, represented in Python by a datetime.date instance.
    '''
    def __init__(self, auto_now=False, auto_now_add=False, **options):
        '''Create new DateField. Has a few extra, optional arguments
        Args:
            auto_now: Automatically set the field to now every time the object
                is saved. Useful for "last-modified" timestamps. Note that the
                current date is always used; it's not just a default value that
                you can override.
            auto_now_add: Automatically set the field to now when the object is
                first created. Useful for creation of timestamps. Note that the
                current date is always used; it's not just a default value that
                you can override.
        '''
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add
        super(DateField, self).__init__(**options)
    def get_value_for_datastore(self, model):
        '''Return the date widened to a datetime for storage (or None).
        '''
        import datetime
        if self.auto_now or (self.auto_now_add and not model.is_saved()):
            setattr(model, self.name, datetime.date.today())
        value = super(DateField, self).get_value_for_datastore(model)
        # Bug fix: timetuple()[:-2] kept 7 elements, passing the weekday
        # as datetime's microsecond argument; [:6] keeps exactly
        # (year, month, day, hour, minute, second).
        return datetime.datetime(*value.timetuple()[:6]) if value else None
    def make_value_from_datastore(self, value):
        '''Narrow the stored datetime back to a date.
        '''
        return value.date() if value else value
class DateTimeField(DateField, NumericField):
    '''A date and time, represented in Python by a datetime.datetime instance.
    Takes the same extra arguments as DateField.
    '''
    def get_value_for_datastore(self, model):
        '''Prepare value to store it into datastore.
        NOTE(review): the original docstring claimed a "%Y-%m-%d %H:%M:%S"
        string is returned, but the code returns Field's raw value
        unchanged — the docstring appears stale.
        '''
        if self.auto_now or (self.auto_now_add and not model.is_saved()):
            from datetime import datetime
            setattr(model, self.name, datetime.now())
        # Field (not DateField) on purpose: no date->datetime widening needed
        return Field.get_value_for_datastore(self, model)
    def make_value_from_datastore(self, value):
        # stored and in-memory representations are identical
        return value
class TimeField(DateField, NumericField):
    '''A time, represented in Python by a datetime.time instance. Accepts the
    same auto-population options as DateField.
    NOTE(review): auto_now actually assigns time.gmtime() (a struct_time),
    not a datetime.time — confirm downstream consumers handle that.
    '''
    def get_value_for_datastore(self, model):
        '''Prepare value to store it into datastore.
        '''
        from time import gmtime
        if self.auto_now or (self.auto_now_add and not model.is_saved()):
            setattr(model, self.name, gmtime())
        # super(DateField, ...) deliberately skips DateField's
        # date->datetime conversion and uses Field's raw value
        return super(DateField, self).get_value_for_datastore(model)
class TextField(Field, SequenceField):
    '''Large free-form text. The admin represents this as a <textarea>
    (a multi-line input).
    '''
#
# TODO:
# write code for FileField, FilePathField and ImageField
# write code for ForeignKey, ManyToManyField, OneToOneField
#
class ForeignKey(Field):
    """A reference to another model, stored in the datastore as that
    model's key.
    """
    def __init__(self, model, **kwargs):
        """Create a reference field.
        Args:
            model: the model class this field points to
            kwargs: standard Field options (null, blank, default, ...)
        """
        # Bug fix: the options dict must be expanded into keyword
        # arguments; the original passed it positionally, where it was
        # silently consumed as Field's verbose_name.
        super(ForeignKey, self).__init__(**kwargs)
        self.foreign = model
    def get_value_for_datastore(self, model):
        """Store the referenced instance's key, not the instance itself.
        """
        return model.__dict__[self.name].key()
    def make_value_from_datastore(self, value):
        """Re-fetch the referenced instance from its stored key.
        """
        return self.foreign.get(value)
| {
"repo_name": "GrAndSE/lighty",
"path": "lighty/db/fields.py",
"copies": "1",
"size": "15911",
"license": "bsd-3-clause",
"hash": 321841072293282940,
"line_mean": 34.2013274336,
"line_max": 79,
"alpha_frac": 0.6185029225,
"autogenerated": false,
"ratio": 4.455614673760851,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00009162964868253053,
"num_lines": 452
} |
# All fields except for BlobField written by Jonas Haag <jonas@lophus.org>
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.fields.related import add_lazy_relation
from django.db.models.fields.subclassing import Creator
from django.db.utils import IntegrityError
from django.utils import six
from django.utils.importlib import import_module
__all__ = ('RawField', 'ListField', 'SetField', 'DictField',
'EmbeddedModelField', 'BlobField')
EMPTY_ITER = ()
class _FakeModel(object):
"""
An object of this class can pass itself off as a model instance
when used as an arguments to Field.pre_save method (item_fields
of iterable fields are not actually fields of any model).
"""
def __init__(self, field, value):
setattr(self, field.attname, value)
class RawField(models.Field):
    """
    Pass-through field: stores whatever the database backend accepts,
    with no validation or conversion applied.
    """
    def get_internal_type(self):
        """
        Report the nonrel kind 'RawField'. Nonrel fields extend the set
        of standard fields, so subclasses should map to this same
        internal type rather than their own class names.
        """
        return 'RawField'
class AbstractIterableField(models.Field):
    """
    Abstract field for fields for storing iterable data type like
    ``list``, ``set`` and ``dict``.
    You can pass an instance of a field as the first argument.
    If you do, the iterable items will be piped through the passed
    field's validation and conversion routines, converting the items
    to the appropriate data type.
    """
    def __init__(self, item_field=None, *args, **kwargs):
        # nullable fields default to None, others to an empty iterable
        default = kwargs.get(
            'default', None if kwargs.get('null') else EMPTY_ITER)
        # Ensure a new object is created every time the default is
        # accessed.
        if default is not None and not callable(default):
            kwargs['default'] = lambda: self._type(default)
        super(AbstractIterableField, self).__init__(*args, **kwargs)
        # Either use the provided item_field or a RawField.
        if item_field is None:
            item_field = RawField()
        elif callable(item_field):
            item_field = item_field()
        self.item_field = item_field
        # We'll be pretending that item_field is a field of a model
        # with just one "value" field.
        assert not hasattr(self.item_field, 'attname')
        self.item_field.set_attributes_from_name('value')
    def contribute_to_class(self, cls, name):
        self.item_field.model = cls
        self.item_field.name = name
        super(AbstractIterableField, self).contribute_to_class(cls, name)
        # If items' field uses SubfieldBase we also need to.
        item_metaclass = getattr(self.item_field, '__metaclass__', None)
        if item_metaclass and issubclass(item_metaclass, models.SubfieldBase):
            setattr(cls, self.name, Creator(self))
        if isinstance(self.item_field, models.ForeignKey) and isinstance(self.item_field.rel.to, six.string_types):
            """
            If rel.to is a string because the actual class is not yet defined, look up the
            actual class later. Refer to django.models.fields.related.RelatedField.contribute_to_class.
            """
            def _resolve_lookup(_, resolved_model, __):
                self.item_field.rel.to = resolved_model
                self.item_field.do_related_class(self, cls)
            add_lazy_relation(cls, self, self.item_field.rel.to, _resolve_lookup)
    def _map(self, function, iterable, *args, **kwargs):
        """
        Applies the function to items of the iterable and returns
        an iterable of the proper type for the field.
        Overriden by DictField to only apply the function to values.
        """
        return self._type(function(element, *args, **kwargs)
                          for element in iterable)
    def to_python(self, value):
        """
        Passes value items through item_field's to_python.
        """
        if value is None:
            return None
        return self._map(self.item_field.to_python, value)
    def pre_save(self, model_instance, add):
        """
        Gets our value from the model_instance and passes its items
        through item_field's pre_save (using a fake model instance).
        """
        value = getattr(model_instance, self.attname)
        if value is None:
            return None
        return self._map(
            lambda item: self.item_field.pre_save(
                _FakeModel(self.item_field, item), add),
            value)
    def get_db_prep_save(self, value, connection):
        """
        Applies get_db_prep_save of item_field on value items.
        """
        if value is None:
            return None
        return self._map(self.item_field.get_db_prep_save, value,
                         connection=connection)
    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        """
        Passes the value through get_db_prep_lookup of item_field.
        """
        # TODO/XXX: Remove as_lookup_value() once we have a cleaner
        # solution for dot-notation queries.
        # See: https://groups.google.com/group/django-non-relational/browse_thread/thread/6056f8384c9caf04/89eeb9fb22ad16f3).
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return self.item_field.get_db_prep_lookup(
            lookup_type, value, connection=connection, prepared=prepared)
    def validate(self, values, model_instance):
        # only checks iterability; item-level validation is elsewhere
        try:
            iter(values)
        except TypeError:
            raise ValidationError("Value of type %r is not iterable." %
                                  type(values))
    def formfield(self, **kwargs):
        raise NotImplementedError("No form field implemented for %r." %
                                  type(self))
class ListField(AbstractIterableField):
    """
    Field representing a Python ``list``.
    If the optional keyword argument `ordering` is given, it must be a
    callable that is passed to :meth:`list.sort` as `key` argument. If
    `ordering` is given, the items in the list will be sorted before
    sending them to the database.
    """
    _type = list
    def __init__(self, *args, **kwargs):
        self.ordering = kwargs.pop('ordering', None)
        if self.ordering is not None and not callable(self.ordering):
            raise TypeError("'ordering' has to be a callable or None, "
                            "not of type %r." % type(self.ordering))
        super(ListField, self).__init__(*args, **kwargs)
    def get_internal_type(self):
        return 'ListField'
    def pre_save(self, model_instance, add):
        value = getattr(model_instance, self.attname)
        if value is None:
            return None
        # NOTE(review): sorts the model instance's list in place, so the
        # in-memory value is reordered as a side effect of saving.
        if value and self.ordering:
            value.sort(key=self.ordering)
        return super(ListField, self).pre_save(model_instance, add)
class SetField(AbstractIterableField):
    """
    Field holding a Python ``set``.
    """
    _type = set
    def get_internal_type(self):
        return 'SetField'
    def value_to_string(self, obj):
        """
        Serialize the set as a list, since JSON has no set type.
        """
        return list(self._get_val_from_obj(obj))
class DictField(AbstractIterableField):
    """
    Field representing a Python ``dict``.
    Type conversions described in :class:`AbstractIterableField` only
    affect values of the dictionary, not keys. Depending on the
    back-end, keys that aren't strings might not be allowed.
    """
    _type = dict
    def get_internal_type(self):
        return 'DictField'
    def _map(self, function, iterable, *args, **kwargs):
        # unlike the base class, apply the function to values only
        return self._type((key, function(value, *args, **kwargs))
                          for key, value in six.iteritems(iterable))
    def validate(self, values, model_instance):
        if not isinstance(values, dict):
            raise ValidationError("Value is of type %r. Should be a dict." %
                                  type(values))
@six.add_metaclass(models.SubfieldBase)
class EmbeddedModelField(models.Field):
    """
    Field that allows you to embed a model instance.

    :param embedded_model: (optional) The model class of instances we
                           will be embedding; may also be passed as a
                           string, similar to relation fields

    TODO: Make sure to delegate all signals and other field methods to
          the embedded instance (not just pre_save, get_db_prep_* and
          to_python).
    """

    def __init__(self, embedded_model=None, *args, **kwargs):
        # embedded_model may be a model class, a lazy string reference
        # (resolved in _set_model), or None for untyped embedding.
        self.embedded_model = embedded_model
        kwargs.setdefault('default', None)
        super(EmbeddedModelField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Kind string nonrel back-ends dispatch database conversions on.
        return 'EmbeddedModelField'

    def _set_model(self, model):
        """
        Resolves embedded model class once the field knows the model it
        belongs to.

        If the model argument passed to __init__ was a string, we need
        to make sure to resolve that string to the corresponding model
        class, similar to relation fields.

        However, we need to know our own model to generate a valid key
        for the embedded model class lookup and EmbeddedModelFields are
        not contributed_to_class if used in iterable fields. Thus we
        rely on the collection field telling us its model (by setting
        our "model" attribute in its contribute_to_class method).
        """
        self._model = model
        if model is not None and isinstance(self.embedded_model, six.string_types):
            def _resolve_lookup(self_, resolved_model, model):
                self.embedded_model = resolved_model
            add_lazy_relation(model, self, self.embedded_model, _resolve_lookup)

    # Reads return the plain attribute; writes go through _set_model so
    # lazy string references are resolved as soon as the owner is known.
    model = property(lambda self: self._model, _set_model)

    def stored_model(self, column_values):
        """
        Returns the fixed embedded_model this field was initialized
        with (typed embedding) or tries to determine the model from
        _module / _model keys stored together with column_values
        (untyped embedding).

        We give precedence to the field's definition model, as silently
        using a differing serialized one could hide some data integrity
        problems.

        Note that a single untyped EmbeddedModelField may process
        instances of different models (especially when used as a type
        of a collection field).
        """
        # Pop the bookkeeping keys so they never leak into attributes.
        module = column_values.pop('_module', None)
        model = column_values.pop('_model', None)
        if self.embedded_model is not None:
            return self.embedded_model
        elif module is not None:
            return getattr(import_module(module), model)
        else:
            raise IntegrityError("Untyped EmbeddedModelField trying to load "
                                 "data without serialized model class info.")

    def to_python(self, value):
        """
        Passes embedded model fields' values through embedded fields'
        to_python methods and reinstantiates the embedded instance.

        We expect to receive a field.attname => value dict together
        with a model class from back-end database deconversion (which
        needs to know fields of the model beforehand).
        """
        # Either the model class has already been determined during
        # deconverting values from the database or we've got a dict
        # from a deserializer that may contain model class info.
        if isinstance(value, tuple):
            embedded_model, attribute_values = value
        elif isinstance(value, dict):
            embedded_model = self.stored_model(value)
            attribute_values = value
        else:
            return value

        # Pass values through respective fields' to_python, leaving
        # fields for which no value is specified uninitialized.
        attribute_values = dict(
            (field.attname, field.to_python(attribute_values[field.attname]))
            for field in embedded_model._meta.fields
            if field.attname in attribute_values)

        # Create the model instance.
        instance = embedded_model(**attribute_values)
        # Mark the instance as already persisted so subsequent saves
        # update instead of re-inserting it.
        instance._state.adding = False
        return instance

    def get_db_prep_save(self, embedded_instance, connection):
        """
        Applies pre_save and get_db_prep_save of embedded instance
        fields and passes a field => value mapping down to database
        type conversions.

        The embedded instance will be saved as a column => value dict
        in the end (possibly augmented with info about instance's model
        for untyped embedding), but because we need to apply database
        type conversions on embedded instance fields' values and for
        these we need to know fields those values come from, we need to
        entrust the database layer with creating the dict.
        """
        if embedded_instance is None:
            return None

        # The field's value should be an instance of the model given in
        # its declaration or at least of some model.
        embedded_model = self.embedded_model or models.Model
        if not isinstance(embedded_instance, embedded_model):
            raise TypeError("Expected instance of type %r, not %r." %
                            (embedded_model, type(embedded_instance)))

        # Apply pre_save and get_db_prep_save of embedded instance
        # fields, create the field => value mapping to be passed to
        # storage preprocessing.
        field_values = {}
        add = embedded_instance._state.adding
        for field in embedded_instance._meta.fields:
            value = field.get_db_prep_save(
                field.pre_save(embedded_instance, add), connection=connection)
            # Exclude unset primary keys (e.g. {'id': None}).
            if field.primary_key and value is None:
                continue
            field_values[field] = value

        # Let untyped fields store model info alongside values.
        # We use fake RawFields for additional values to avoid passing
        # embedded_instance to database conversions and to give
        # back-ends a chance to apply generic conversions.
        if self.embedded_model is None:
            module_field = RawField()
            module_field.set_attributes_from_name('_module')
            model_field = RawField()
            model_field.set_attributes_from_name('_model')
            field_values.update(
                ((module_field, embedded_instance.__class__.__module__),
                 (model_field, embedded_instance.__class__.__name__)))

        # This instance will exist in the database soon.
        # TODO.XXX: Ensure that this doesn't cause race conditions.
        embedded_instance._state.adding = False
        return field_values

    # TODO/XXX: Remove this once we have a cleaner solution.
    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        # Lookup values may carry their own conversion hook (used for
        # dot-notation queries); otherwise pass through unchanged.
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return value
class BlobField(models.Field):
    """
    A field for storing blobs of binary data.

    The value might either be a string (or something that can be
    converted to a string), or a file-like object.

    In the latter case, the object has to provide a ``read`` method
    from which the blob is read.
    """

    def get_internal_type(self):
        return 'BlobField'

    def formfield(self, **kwargs):
        """
        A file widget is provided, but use model FileField or
        ImageField for storing specific files most of the time.
        """
        # Lazy imports: forms machinery is only needed when a form
        # field is actually requested.
        from .widgets import BlobWidget
        from django.forms import FileField
        options = {'form_class': FileField, 'widget': BlobWidget}
        options.update(kwargs)
        return super(BlobField, self).formfield(**options)

    def get_db_prep_save(self, value, connection):
        # Drain file-like objects; coerce everything else to a string.
        if hasattr(value, 'read'):
            return value.read()
        return str(value)

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        # Blobs are opaque -- querying on their contents is unsupported.
        raise TypeError("BlobFields do not support lookups.")

    def value_to_string(self, obj):
        return str(self._get_val_from_obj(obj))
| {
"repo_name": "kavdev/djangotoolbox",
"path": "djangotoolbox/fields.py",
"copies": "1",
"size": "16758",
"license": "bsd-3-clause",
"hash": -6807843032986078000,
"line_mean": 36.9140271493,
"line_max": 125,
"alpha_frac": 0.6277002029,
"autogenerated": false,
"ratio": 4.423970432946145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001663248438299275,
"num_lines": 442
} |
# All fields except for BlobField written by Jonas Haag <jonas@lophus.org>
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.fields.subclassing import Creator
from django.db.utils import IntegrityError
from django.utils.importlib import import_module
# Public API of this module.
__all__ = ('RawField', 'ListField', 'SetField', 'DictField',
           'EmbeddedModelField', 'BlobField')

# Shared immutable "empty" default for iterable fields; each field
# converts it into its own concrete type (list/set/dict) on access.
EMPTY_ITER = ()
class _FakeModel(object):
    """
    Stand-in for a model instance.

    Objects of this class masquerade as model instances when passed to
    ``Field.pre_save`` (item_fields of iterable fields are not actually
    fields of any model).
    """

    def __init__(self, field, value):
        # Expose the value under the attribute name the field expects.
        setattr(self, field.attname, value)
class RawField(models.Field):
    """
    Generic field to store anything your database backend allows you
    to. No validation or conversions are done for this field.
    """

    def get_internal_type(self):
        """
        Returns this field's kind.

        Nonrel fields are meant to extend the set of standard fields,
        so fields subclassing them should get the same internal type,
        rather than their own class name.
        """
        return 'RawField'
class AbstractIterableField(models.Field):
    """
    Abstract field for fields for storing iterable data type like
    ``list``, ``set`` and ``dict``.

    You can pass an instance of a field as the first argument.
    If you do, the iterable items will be piped through the passed
    field's validation and conversion routines, converting the items
    to the appropriate data type.
    """

    def __init__(self, item_field=None, *args, **kwargs):
        # Default to an empty iterable of the concrete type unless the
        # field is nullable.
        default = kwargs.get(
            'default', None if kwargs.get('null') else EMPTY_ITER)

        # Ensure a new object is created every time the default is
        # accessed.
        if default is not None and not callable(default):
            kwargs['default'] = lambda: self._type(default)

        super(AbstractIterableField, self).__init__(*args, **kwargs)

        # Either use the provided item_field or a RawField.
        if item_field is None:
            item_field = RawField()
        elif callable(item_field):
            item_field = item_field()
        self.item_field = item_field

        # We'll be pretending that item_field is a field of a model
        # with just one "value" field.
        assert not hasattr(self.item_field, 'attname')
        self.item_field.set_attributes_from_name('value')

    def contribute_to_class(self, cls, name):
        self.item_field.model = cls
        self.item_field.name = name
        super(AbstractIterableField, self).contribute_to_class(cls, name)

        # If items' field uses SubfieldBase we also need to.
        # BUGFIX: fields without a __metaclass__ yield None here, and
        # issubclass(None, ...) raises TypeError -- guard against it.
        item_metaclass = getattr(self.item_field, '__metaclass__', None)
        if item_metaclass and issubclass(item_metaclass, models.SubfieldBase):
            setattr(cls, self.name, Creator(self))

    def _map(self, function, iterable, *args, **kwargs):
        """
        Applies the function to items of the iterable and returns
        an iterable of the proper type for the field.

        Overridden by DictField to only apply the function to values.
        """
        return self._type(function(element, *args, **kwargs)
                          for element in iterable)

    def to_python(self, value):
        """
        Passes value items through item_field's to_python.
        """
        if value is None:
            return None
        return self._map(self.item_field.to_python, value)

    def pre_save(self, model_instance, add):
        """
        Gets our value from the model_instance and passes its items
        through item_field's pre_save (using a fake model instance).
        """
        value = getattr(model_instance, self.attname)
        if value is None:
            return None
        return self._map(
            lambda item: self.item_field.pre_save(
                _FakeModel(self.item_field, item), add),
            value)

    def get_db_prep_save(self, value, connection):
        """
        Applies get_db_prep_save of item_field on value items.
        """
        if value is None:
            return None
        return self._map(self.item_field.get_db_prep_save, value,
                         connection=connection)

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        """
        Passes the value through get_db_prep_lookup of item_field.
        """
        # TODO/XXX: Remove as_lookup_value() once we have a cleaner
        # solution for dot-notation queries.
        # See: https://groups.google.com/group/django-non-relational/browse_thread/thread/6056f8384c9caf04/89eeb9fb22ad16f3).
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return self.item_field.get_db_prep_lookup(
            lookup_type, value, connection=connection, prepared=prepared)

    def validate(self, values, model_instance):
        # Only requires iterability; concrete subclasses may narrow the
        # accepted type further (e.g. DictField requires a dict).
        try:
            iter(values)
        except TypeError:
            raise ValidationError("Value of type %r is not iterable." %
                                  type(values))

    def formfield(self, **kwargs):
        raise NotImplementedError("No form field implemented for %r." %
                                  type(self))
class ListField(AbstractIterableField):
    """
    Field representing a Python ``list``.

    If the optional keyword argument `ordering` is given, it must be a
    callable that is passed to :meth:`list.sort` as `key` argument. If
    `ordering` is given, the items in the list will be sorted before
    sending them to the database.
    """
    _type = list

    def __init__(self, *args, **kwargs):
        ordering = kwargs.pop('ordering', None)
        if not (ordering is None or callable(ordering)):
            raise TypeError("'ordering' has to be a callable or None, "
                            "not of type %r." % type(ordering))
        self.ordering = ordering
        super(ListField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return 'ListField'

    def pre_save(self, model_instance, add):
        # Sort in place (if requested) before the base class runs each
        # item through item_field.pre_save.
        items = getattr(model_instance, self.attname)
        if items is None:
            return None
        if items and self.ordering:
            items.sort(key=self.ordering)
        return super(ListField, self).pre_save(model_instance, add)
class SetField(AbstractIterableField):
    """
    Field representing a Python ``set``.
    """
    _type = set

    def get_internal_type(self):
        return 'SetField'

    def value_to_string(self, obj):
        """
        Custom serialization method: JSON has no set type, so the
        value is handed to the serializer as a list.
        """
        return list(self._get_val_from_obj(obj))
class DictField(AbstractIterableField):
    """
    Field representing a Python ``dict``.

    Type conversions described in :class:`AbstractIterableField` only
    affect values of the dictionary, not keys. Depending on the
    back-end, keys that aren't strings might not be allowed.
    """
    _type = dict

    def get_internal_type(self):
        return 'DictField'

    def _map(self, function, iterable, *args, **kwargs):
        # Convert values only; keys pass through untouched.
        pairs = ((key, function(value, *args, **kwargs))
                 for key, value in iterable.iteritems())
        return self._type(pairs)

    def validate(self, values, model_instance):
        # Strict check: anything that is not literally a dict fails.
        if not isinstance(values, dict):
            raise ValidationError("Value is of type %r. Should be a dict." %
                                  type(values))
class EmbeddedModelField(models.Field):
    """
    Field that allows you to embed a model instance.

    :param embedded_model: (optional) The model class of instances we
                           will be embedding; may also be passed as a
                           string, similar to relation fields

    TODO: Make sure to delegate all signals and other field methods to
          the embedded instance (not just pre_save, get_db_prep_* and
          to_python).
    """
    # SubfieldBase makes Django run to_python() on attribute
    # assignment, so stored dicts become instances transparently.
    __metaclass__ = models.SubfieldBase

    def __init__(self, embedded_model=None, *args, **kwargs):
        # embedded_model may be a model class, a lazy string reference
        # (resolved in _set_model), or None for untyped embedding.
        self.embedded_model = embedded_model
        kwargs.setdefault('default', None)
        super(EmbeddedModelField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Kind string nonrel back-ends dispatch database conversions on.
        return 'EmbeddedModelField'

    def _set_model(self, model):
        """
        Resolves embedded model class once the field knows the model it
        belongs to.

        If the model argument passed to __init__ was a string, we need
        to make sure to resolve that string to the corresponding model
        class, similar to relation fields.

        However, we need to know our own model to generate a valid key
        for the embedded model class lookup and EmbeddedModelFields are
        not contributed_to_class if used in iterable fields. Thus we
        rely on the collection field telling us its model (by setting
        our "model" attribute in its contribute_to_class method).
        """
        if model is not None and isinstance(self.embedded_model, basestring):
            def _resolve_lookup(self_, resolved_model, model):
                self.embedded_model = resolved_model
            # Imported lazily here rather than at module level.
            from django.db.models.fields.related import add_lazy_relation
            add_lazy_relation(model, self, self.embedded_model,
                              _resolve_lookup)
        self._model = model

    # Reads return the plain attribute; writes go through _set_model so
    # lazy string references are resolved as soon as the owner is known.
    model = property(lambda self: self._model, _set_model)

    def stored_model(self, column_values):
        """
        Returns the fixed embedded_model this field was initialized
        with (typed embedding) or tries to determine the model from
        _module / _model keys stored together with column_values
        (untyped embedding).

        We give precedence to the field's definition model, as silently
        using a differing serialized one could hide some data integrity
        problems.

        Note that a single untyped EmbeddedModelField may process
        instances of different models (especially when used as a type
        of a collection field).
        """
        # Pop the bookkeeping keys so they never leak into attributes.
        module = column_values.pop('_module', None)
        model = column_values.pop('_model', None)
        if self.embedded_model is not None:
            return self.embedded_model
        elif module is not None:
            return getattr(import_module(module), model)
        else:
            raise IntegrityError("Untyped EmbeddedModelField trying to load "
                                 "data without serialized model class info.")

    def to_python(self, value):
        """
        Passes embedded model fields' values through embedded fields'
        to_python methods and reinstantiates the embedded instance.

        We expect to receive a field.attname => value dict together
        with a model class from back-end database deconversion (which
        needs to know fields of the model beforehand).
        """
        # Either the model class has already been determined during
        # deconverting values from the database or we've got a dict
        # from a deserializer that may contain model class info.
        if isinstance(value, tuple):
            embedded_model, attribute_values = value
        elif isinstance(value, dict):
            embedded_model = self.stored_model(value)
            attribute_values = value
        else:
            return value

        # Pass values through respective fields' to_python, leaving
        # fields for which no value is specified uninitialized.
        attribute_values = dict(
            (field.attname, field.to_python(attribute_values[field.attname]))
            for field in embedded_model._meta.fields
            if field.attname in attribute_values)

        # Create the model instance.
        # Note: the double underline is not a typo -- this lets the
        # model know that the object already exists in the database.
        return embedded_model(__entity_exists=True, **attribute_values)

    def get_db_prep_save(self, embedded_instance, connection):
        """
        Applies pre_save and get_db_prep_save of embedded instance
        fields and passes a field => value mapping down to database
        type conversions.

        The embedded instance will be saved as a column => value dict
        in the end (possibly augmented with info about instance's model
        for untyped embedding), but because we need to apply database
        type conversions on embedded instance fields' values and for
        these we need to know fields those values come from, we need to
        entrust the database layer with creating the dict.
        """
        if embedded_instance is None:
            return None

        # The field's value should be an instance of the model given in
        # its declaration or at least of some model.
        embedded_model = self.embedded_model or models.Model
        if not isinstance(embedded_instance, embedded_model):
            raise TypeError("Expected instance of type %r, not %r." %
                            (embedded_model, type(embedded_instance)))

        # Apply pre_save and get_db_prep_save of embedded instance
        # fields, create the field => value mapping to be passed to
        # storage preprocessing.
        field_values = {}
        add = not embedded_instance._entity_exists
        for field in embedded_instance._meta.fields:
            value = field.get_db_prep_save(
                field.pre_save(embedded_instance, add), connection=connection)
            # Exclude unset primary keys (e.g. {'id': None}).
            if field.primary_key and value is None:
                continue
            field_values[field] = value

        # Let untyped fields store model info alongside values.
        # We use fake RawFields for additional values to avoid passing
        # embedded_instance to database conversions and to give
        # back-ends a chance to apply generic conversions.
        if self.embedded_model is None:
            module_field = RawField()
            module_field.set_attributes_from_name('_module')
            model_field = RawField()
            model_field.set_attributes_from_name('_model')
            field_values.update(
                ((module_field, embedded_instance.__class__.__module__),
                 (model_field, embedded_instance.__class__.__name__)))

        # This instance will exist in the database soon.
        # TODO.XXX: Ensure that this doesn't cause race conditions.
        embedded_instance._entity_exists = True
        return field_values

    # TODO/XXX: Remove this once we have a cleaner solution.
    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        # Lookup values may carry their own conversion hook (used for
        # dot-notation queries); otherwise pass through unchanged.
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return value
class BlobField(models.Field):
    """
    A field for storing blobs of binary data.

    The value might either be a string (or something that can be
    converted to a string), or a file-like object.

    In the latter case, the object has to provide a ``read`` method
    from which the blob is read.
    """

    def get_internal_type(self):
        # Kind string nonrel back-ends dispatch on.
        return 'BlobField'

    def formfield(self, **kwargs):
        """
        A file widget is provided, but use model FileField or
        ImageField for storing specific files most of the time.
        """
        # Lazy imports: forms machinery is only loaded when a form
        # field is actually requested.
        from .widgets import BlobWidget
        from django.forms import FileField
        defaults = {'form_class': FileField, 'widget': BlobWidget}
        defaults.update(kwargs)
        return super(BlobField, self).formfield(**defaults)

    def get_db_prep_save(self, value, connection):
        # File-like objects are drained via read(); everything else is
        # coerced with str().
        if hasattr(value, 'read'):
            return value.read()
        else:
            return str(value)

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        # Blobs are opaque -- querying on their contents is unsupported.
        raise TypeError("BlobFields do not support lookups.")

    def value_to_string(self, obj):
        # Serialize as a plain string of the raw data.
        return str(self._get_val_from_obj(obj))
| {
"repo_name": "wd5/jangr",
"path": "djangotoolbox/fields.py",
"copies": "1",
"size": "16241",
"license": "bsd-3-clause",
"hash": -3968221534638436400,
"line_mean": 36.5949074074,
"line_max": 125,
"alpha_frac": 0.6259466782,
"autogenerated": false,
"ratio": 4.464266080263881,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5590212758463882,
"avg_score": null,
"num_lines": null
} |
# All fields except for BlobField written by Jonas Haag <jonas@lophus.org>
from django.core.exceptions import ValidationError
from django.utils.importlib import import_module
from django.db import models
from django.db.models.fields.subclassing import Creator
from django.db.utils import IntegrityError
from django.db.models.fields.related import add_lazy_relation
# Public API of this module.
__all__ = ('RawField', 'ListField', 'SetField', 'DictField',
           'EmbeddedModelField', 'BlobField')

# Shared immutable "empty" default for iterable fields; each field
# converts it into its own concrete type (list/set/dict) on access.
EMPTY_ITER = ()
class _FakeModel(object):
    """
    Minimal model-instance impostor.

    ``Field.pre_save`` only needs the value reachable under
    ``field.attname``, so exposing it as an attribute is sufficient
    (item_fields of iterable fields are not actually fields of any
    model).
    """

    def __init__(self, field, value):
        setattr(self, field.attname, value)
class RawField(models.Field):
    """
    Pass-through field: stores whatever the database backend accepts,
    performing no validation or conversion of its own.
    """

    def get_internal_type(self):
        """
        Returns this field's kind.

        Nonrel fields extend the set of standard fields, so subclasses
        should report the same internal type rather than their own
        class name.
        """
        return 'RawField'
class AbstractIterableField(models.Field):
    """
    Abstract field for fields for storing iterable data type like
    ``list``, ``set`` and ``dict``.

    You can pass an instance of a field as the first argument.
    If you do, the iterable items will be piped through the passed
    field's validation and conversion routines, converting the items
    to the appropriate data type.
    """

    def __init__(self, item_field=None, *args, **kwargs):
        # Default to an empty iterable of the concrete type unless the
        # field is nullable.
        default = kwargs.get(
            'default', None if kwargs.get('null') else EMPTY_ITER)
        # Ensure a new object is created every time the default is
        # accessed.
        if default is not None and not callable(default):
            kwargs['default'] = lambda: self._type(default)
        super(AbstractIterableField, self).__init__(*args, **kwargs)
        # Either use the provided item_field or a RawField.
        if item_field is None:
            item_field = RawField()
        elif callable(item_field):
            item_field = item_field()
        self.item_field = item_field
        # We'll be pretending that item_field is a field of a model
        # with just one "value" field.
        assert not hasattr(self.item_field, 'attname')
        self.item_field.set_attributes_from_name('value')

    def contribute_to_class(self, cls, name):
        self.item_field.model = cls
        self.item_field.name = name
        super(AbstractIterableField, self).contribute_to_class(cls, name)
        # If items' field uses SubfieldBase we also need to.
        # NOTE(review): if the item field declares no __metaclass__,
        # item_metaclass is None and issubclass(None, ...) below raises
        # TypeError -- consider guarding with `if item_metaclass and ...`.
        item_metaclass = getattr(self.item_field, '__metaclass__', None)
        if issubclass(item_metaclass, models.SubfieldBase):
            setattr(cls, self.name, Creator(self))
        if isinstance(self.item_field, models.ForeignKey) and isinstance(self.item_field.rel.to, basestring):
            """
            If rel.to is a string because the actual class is not yet defined, look up the
            actual class later. Refer to django.models.fields.related.RelatedField.contribute_to_class.
            """
            def _resolve_lookup(_, resolved_model, __):
                self.item_field.rel.to = resolved_model
                self.item_field.do_related_class(self, cls)
            add_lazy_relation(cls, self, self.item_field.rel.to, _resolve_lookup)

    def _map(self, function, iterable, *args, **kwargs):
        """
        Applies the function to items of the iterable and returns
        an iterable of the proper type for the field.

        Overridden by DictField to only apply the function to values.
        """
        return self._type(function(element, *args, **kwargs)
                          for element in iterable)

    def to_python(self, value):
        """
        Passes value items through item_field's to_python.
        """
        if value is None:
            return None
        return self._map(self.item_field.to_python, value)

    def pre_save(self, model_instance, add):
        """
        Gets our value from the model_instance and passes its items
        through item_field's pre_save (using a fake model instance).
        """
        value = getattr(model_instance, self.attname)
        if value is None:
            return None
        return self._map(
            lambda item: self.item_field.pre_save(
                _FakeModel(self.item_field, item), add),
            value)

    def get_db_prep_save(self, value, connection):
        """
        Applies get_db_prep_save of item_field on value items.
        """
        if value is None:
            return None
        return self._map(self.item_field.get_db_prep_save, value,
                         connection=connection)

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        """
        Passes the value through get_db_prep_lookup of item_field.
        """
        # TODO/XXX: Remove as_lookup_value() once we have a cleaner
        # solution for dot-notation queries.
        # See: https://groups.google.com/group/django-non-relational/browse_thread/thread/6056f8384c9caf04/89eeb9fb22ad16f3).
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return self.item_field.get_db_prep_lookup(
            lookup_type, value, connection=connection, prepared=prepared)

    def validate(self, values, model_instance):
        # Only requires iterability; concrete subclasses may narrow.
        try:
            iter(values)
        except TypeError:
            raise ValidationError("Value of type %r is not iterable." %
                                  type(values))

    def formfield(self, **kwargs):
        # Renders as a plain CharField form field by default; the
        # commented-out variant below used to raise instead.
        from django.forms import CharField
        defaults = {'form_class': CharField}
        defaults.update(kwargs)
        return super(AbstractIterableField, self).formfield(**defaults)

    # def formfield(self, **kwargs):
    #     raise NotImplementedError("No form field implemented for %r." %
    #                               type(self))
class ListField(AbstractIterableField):
    """
    Field representing a Python ``list``.

    If the optional keyword argument `ordering` is given, it must be a
    callable that is passed to :meth:`list.sort` as `key` argument. If
    `ordering` is given, the items in the list will be sorted before
    sending them to the database.
    """
    _type = list

    def __init__(self, *args, **kwargs):
        self.ordering = kwargs.pop('ordering', None)
        if not (self.ordering is None or callable(self.ordering)):
            raise TypeError("'ordering' has to be a callable or None, "
                            "not of type %r." % type(self.ordering))
        super(ListField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return 'ListField'

    def pre_save(self, model_instance, add):
        # Optionally sort the list in place, then delegate per-item
        # processing to the base implementation.
        current = getattr(model_instance, self.attname)
        if current is None:
            return None
        if current and self.ordering:
            current.sort(key=self.ordering)
        return super(ListField, self).pre_save(model_instance, add)
class SetField(AbstractIterableField):
    """
    Field holding a Python ``set``.
    """
    _type = set

    def get_internal_type(self):
        # Kind string nonrel back-ends dispatch on.
        return 'SetField'

    def value_to_string(self, obj):
        """
        Serialization hook; sets aren't JSON-encodable, so the stored
        set is handed to the serializer as a list.
        """
        current = self._get_val_from_obj(obj)
        return list(current)
class DictField(AbstractIterableField):
    """
    Field representing a Python ``dict``.

    Type conversions described in :class:`AbstractIterableField` only
    affect values of the dictionary, not keys. Depending on the
    back-end, keys that aren't strings might not be allowed.
    """
    _type = dict

    def get_internal_type(self):
        return 'DictField'

    def _map(self, function, iterable, *args, **kwargs):
        # Apply the conversion to values only -- keys are untouched.
        return self._type(
            (key, function(value, *args, **kwargs))
            for key, value in iterable.iteritems())

    def validate(self, values, model_instance):
        # Guard clause: a real dict is fine, anything else fails.
        if isinstance(values, dict):
            return
        raise ValidationError("Value is of type %r. Should be a dict." %
                              type(values))
class EmbeddedModelField(models.Field):
    """
    Field that allows you to embed a model instance.

    :param embedded_model: (optional) The model class of instances we
                           will be embedding; may also be passed as a
                           string, similar to relation fields

    TODO: Make sure to delegate all signals and other field methods to
          the embedded instance (not just pre_save, get_db_prep_* and
          to_python).
    """
    # SubfieldBase makes Django run to_python() on attribute
    # assignment, so stored dicts become instances transparently.
    __metaclass__ = models.SubfieldBase

    def __init__(self, embedded_model=None, *args, **kwargs):
        # embedded_model may be a model class, a lazy string reference
        # (resolved in _set_model), or None for untyped embedding.
        self.embedded_model = embedded_model
        kwargs.setdefault('default', None)
        super(EmbeddedModelField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Kind string nonrel back-ends dispatch database conversions on.
        return 'EmbeddedModelField'

    def _set_model(self, model):
        """
        Resolves embedded model class once the field knows the model it
        belongs to.

        If the model argument passed to __init__ was a string, we need
        to make sure to resolve that string to the corresponding model
        class, similar to relation fields.

        However, we need to know our own model to generate a valid key
        for the embedded model class lookup and EmbeddedModelFields are
        not contributed_to_class if used in iterable fields. Thus we
        rely on the collection field telling us its model (by setting
        our "model" attribute in its contribute_to_class method).
        """
        self._model = model
        if model is not None and isinstance(self.embedded_model, basestring):
            def _resolve_lookup(self_, resolved_model, model):
                self.embedded_model = resolved_model
            add_lazy_relation(model, self, self.embedded_model, _resolve_lookup)

    # Reads return the plain attribute; writes go through _set_model so
    # lazy string references are resolved as soon as the owner is known.
    model = property(lambda self: self._model, _set_model)

    def stored_model(self, column_values):
        """
        Returns the fixed embedded_model this field was initialized
        with (typed embedding) or tries to determine the model from
        _module / _model keys stored together with column_values
        (untyped embedding).

        We give precedence to the field's definition model, as silently
        using a differing serialized one could hide some data integrity
        problems.

        Note that a single untyped EmbeddedModelField may process
        instances of different models (especially when used as a type
        of a collection field).
        """
        # Pop the bookkeeping keys so they never leak into attributes.
        module = column_values.pop('_module', None)
        model = column_values.pop('_model', None)
        if self.embedded_model is not None:
            return self.embedded_model
        elif module is not None:
            return getattr(import_module(module), model)
        else:
            raise IntegrityError("Untyped EmbeddedModelField trying to load "
                                 "data without serialized model class info.")

    def to_python(self, value):
        """
        Passes embedded model fields' values through embedded fields'
        to_python methods and reinstantiates the embedded instance.

        We expect to receive a field.attname => value dict together
        with a model class from back-end database deconversion (which
        needs to know fields of the model beforehand).
        """
        # Either the model class has already been determined during
        # deconverting values from the database or we've got a dict
        # from a deserializer that may contain model class info.
        if isinstance(value, tuple):
            embedded_model, attribute_values = value
        elif isinstance(value, dict):
            embedded_model = self.stored_model(value)
            attribute_values = value
        else:
            return value

        # Pass values through respective fields' to_python, leaving
        # fields for which no value is specified uninitialized.
        attribute_values = dict(
            (field.attname, field.to_python(attribute_values[field.attname]))
            for field in embedded_model._meta.fields
            if field.attname in attribute_values)

        # Create the model instance.
        # Note: the double underline is not a typo -- this lets the
        # model know that the object already exists in the database.
        return embedded_model(__entity_exists=True, **attribute_values)

    def get_db_prep_save(self, embedded_instance, connection):
        """
        Applies pre_save and get_db_prep_save of embedded instance
        fields and passes a field => value mapping down to database
        type conversions.

        The embedded instance will be saved as a column => value dict
        in the end (possibly augmented with info about instance's model
        for untyped embedding), but because we need to apply database
        type conversions on embedded instance fields' values and for
        these we need to know fields those values come from, we need to
        entrust the database layer with creating the dict.
        """
        if embedded_instance is None:
            return None

        # The field's value should be an instance of the model given in
        # its declaration or at least of some model.
        embedded_model = self.embedded_model or models.Model
        if not isinstance(embedded_instance, embedded_model):
            raise TypeError("Expected instance of type %r, not %r." %
                            (embedded_model, type(embedded_instance)))

        # Apply pre_save and get_db_prep_save of embedded instance
        # fields, create the field => value mapping to be passed to
        # storage preprocessing.
        field_values = {}
        add = not embedded_instance._entity_exists
        for field in embedded_instance._meta.fields:
            value = field.get_db_prep_save(
                field.pre_save(embedded_instance, add), connection=connection)
            # Exclude unset primary keys (e.g. {'id': None}).
            if field.primary_key and value is None:
                continue
            field_values[field] = value

        # Let untyped fields store model info alongside values.
        # We use fake RawFields for additional values to avoid passing
        # embedded_instance to database conversions and to give
        # back-ends a chance to apply generic conversions.
        if self.embedded_model is None:
            module_field = RawField()
            module_field.set_attributes_from_name('_module')
            model_field = RawField()
            model_field.set_attributes_from_name('_model')
            field_values.update(
                ((module_field, embedded_instance.__class__.__module__),
                 (model_field, embedded_instance.__class__.__name__)))

        # This instance will exist in the database soon.
        # TODO.XXX: Ensure that this doesn't cause race conditions.
        embedded_instance._entity_exists = True
        return field_values

    # TODO/XXX: Remove this once we have a cleaner solution.
    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        # Lookup values may carry their own conversion hook (used for
        # dot-notation queries); otherwise pass through unchanged.
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return value
class BlobField(models.Field):
    """
    A field for storing blobs of binary data.

    The value might either be a string (or something that can be
    converted to a string), or a file-like object.  In the latter case,
    the object has to provide a ``read`` method from which the blob is
    read.
    """

    def get_internal_type(self):
        return 'BlobField'

    def formfield(self, **kwargs):
        """
        A file widget is provided, but use model FileField or
        ImageField for storing specific files most of the time.
        """
        from .widgets import BlobWidget
        from django.forms import FileField
        defaults = {'form_class': FileField, 'widget': BlobWidget}
        defaults.update(kwargs)
        return super(BlobField, self).formfield(**defaults)

    def get_db_prep_save(self, value, connection):
        # File-like objects are drained; everything else is stringified.
        if not hasattr(value, 'read'):
            return str(value)
        return value.read()

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        raise TypeError("BlobFields do not support lookups.")

    def value_to_string(self, obj):
        return str(self._get_val_from_obj(obj))
| {
"repo_name": "Laimiux/mydeatree",
"path": "djangotoolbox/fields.py",
"copies": "1",
"size": "17034",
"license": "bsd-3-clause",
"hash": -5847117290310126000,
"line_mean": 36.9376391982,
"line_max": 125,
"alpha_frac": 0.6272161559,
"autogenerated": false,
"ratio": 4.434782608695652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009618835726952588,
"num_lines": 449
} |
# All fields except for BlobField written by Jonas Haag <jonas@lophus.org>
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.importlib import import_module
__all__ = ('RawField', 'ListField', 'DictField', 'SetField',
'BlobField', 'EmbeddedModelField')
class _HandleAssignment(object):
    """
    Descriptor that funnels attribute assignment through the wrapped
    field's ``to_python`` conversion (emulating ``SubfieldBase``).
    """

    def __init__(self, field):
        # The model field whose to_python() performs the conversion.
        self.field = field

    def __get__(self, obj, type=None):
        # Class-level access has no instance dict to read from.
        if obj is None:
            raise AttributeError('Can only be accessed via an instance.')
        return obj.__dict__[self.field.name]

    def __set__(self, obj, value):
        # Convert on assignment so the instance always holds Python values.
        obj.__dict__[self.field.name] = self.field.to_python(value)
class RawField(models.Field):
    """Pass-through field: stores anything the backend natively accepts."""

    def get_internal_type(self):
        return 'RawField'
class AbstractIterableField(models.Field):
    """
    Abstract field for fields for storing iterable data type like
    ``list``, ``set`` and ``dict``.

    You can pass an instance of a field as the first argument.
    If you do, the iterable items will be piped through the passed
    field's validation and conversion routines, converting the items to
    the appropriate data type.
    """

    def __init__(self, item_field=None, *args, **kwargs):
        if item_field is None:
            item_field = RawField()
        self.item_field = item_field
        default = kwargs.get('default', None if kwargs.get('null') else ())
        if default is not None and not callable(default):
            # Ensure a new object is created every time the default is
            # accessed, so model instances never share a mutable default.
            kwargs['default'] = lambda: self._type(default)
        super(AbstractIterableField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name):
        self.item_field.model = cls
        self.item_field.name = name
        super(AbstractIterableField, self).contribute_to_class(cls, name)
        # If the item field converts values on assignment (its metaclass
        # is SubfieldBase), install a descriptor emulating that behavior.
        metaclass = getattr(self.item_field, '__metaclass__', None)
        # BUGFIX: guard against item fields without a __metaclass__
        # attribute -- issubclass(None, ...) raises TypeError.
        if metaclass is not None and issubclass(metaclass,
                                                models.SubfieldBase):
            setattr(cls, self.name, _HandleAssignment(self))

    def db_type(self, connection):
        item_db_type = self.item_field.db_type(connection=connection)
        return '%s:%s' % (self.__class__.__name__, item_db_type)

    def _convert(self, func, values, *args, **kwargs):
        # Apply func to each item, rebuilding the field's container type.
        if isinstance(values, (list, tuple, set)):
            return self._type(func(value, *args, **kwargs)
                              for value in values)
        return values

    def to_python(self, value):
        return self._convert(self.item_field.to_python, value)

    def pre_save(self, model_instance, add):
        # The item field's pre_save expects a model instance carrying the
        # value under the field's attname, so a throwaway instance is
        # populated per item.
        class fake_instance(object):
            pass
        fake_instance = fake_instance()

        def wrapper(value):
            assert not hasattr(self.item_field, 'attname')
            fake_instance.value = value
            self.item_field.attname = 'value'
            try:
                return self.item_field.pre_save(fake_instance, add)
            finally:
                del self.item_field.attname

        return self._convert(wrapper, getattr(model_instance, self.attname))

    def get_db_prep_value(self, value, connection, prepared=False):
        return self._convert(self.item_field.get_db_prep_value, value,
                             connection=connection, prepared=prepared)

    def get_db_prep_save(self, value, connection):
        return self._convert(self.item_field.get_db_prep_save,
                             value, connection=connection)

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        # TODO/XXX: Remove as_lookup_value() once we have a cleaner
        # solution for dot-notation queries.
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return self.item_field.get_db_prep_lookup(
            lookup_type, value, connection=connection, prepared=prepared)

    def validate(self, values, model_instance):
        try:
            iter(values)
        except TypeError:
            raise ValidationError('Value of type %r is not iterable' %
                                  type(values))

    def formfield(self, **kwargs):
        raise NotImplementedError('No form field implemented for %r' %
                                  type(self))
class ListField(AbstractIterableField):
    """
    Field representing a Python ``list``.

    If the optional keyword argument `ordering` is given, it must be a
    callable that is passed to :meth:`list.sort` as `key` argument. If
    `ordering` is given, the items in the list will be sorted before
    sending them to the database.
    """
    _type = list

    def __init__(self, *args, **kwargs):
        self.ordering = kwargs.pop('ordering', None)
        if self.ordering is not None and not callable(self.ordering):
            raise TypeError("'ordering' has to be a callable or None, "
                            "not of type %r" % type(self.ordering))
        super(ListField, self).__init__(*args, **kwargs)

    def pre_save(self, model_instance, add):
        values = getattr(model_instance, self.attname)
        if values is None:
            return None
        if self.ordering and values:
            # Sort in place before handing the list to the backend.
            values.sort(key=self.ordering)
        return super(ListField, self).pre_save(model_instance, add)
class SetField(AbstractIterableField):
    """Field representing a Python ``set``."""
    # The container type used when rebuilding converted values.
    _type = set
class DictField(AbstractIterableField):
    """
    Field representing a Python ``dict``.

    The field type conversions described in
    :class:`AbstractIterableField` only affect values of the dictionary,
    not keys.  Depending on the backend, keys that aren't strings might
    not be allowed.
    """
    _type = dict

    def _convert(self, func, values, *args, **kwargs):
        # Only values are converted; keys are passed through untouched.
        if values is None:
            return None
        return dict((key, func(value, *args, **kwargs))
                    for key, value in values.iteritems())

    def validate(self, values, model_instance):
        if not isinstance(values, dict):
            raise ValidationError('Value is of type %r. Should be a dict.' %
                                  type(values))
class BlobField(models.Field):
    """
    A field for storing blobs of binary data.

    The value might either be a string (or something that can be
    converted to a string), or a file-like object.  In the latter case,
    the object has to provide a ``read`` method from which the blob is
    read.
    """

    def get_internal_type(self):
        return 'BlobField'

    def formfield(self, **kwargs):
        # A file widget is provided, but use model FileField or ImageField
        # for storing specific files most of the time.
        from .widgets import BlobWidget
        from django.forms import FileField
        defaults = {'form_class': FileField, 'widget': BlobWidget}
        defaults.update(kwargs)
        return super(BlobField, self).formfield(**defaults)

    def get_db_prep_value(self, value, connection, prepared=False):
        # File-like objects are drained; everything else is stringified.
        if not hasattr(value, 'read'):
            return str(value)
        return value.read()

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        raise TypeError("BlobFields do not support lookups")

    def value_to_string(self, obj):
        return str(self._get_val_from_obj(obj))
class EmbeddedModelField(models.Field):
"""
Field that allows you to embed a model instance.
:param model: (optional) The model class that shall be embedded
(may also be passed as string similar to relation fields)
"""
__metaclass__ = models.SubfieldBase
def __init__(self, model=None, *args, **kwargs):
self.embedded_model = model
kwargs.setdefault('default', None)
super(EmbeddedModelField, self).__init__(*args, **kwargs)
def db_type(self, connection):
return 'DictField:RawField'
def _set_model(self, model):
# EmbeddedModelFields are not contribute[d]_to_class if using within
# ListFields (and friends), so we can only know the model field is
# used in when the IterableField sets our 'model' attribute in its
# contribute_to_class method.
# We need to know the model to generate a valid key for the lookup.
if model is not None and isinstance(self.embedded_model, basestring):
# The model argument passed to __init__ was a string, so we need
# to make sure to resolve that string to the corresponding model
# class, similar to relation fields. We abuse some of the
# relation fields' code to do the lookup here:
def _resolve_lookup(self_, resolved_model, model):
self.embedded_model = resolved_model
from django.db.models.fields.related import add_lazy_relation
add_lazy_relation(model, self, self.embedded_model, _resolve_lookup)
self._model = model
model = property(lambda self:self._model, _set_model)
def pre_save(self, model_instance, add):
embedded_instance = super(EmbeddedModelField, self).pre_save(model_instance, add)
if embedded_instance is None:
return None, None
model = self.embedded_model or models.Model
if not isinstance(embedded_instance, model):
raise TypeError("Expected instance of type %r, not %r" % (
type(model), type(embedded_instance)))
data = dict((field.name, field.pre_save(embedded_instance, add))
for field in embedded_instance._meta.fields)
return embedded_instance, data
def get_db_prep_value(self, (embedded_instance, embedded_dict), **kwargs):
if embedded_dict is None:
return None
values = {}
for name, value in embedded_dict.iteritems():
field = embedded_instance._meta.get_field(name)
values[field.column] = field.get_db_prep_value(value, **kwargs)
if self.embedded_model is None:
values.update({'_module' : embedded_instance.__class__.__module__,
'_model' : embedded_instance.__class__.__name__})
return values
# TODO/XXX: Remove this once we have a cleaner solution
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return value
def to_python(self, values):
if not isinstance(values, dict):
return values
module, model = values.pop('_module', None), values.pop('_model', None)
# TODO/XXX: Workaround for old Python releases. Remove this someday.
# Let's make sure keys are instances of str
values = dict([(str(k), v) for k,v in values.items()])
if module is not None:
return getattr(import_module(module), model)(**values)
return self.embedded_model(**values)
| {
"repo_name": "viniciusgama/blog_gae",
"path": "djangotoolbox/fields.py",
"copies": "6",
"size": "11104",
"license": "bsd-3-clause",
"hash": 5930454212293020000,
"line_mean": 38.6571428571,
"line_max": 90,
"alpha_frac": 0.6316642651,
"autogenerated": false,
"ratio": 4.166604127579737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017294138272009615,
"num_lines": 280
} |
# All fields except for BlobField written by Jonas Haag <jonas@lophus.org>
from django.db import models
from django.core.exceptions import ValidationError
__all__ = ('RawField', 'ListField', 'DictField', 'SetField', 'BlobField')
class _HandleAssignment(object):
    """
    Descriptor that funnels attribute assignment through the wrapped
    field's ``to_python`` conversion (emulating ``SubfieldBase``).
    """

    def __init__(self, field):
        # The model field whose to_python() performs the conversion.
        self.field = field

    def __get__(self, obj, type=None):
        # Class-level access has no instance dict to read from.
        if obj is None:
            raise AttributeError('Can only be accessed via an instance.')
        return obj.__dict__[self.field.name]

    def __set__(self, obj, value):
        # Convert on assignment so the instance always holds Python values.
        obj.__dict__[self.field.name] = self.field.to_python(value)
class RawField(models.Field):
    """Pass-through field: stores anything the backend natively accepts."""

    def get_internal_type(self):
        return 'RawField'
class AbstractIterableField(models.Field):
    """
    Abstract field for fields for storing iterable data type like
    ``list``, ``set`` and ``dict``.

    You can pass an instance of a field as the first argument.
    If you do, the iterable items will be piped through the passed
    field's validation and conversion routines, converting the items to
    the appropriate data type.
    """

    def __init__(self, item_field=None, *args, **kwargs):
        if item_field is None:
            item_field = RawField()
        self.item_field = item_field
        default = kwargs.get('default', None if kwargs.get('null') else ())
        if default is not None and not callable(default):
            # Ensure a new object is created every time the default is
            # accessed, so model instances never share a mutable default.
            kwargs['default'] = lambda: self._type(default)
        super(AbstractIterableField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name):
        self.item_field.model = cls
        self.item_field.name = name
        super(AbstractIterableField, self).contribute_to_class(cls, name)
        # If the item field converts values on assignment (its metaclass
        # is SubfieldBase), install a descriptor emulating that behavior.
        metaclass = getattr(self.item_field, '__metaclass__', None)
        # BUGFIX: guard against item fields without a __metaclass__
        # attribute -- issubclass(None, ...) raises TypeError.
        if metaclass is not None and issubclass(metaclass,
                                                models.SubfieldBase):
            setattr(cls, self.name, _HandleAssignment(self))

    def db_type(self, connection):
        item_db_type = self.item_field.db_type(connection=connection)
        return '%s:%s' % (self.__class__.__name__, item_db_type)

    def _convert(self, func, values, *args, **kwargs):
        # Apply func to each item, rebuilding the field's container type.
        if isinstance(values, (list, tuple, set)):
            return self._type(func(value, *args, **kwargs)
                              for value in values)
        return values

    def to_python(self, value):
        return self._convert(self.item_field.to_python, value)

    def get_db_prep_value(self, value, connection, prepared=False):
        return self._convert(self.item_field.get_db_prep_value, value,
                             connection=connection, prepared=prepared)

    def get_db_prep_save(self, value, connection):
        return self._convert(self.item_field.get_db_prep_save,
                             value, connection=connection)

    def validate(self, values, model_instance):
        try:
            iter(values)
        except TypeError:
            raise ValidationError('Value of type %r is not iterable' %
                                  type(values))

    def formfield(self, **kwargs):
        raise NotImplementedError('No form field implemented for %r' %
                                  type(self))
class ListField(AbstractIterableField):
    """
    Field representing a Python ``list``.

    If the optional keyword argument `ordering` is given, it must be a
    callable that is passed to :meth:`list.sort` as `key` argument. If
    `ordering` is given, the items in the list will be sorted before
    sending them to the database.
    """
    _type = list

    def __init__(self, *args, **kwargs):
        self.ordering = kwargs.pop('ordering', None)
        if self.ordering is not None and not callable(self.ordering):
            raise TypeError("'ordering' has to be a callable or None, "
                            "not of type %r" % type(self.ordering))
        super(ListField, self).__init__(*args, **kwargs)

    def _convert(self, func, values, *args, **kwargs):
        converted = super(ListField, self)._convert(func, values,
                                                    *args, **kwargs)
        if converted is None or self.ordering is None:
            return converted
        # Keep the converted list sorted per the configured key.
        converted.sort(key=self.ordering)
        return converted
class SetField(AbstractIterableField):
    """Field representing a Python ``set``."""
    # The container type used when rebuilding converted values.
    _type = set
class DictField(AbstractIterableField):
    """
    Field representing a Python ``dict``.

    The field type conversions described in
    :class:`AbstractIterableField` only affect values of the dictionary,
    not keys.  Depending on the backend, keys that aren't strings might
    not be allowed.
    """
    _type = dict

    def _convert(self, func, values, *args, **kwargs):
        # Only values are converted; keys are passed through untouched.
        if values is None:
            return None
        return dict((key, func(value, *args, **kwargs))
                    for key, value in values.iteritems())

    def validate(self, values, model_instance):
        if not isinstance(values, dict):
            raise ValidationError('Value is of type %r. Should be a dict.' %
                                  type(values))
class BlobField(models.Field):
    """
    A field for storing blobs of binary data.

    The value might either be a string (or something that can be
    converted to a string), or a file-like object.  In the latter case,
    the object has to provide a ``read`` method from which the blob is
    read.
    """

    def get_internal_type(self):
        return 'BlobField'

    def formfield(self, **kwargs):
        # A file widget is provided, but use model FileField or ImageField
        # for storing specific files most of the time.
        from .widgets import BlobWidget
        from django.forms import FileField
        defaults = {'form_class': FileField, 'widget': BlobWidget}
        defaults.update(kwargs)
        return super(BlobField, self).formfield(**defaults)

    def get_db_prep_value(self, value, connection, prepared=False):
        # File-like objects are drained; everything else is stringified.
        if not hasattr(value, 'read'):
            return str(value)
        return value.read()

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        raise TypeError("BlobFields do not support lookups")

    def value_to_string(self, obj):
        return str(self._get_val_from_obj(obj))
| {
"repo_name": "gimler/techism2",
"path": "djangotoolbox/fields.py",
"copies": "2",
"size": "6347",
"license": "apache-2.0",
"hash": -9196883060828752000,
"line_mean": 36.5562130178,
"line_max": 90,
"alpha_frac": 0.6376240744,
"autogenerated": false,
"ratio": 4.1080906148867316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016897981332460441,
"num_lines": 169
} |
# All fields except for BlobField written by Jonas Haag <jonas@lophus.org>
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.importlib import import_module
__all__ = ('RawField', 'ListField', 'DictField', 'SetField',
'BlobField', 'EmbeddedModelField')
class _HandleAssignment(object):
    """
    Descriptor that funnels attribute assignment through the wrapped
    field's ``to_python`` conversion (emulating ``SubfieldBase``).
    """

    def __init__(self, field):
        # The model field whose to_python() performs the conversion.
        self.field = field

    def __get__(self, obj, type=None):
        # Class-level access has no instance dict to read from.
        if obj is None:
            raise AttributeError('Can only be accessed via an instance.')
        return obj.__dict__[self.field.name]

    def __set__(self, obj, value):
        # Convert on assignment so the instance always holds Python values.
        obj.__dict__[self.field.name] = self.field.to_python(value)
class RawField(models.Field):
    """Pass-through field: stores anything the backend natively accepts."""

    def get_internal_type(self):
        return 'RawField'
class AbstractIterableField(models.Field):
    """
    Abstract field for fields for storing iterable data type like
    ``list``, ``set`` and ``dict``.

    You can pass an instance of a field as the first argument.
    If you do, the iterable items will be piped through the passed
    field's validation and conversion routines, converting the items to
    the appropriate data type.
    """

    def __init__(self, item_field=None, *args, **kwargs):
        if item_field is None:
            item_field = RawField()
        self.item_field = item_field
        default = kwargs.get('default', None if kwargs.get('null') else ())
        if default is not None and not callable(default):
            # Ensure a new object is created every time the default is
            # accessed, so model instances never share a mutable default.
            kwargs['default'] = lambda: self._type(default)
        super(AbstractIterableField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name):
        self.item_field.model = cls
        self.item_field.name = name
        super(AbstractIterableField, self).contribute_to_class(cls, name)
        # If the item field converts values on assignment (its metaclass
        # is SubfieldBase), install a descriptor emulating that behavior.
        metaclass = getattr(self.item_field, '__metaclass__', None)
        # BUGFIX: guard against item fields without a __metaclass__
        # attribute -- issubclass(None, ...) raises TypeError.
        if metaclass is not None and issubclass(metaclass,
                                                models.SubfieldBase):
            setattr(cls, self.name, _HandleAssignment(self))

    def db_type(self, connection):
        item_db_type = self.item_field.db_type(connection=connection)
        return '%s:%s' % (self.__class__.__name__, item_db_type)

    def _convert(self, func, values, *args, **kwargs):
        # Apply func to each item, rebuilding the field's container type.
        if isinstance(values, (list, tuple, set)):
            return self._type(func(value, *args, **kwargs)
                              for value in values)
        return values

    def to_python(self, value):
        return self._convert(self.item_field.to_python, value)

    def pre_save(self, model_instance, add):
        # The item field's pre_save expects a model instance carrying the
        # value under the field's attname, so a throwaway instance is
        # populated per item.
        class fake_instance(object):
            pass
        fake_instance = fake_instance()

        def wrapper(value):
            assert not hasattr(self.item_field, 'attname')
            fake_instance.value = value
            self.item_field.attname = 'value'
            try:
                return self.item_field.pre_save(fake_instance, add)
            finally:
                del self.item_field.attname

        return self._convert(wrapper, getattr(model_instance, self.attname))

    def get_db_prep_value(self, value, connection, prepared=False):
        return self._convert(self.item_field.get_db_prep_value, value,
                             connection=connection, prepared=prepared)

    def get_db_prep_save(self, value, connection):
        return self._convert(self.item_field.get_db_prep_save,
                             value, connection=connection)

    # TODO/XXX: Remove this once we have a cleaner solution
    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return value

    def validate(self, values, model_instance):
        try:
            iter(values)
        except TypeError:
            raise ValidationError('Value of type %r is not iterable' %
                                  type(values))

    def formfield(self, **kwargs):
        raise NotImplementedError('No form field implemented for %r' %
                                  type(self))
class ListField(AbstractIterableField):
    """
    Field representing a Python ``list``.

    If the optional keyword argument `ordering` is given, it must be a
    callable that is passed to :meth:`list.sort` as `key` argument. If
    `ordering` is given, the items in the list will be sorted before
    sending them to the database.
    """
    _type = list

    def __init__(self, *args, **kwargs):
        self.ordering = kwargs.pop('ordering', None)
        if self.ordering is not None and not callable(self.ordering):
            raise TypeError("'ordering' has to be a callable or None, "
                            "not of type %r" % type(self.ordering))
        super(ListField, self).__init__(*args, **kwargs)

    def _convert(self, func, values, *args, **kwargs):
        converted = super(ListField, self)._convert(func, values,
                                                    *args, **kwargs)
        if converted is None or self.ordering is None:
            return converted
        # Keep the converted list sorted per the configured key.
        converted.sort(key=self.ordering)
        return converted
class SetField(AbstractIterableField):
    """Field representing a Python ``set``."""
    # The container type used when rebuilding converted values.
    _type = set
class DictField(AbstractIterableField):
    """
    Field representing a Python ``dict``.

    The field type conversions described in
    :class:`AbstractIterableField` only affect values of the dictionary,
    not keys.  Depending on the backend, keys that aren't strings might
    not be allowed.
    """
    _type = dict

    def _convert(self, func, values, *args, **kwargs):
        # Only values are converted; keys are passed through untouched.
        if values is None:
            return None
        return dict((key, func(value, *args, **kwargs))
                    for key, value in values.iteritems())

    def validate(self, values, model_instance):
        if not isinstance(values, dict):
            raise ValidationError('Value is of type %r. Should be a dict.' %
                                  type(values))
class BlobField(models.Field):
    """
    A field for storing blobs of binary data.

    The value might either be a string (or something that can be
    converted to a string), or a file-like object.  In the latter case,
    the object has to provide a ``read`` method from which the blob is
    read.
    """

    def get_internal_type(self):
        return 'BlobField'

    def formfield(self, **kwargs):
        # A file widget is provided, but use model FileField or ImageField
        # for storing specific files most of the time.
        from .widgets import BlobWidget
        from django.forms import FileField
        defaults = {'form_class': FileField, 'widget': BlobWidget}
        defaults.update(kwargs)
        return super(BlobField, self).formfield(**defaults)

    def get_db_prep_value(self, value, connection, prepared=False):
        # File-like objects are drained; everything else is stringified.
        if not hasattr(value, 'read'):
            return str(value)
        return value.read()

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        raise TypeError("BlobFields do not support lookups")

    def value_to_string(self, obj):
        return str(self._get_val_from_obj(obj))
class EmbeddedModelField(models.Field):
"""
Field that allows you to embed a model instance.
:param model: (optional) The model class that shall be embedded
(may also be passed as string similar to relation fields)
"""
__metaclass__ = models.SubfieldBase
def __init__(self, embedded_model=None, *args, **kwargs):
self.embedded_model = embedded_model
kwargs.setdefault('default', None)
super(EmbeddedModelField, self).__init__(*args, **kwargs)
def db_type(self, connection):
return 'DictField:RawField'
def _set_model(self, model):
# EmbeddedModelFields are not contribute[d]_to_class if using within
# ListFields (and friends), so we can only know the model field is
# used in when the IterableField sets our 'model' attribute in its
# contribute_to_class method.
# We need to know the model to generate a valid key for the lookup.
if model is not None and isinstance(self.embedded_model, basestring):
# The model argument passed to __init__ was a string, so we need
# to make sure to resolve that string to the corresponding model
# class, similar to relation fields. We abuse some of the
# relation fields' code to do the lookup here:
def _resolve_lookup(self_, resolved_model, model):
self.embedded_model = resolved_model
from django.db.models.fields.related import add_lazy_relation
add_lazy_relation(model, self, self.embedded_model, _resolve_lookup)
self._model = model
model = property(lambda self:self._model, _set_model)
def pre_save(self, model_instance, add):
embedded_instance = super(EmbeddedModelField, self).pre_save(model_instance, add)
if embedded_instance is None:
return None, None
if self.embedded_model is not None and \
not isinstance(embedded_instance, self.embedded_model):
raise TypeError("Expected instance of type %r, not %r"
% (type(self.embedded_model), type(embedded_instance)))
data = dict((field.name, field.pre_save(embedded_instance, add))
for field in embedded_instance._meta.fields)
return embedded_instance, data
def get_db_prep_value(self, (embedded_instance, embedded_dict), **kwargs):
if embedded_dict is None:
return None
values = dict()
for name, value in embedded_dict.iteritems():
field = embedded_instance._meta.get_field(name)
values[name] = field.get_db_prep_value(value, **kwargs)
if self.embedded_model is None:
values.update({'_module' : embedded_instance.__class__.__module__,
'_model' : embedded_instance.__class__.__name__})
return values
# TODO/XXX: Remove this once we have a cleaner solution
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return value
def to_python(self, values):
if not isinstance(values, dict):
return values
module, model = values.pop('_module', None), values.pop('_model', None)
if module is not None:
return getattr(import_module(module), model)(**values)
return self.embedded_model(**values)
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "djangotoolbox/fields.py",
"copies": "3",
"size": "11027",
"license": "bsd-3-clause",
"hash": 6752567719843808000,
"line_mean": 38.9925650558,
"line_max": 90,
"alpha_frac": 0.6164868051,
"autogenerated": false,
"ratio": 4.267414860681114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6383901665781115,
"avg_score": null,
"num_lines": null
} |
__all__ = ['FileChar', 'FileCharRange']
import attr
@attr.s(frozen=True)
class FileChar(object):
    """
    Encodes the location of a given character in a source code file.
    """
    # The name of the file to which this character belongs.
    filename = attr.ib(type=str)
    # The number of characters that precede this character in the file.
    offset = attr.ib(type=int)

    def __str__(self) -> str:
        return "{0}[{1}]".format(self.filename, self.offset)
@attr.s(frozen=True)
class FileCharRange(object):
    """
    Describes a contiguous range of characters in a single file.
    """
    # The location of the character at which this range begins.
    start = attr.ib(type=FileChar)
    # The location of the character at which this range ends.
    stop = attr.ib(type=FileChar)

    def __str__(self) -> str:
        span = "{}..{}".format(self.start.offset, self.stop.offset)
        return "{}[{}]".format(self.filename, span)

    @property
    def filename(self) -> str:
        """
        The name of the file that this character range covers.
        """
        return self.start.filename

    def __len__(self) -> int:
        """
        Returns the length of this range, as measured by the number of
        characters that it spans.
        """
        return self.stop.offset - self.start.offset
| {
"repo_name": "squaresLab/RepairBox",
"path": "bugzoo/core/filechar.py",
"copies": "3",
"size": "1387",
"license": "mit",
"hash": -2194638100631474700,
"line_mean": 26.1960784314,
"line_max": 75,
"alpha_frac": 0.5825522711,
"autogenerated": false,
"ratio": 4.103550295857988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6186102566957988,
"avg_score": null,
"num_lines": null
} |
__all__ = ["FileFetcher", "ImageCrawlerLoaderTest"]
from abc import ABC, abstractmethod
from collections import OrderedDict
from http.client import HTTPResponse
from os.path import join as path_join
from pathlib import Path
from typing import Dict, Tuple, Type, Union
from unittest import TestCase
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
from urllib.response import addinfourl
from nichtparasoup.core.imagecrawler import BaseImageCrawler, RemoteFetcher
from nichtparasoup.imagecrawler import get_imagecrawlers
class FileFetcher(RemoteFetcher):
    r"""
    A file fetcher that can be used for testing with local files.

    URI are modified so query params are sorted - which makes same URL unique.

    Remember the FileSystem restrictions:
    * some FS do not support CaseSensitivity
    * some FS do not allow these characters: \/:*?"<>|
    """

    def __init__(self, known_files: Dict[str, str], base_dir: str = '') -> None:  # pragma: no cover
        super().__init__()
        # Map each query-sorted URI onto a file:// URI for a local file.
        self._known = {
            self.__class__._uri_sort_query(uri): self._build_uri(file, base_dir)
            for uri, file
            in known_files.items()
        }  # type: Dict[str, str]

    @classmethod
    def _build_uri(cls, file: str, base_dir: str = '') -> str:
        file_path = Path(path_join(base_dir, file) if base_dir else file)
        cls._test_path(file_path)
        return file_path.as_uri()

    @staticmethod
    def _test_path(file_path: Path) -> None:
        if not file_path.is_absolute():
            raise FileNotFoundError('Path not absolute: {!r}'.format(file_path))
        if not file_path.is_file():
            raise FileNotFoundError('Not a file: {!r}'.format(file_path))
        # test if readable. will raise errors on its own
        file_path.open('r').close()

    @classmethod
    def _uri_sort_query(cls, uri: str) -> str:
        scheme, netloc, path, params, query, fragment = urlparse(uri)
        if query == '':
            sorted_query = query
        else:
            parsed = parse_qs(query, keep_blank_values=True)
            ordered = OrderedDict((key, parsed[key]) for key in sorted(parsed))
            sorted_query = urlencode(ordered, doseq=True)
        return urlunparse((scheme, netloc, path, params, sorted_query, fragment))

    def _get_file_uri(self, uri: str) -> str:
        _, _, url, params, query, fragment = urlparse(uri)
        # Drop scheme and netloc, then normalize the query order before
        # looking the URI up in the known-files map.
        relative = urlunparse(('', '', url, params, query, fragment))
        uri_sorted = self.__class__._uri_sort_query(relative)
        known = self._known.get(uri_sorted)
        if not known:
            raise FileNotFoundError('uri unexpected: {}'.format(uri_sorted))
        return known

    @staticmethod
    def _valid_uri(uri: str) -> bool:
        scheme, _, _, _, _, _ = urlparse(uri)
        return scheme == 'file'

    def get_stream(self, uri: str) -> Tuple[Union[HTTPResponse, addinfourl], str]:
        # Fetch from the mapped local file, but report the requested URI.
        response, _ = super().get_stream(self._get_file_uri(uri))
        return response, uri
class ImageCrawlerLoaderTest(TestCase, ABC):
    """
    Helper for testing that the loader finds an ImageCrawler plugin properly.

    Just implement the abstract properties ``ic_name`` and ``ic_class``
    and call the ``check`` method in a test.
    """

    @property
    @abstractmethod
    def ic_name(self) -> str:  # pragma: no cover
        """
        Return the intended ImageCrawler's name.

        That's basically the name you chose in the EntryPoint.

        Example implementation:
            return "MyImageCrawler"
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def ic_class(self) -> Type[BaseImageCrawler]:  # pragma: no cover
        """The class of your ImageCrawler.

        Example implementation:
            return MyImageCrawler
        """
        raise NotImplementedError()

    def check(self) -> None:
        # Run both directions of the registry lookup.
        self.check_get_imagecrawler_class()
        self.check_get_imagecrawler_name()

    def check_get_imagecrawler_class(self) -> None:
        # act
        found_class = get_imagecrawlers().get_class(self.ic_name)
        # assert
        self.assertIs(found_class, self.ic_class)

    def check_get_imagecrawler_name(self) -> None:
        # act
        found_name = get_imagecrawlers().get_name(self.ic_class)
        # assert
        self.assertEqual(found_name, self.ic_name)
| {
"repo_name": "k4cg/nichtparasoup",
"path": "nichtparasoup/testing/imagecrawler.py",
"copies": "1",
"size": "4461",
"license": "mit",
"hash": 456110105430331400,
"line_mean": 34.125984252,
"line_max": 100,
"alpha_frac": 0.630576104,
"autogenerated": false,
"ratio": 3.8690372940156115,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9997387883706026,
"avg_score": 0.00044510286191706757,
"num_lines": 127
} |
__all__ = ["File", "FileData"]
import json
from flask import current_app
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.types import TypeDecorator, Unicode
from zeus.utils.imports import import_string
from zeus.utils.sentry import span
class FileData(Mutable):
    # Mutable value object describing a file stored in a pluggable backend.
    # It is (de)serialized to the JSON payload of a `File` column; inheriting
    # from SQLAlchemy's Mutable lets in-place edits flag the owning column
    # dirty via self.changed().

    def __init__(self, data=None, default_storage=None, default_path=None):
        # `data` is the deserialized JSON column payload; may be None/empty.
        if data is None:
            data = {}
        self.filename = data.get("filename")
        # A value only "exists" when the payload actually names a file.
        self.exists = bool(data and self.filename)
        # Fall back to the application-wide FILE_STORAGE config when the
        # payload does not pin a storage backend.
        self.storage = data.get(
            "storage", default_storage or current_app.config["FILE_STORAGE"]
        )
        self.path = data.get("path", default_path)
        self.size = data.get("size", None)

    def __repr__(self):
        if not self.exists:
            return "<%s: not present>" % (type(self).__name__,)
        return "<%s: filename=%s>" % (type(self).__name__, self.filename)

    def __bool__(self):
        # Truthiness mirrors whether a file is actually present.
        return self.exists

    def get_storage(self):
        # Instantiate the backend named by dotted path in
        # self.storage["backend"], passing its options through (with our
        # path override, when set).
        storage = import_string(self.storage["backend"])
        options = self.storage.get("options", {})
        if self.path is not None:
            options["path"] = self.path
        return storage(**options)

    def url_for(self, expire=300):
        # Backend-generated URL for the stored file; None when no file.
        if self.filename is None:
            return
        return self.get_storage().url_for(self.filename, expire=expire)

    @span("file.save")
    def save(self, fp, filename=None, allow_ref=True):
        # Persist `fp` (another FileData or a file-like object) under
        # self.filename, update size metadata, and mark the value changed.
        # this is effectively a copy
        if isinstance(fp, FileData):
            self.size = fp.size
            if filename:
                self.filename = filename
                self.get_storage().save(self.filename, fp.get_file())
            elif fp.filename and allow_ref:
                # we avoid re-saving anything at this point
                self.filename = fp.filename
                self.path = fp.path
                self.storage = fp.storage
            elif self.filename:
                self.get_storage().save(self.filename, fp.get_file())
            else:
                raise ValueError("Missing filename")
            self.exists = True
        else:
            if filename:
                self.filename = filename
                self.exists = True
            elif self.filename is None:
                raise ValueError("Missing filename")
            # Flask's FileStorage object might give us an accurate content_length,
            # otherwise we need to seek the underlying file to obtain its size.
            # For in-memory files this will fail, so we just assume None as default
            if hasattr(fp, "content_length") and fp.content_length:
                self.size = fp.content_length
            else:
                try:
                    pos = fp.tell()
                    fp.seek(0, 2)
                    self.size = fp.tell()
                    fp.seek(pos)
                except (AttributeError, IOError):
                    self.size = None
            self.get_storage().save(self.filename, fp)
        self.changed()

    @span("file.delete")
    def delete(self):
        # Remove the backing file and reset all metadata.
        self.get_storage().delete(self.filename)
        self.exists = False
        self.filename = None
        self.size = None
        self.changed()

    def get_file(self):
        # Open the stored file via the backend; requires a filename.
        if self.filename is None:
            raise ValueError("Missing filename")
        return self.get_storage().get_file(self.filename)

    @classmethod
    def coerce(cls, key, value):
        # Mutable API hook: accept assigned values as-is, no coercion needed.
        return value
class File(TypeDecorator):
    """SQLAlchemy column type storing FileData metadata as a JSON string."""

    impl = Unicode
    python_type = FileData

    def __init__(self, path="", storage=None, *args, **kwargs):
        super(File, self).__init__(*args, **kwargs)
        self.path = path
        self.storage = {"storage": storage}

    def process_bind_param(self, value, dialect):
        """Serialize a FileData (or plain dict) into its JSON column value."""
        if value:
            if isinstance(value, FileData):
                if not value.exists:
                    value = {}
                else:
                    value = {
                        "filename": value.filename,
                        "storage": value.storage,
                        "path": value.path,
                        "size": value.size,
                    }
            # json.dumps already returns str; the original wrapped it in a
            # redundant str() call.
            return json.dumps(value)
        # Empty JSON object for falsy values (the legacy u-prefix was a no-op).
        return "{}"

    def process_result_value(self, value, dialect):
        """Deserialize the JSON column value back into a FileData."""
        if value:
            return FileData(json.loads(value), self.storage, self.path)
        return FileData({}, self.storage, self.path)
# Register FileData as the mutable value type tracked for File columns, so
# in-place changes (reported via FileData.changed()) mark the attribute dirty.
FileData.associate_with(File)
| {
"repo_name": "getsentry/zeus",
"path": "zeus/db/types/file.py",
"copies": "1",
"size": "4474",
"license": "apache-2.0",
"hash": 4910951525360080000,
"line_mean": 30.2867132867,
"line_max": 83,
"alpha_frac": 0.5435851587,
"autogenerated": false,
"ratio": 4.356377799415774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5399962958115774,
"avg_score": null,
"num_lines": null
} |
__all__ = ['file', 'ftp', 'imap', 'irc', 'smtp']
import ssl
from urllib.parse import urlparse
from urllib.parse import parse_qs as urlqueryparse
from urllib.parse import unquote as urldecode
class Method(object):
    """Base class for a delivery-check transport (sender or receiver).

    Attributes:
        url: parsed URL for this method
        remoteurl: parsed URL for the other method
        path: the URL's path
        query: the URL's query string
        query_params: the URL's query string, parsed into a dict
        connection: the connection as defined by the method
        host: the URL's host name
        port: the URL's port number
        user: the URL's decoded user name, if any, else None
        password: the URL's decoded password, if any, else None
        ssl_verify_mode: ssl.CERT_NONE|OPTIONAL|REQUIRED, taken from the
            ``ssl_verify`` query parameter
    """

    def __init__(self, url, remoteurl, *args, **kwargs):
        super(Method, self).__init__(*args, **kwargs)
        self.url = urlparse(url)
        self.remoteurl = urlparse(remoteurl)
        self.path = self.url.path
        self.query = self.url.query
        self.query_params = urlqueryparse(self.query)
        self.connection = None
        self._split_host_port()
        self._split_user_password()
        self._ssl_verify()
        self._doc = None

    def _ssl_verify(self):
        # Map the ssl_verify query parameter ("NONE"/"OPTIONAL"/"REQUIRED")
        # onto the matching ssl.CERT_* constant; unknown values fall back to
        # CERT_REQUIRED.
        requested = self.query_params.get('ssl_verify', ['REQUIRED'])[0]
        self.ssl_verify_mode = getattr(
            ssl, 'CERT_{}'.format(requested), ssl.CERT_REQUIRED)

    def _split_host_port(self):
        self.host = self.url.hostname
        self.port = self.url.port

    def _split_user_password(self):
        # urlparse yields None when credentials are absent; only decode
        # actual strings.
        username = self.url.username
        password = self.url.password
        self.user = None if username is None else urldecode(username)
        self.password = None if password is None else urldecode(password)

    def get_ssl_context(self):
        """
        Get a new SSLContext based on the URL's ssl_verify parameter.
        The default context checks for a valid certificate (CERT_REQUIRED).
        """
        context = ssl.create_default_context()
        if self.ssl_verify_mode == ssl.CERT_NONE:
            # check_hostname must be disabled before verify_mode may be
            # lowered to CERT_NONE.
            context.check_hostname = False
        context.verify_mode = self.ssl_verify_mode
        return context

    def connect_early(self):
        """
        Hook for Receivers that must be connected before Senders.
        """
        pass

    @property
    def doc(self):
        return self._doc

    def connect(self):
        raise NotImplementedError()

    def send(self, message):
        """
        Send the token over the current method.
        """
        raise NotImplementedError()

    def recv(self, message):
        """
        Receive the token over the current method.
        Must raise a CheckDeliveryException (OkException, CriticalException...).
        """
        raise NotImplementedError()

    def clean(self):
        """
        Post send/recv task to clean things up.
        """
        pass

    def close(self):
        """
        Opposite of connect().
        """
        raise NotImplementedError()

    def close_late(self):
        """
        For senders that cannot be closed before the receiver has run.
        """
        pass
| {
"repo_name": "Leryan/check_delivery",
"path": "check_delivery/delivery/__init__.py",
"copies": "1",
"size": "3293",
"license": "mit",
"hash": 6584544229409920000,
"line_mean": 26.2148760331,
"line_max": 90,
"alpha_frac": 0.5982386881,
"autogenerated": false,
"ratio": 4.184243964421855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5282482652521855,
"avg_score": null,
"num_lines": null
} |
__all__ = ['FileManager']
import os
from kivy.core.image import Image as CoreImage
from kivy.cache import Cache
# static class for accessing game paths and os methods
class FileManager:
    """Static helper exposing game folder paths and texture/file loading."""

    _tag = "[FileManager] "
    _initialized = False
    IMAGE_FORMAT = ".png"

    # to be set directly after creation
    root = {"game": None, "img": None, "mod": None}

    # texture cache
    TEXTURE_CACHE = "textures"
    _texture_none = None

    @classmethod
    def set_game_root(cls, path):
        '''sets the game folder paths considering root path'''
        cls.root["game"] = path
        cls.root["img"] = path + "/img/"
        cls.root["mod"] = path + "/mods/"
        cls.initialize()

    @classmethod
    def initialize(cls):
        '''registers the texture cache and flags FileManager as initialized'''
        # make texture cache
        Cache.register(cls.TEXTURE_CACHE)
        # flag FileManager as initialized
        cls._initialized = True

    @classmethod
    def check_if_initialized(cls):
        '''returns False when the manager is uninitialized, else None

        NOTE(review): this cannot "abort" its caller as the original
        docstring claimed; every caller ignores the return value.  The
        behavior is kept unchanged for compatibility.
        '''
        if not cls._initialized:
            return False

    @classmethod
    def game_root(cls):
        '''returns the game root folder path'''
        cls.check_if_initialized()
        return cls.root["game"]

    @classmethod
    def img_root(cls):
        '''returns the image resource folder path'''
        cls.check_if_initialized()
        return cls.root["img"]

    @classmethod
    def img_format(cls):
        '''returns the accepted image file extension'''
        # use the class constant instead of repeating the ".png" literal
        return cls.IMAGE_FORMAT

    @classmethod
    def mod_root(cls):
        '''returns the mod folder path'''
        cls.check_if_initialized()
        return cls.root["mod"]

    @classmethod
    def exists(cls, file_name, is_image=False, auto_ext=True):
        '''returns whether a file exists (image names resolved under img_root)'''
        if is_image:
            file_path = cls.img_root() + file_name
            if auto_ext:
                file_path += cls.IMAGE_FORMAT
            return os.path.isfile(file_path)
        return os.path.isfile(file_name)

    @classmethod
    def read_file(cls, file_path):
        '''returns the content of the file at file_path'''
        print(cls._tag + "READ " + file_path)
        # context manager guarantees the handle is closed (the original
        # relied on garbage collection)
        with open(file_path, "r") as handle:
            return handle.read()

    @classmethod
    def get_texture(cls, name, type="", use_cache=True):
        '''returns a kivy Texture for the named image, or None on failure'''
        # when manager is uninitialized
        if not cls._initialized:
            print(cls._tag + "ERROR manager is not initialized")
            return None
        # when name is missing -> nothing to load
        if name is None or name == "none":
            print(cls._tag + "ERROR while loading texture; is none.")
            return
        # truthiness instead of `== True`/`== False`: the original silently
        # returned None for truthy non-bool values of use_cache
        if use_cache:
            return cls.get_texture_from_cache(name)
        return cls.get_texture_from_file(name)

    @classmethod
    def get_texture_from_file(cls, name):
        '''loads and returns a texture from an image file, or None'''
        full_name = str(name) + cls.IMAGE_FORMAT
        abs_path = cls.img_root() + full_name
        texture = None
        try:
            if abs_path:
                print(cls._tag + "trying to load " + str(abs_path) + " from file")
                image = CoreImage(abs_path)
                if image:
                    texture = image.texture
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed
            print(cls._tag + "ABORT; can't load texture")
            return
        return texture

    @classmethod
    def add_texture_to_cache(cls, name):
        '''adds a texture to the texture cache'''
        texture = cls.get_texture_from_file(name)
        Cache.append(cls.TEXTURE_CACHE, name, texture)

    @classmethod
    def get_texture_from_cache(cls, name):
        '''returns the texture under name from the cache, loading on a miss'''
        # try to load texture from cache
        texture = Cache.get(cls.TEXTURE_CACHE, name)
        if texture is None:
            # load from file & add to cache
            texture = cls.get_texture_from_file(name)
            cls.add_texture_to_cache(name)
        return texture
| {
"repo_name": "herrschr/pocket-throne",
"path": "pocketthrone/managers/filemanager.py",
"copies": "2",
"size": "3738",
"license": "bsd-2-clause",
"hash": 1965128167719563500,
"line_mean": 25.5106382979,
"line_max": 71,
"alpha_frac": 0.682718031,
"autogenerated": false,
"ratio": 3.216867469879518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48995855008795186,
"avg_score": null,
"num_lines": null
} |
import os, re, sys
# Http2d Project:
# Error generation and check
#
# This script loads a error definition file, and generates .h and .c
# from it. It also checks whether all the errors are actually being
# used, look for undefined errors, and check that the number of
# parameters matches between the error definition and the source code.
#
# Directories scanned (non-recursively) for .c sources when cross-checking
# HTTP2D_ERROR_* usage against the definitions.
SOURCE_DIRS = ['.']
#
# Error reading
#
class Http2dError:
def __init__ (self, **kwargs):
self.id = kwargs['id']
self.title = kwargs['title']
self.description = kwargs.get('desc', '').replace('\n',' ')
self.url_admin = kwargs.get('admin', '')
self.help = kwargs.get('help', [])
self.debug = kwargs.get('debug', '')
self.show_bt = kwargs.get('show_bt', True)
_errors = []
def e (error_id, title, **kwargs):
global _errors
# Check dup. errors
for err in _errors:
if error_id == err.id:
raise ValueError, "ERROR: Duplicated error %s" %(error_id)
# New error
kwargs['id'] = error_id
kwargs['title'] = title
error = Http2dError (**kwargs)
_errors.append (error)
def read_errors():
    """Load the error definitions by executing error_list.py in this scope."""
    # Context manager closes the handle deterministically; the original
    # left the open file object to the garbage collector.
    with open("error_list.py", 'r') as fh:
        exec(fh.read())
def check_source_code (dirs):
    """Cross-check HTTP2D_ERROR_* usage in .c files against the definitions.

    Reports (to stderr) errors that are used but undefined, and definitions
    that are never used.  Returns True when a blocking problem was found.
    """
    # Defined errors are unseen until the grep below proves otherwise
    for def_e in _errors:
        def_e.__seen_in_grep = False

    # Grep the source code
    errors_seen = {}
    for d in dirs:
        for f in os.listdir(d):
            fullpath = os.path.join (d, f)
            if not fullpath.endswith('.c') or \
               not os.path.isfile(fullpath):
                continue

            # Check the source code (close the handle deterministically).
            # The loop variable is named err_id; the original's `e` shadowed
            # the module-level e() helper.
            with open (fullpath, 'r') as source:
                content = source.read()
            for err_id in re.findall (r'HTTP2D_ERROR_([\w_]+)[ ,)]', content):
                errors_seen[err_id] = fullpath

                # Mark used object
                for def_e in _errors:
                    if err_id == def_e.id:
                        def_e.__seen_in_grep = True

    # Undefined errors in the source code
    error_found = False
    for s in errors_seen.keys():
        found = False
        for def_e in _errors:
            if s == def_e.id:
                found = True
                break
        if not found:
            # stderr.write() is portable across Python 2 and 3; the original
            # used the Python 2-only `print >>` syntax.
            sys.stderr.write("Undefined Error: HTTP2D_ERROR_%s, used in %s\n" % (s, errors_seen[s]))
            error_found = True

    # Unused errors in the definition file
    for def_e in _errors:
        if not def_e.__seen_in_grep:
            sys.stderr.write("Unused Error: HTTP2D_ERROR_%s\n" % (def_e.id))
            ### TMP ### error_found = True

    return error_found
def check_parameters (dirs):
    """Verify %-format parameter counts match between definitions and calls.

    Counts the format placeholders in each definition's strings and the
    arguments of each LOG_* call in the .c sources, then compares them.
    Returns True when a mismatch was found.
    """
    known_errors_params = {}
    source_errors_params = {}

    # Known: count % placeholders in each definition's combined strings
    for error in _errors:
        tmp = error.title + error.description + error.url_admin + error.debug
        tmp = tmp.replace('%%', '')
        known_errors_params [error.id] = tmp.count('%')

    # Source
    for d in dirs:
        for f in os.listdir(d):
            fullpath = os.path.join (d, f)
            if not fullpath.endswith('.c') or \
               not os.path.isfile(fullpath):
                continue

            # Extract the HTTP2D_ERROR_*
            with open (fullpath, 'r') as source:
                content = source.read()
            matches_log = re.findall (r'LOG_[\w_]+ ?' +\
                                      r'\(HTTP2D_ERROR_([\w_]+)[ ,\n\t\\]*' +\
                                      r'(.*?)\);', content, re.MULTILINE | re.DOTALL)
            matches_errno = re.findall (r'LOG_ERRNO[_S ]*' +\
                                        r'\(.+?,.+?,[ \n\t]*HTTP2D_ERROR_([\w_]+)[ ,\n\t\\]*' +\
                                        r'(.*?)\);', content, re.MULTILINE | re.DOTALL)
            matches = matches_errno + matches_log

            for match in matches:
                error = match[0]
                params = match[1].strip()

                # Remove internal sub-parameters (function parameters)
                tmp = params
                while True:
                    internal_params = re.findall(r'(\(.*?\))', tmp)
                    if not internal_params:
                        break
                    for param in internal_params:
                        tmp = tmp.replace(param, '')

                # BUGFIX: len(filter(...)) raises TypeError on Python 3
                # (filter is lazy there); a list comprehension counts the
                # non-empty chunks on both versions.
                params_num = len([piece for piece in tmp.split(',') if len(piece)])
                source_errors_params[error] = params_num

    # Compare both
    error_found = False
    for error in _errors:
        source_num = source_errors_params.get(error.id)
        known_num = known_errors_params.get(error.id)

        if not source_num or not known_num:
            sys.stderr.write("ERROR: Invalid parameter number: %s (source %s, definition %s)\n" % (error.id, source_num, known_num))
            #### TMP ##### error_found = True
        elif source_num != known_num:
            sys.stderr.write("ERROR: Parameter number mismatch: %s (source %d, definition %d)\n" % (error.id, source_num, known_num))
            error_found = True

    return error_found
#
# File generation
#
HEADER = """/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */
/* All files in http2d are Copyright (C) 2012 Alvaro Lopez Ortega.
*
* Authors:
* * Alvaro Lopez Ortega <alvaro@alobbs.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* NOTE: File automatically generated (by error_list.py). */
"""
def generate_C_defines ():
    """Render one #define line per registered error, aligned by id length."""
    lines = []
    width = max(len(err.id) for err in _errors)
    for num, err in enumerate(_errors):
        padding = " " * (width - len(err.id))
        lines.append('#define HTTP2D_ERROR_%s %s %d\n' % (err.id.upper(), padding, num))
    return ''.join(lines)
def generate_C_errors ():
    """Render the static __http2d_errors[] C table from the registry."""
    out = []
    out.append('static const http2d_error_t __http2d_errors[] =\n')
    out.append('{\n')

    for num, err in enumerate(_errors):
        out.append(' { % 3d, "%s", ' % (num, err.title))
        # Optional string fields become NULL when empty.
        for field in (err.description, err.url_admin, err.debug):
            if field:
                out.append('"%s", ' % (field))
            else:
                out.append('NULL, ')
        out.append(('false', 'true')[err.show_bt])
        out.append(' },\n')

    # Sentinel entry terminating the table.
    out.append(' { -1, NULL, NULL, NULL, NULL, true }\n')
    out.append('};\n')
    return ''.join(out)
def main():
    """Command-line entry point: validate definitions and emit .h/.c text."""
    # Check parameters
    error = False
    do_defines = '--defines' in sys.argv
    do_errors = '--errors' in sys.argv
    dont_test = '--skip-tests' in sys.argv

    if len(sys.argv) >= 1:
        # The output file is always the last argument; the original local
        # was named `file`, shadowing the (Python 2) builtin.
        out_path = sys.argv[-1]
        if out_path.startswith ('-'):
            error = True
        if not do_defines and not do_errors:
            error = True
    else:
        error = True

    if error:
        # print with a single parenthesized argument is valid on both
        # Python 2 and 3; the original bare `print "..."` is 2-only.
        print("USAGE:")
        print("")
        print(" * Create the definitions file:")
        print(" %s [--skip-tests] --defines output_file" % (sys.argv[0]))
        print("")
        print(" * Create the error list file:")
        print(" %s [--skip-tests] --errors output_file" % (sys.argv[0]))
        print("")
        sys.exit(1)

    # Perform
    read_errors()
    if not dont_test:
        error = check_source_code (SOURCE_DIRS)
        error |= check_parameters (SOURCE_DIRS)
        if error:
            sys.exit(1)

    if do_defines:
        txt = HEADER
        txt += generate_C_defines()
        # context manager closes the output handle deterministically
        with open (out_path, 'w+') as output:
            output.write(txt)

    if do_errors:
        txt = HEADER
        txt += generate_C_errors()
        with open (out_path, 'w+') as output:
            output.write(txt)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
| {
"repo_name": "http2d/SPDY_test",
"path": "src/errors.py",
"copies": "1",
"size": "10323",
"license": "bsd-2-clause",
"hash": -640135865516899600,
"line_mean": 31.3605015674,
"line_max": 134,
"alpha_frac": 0.5728954761,
"autogenerated": false,
"ratio": 3.861952861952862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4934848338052862,
"avg_score": null,
"num_lines": null
} |
__all__ = ["FileSpec"]
import os
import time
from panda3d.core import Filename, HashVal, VirtualFileSystem
class FileSpec:
    """ This class represents a disk file whose hash and size
    etc. were read from an xml file.  This class provides methods to
    verify whether the file on disk matches the version demanded by
    the xml. """

    def __init__(self):
        # FileSpec for the file actually found on disk, filled in as a side
        # effect of checkHash().
        self.actualFile = None

        self.filename = None
        self.size = 0
        self.timestamp = 0
        self.hash = None

    def fromFile(self, packageDir, filename, pathname = None, st = None):
        """ Reads the file information from the indicated file.  If st
        is supplied, it is the result of os.stat on the filename. """

        # (The original fetched the global VirtualFileSystem pointer into an
        # unused local here; that dead assignment has been removed.)
        filename = Filename(filename)
        if pathname is None:
            pathname = Filename(packageDir, filename)

        self.filename = str(filename)
        self.basename = filename.getBasename()

        if st is None:
            st = os.stat(pathname.toOsSpecific())
        self.size = st.st_size
        self.timestamp = int(st.st_mtime)

        self.readHash(pathname)

    def readHash(self, pathname):
        """ Reads the hash only from the indicated pathname. """
        hv = HashVal()
        hv.hashFile(pathname)
        self.hash = hv.asHex()

    def loadXml(self, xelement):
        """ Reads the file information from the indicated XML
        element. """
        self.filename = xelement.Attribute('filename')
        self.basename = None
        if self.filename:
            self.basename = Filename(self.filename).getBasename()

        size = xelement.Attribute('size')
        try:
            self.size = int(size)
        except (TypeError, ValueError):
            # Attribute missing (None) or malformed; narrowed from a bare
            # `except:` which also swallowed KeyboardInterrupt/SystemExit.
            self.size = 0

        timestamp = xelement.Attribute('timestamp')
        try:
            self.timestamp = int(timestamp)
        except (TypeError, ValueError):
            self.timestamp = 0

        self.hash = xelement.Attribute('hash')

    def storeXml(self, xelement):
        """ Adds the file information to the indicated XML
        element. """
        if self.filename:
            xelement.SetAttribute('filename', self.filename)
        if self.size:
            xelement.SetAttribute('size', str(self.size))
        if self.timestamp:
            xelement.SetAttribute('timestamp', str(int(self.timestamp)))
        if self.hash:
            xelement.SetAttribute('hash', self.hash)

    def storeMiniXml(self, xelement):
        """ Adds the just the "mini" file information--size and
        hash--to the indicated XML element. """
        if self.size:
            xelement.SetAttribute('size', str(self.size))
        if self.hash:
            xelement.SetAttribute('hash', self.hash)

    def quickVerify(self, packageDir = None, pathname = None,
                    notify = None, correctSelf = False):
        """ Performs a quick test to ensure the file has not been
        modified.  This test is vulnerable to people maliciously
        attempting to fool the program (by setting datestamps etc.).

        if correctSelf is True, then any discrepancy is corrected by
        updating the appropriate fields internally, making the
        assumption that the file on disk is the authoritative version.

        Returns true if it is intact, false if it is incorrect.  If
        correctSelf is true, raises OSError if the self-update is
        impossible (for instance, because the file does not exist)."""

        if not pathname:
            pathname = Filename(packageDir, self.filename)
        try:
            st = os.stat(pathname.toOsSpecific())
        except OSError:
            # If the file is missing, the file fails.
            if notify:
                notify.debug("file not found: %s" % (pathname))
            if correctSelf:
                raise
            return False

        if st.st_size != self.size:
            # If the size is wrong, the file fails.
            if notify:
                notify.debug("size wrong: %s" % (pathname))
            if correctSelf:
                self.__correctHash(packageDir, pathname, st, notify)
            return False

        if int(st.st_mtime) == self.timestamp:
            # If the size is right and the timestamp is right, the
            # file passes.
            if notify:
                notify.debug("file ok: %s" % (pathname))
            return True

        if notify:
            notify.debug("modification time wrong: %s" % (pathname))

        # If the size is right but the timestamp is wrong, the file
        # soft-fails.  We follow this up with a hash check.
        if not self.checkHash(packageDir, pathname, st):
            # Hard fail, the hash is wrong.
            if notify:
                notify.debug("hash check wrong: %s" % (pathname))
                notify.debug("  found %s, expected %s" % (self.actualFile.hash, self.hash))
            if correctSelf:
                self.__correctHash(packageDir, pathname, st, notify)
            return False

        if notify:
            notify.debug("hash check ok: %s" % (pathname))

        # The hash is OK after all.  Change the file's timestamp back
        # to what we expect it to be, so we can quick-verify it
        # successfully next time.
        if correctSelf:
            # Or update our own timestamp.
            self.__correctTimestamp(pathname, st, notify)
            return False
        else:
            self.__updateTimestamp(pathname, st)

        return True

    def fullVerify(self, packageDir = None, pathname = None, notify = None):
        """ Performs a more thorough test to ensure the file has not
        been modified.  This test is less vulnerable to malicious
        attacks, since it reads and verifies the entire file.

        Returns true if it is intact, false if it needs to be
        redownloaded. """

        if not pathname:
            pathname = Filename(packageDir, self.filename)
        try:
            st = os.stat(pathname.toOsSpecific())
        except OSError:
            # If the file is missing, the file fails.
            if notify:
                notify.debug("file not found: %s" % (pathname))
            return False

        if st.st_size != self.size:
            # If the size is wrong, the file fails;
            if notify:
                notify.debug("size wrong: %s" % (pathname))
            return False

        if not self.checkHash(packageDir, pathname, st):
            # Hard fail, the hash is wrong.
            if notify:
                notify.debug("hash check wrong: %s" % (pathname))
                notify.debug("  found %s, expected %s" % (self.actualFile.hash, self.hash))
            return False

        if notify:
            notify.debug("hash check ok: %s" % (pathname))

        # The hash is OK.  If the timestamp is wrong, change it back
        # to what we expect it to be, so we can quick-verify it
        # successfully next time.
        if int(st.st_mtime) != self.timestamp:
            self.__updateTimestamp(pathname, st)

        return True

    def __updateTimestamp(self, pathname, st):
        # On Windows, we have to change the file to read-write before
        # we can successfully update its timestamp.  Failures are
        # deliberately ignored: this is only a cache optimization.
        try:
            os.chmod(pathname.toOsSpecific(), 0o755)
            os.utime(pathname.toOsSpecific(), (st.st_atime, self.timestamp))
            os.chmod(pathname.toOsSpecific(), 0o555)
        except OSError:
            pass

    def __correctTimestamp(self, pathname, st, notify):
        """ Corrects the internal timestamp to match the one on
        disk. """
        if notify:
            notify.info("Correcting timestamp of %s to %d (%s)" % (
                self.filename, st.st_mtime, time.asctime(time.localtime(st.st_mtime))))
        self.timestamp = int(st.st_mtime)

    def checkHash(self, packageDir, pathname, st):
        """ Returns true if the file has the expected md5 hash, false
        otherwise.  As a side effect, stores a FileSpec corresponding
        to the on-disk file in self.actualFile. """

        fileSpec = FileSpec()
        fileSpec.fromFile(packageDir, self.filename,
                          pathname = pathname, st = st)
        self.actualFile = fileSpec

        return (fileSpec.hash == self.hash)

    def __correctHash(self, packageDir, pathname, st, notify):
        """ Corrects the internal hash to match the one on disk. """
        if not self.actualFile:
            self.checkHash(packageDir, pathname, st)

        if notify:
            notify.info("Correcting hash %s to %s" % (
                self.filename, self.actualFile.hash))

        self.hash = self.actualFile.hash
        self.size = self.actualFile.size
        self.timestamp = self.actualFile.timestamp
| {
"repo_name": "chandler14362/panda3d",
"path": "direct/src/p3d/FileSpec.py",
"copies": "9",
"size": "8783",
"license": "bsd-3-clause",
"hash": 6287939664230332000,
"line_mean": 34.7032520325,
"line_max": 91,
"alpha_frac": 0.5852214505,
"autogenerated": false,
"ratio": 4.3523290386521305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9437550489152131,
"avg_score": null,
"num_lines": null
} |
# Public query strings and builder functions exported by this module.
__all__ = ['FIND', 'FIND_WUSER', 'CREATE', 'NAME_EXISTS', 'EXISTS', 'FOLLOW', 'UNFOLLOW',
           'FOLLOWING', 'FOLLOWERS', 'FIND_ALL_BY_USER', 'SEARCH_BEGINNING_WITH',
           'FIND_ALL_BY_USERNAME', 'COUNT_ALL_BY_USER', 'COUNT_ALL_BY_USERNAME',
           'SEARCH_BEGINNING_WITH_WUSER', 'SEARCH_BY_USER', 'build_find_all_query',
           'COUNT_FOLLOWERS', 'build_count_all_query']

# followers_count, conversations_count, following (user is)

# One interest by id.  Params: interest_id.
FIND = "SELECT id, name FROM interests WHERE id = %s"

# One interest by id, with its summary counters and a flag telling whether
# the logged-in user follows it.  Params: user_id, interest_id, interest_id.
FIND_WUSER = """
SELECT i.id, i.name, ins.conversations_count, ins.followers_count,
EXISTS(SELECT * FROM users_interests WHERE user_id = %s AND interest_id = %s) as logged_user_follows
FROM interests i
INNER JOIN interests_summary ins ON ins.interest_id = i.id
WHERE id = %s
"""

# Insert a new interest and return its generated id.  Params: name.
CREATE = "INSERT INTO interests(name) VALUES(%s) RETURNING id"

# Existence checks.  Params: name / interest_id respectively.
NAME_EXISTS = "SELECT id FROM interests WHERE name = %s"
EXISTS = "SELECT id FROM interests WHERE id = %s"

# Follow/unfollow bookkeeping.  Params: user_id, interest_id.
FOLLOW = "INSERT INTO users_interests(user_id, interest_id) VALUES(%s, %s)"
UNFOLLOW = "DELETE FROM users_interests WHERE user_id = %s AND interest_id = %s"
FOLLOWING = "SELECT user_id FROM users_interests WHERE user_id = %s AND interest_id = %s"

# One page of an interest's followers.  Params: interest_id, limit, offset.
FOLLOWERS = """
SELECT u.id, u.username as name, u.avatar_url
FROM users_interests ui
INNER JOIN users u ON u.id = ui.user_id
WHERE ui.interest_id = %s
ORDER BY u.username
LIMIT %s OFFSET %s
"""

# Follower counter from the summary table.  Params: interest_id.
COUNT_FOLLOWERS = """
SELECT followers_count as count
FROM interests_summary
WHERE interest_id = %s
"""

# One page of interests followed by a user (by id).  Params: user_id, limit, offset.
FIND_ALL_BY_USER = """
SELECT i.id, i.name FROM interests i
INNER JOIN users_interests ui ON ui.interest_id = i.id
WHERE ui.user_id = %s
ORDER BY i.name
LIMIT %s OFFSET %s
"""

# Case-insensitive name search within a user's followed interests.
# Params: user_id, LIKE pattern, limit.
SEARCH_BY_USER = """
SELECT i.id, i.name
FROM interests i
INNER JOIN users_interests ui ON ui.interest_id = i.id
WHERE ui.user_id = %s AND LOWER(i.name) LIKE LOWER(%s)
ORDER BY i.name
LIMIT %s
"""

# One page of interests followed by a user (by username).
# Params: username, limit, offset.
FIND_ALL_BY_USERNAME = """
SELECT i.id, i.name FROM interests i
INNER JOIN users_interests ui ON ui.interest_id = i.id
INNER JOIN users u ON u.id = ui.user_id
WHERE u.username = %s
ORDER BY i.name
LIMIT %s OFFSET %s
"""

# Counters for the two listings above.  Params: user_id / username.
COUNT_ALL_BY_USER = """
SELECT COUNT(*) AS count FROM interests i
INNER JOIN users_interests ui ON ui.interest_id = i.id
WHERE ui.user_id = %s
"""
COUNT_ALL_BY_USERNAME = """
SELECT COUNT(*) AS count FROM interests i
INNER JOIN users_interests ui ON ui.interest_id = i.id
INNER JOIN users u ON u.id = ui.user_id
WHERE u.username = %s
"""

# Case-insensitive prefix search over all interests.  Params: LIKE pattern, limit.
SEARCH_BEGINNING_WITH = """
SELECT id, name
FROM interests
WHERE LOWER(name) LIKE LOWER(%s)
ORDER BY name
LIMIT %s
"""

# Same search, plus a per-user follow flag.  Params: user_id, LIKE pattern, limit.
SEARCH_BEGINNING_WITH_WUSER = """
SELECT i.id, i.name,
EXISTS(SELECT * FROM users_interests ui WHERE ui.user_id = %s AND ui.interest_id = i.id) as logged_user_follows
FROM interests i
WHERE LOWER(i.name) LIKE LOWER(%s)
ORDER BY i.name
LIMIT %s
"""
# Supported sort orders for build_find_all_query(); anything else falls back
# to 'recent'.
FIND_ALL_ORDER_BY = ['recent', 'popular', 'distance']


def build_find_all_query(order_by='recent', location=None, radius=None, offset=0, limit=20, user_id=None):
    """Build a (sql, args) pair listing interests.

    order_by: one of FIND_ALL_ORDER_BY; unknown values fall back to 'recent'
    location: (lat, lon) tuple; required for 'distance' ordering
    radius:   together with location restricts results to nearby conversations
    user_id:  when given, adds a logged_user_follows column

    Returns (sql, args) -- or, as a historical sentinel callers rely on, an
    empty list when ordering by distance without a location.
    """
    if order_by not in FIND_ALL_ORDER_BY:
        order_by = 'recent'

    # BUGFIX: the original used `order_by is 'distance'`, an identity
    # comparison against a string literal that only worked through CPython
    # string interning; equality is the correct test.
    if order_by == 'distance' and not location:
        return []

    args = []
    sql = ["SELECT * FROM (SELECT DISTINCT ON (i.id) i.id, i.name "]

    # Selected column needed by the ORDER BY clause appended below.
    if order_by == 'recent':
        sql.append(", c.created_at ")
    elif order_by == 'popular':
        sql.append(", ins.conversations_count ")
    elif order_by == 'distance':
        sql.append(", ST_Distance(c.location, transform(PointFromText('POINT(%s %s)', 4269), 32661)) as distance ")
        args.extend([location[1], location[0]])

    if user_id:
        sql.append(", EXISTS(SELECT * FROM users_interests ui WHERE ui.user_id = %s AND ui.interest_id = i.id) as logged_user_follows ")
        args.append(user_id)

    sql.append("FROM interests i LEFT JOIN conversations c ON i.id = c.interest_id ")

    if order_by == 'popular':
        sql.append("LEFT JOIN interests_summary ins ON ins.interest_id = i.id ")

    if location and radius:
        sql.append("WHERE ST_DWithin(c.location, transform(PointFromText('POINT(%s %s)', 4269), 32661), %s) ")
        args.extend([location[1], location[0], radius])

    sql.append(") as interests ")

    if order_by == 'recent':
        sql.append("ORDER BY created_at DESC ")
    elif order_by == 'popular':
        sql.append("ORDER BY conversations_count DESC ")
    elif order_by == 'distance' and location:
        sql.append("ORDER BY distance ")

    sql.append("LIMIT %s OFFSET %s")
    args.extend([limit, offset])

    return ''.join(sql), args
def build_count_all_query(location=None, radius=None):
    """Build a (sql, args) pair counting distinct interests, optionally
    restricted to those with conversations within *radius* of *location*
    (a (lat, lon) tuple)."""
    args = []
    parts = ["""
    SELECT COUNT(*) as count FROM (
        SELECT DISTINCT ON (i.id) i.id
        FROM interests i
        LEFT JOIN conversations c ON i.id = c.interest_id
    """]

    if location and radius:
        parts.append("WHERE ST_DWithin(c.location, transform(PointFromText('POINT(%s %s)', 4269), 32661), %s) ")
        # PostGIS wants POINT(lon lat), hence the swapped tuple order.
        args.extend([location[1], location[0], radius])

    parts.append(") as interests ")
    return ''.join(parts), args
| {
"repo_name": "mayconbordin/boardhood",
"path": "server_app/api/boardhood/sql/interests.py",
"copies": "1",
"size": "5475",
"license": "mit",
"hash": -7424711039920535000,
"line_mean": 32.3841463415,
"line_max": 136,
"alpha_frac": 0.6042009132,
"autogenerated": false,
"ratio": 3.324225865209472,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9422962399143517,
"avg_score": 0.0010928758531909646,
"num_lines": 164
} |
__all__ = ['findHTMLMeta', 'MetaNotFound']
from html.parser import HTMLParser, HTMLParseError
import html.entities
import re
from openid.yadis.constants import YADIS_HEADER_NAME
# Size of the chunks to search at a time (also the amount that gets
# read at a time)
CHUNK_SIZE = 1024 * 16  # 16 KB
class ParseDone(Exception):
    """Raised (as control flow) when the parse is finished.

    Carries the located URI as its first argument; if the parse finishes
    without finding the URI, it is set to None."""
class MetaNotFound(Exception):
    """Exception to hold the content of the page if we did not find
    the appropriate <meta> tag.  The searched text is the first
    argument."""
re_flags = re.IGNORECASE | re.UNICODE | re.VERBOSE
ent_pat = r'''
&
(?: \#x (?P<hex> [a-f0-9]+ )
| \# (?P<dec> \d+ )
| (?P<word> \w+ )
)
;'''
ent_re = re.compile(ent_pat, re_flags)
def substituteMO(mo):
    """Return the replacement character for one entity-reference match.

    *mo* is a match of ent_re; hex/dec groups are numeric character
    references, the word group is a named entity.  An unknown entity
    name is passed through unchanged.
    """
    kind = mo.lastgroup
    if kind == 'hex':
        cp = int(mo.group('hex'), 16)
    elif kind == 'dec':
        cp = int(mo.group('dec'))
    else:
        assert kind == 'word'
        cp = html.entities.name2codepoint.get(mo.group('word'))

    return mo.group() if cp is None else chr(cp)
def substituteEntities(s):
    """Return *s* with all HTML entity/character references decoded."""
    return ent_re.sub(substituteMO, s)
class YadisHTMLParser(HTMLParser):
    """Parser that finds a meta http-equiv tag in the head of a html
    document.

    When feeding in data, if the tag is matched or it will never be
    found, the parser will raise ParseDone with the uri as the first
    attribute.

    Parsing state diagram
    =====================

    Any unlisted input does not affect the state::

                1, 2, 5                       8
               +--------------------------+  +-+
               |                          |  | |
             4 |      3     1, 2, 5, 7    v  | v
        TOP -> HTML -> HEAD ----------> TERMINATED
         |      |   ^  |                  ^  ^
         |      | 3 |  |                  |  |
         |      +---+  +-> FOUND ---------+  |
         |                6           8      |
         | 1, 2                              |
         +-----------------------------------+

      1. any of </body>, </html>, </head> -> TERMINATE
      2. <body> -> TERMINATE
      3. <head> -> HEAD
      4. <html> -> HTML
      5. <html> -> TERMINATE
      6. <meta http-equiv='X-XRDS-Location'> -> FOUND
      7. <head> -> TERMINATE
      8. Any input -> TERMINATE
    """
    # Parser states (see diagram above).
    TOP = 0
    HTML = 1
    HEAD = 2
    FOUND = 3
    TERMINATED = 4

    def __init__(self):
        # HTMLParser's `strict` argument was deprecated in Python 3.3 and
        # removed in 3.5, where passing it raises TypeError.  Try the old
        # spelling first for 3.2-3.4 compatibility, then fall back to the
        # default (lenient) parser.
        try:
            super(YadisHTMLParser, self).__init__(strict=False)
        except TypeError:
            super(YadisHTMLParser, self).__init__()
        self.phase = self.TOP

    def _terminate(self):
        """Enter TERMINATED and stop parsing with no URI found."""
        self.phase = self.TERMINATED
        raise ParseDone(None)

    def handle_endtag(self, tag):
        # If we ever see an end of head, body, or html, bail out right away.
        # [1]
        if tag in ['head', 'body', 'html']:
            self._terminate()

    def handle_starttag(self, tag, attrs):
        # if we ever see a start body tag, bail out right away, since
        # we want to prevent the meta tag from appearing in the body
        # [2]
        if tag == 'body':
            self._terminate()

        if self.phase == self.TOP:
            # At the top level, allow a html tag or a head tag to move
            # to the head or html phase
            if tag == 'head':
                # [3]
                self.phase = self.HEAD
            elif tag == 'html':
                # [4]
                self.phase = self.HTML

        elif self.phase == self.HTML:
            # if we are in the html tag, allow a head tag to move to
            # the HEAD phase.  If we get another html tag, then bail
            # out
            if tag == 'head':
                # [3]
                self.phase = self.HEAD
            elif tag == 'html':
                # [5]
                self._terminate()

        elif self.phase == self.HEAD:
            # If we are in the head phase, look for the appropriate
            # meta tag.  If we get a head or body tag, bail out.
            if tag == 'meta':
                attrs_d = dict(attrs)
                http_equiv = attrs_d.get('http-equiv', '').lower()
                if http_equiv == YADIS_HEADER_NAME.lower():
                    raw_attr = attrs_d.get('content')
                    yadis_loc = substituteEntities(raw_attr)
                    # [6]
                    self.phase = self.FOUND
                    raise ParseDone(yadis_loc)

            elif tag in ('head', 'html'):
                # [5], [7]
                self._terminate()

    def feed(self, chars):
        # Once terminated or found, refuse any further input.  [8]
        if self.phase in (self.TERMINATED, self.FOUND):
            self._terminate()

        return super(YadisHTMLParser, self).feed(chars)
def findHTMLMeta(stream):
    """Look for a meta http-equiv tag with the YADIS header name.

    @param stream: Source of the html text
    @type stream: Object that implements a read() method that works
        like file.read

    @return: The URI from which to fetch the XRDS document
    @rtype: str

    @raises MetaNotFound: raised with the content that was
        searched as the first parameter.
    """
    parser = YadisHTMLParser()
    collected = []

    while True:
        piece = stream.read(CHUNK_SIZE)
        if not piece:
            # Exhausted the stream without a verdict from the parser.
            break

        collected.append(piece)
        try:
            parser.feed(piece)
        except HTMLParseError:
            # Malformed HTML: stop parsing, but keep the remainder of the
            # document so the failure reports everything searched.
            collected.append(stream.read())
            break
        except ParseDone as done:
            located = done.args[0]
            if located is not None:
                return located
            # The parser proved no tag can follow; slurp the rest so the
            # full content can be reported.
            collected.append(stream.read())
            break

    raise MetaNotFound(''.join(collected))
| {
"repo_name": "vivianli32/TravelConnect",
"path": "flask/lib/python3.4/site-packages/openid/yadis/parsehtml.py",
"copies": "4",
"size": "5903",
"license": "mit",
"hash": 3255330285023337000,
"line_mean": 27.9362745098,
"line_max": 76,
"alpha_frac": 0.506352702,
"autogenerated": false,
"ratio": 3.9885135135135137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6494866215513514,
"avg_score": null,
"num_lines": null
} |
__all__ = ['findHTMLMeta', 'MetaNotFound']
from HTMLParser import HTMLParser, HTMLParseError
import htmlentitydefs
import re
from openid.yadis.constants import YADIS_HEADER_NAME
# Size of the chunks to search at a time (also the amount that gets
# read from the stream per iteration in findHTMLMeta)
CHUNK_SIZE = 1024 * 16  # 16 KB
class ParseDone(Exception):
    """Exception to hold the URI that was located when the parse is
    finished.  If the parse finishes without finding the URI, set it to
    None.  Raised by YadisHTMLParser to stop feeding."""
class MetaNotFound(Exception):
    """Exception to hold the content of the page if we did not find
    the appropriate <meta> tag.  The searched text is the first
    argument."""
re_flags = re.IGNORECASE | re.UNICODE | re.VERBOSE
ent_pat = r'''
&
(?: \#x (?P<hex> [a-f0-9]+ )
| \# (?P<dec> \d+ )
| (?P<word> \w+ )
)
;'''
ent_re = re.compile(ent_pat, re_flags)
def substituteMO(mo):
    """Return the replacement character for one entity-reference match.

    *mo* is a match of ent_re; hex/dec groups are numeric character
    references, the word group is a named entity.  An unknown entity
    name is passed through unchanged.  (Python 2: returns unicode.)
    """
    kind = mo.lastgroup
    if kind == 'hex':
        cp = int(mo.group('hex'), 16)
    elif kind == 'dec':
        cp = int(mo.group('dec'))
    else:
        assert kind == 'word'
        cp = htmlentitydefs.name2codepoint.get(mo.group('word'))

    if cp is None:
        return mo.group()
    return unichr(cp)
def substituteEntities(s):
    """Return *s* with all HTML entity/character references decoded."""
    return ent_re.sub(substituteMO, s)
class YadisHTMLParser(HTMLParser):
    """Parser that finds a meta http-equiv tag in the head of a html
    document.

    When feeding in data, if the tag is matched or it will never be
    found, the parser will raise ParseDone with the uri as the first
    attribute.

    Parsing state diagram
    =====================

    Any unlisted input does not affect the state::

                1, 2, 5                       8
               +--------------------------+  +-+
               |                          |  | |
             4 |      3     1, 2, 5, 7    v  | v
        TOP -> HTML -> HEAD ----------> TERMINATED
         |      |   ^  |                  ^  ^
         |      | 3 |  |                  |  |
         |      +---+  +-> FOUND ---------+  |
         |                6           8      |
         | 1, 2                              |
         +-----------------------------------+

      1. any of </body>, </html>, </head> -> TERMINATE
      2. <body> -> TERMINATE
      3. <head> -> HEAD
      4. <html> -> HTML
      5. <html> -> TERMINATE
      6. <meta http-equiv='X-XRDS-Location'> -> FOUND
      7. <head> -> TERMINATE
      8. Any input -> TERMINATE
    """
    # Parser states (see diagram above).
    TOP = 0
    HTML = 1
    HEAD = 2
    FOUND = 3
    TERMINATED = 4

    def __init__(self):
        HTMLParser.__init__(self)
        self.phase = self.TOP

    def _terminate(self):
        """Enter TERMINATED and stop parsing with no URI found."""
        self.phase = self.TERMINATED
        raise ParseDone(None)

    def handle_endtag(self, tag):
        # If we ever see an end of head, body, or html, bail out right away.
        # [1]
        if tag in ['head', 'body', 'html']:
            self._terminate()

    def handle_starttag(self, tag, attrs):
        # if we ever see a start body tag, bail out right away, since
        # we want to prevent the meta tag from appearing in the body
        # [2]
        if tag=='body':
            self._terminate()

        if self.phase == self.TOP:
            # At the top level, allow a html tag or a head tag to move
            # to the head or html phase
            if tag == 'head':
                # [3]
                self.phase = self.HEAD
            elif tag == 'html':
                # [4]
                self.phase = self.HTML

        elif self.phase == self.HTML:
            # if we are in the html tag, allow a head tag to move to
            # the HEAD phase.  If we get another html tag, then bail
            # out
            if tag == 'head':
                # [3]
                self.phase = self.HEAD
            elif tag == 'html':
                # [5]
                self._terminate()

        elif self.phase == self.HEAD:
            # If we are in the head phase, look for the appropriate
            # meta tag.  If we get a head or body tag, bail out.
            if tag == 'meta':
                attrs_d = dict(attrs)
                http_equiv = attrs_d.get('http-equiv', '').lower()
                if http_equiv == YADIS_HEADER_NAME.lower():
                    raw_attr = attrs_d.get('content')
                    yadis_loc = substituteEntities(raw_attr)
                    # [6]
                    self.phase = self.FOUND
                    raise ParseDone(yadis_loc)

            elif tag in ['head', 'html']:
                # [5], [7]
                self._terminate()

    def feed(self, chars):
        # Once terminated or found, refuse any further input.  [8]
        if self.phase in [self.TERMINATED, self.FOUND]:
            self._terminate()

        return HTMLParser.feed(self, chars)
def findHTMLMeta(stream):
    """Look for a meta http-equiv tag with the YADIS header name.

    @param stream: Source of the html text
    @type stream: Object that implements a read() method that works
        like file.read

    @return: The URI from which to fetch the XRDS document
    @rtype: str

    @raises MetaNotFound: raised with the content that was
        searched as the first parameter.
    """
    parser = YadisHTMLParser()
    chunks = []

    while 1:
        chunk = stream.read(CHUNK_SIZE)
        if not chunk:
            # End of file
            break

        chunks.append(chunk)
        try:
            parser.feed(chunk)
        except HTMLParseError, why:
            # HTML parse error, so bail; keep the rest of the document so
            # the failure can report everything that was searched
            chunks.append(stream.read())
            break
        except ParseDone, why:
            # ParseDone carries the located URI (or None) as its first arg
            uri = why[0]
            if uri is None:
                # Parse finished, but we may need the rest of the file
                chunks.append(stream.read())
                break
            else:
                return uri

    content = ''.join(chunks)
    raise MetaNotFound(content)
| {
"repo_name": "ccellis/WHACK2016",
"path": "flask/lib/python2.7/site-packages/openid/yadis/parsehtml.py",
"copies": "167",
"size": "5850",
"license": "bsd-3-clause",
"hash": -4376288822586797600,
"line_mean": 28.6954314721,
"line_max": 76,
"alpha_frac": 0.5054700855,
"autogenerated": false,
"ratio": 3.9959016393442623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['findHTMLMeta', 'MetaNotFound']
from html.parser import HTMLParser
import html.entities
import re
import sys
from openid.yadis.constants import YADIS_HEADER_NAME
# Size of the chunks to search at a time (also the amount that gets
# read from the stream per iteration in findHTMLMeta)
CHUNK_SIZE = 1024 * 16  # 16 KB
class ParseDone(Exception):
    """Exception to hold the URI that was located when the parse is
    finished.  If the parse finishes without finding the URI, set it to
    None.  Raised by YadisHTMLParser to stop feeding."""
class MetaNotFound(Exception):
    """Exception to hold the content of the page if we did not find
    the appropriate <meta> tag.  The searched text is the first
    argument."""
re_flags = re.IGNORECASE | re.UNICODE | re.VERBOSE
ent_pat = r'''
&
(?: \#x (?P<hex> [a-f0-9]+ )
| \# (?P<dec> \d+ )
| (?P<word> \w+ )
)
;'''
ent_re = re.compile(ent_pat, re_flags)
def substituteMO(mo):
    """Resolve one matched character/entity reference to its character.

    Numeric references (hex/dec groups) are converted directly; named
    references are looked up in html.entities.  Unknown names pass
    through unchanged.
    """
    group = mo.lastgroup
    if group == 'hex':
        codepoint = int(mo.group('hex'), 16)
    elif group == 'dec':
        codepoint = int(mo.group('dec'))
    else:
        assert group == 'word'
        codepoint = html.entities.name2codepoint.get(mo.group('word'))

    if codepoint is not None:
        return chr(codepoint)
    return mo.group()
def substituteEntities(s):
    """Return *s* with all HTML entity/character references decoded."""
    return ent_re.sub(substituteMO, s)
class YadisHTMLParser(HTMLParser):
    """Parser that finds a meta http-equiv tag in the head of a html
    document.

    When feeding in data, if the tag is matched or it will never be
    found, the parser will raise ParseDone with the uri as the first
    attribute.

    Parsing state diagram
    =====================

    Any unlisted input does not affect the state::

                1, 2, 5                       8
               +--------------------------+  +-+
               |                          |  | |
             4 |      3     1, 2, 5, 7    v  | v
        TOP -> HTML -> HEAD ----------> TERMINATED
         |      |   ^  |                  ^  ^
         |      | 3 |  |                  |  |
         |      +---+  +-> FOUND ---------+  |
         |                6           8      |
         | 1, 2                              |
         +-----------------------------------+

      1. any of </body>, </html>, </head> -> TERMINATE
      2. <body> -> TERMINATE
      3. <head> -> HEAD
      4. <html> -> HTML
      5. <html> -> TERMINATE
      6. <meta http-equiv='X-XRDS-Location'> -> FOUND
      7. <head> -> TERMINATE
      8. Any input -> TERMINATE
    """
    # Parser states (see diagram above).
    TOP = 0
    HTML = 1
    HEAD = 2
    FOUND = 3
    TERMINATED = 4

    def __init__(self):
        # `strict` was accepted (and, for 3.2 and below, needed to select
        # the lenient parser) up to Python 3.4 and removed in 3.5.
        # Compare against the full version tuple rather than only
        # `sys.version_info.minor`, which would misfire for any major
        # version other than 3 (e.g. a hypothetical 4.2).
        if sys.version_info < (3, 3):
            super(YadisHTMLParser, self).__init__(strict=False)
        else:
            super(YadisHTMLParser, self).__init__()
        self.phase = self.TOP

    def _terminate(self):
        """Enter TERMINATED and stop parsing with no URI found."""
        self.phase = self.TERMINATED
        raise ParseDone(None)

    def handle_endtag(self, tag):
        # If we ever see an end of head, body, or html, bail out right away.
        # [1]
        if tag in ['head', 'body', 'html']:
            self._terminate()

    def handle_starttag(self, tag, attrs):
        # if we ever see a start body tag, bail out right away, since
        # we want to prevent the meta tag from appearing in the body
        # [2]
        if tag == 'body':
            self._terminate()

        if self.phase == self.TOP:
            # At the top level, allow a html tag or a head tag to move
            # to the head or html phase
            if tag == 'head':
                # [3]
                self.phase = self.HEAD
            elif tag == 'html':
                # [4]
                self.phase = self.HTML

        elif self.phase == self.HTML:
            # if we are in the html tag, allow a head tag to move to
            # the HEAD phase.  If we get another html tag, then bail
            # out
            if tag == 'head':
                # [3]
                self.phase = self.HEAD
            elif tag == 'html':
                # [5]
                self._terminate()

        elif self.phase == self.HEAD:
            # If we are in the head phase, look for the appropriate
            # meta tag.  If we get a head or body tag, bail out.
            if tag == 'meta':
                attrs_d = dict(attrs)
                http_equiv = attrs_d.get('http-equiv', '').lower()
                if http_equiv == YADIS_HEADER_NAME.lower():
                    raw_attr = attrs_d.get('content')
                    yadis_loc = substituteEntities(raw_attr)
                    # [6]
                    self.phase = self.FOUND
                    raise ParseDone(yadis_loc)

            elif tag in ('head', 'html'):
                # [5], [7]
                self._terminate()

    def feed(self, chars):
        # Once terminated or found, refuse any further input.  [8]
        if self.phase in (self.TERMINATED, self.FOUND):
            self._terminate()

        return super(YadisHTMLParser, self).feed(chars)
def findHTMLMeta(stream):
    """Look for a meta http-equiv tag with the YADIS header name.

    @param stream: Source of the html text
    @type stream: Object that implements a read() method that works
        like file.read

    @return: The URI from which to fetch the XRDS document
    @rtype: str

    @raises MetaNotFound: raised with the content that was
        searched as the first parameter.
    """
    parser = YadisHTMLParser()
    seen = []

    while True:
        block = stream.read(CHUNK_SIZE)
        if not block:
            # Stream exhausted without a verdict from the parser.
            break

        seen.append(block)
        try:
            parser.feed(block)
        except ParseDone as done:
            located = done.args[0]
            if located is not None:
                return located
            # The parser proved no tag can follow; keep the rest of the
            # document so the full searched content can be reported.
            seen.append(stream.read())
            break

    raise MetaNotFound(''.join(seen))
| {
"repo_name": "IKholopov/HackUPC2017",
"path": "hackupc/env/lib/python3.5/site-packages/openid/yadis/parsehtml.py",
"copies": "13",
"size": "6061",
"license": "apache-2.0",
"hash": -7996668829391029000,
"line_mean": 28.2801932367,
"line_max": 76,
"alpha_frac": 0.5080019799,
"autogenerated": false,
"ratio": 3.984878369493754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['findLagrangianVolume']
import numpy as np
from readGadgetSnapshot import readGadgetSnapshot
from findSpheresInSnapshots import getParticlesWithinSphere
from mvee import mvee
def findLagrangianVolume(c, r, snapshot_prefix, ic_prefix, snapshot_edges=None,
        id_int64=None, rec_only=False):
    """Find the Lagrangian (initial-condition) volume of the particles that
    end up inside a sphere in the final snapshot.

    Parameters
    ----------
    c, r : sphere center and radius in the final snapshot.
    snapshot_prefix : prefix of the final snapshot files.
    ic_prefix : prefix of the initial-condition snapshot files.
    snapshot_edges : passed through to getParticlesWithinSphere.
    id_int64 : unused; kept for backward compatibility of the signature.
    rec_only : if True, return only the axis-aligned bounding box.

    Returns
    -------
    (pos_min, extent) if rec_only is True, otherwise
    (pos_min, extent, center, A) where (A, center) come from mvee() and
    describe the minimum-volume enclosing ellipsoid of the particles.
    """
    ids_all = getParticlesWithinSphere(c, r, snapshot_prefix, snapshot_edges, \
            output_dtype=np.dtype([('id', np.uint64)]))['id']
    id_min = ids_all.min()
    id_max = ids_all.max()
    header = readGadgetSnapshot(ic_prefix+'.0')
    ic_subregion_count = header.num_files
    L = header.BoxSize
    # For Gadget, particle id starts at 1.
    current_ic_id_start = 1
    current_ic_id_end = 1
    total_count = 0
    for x in xrange(ic_subregion_count):
        ic_snapshot_file = '%s.%d'%(ic_prefix, x)
        header = readGadgetSnapshot(ic_snapshot_file)
        current_ic_id_start = current_ic_id_end
        current_ic_id_end += sum(header.npart)
        # Skip files whose contiguous id range cannot contain any of the
        # requested ids.
        if(id_max < current_ic_id_start or id_min >= current_ic_id_end):
            continue
        # Locate the requested ids within this file's id range.  Out-of-range
        # matches are mapped to -1 and then filtered by the equality check.
        # (A dead pre-computation of `find` from a boolean mask was removed
        # here; it was immediately overwritten by the searchsorted result.)
        ic_ids = np.arange(current_ic_id_start, current_ic_id_end, \
                dtype=np.uint64)
        find = np.searchsorted(ic_ids, ids_all)
        find[find>=len(ic_ids)] = -1
        find = find[ic_ids[find]==ids_all]
        if(len(find)==0): continue
        header, ic_pos = readGadgetSnapshot(ic_snapshot_file, read_pos=True)
        pos_selected = ic_pos[find]
        # Grow pos_all incrementally as matching particles are found.
        if(total_count == 0):
            pos_all = np.zeros((len(pos_selected), 3), np.float32)
        else:
            pos_all.resize((total_count+len(pos_selected), 3))
        pos_all[total_count:] = pos_selected
        total_count += len(pos_selected)
    if total_count != len(ids_all):
        raise ValueError("Only matched %d of %d particle ids in the "
                "initial conditions." % (total_count, len(ids_all)))
    # Undo periodic wrapping per axis: if the largest empty gap implies the
    # points straddle the box boundary, shift the low side up by the box size.
    for pos in pos_all.T:
        p = np.sort(pos)
        gaps = np.ediff1d(p)
        j = np.argmax(gaps)
        max_gap = gaps[j]
        gap_start = p[j]
        pos_range = p[-1] - p[0]
        if L - max_gap < pos_range:
            pos[pos <= gap_start] += L
    pos_min = pos_all.min(axis=0)
    pos_max = pos_all.max(axis=0)
    if rec_only: return pos_min, pos_max-pos_min
    A, c = mvee(pos_all)
    return pos_min, pos_max-pos_min, c, A
| {
"repo_name": "manodeep/yymao-helpers",
"path": "helpers/findLagrangianVolume.py",
"copies": "1",
"size": "2386",
"license": "mit",
"hash": -995040306558588800,
"line_mean": 34.6119402985,
"line_max": 79,
"alpha_frac": 0.5884325231,
"autogenerated": false,
"ratio": 3.0629011553273426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41513336784273425,
"avg_score": null,
"num_lines": null
} |
__all__ = ["fit_bspline_displacement_field"]
import numpy as np
from ..core import ants_image as iio
from .. import core
from .. import utils
def fit_bspline_displacement_field(displacement_field=None,
                                   displacement_weight_image=None,
                                   displacement_origins=None,
                                   displacements=None,
                                   displacement_weights=None,
                                   origin=None,
                                   spacing=None,
                                   size=None,
                                   direction=None,
                                   number_of_fitting_levels=4,
                                   mesh_size=1,
                                   spline_order=3,
                                   enforce_stationary_boundary=True,
                                   estimate_inverse=False):
    """
    Fit a b-spline object to a dense displacement field image and/or a set of points
    with associated displacements and smooths them using B-splines.  The inverse
    can also be estimated.  This is basically a wrapper for the ITK filter

    https://itk.org/Doxygen/html/classitk_1_1DisplacementFieldToBSplineImageFilter.html

    which, in turn is a wrapper for the ITK filter used for the function
    fit_bspline_object_to_scattered_data.

    ANTsR function: `fitBsplineToDisplacementField`

    Arguments
    ---------
    displacement_field : ANTs image
        Input displacement field.  Either this and/or the points must be specified.

    displacement_weight_image : ANTs image
        Input image defining weighting of the voxelwise displacements in the
        displacement_field.  If None, defaults to identity weighting for each
        displacement.  Default = None.

    displacement_origins : 2-D numpy array
        Matrix (number_of_points x dimension) defining the origins of the input
        displacement points.  Default = None.

    displacements : 2-D numpy array
        Matrix (number_of_points x dimension) defining the displacements of the input
        displacement points.  Default = None.

    displacement_weights : 1-D numpy array
        Array defining the individual weighting of the corresponding scattered data
        value.  Default = None meaning all values are weighted the same.

    origin : n-D tuple
        Defines the physical origin of the B-spline object.

    spacing : n-D tuple
        Defines the physical spacing of the B-spline object.

    size : n-D tuple
        Defines the size (length) of the B-spline object.  Note that the length of
        the B-spline object in dimension d is defined as
        spacing[d] * size[d]-1.

    direction : 2-D numpy array
        Direction matrix of the B-spline object (dimension x dimension).

    number_of_fitting_levels : integer
        Specifies the number of fitting levels.

    mesh_size : n-D tuple
        Defines the mesh size at the initial fitting level.

    spline_order : integer
        Spline order of the B-spline object.  Default = 3.

    enforce_stationary_boundary : boolean
        Ensure no displacements on the image boundary.  Default = True.

    estimate_inverse : boolean
        Estimate the inverse displacement field.  Default = False.

    Returns
    -------
    Returns an ANTsImage.

    Example
    -------
    >>> # Perform 2-D fitting
    >>>
    >>> import ants, numpy
    >>>
    >>> points = numpy.array([[-50, -50]])
    >>> deltas = numpy.array([[10, 10]])
    >>>
    >>> bspline_field = ants.fit_bspline_displacement_field(
    >>>   displacement_origins=points, displacements=deltas,
    >>>   origin=[0.0, 0.0], spacing=[1.0, 1.0], size=[100, 100],
    >>>   direction=numpy.array([[-1, 0], [0, -1]]),
    >>>   number_of_fitting_levels=4, mesh_size=(1, 1))
    """

    # At least one of the two input forms (dense field, scattered points)
    # must be provided.
    if displacement_field is None and (displacement_origins is None or displacements is None):
        raise ValueError("Missing input. Either a displacement field or input point set (origins + displacements) needs to be specified.")

    if displacement_field is None:
        if origin is None or spacing is None or size is None or direction is None:
            raise ValueError("If the displacement field is not specified, one must fully specify the input physical domain.")

    # Default to identity (uniform) voxelwise weighting when a field is
    # given without an explicit weight image.
    if displacement_field is not None and displacement_weight_image is None:
        displacement_weight_image = core.make_image(displacement_field.shape, voxval=1,
            spacing=displacement_field.spacing, origin=displacement_field.origin,
            direction=displacement_field.direction, has_components=False, pixeltype='float')

    # Infer dimensionality from whichever input form was supplied.
    dimensionality = None
    if displacement_field is not None:
        dimensionality = displacement_field.dimension
    else:
        dimensionality = displacement_origins.shape[1]
        if displacements.shape[1] != dimensionality:
            raise ValueError("Dimensionality between origins and displacements does not match.")

    if displacement_origins is not None:
        # BUG FIX: previously, valid user-supplied weights were always
        # overwritten with ones; only default them when they are absent.
        if displacement_weights is None:
            displacement_weights = np.ones(displacement_origins.shape[0])
        elif len(displacement_weights) != displacement_origins.shape[0]:
            raise ValueError("Length of displacement weights must match the number of displacement points.")

    # mesh_size may be a scalar (isotropic) or a per-dimension sequence.
    if not isinstance(mesh_size, int) and len(mesh_size) != dimensionality:
        raise ValueError("Incorrect specification for mesh_size.")

    if origin is not None and len(origin) != dimensionality:
        raise ValueError("Origin is not of length dimensionality.")

    if spacing is not None and len(spacing) != dimensionality:
        raise ValueError("Spacing is not of length dimensionality.")

    if size is not None and len(size) != dimensionality:
        raise ValueError("Size is not of length dimensionality.")

    # BUG FIX: the matrix must be dimensionality x dimensionality; reject it
    # if *either* axis has the wrong extent (the previous `and` only caught
    # matrices where both were wrong).
    if direction is not None and (direction.shape[0] != dimensionality or direction.shape[1] != dimensionality):
        raise ValueError("Direction is not of shape dimensionality x dimensionality.")

    # It would seem that pybind11 doesn't really play nicely when the
    # arguments are 'None', so substitute empty arrays.
    if origin is None:
        origin = np.empty(0)
    if spacing is None:
        spacing = np.empty(0)
    if size is None:
        size = np.empty(0)
    if direction is None:
        direction = np.empty((0, 0))
    if displacement_origins is None:
        displacement_origins = np.empty((0, 0))
    if displacements is None:
        displacements = np.empty((0, 0))
    # NOTE(review): in the field-only path displacement_weights can still be
    # None here -- presumably tolerated by the native function; verify.

    # A scalar mesh size expands to an isotropic control-point count.
    number_of_control_points = mesh_size + spline_order
    if isinstance(number_of_control_points, int):
        number_of_control_points = np.repeat(number_of_control_points, dimensionality)

    bspline_field = None
    if displacement_field is not None:
        libfn = utils.get_lib_fn("fitBsplineDisplacementFieldD%i" % (dimensionality))
        bspline_field = libfn(displacement_field.pointer, displacement_weight_image.pointer,
                              displacement_origins, displacements, displacement_weights,
                              origin, spacing, size, direction,
                              number_of_fitting_levels, number_of_control_points, spline_order,
                              enforce_stationary_boundary, estimate_inverse)
    elif displacement_field is None and displacements is not None:
        libfn = utils.get_lib_fn("fitBsplineDisplacementFieldToScatteredDataD%i" % (dimensionality))
        bspline_field = libfn(displacement_origins, displacements, displacement_weights,
                              origin, spacing, size, direction,
                              number_of_fitting_levels, number_of_control_points, spline_order,
                              enforce_stationary_boundary, estimate_inverse)

    bspline_displacement_field = iio.ANTsImage(pixeltype='float',
        dimension=dimensionality, components=dimensionality,
        pointer=bspline_field).clone('float')
    return bspline_displacement_field
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/utils/fit_bspline_displacement_field.py",
"copies": "1",
"size": "8108",
"license": "apache-2.0",
"hash": 4744438135431193000,
"line_mean": 40.5794871795,
"line_max": 139,
"alpha_frac": 0.6428219043,
"autogenerated": false,
"ratio": 4.3150612027674295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5457883107067429,
"avg_score": null,
"num_lines": null
} |
__all__ = ["fit_bspline_object_to_scattered_data"]
import numpy as np
from ..core import ants_image as iio
from .. import utils
def fit_bspline_object_to_scattered_data(scattered_data,
                                         parametric_data,
                                         parametric_domain_origin,
                                         parametric_domain_spacing,
                                         parametric_domain_size,
                                         is_parametric_dimension_closed=None,
                                         data_weights=None,
                                         number_of_fitting_levels=4,
                                         mesh_size=1,
                                         spline_order=3):
    """
    Fit a b-spline object to scattered data.  This is basically a wrapper
    for the ITK filter

    https://itk.org/Doxygen/html/classitk_1_1BSplineScatteredDataPointSetToImageFilter.html

    This filter is flexible in the possible objects that can be approximated.
    Possibilities include:

    * 1/2/3/4-D curve
    * 2-D surface in 3-D space (not available/templated)
    * 2/3/4-D scalar field
    * 2/3-D displacement field

    In order to understand the input parameters, it is important to understand
    the difference between the parametric and data dimensions.  A curve has one
    parametric dimension but the data dimension can be 1-D, 2-D, 3-D, or 4-D.
    In contrast, a 3-D displacement field has a parametric and data dimension
    of 3.  The scattered data is what's approximated by the B-spline object and
    the parametric point is the location of scattered data within the domain of
    the B-spline object.

    ANTsR function: `fitBsplineObjectToScatteredData`

    Arguments
    ---------
    scattered_data : 2-D numpy array
        Defines the scattered data input to be approximated.  Data is organized
        by row --> data v, column ---> data dimension.

    parametric_data : 2-D numpy array
        Defines the parametric location of the scattered data.  Data is organized
        by row --> parametric point, column --> parametric dimension.  Note that
        each row corresponds to the same row in the scatteredData.

    data_weights : 1-D numpy array
        Defines the individual weighting of the corresponding scattered data value.
        Default = None meaning all values are weighted the same.

    parametric_domain_origin : n-D tuple
        Defines the parametric origin of the B-spline object.

    parametric_domain_spacing : n-D tuple
        Defines the parametric spacing of the B-spline object.  Defines the sampling
        rate in the parametric domain.

    parametric_domain_size : n-D tuple
        Defines the size (length) of the B-spline object.  Note that the length of the
        B-spline object in dimension d is defined as
        parametric_domain_spacing[d] * parametric_domain_size[d]-1.

    is_parametric_dimension_closed : n-D tuple
        Booleans defining whether or not the corresponding parametric dimension is
        closed (e.g., closed loop).  Default = None.

    number_of_fitting_levels : integer
        Specifies the number of fitting levels.

    mesh_size : n-D tuple
        Defines the mesh size at the initial fitting level.

    spline_order : integer
        Spline order of the B-spline object.  Default = 3.

    Returns
    -------
    returns numpy array for B-spline curve (parametric dimension = 1).  Otherwise,
    returns an ANTsImage.

    Example
    -------
    >>> # Perform 2-D curve example
    >>>
    >>> import ants, numpy
    >>> import matplotlib.pyplot as plt
    >>>
    >>> x = numpy.linspace(-4, 4, num=100)
    >>> y = numpy.exp(-numpy.multiply(x, x)) + numpy.random.uniform(-0.1, 0.1, len(x))
    >>> u = numpy.linspace(0, 1.0, num=len(x))
    >>> scattered_data = numpy.column_stack((x, y))
    >>> parametric_data = numpy.expand_dims(u, axis=-1)
    >>> spacing = 1/(len(x)-1) * 1.0
    >>>
    >>> bspline_curve = ants.fit_bspline_object_to_scattered_data(
    >>>   scattered_data, parametric_data,
    >>>   parametric_domain_origin=[0.0], parametric_domain_spacing=[spacing],
    >>>   parametric_domain_size=[len(x)], is_parametric_dimension_closed=None,
    >>>   number_of_fitting_levels=5, mesh_size=1)
    >>>
    >>> plt.plot(x, y, label='Noisy points')
    >>> plt.plot(bspline_curve[:,0], bspline_curve[:,1], label='B-spline curve')
    >>> plt.grid(True)
    >>> plt.axis('tight')
    >>> plt.legend(loc='upper left')
    >>> plt.show()
    """

    parametric_dimension = parametric_data.shape[1]
    data_dimension = scattered_data.shape[1]

    # By default every parametric dimension is open (not a closed loop).
    if is_parametric_dimension_closed is None:
        is_parametric_dimension_closed = np.repeat(False, parametric_dimension)

    # mesh_size may be a scalar (isotropic) or a per-dimension sequence.
    if not isinstance(mesh_size, int) and len(mesh_size) != parametric_dimension:
        raise ValueError("Incorrect specification for mesh_size.")

    if len(parametric_domain_origin) != parametric_dimension:
        raise ValueError("Origin is not of length parametric_dimension.")

    if len(parametric_domain_spacing) != parametric_dimension:
        raise ValueError("Spacing is not of length parametric_dimension.")

    if len(parametric_domain_size) != parametric_dimension:
        raise ValueError("Size is not of length parametric_dimension.")

    if len(is_parametric_dimension_closed) != parametric_dimension:
        raise ValueError("Closed is not of length parametric_dimension.")

    # A scalar mesh size expands to an isotropic control-point count.
    number_of_control_points = mesh_size + spline_order
    if isinstance(number_of_control_points, int):
        number_of_control_points = np.repeat(number_of_control_points, parametric_dimension)

    if parametric_data.shape[0] != scattered_data.shape[0]:
        raise ValueError("The number of points is not equal to the number of scattered data values.")

    # Default to uniform weighting of the scattered data.
    if data_weights is None:
        data_weights = np.repeat(1.0, parametric_data.shape[0])

    if len(data_weights) != parametric_data.shape[0]:
        raise ValueError("The number of weights is not the same as the number of points.")

    libfn = utils.get_lib_fn("fitBsplineObjectToScatteredDataP%iD%i" % (parametric_dimension, data_dimension))
    bspline_object = libfn(scattered_data, parametric_data, data_weights,
                           parametric_domain_origin, parametric_domain_spacing,
                           parametric_domain_size, is_parametric_dimension_closed,
                           number_of_fitting_levels, number_of_control_points,
                           spline_order)

    # A 1-D parametric domain describes a curve and is returned as an array;
    # everything else is an image over the parametric domain.
    if parametric_dimension == 1:
        return bspline_object
    else:
        bspline_image = iio.ANTsImage(pixeltype='float',
            dimension=parametric_dimension, components=data_dimension,
            pointer=bspline_object).clone('float')
        return bspline_image
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/utils/fit_bspline_object_to_scattered_data.py",
"copies": "1",
"size": "8235",
"license": "apache-2.0",
"hash": -4388597969903938000,
"line_mean": 41.0153061224,
"line_max": 110,
"alpha_frac": 0.6201578628,
"autogenerated": false,
"ratio": 3.796680497925311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9821436938734447,
"avg_score": 0.019080284398172623,
"num_lines": 196
} |
__all__ = ["fit_transform_to_paired_points"]
import os
import numpy as np
from tempfile import mktemp
from .apply_transforms import apply_transforms
from ..core import ants_image_io as iio
from ..core import ants_transform_io as txio
def fit_transform_to_paired_points(
    moving_points, fixed_points, transform_type="Affine", regularization=1e-4
):
    """
    Estimate an optimal matrix transformation from paired points, potentially landmarks

    ANTsR (actually patchMatchR) function: fitTransformToPairedPoints

    Arguments
    ---------
    moving_points : array
        points in the moving image domain defined in physical space,
        number of points by dimensionality

    fixed_points : array
        points in the fixed image domain defined in physical space,
        number of points by dimensionality

    transform_type : character
        affine, rigid or similarity

    regularization : scalar
        regularization parameter

    Returns
    -------
    ANTs transform

    Example
    -------
    >>> import ants
    >>> import numpy as np
    >>> fixed_points = np.array([[1,2],[4,5],[6,7],[8,9]])
    >>> moving_points = np.array([[1.1,2.3],[4.1,5.4],[6.1,7],[8,9]])
    >>> tx = ants.fit_transform_to_paired_points(moving_points, fixed_points)
    """
    n = fixed_points.shape[0]
    idim = fixed_points.shape[1]

    # Work in coordinates centered on each cloud's mean.
    centerX = fixed_points.mean(axis=0)
    centerY = moving_points.mean(axis=0)
    x = fixed_points - centerX
    y = moving_points - centerY

    # Affine fit: least squares on [x | 1] -> y yields the linear part and
    # the translation in a single solve.
    myones = np.ones(n)
    x11 = np.c_[x, myones]
    temp = np.linalg.lstsq(x11, y, rcond=None)
    A = temp[0].transpose()[:idim, :idim]
    trans = temp[0][idim, :] + centerY - centerX

    if transform_type == "Rigid" or transform_type == "Similarity":
        # Orthogonal factor via SVD of the (regularized) cross-covariance.
        covmat = np.dot(y.transpose(), x)
        scaleDiag = np.zeros((idim, idim))
        np.fill_diagonal(scaleDiag, regularization)
        x_svd = np.linalg.svd(covmat + scaleDiag)
        myd = np.linalg.det(np.dot(x_svd[0].T, x_svd[2].T))
        if myd < 0:
            # Flip the last singular vector to avoid a reflection.
            x_svd[2][idim - 1, :] *= -1
        A = np.dot(x_svd[0], x_svd[2].T)

        if transform_type == "Similarity":
            # Isotropic scale from the ratio of RMS point radii.
            # BUGFIX: use np.sqrt -- the np.math alias was removed in
            # NumPy 1.25, so np.math.sqrt raises AttributeError there.
            scaling = np.sqrt(
                (np.power(y, 2).sum(axis=1) / n).mean()
            ) / np.sqrt((np.power(x, 2).sum(axis=1) / n).mean())
            scaleDiag = np.zeros((idim, idim))
            np.fill_diagonal(scaleDiag, scaling)
            A = np.dot(A, scaleDiag)

    aff = txio.create_ants_transform(
        matrix=A, translation=trans, dimension=idim, center=centerX
    )
    return aff
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/registration/landmark_transforms.py",
"copies": "1",
"size": "2581",
"license": "apache-2.0",
"hash": -5487409831206645000,
"line_mean": 31.6708860759,
"line_max": 87,
"alpha_frac": 0.6094537001,
"autogenerated": false,
"ratio": 3.3005115089514065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44099652090514063,
"avg_score": null,
"num_lines": null
} |
__all__ = ["Fixtures"]
from datetime import datetime
from uuid import uuid4
from freight.config import db
from freight.constants import PROJECT_ROOT
from freight.models import (
App,
Repository,
Task,
DeploySequence,
Deploy,
TaskStatus,
User,
TaskConfig,
TaskConfigType,
)
class Fixtures(object):
    """Test-fixture factory: each method builds one model row, commits it
    to the session, and returns it."""

    def create_taskconfig(self, app, **kwargs):
        """Persist a TaskConfig for ``app``; defaults to a shell deploy
        config with slack and datadog notifiers."""
        kwargs.setdefault("type", TaskConfigType.deploy)
        kwargs.setdefault("provider", "shell")
        if "data" not in kwargs:
            kwargs["data"] = {
                "provider_config": {"command": "/bin/echo helloworld"},
                "notifiers": [
                    {"type": "slack", "config": {"webhook_url": "https://example.com"}},
                    {
                        "type": "datadog",
                        "config": {"webhook_url": "https://example.com"},
                    },
                ],
            }
        row = TaskConfig(app_id=app.id, **kwargs)
        db.session.add(row)
        db.session.commit()
        return row

    def create_app(self, repository, **kwargs):
        """Persist an App tied to ``repository``; random name and a
        two-environment config unless overridden."""
        kwargs["name"] = kwargs.get("name") or uuid4().hex
        kwargs.setdefault(
            "data",
            {
                "environments": {
                    "production": {"default_ref": "master"},
                    "staging": {"default_ref": "HEAD"},
                }
            },
        )
        row = App(repository_id=repository.id, **kwargs)
        db.session.add(row)
        db.session.commit()
        return row

    def create_task(self, app, user, **kwargs):
        """Persist an in-progress deploy Task for ``app`` owned by ``user``."""
        defaults = {
            "provider": "shell",
            "ref": "master",
            "sha": "HEAD",
            "status": TaskStatus.in_progress,
            "data": {"provider_config": app.deploy_config.provider_config},
            "params": {"task": "deploy"},
            "date_started": datetime.utcnow(),
        }
        for field, value in defaults.items():
            kwargs.setdefault(field, value)
        row = Task(app_id=app.id, user_id=user.id, **kwargs)
        db.session.add(row)
        db.session.commit()
        return row

    def create_deploy(self, task, app, **kwargs):
        """Persist a Deploy for ``task``; its number comes from the
        per-environment DeploySequence."""
        env = kwargs.setdefault("environment", "production")
        row = Deploy(
            task_id=task.id,
            app_id=app.id,
            number=DeploySequence.get_clause(app.id, env),
            **kwargs,
        )
        db.session.add(row)
        db.session.commit()
        return row

    def create_repo(self, **kwargs):
        """Persist a git Repository pointing at this project's checkout."""
        kwargs.setdefault("url", PROJECT_ROOT)
        kwargs.setdefault("vcs", "git")
        row = Repository(**kwargs)
        db.session.add(row)
        db.session.commit()
        return row

    def create_user(self, **kwargs):
        """Persist a User; the name defaults to a random hex string."""
        kwargs["name"] = kwargs.get("name") or uuid4().hex
        row = User(**kwargs)
        db.session.add(row)
        db.session.commit()
        return row
| {
"repo_name": "getsentry/freight",
"path": "freight/testutils/fixtures.py",
"copies": "1",
"size": "3066",
"license": "apache-2.0",
"hash": -8205142257454481000,
"line_mean": 25.8947368421,
"line_max": 88,
"alpha_frac": 0.5260926288,
"autogenerated": false,
"ratio": 4.171428571428572,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5197521200228571,
"avg_score": null,
"num_lines": null
} |
__all__ = ['fkdict']
__author__ = 'George Sakkis <george.sakkis AT gmail DOT com>'
import UserDict
from itertools import chain,izip
class fkdict(object):
    '''A dict-like class for mappings with fixed keys.

    The main feature of this class is memory efficiency in the scenario of
    having many dicts with the same keys. Example use cases are rows read from a
    csv.DictReader or constructed out of rows fetched from a database. All such
    rows share the same keys() and therefore each row has to only hold the
    values().

    An additional feature is predictable ordering:
        fkdict.fromkeys('abcd').keys() == list('abcd')

    Currently fkdict does not support the dict operations that change the size
    of the container. Therefore, __delitem__(), pop(), popitem(), clear() and
    setdefault() are not provided. Also __setitem__() raises KeyError if the key
    does not already exist.
    '''

    # Instances only carry their values; the key tuple and the key->index
    # map live on a per-key-set subclass generated by _subcls_factory.
    __slots__ = ['_values']

    def __init__(self, seq=(), **kwds):
        # normalize arguments to a (key,value) iterable
        if hasattr(seq, 'keys'):
            get = seq.__getitem__
            seq = ((k,get(k)) for k in seq.keys())
        if kwds:
            seq = chain(seq, kwds.iteritems())
        # scan the items keeping track of the keys' order
        keys,values = [],[]
        keys_set = set()
        for k,v in seq:
            if k not in keys_set:
                keys_set.add(k)
                keys.append(k)
                values.append(v)
            else:
                # duplicate key: keep the first position, overwrite the value
                values[keys.index(k)] = v
        # mutate self to the appropriate subclass
        self.__class__ = self._subcls_factory(*keys)
        self._values = values

    @classmethod
    def fromkeys(cls, keys, default=None):
        # Build an instance where every key maps to `default`.
        self = cls()
        self.__class__ = cls._subcls_factory(*keys)
        self._values = [default] * len(self)
        return self

    def __len__(self):
        # _keys is a class attribute of the generated subclass.
        return len(self._keys)

    def __contains__(self, key):
        return key in self._key2index

    def has_key(self, key):
        return key in self._key2index

    def __getitem__(self, key):
        return self._values[self._key2index[key]]

    def get(self, key, default=None):
        try: return self[key]
        except KeyError: return default

    def __setitem__(self, key, value):
        # Raises KeyError for unknown keys: the key set is fixed.
        self._values[self._key2index[key]] = value

    def update(self, other=None, **kwargs):
        # copied from UserDict.DictMixin
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'):
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            for k, v in other:
                self[k] = v
        for k, v in kwargs.iteritems():
            self[k] = v

    def __iter__(self):
        return iter(self._keys)

    def iterkeys(self):
        return iter(self._keys)

    def itervalues(self):
        return iter(self._values)

    def iteritems(self):
        return izip(self._keys, self._values)

    def keys(self):
        return list(self._keys)

    def values(self):
        return list(self._values)

    def items(self):
        return zip(self._keys, self._values)

    def copy(self):
        return self.__class__(self.iteritems())

    def __cmp__(self, other):
        # copied from UserDict.DictMixin
        if other is None:
            return 1
        if isinstance(other, UserDict.DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)

    def __repr__(self):
        return '{%s}' % ', '.join('%r: %r' % item for item in self.iteritems())

    @classmethod
    def _subcls_factory(cls, *keys):
        # Return (creating and caching on first use) the subclass of `cls`
        # dedicated to this exact ordered key tuple.
        # if cls is hidden, find its first non hidden ancestor
        cls = (c for c in cls.mro() if not issubclass(c,_Hidden)).next()
        # each (non hidden) class maintains its own registry
        try: registry = cls.__dict__['_Registry']
        except KeyError:
            registry = cls._Registry = {}
        try: return registry[keys]
        except KeyError:
            cls_name = '%s_%d' % (cls.__name__, abs(hash(keys)))
            cls_dict = {
                '__slots__'  : (),
                '_keys'      : keys,
                '_key2index' : dict((k,i) for i,k in enumerate(keys))
            }
            registry[keys] = sub = type(cls_name, (cls,_Hidden), cls_dict)
            return sub
class _Hidden(object):
    # Marker base mixed into the subclasses generated by
    # fkdict._subcls_factory so the factory can skip them when walking
    # the MRO for the "real" user-visible class.
    __slots__ = ()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/499373_Fixed_keys_mapping_type/recipe-499373.py",
"copies": "1",
"size": "4584",
"license": "mit",
"hash": -1751400529715616000,
"line_mean": 30.1836734694,
"line_max": 80,
"alpha_frac": 0.557591623,
"autogenerated": false,
"ratio": 4.060230292294065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0034654655607248377,
"num_lines": 147
} |
__all__ = ['Flags']
__version__ = 0.1
import sys
class Flag(object):
    """Plain record describing one single-character flag.

    Fields: the flag character, its help text, the value-placeholder (or a
    falsy value for boolean flags), whether its value is optional, and
    whether it may be repeated (collective).
    """

    def __init__(self, flag, help, value, opt, coll):
        self.flag, self.help = flag, help
        self.value = value
        self.opt, self.coll = opt, coll
class Positional(object):
    """Plain record describing one positional argument.

    Fields: the argument name, its help text, whether it is optional, and
    whether it collects all excess arguments (collective).
    """

    def __init__(self, name, help, opt, coll):
        self.name, self.help = name, help
        self.opt, self.coll = opt, coll
class Flags(object):
    '''
    Declarative parser for single-character command-line flags and named
    positional arguments.  Declare arguments with ``add``/``add_positional``,
    then consume them via ``parse`` (an iterator) or ``parse_dict``.
    '''

    def __init__(self, help=True, help_start=None, help_end=None):
        '''
        Arguments:
          help: Whether or not to automatically add a help flag.
          help_start: Show this text preceding the help info.
          help_end: Show this text after the help info.
        '''
        self.flags = {}
        self.pos = []
        self.help = help
        if help:
            self.add('h', help='Show this screen')
            self.help_start = help_start
            self.help_end = help_end
        else:
            assert help_start is None, 'cannot have help_start without help'
            # BUGFIX: this message previously read "help_start".
            assert help_end is None, 'cannot have help_end without help'

    def add(self, flag, help='', value=False, opt=False, coll=False):
        '''
        Add a flag.

        Arguments:
          flag: The flag to add.
          help: The help string associated with the flag.
          value: Whether or not the flag is a value flag (takes a value).
          opt: Whether or not the flag's value is optional (value flags only).
          coll: Whether or not the flag can be given multiple times.
        '''
        assert len(flag) == 1, 'flag must be 1 character long'
        # BUGFIX: was `assert not value or not opt`, which rejected exactly
        # the supported combination (a value flag with an optional value);
        # `opt` is only meaningful together with `value`.
        assert value or not opt, 'only value flags can have optional values'
        if value == True:
            value = 'value'  # placeholder shown in the usage string
        self.flags[flag] = Flag(flag, help, value, opt, coll)

    def add_positional(self, name, help='', opt=False, coll=False):
        '''
        Add a positional argument.

        Arguments:
          name: The name of the positional argument.
          help: The help string associated with the argument.
          opt: Whether or not it is optional.
          coll: If True, any excess arguments will be labeled under this one.
        '''
        assert not self.pos or not self.pos[-1].coll,\
            'only last positional can be collective'
        # BUGFIX: was `opt or (not self.pos or self.pos[-1].opt)`, which
        # forbade two consecutive *required* positionals instead of
        # forbidding a required positional after an optional one.
        assert opt or (not self.pos or not self.pos[-1].opt),\
            'non-optional positional cannot follow optional positionals'
        assert len(name) > 1, 'positional ids must have a length greater than 1'
        self.pos.append(Positional(name, help, opt, coll))

    def _usage(self, this):
        # Build the single-line usage summary for program name `this`.
        flags = []
        posx = []
        for flag in self.flags.values():
            if flag.value:
                s = '[-%s<%s>]' % (flag.flag, flag.value)
            else:
                s = '[-%s]' % flag.flag
            if flag.coll:
                s += '...'
            flags.append(s)
        for pos in self.pos:
            s = '<%s>' % pos.name
            if pos.coll:
                s += '...'
            if pos.opt:
                s = '[%s]' % s
            posx.append(s)
        usage = 'usage: %s ' % this
        if posx:
            usage += ' '.join(posx)
            if flags:
                usage += ' '
        if flags:
            usage += ' '.join(flags)
        return usage

    def _show_help(self, this):
        # Print the usage line, positional/flag tables and optional banners.
        print(self._usage(this))
        if self.help_start is not None:
            print(self.help_start)
        if self.pos:
            print('')
            print('Positional arguments:')
            print('')
            lng = max(len(pos.name) for pos in self.pos)
            for pos in self.pos:
                print(' %s %s' % (pos.name.ljust(lng), pos.help))
        if self.flags:
            print('')
            print('Flags:')
            print('')
            lng = max(len(flag.value or '') for flag in self.flags.values())
            if lng:
                lng += 4
            for flag in self.flags.values():
                fl = flag.flag
                if flag.value:
                    fl += ('<%s>' % flag.value)
                print(' -%s %s' % (fl.ljust(lng), flag.help))
        if self.help_end is not None:
            print('')
            print(self.help_end)

    def _err_need_value(self, flag):
        # Abort: a value flag was given without its required value.
        sys.exit('flag %s needs a value' % flag)

    def parse(self, args=None):
        '''
        Parse an arguments list.

        Arguments:
          args: A list of arguments to parse. Defaults to sys.argv.

        This function is an iterator, returning a sequence of pairs
        ``(type, argument, value)``:

          type: A string specifying the type of argument (flag or positional).
          argument: If ``type`` is ``flag``, then this is the single-character
                    flag. If it is ``positional``, then this is the positional
                    argument name.
          value: If the argument takes a value, then this is the value. If the
                 argument takes no value, or if the value is optional and not
                 given, then it is ``None``.
        '''
        if args is None:
            args = sys.argv
        args = args[:]
        this = args.pop(0)
        passed = set()      # non-collective boolean flags already seen
        poscount = 0        # index of the next positional to fill
        get_value = None    # value flag still waiting for its value
        ponly = False       # after '--': everything is positional
        gotcoll = False     # trailing collective positional received a value
        # Number of required (non-optional) leading positionals.
        req = 0
        for p in self.pos:
            if not p.opt:
                req += 1
            else:
                break
        for piece in args:
            # startswith() also guards against empty argument strings.
            if piece.startswith('-') and not ponly:
                if get_value is not None:
                    self._err_need_value(get_value)
                if piece == '--':
                    ponly = True
                    continue
                piece = piece[1:]
                for i, fl in enumerate(piece):
                    if self.help and fl == 'h':
                        self._show_help(this)
                        sys.exit()
                    try:
                        flag = self.flags[fl]
                    except KeyError:  # narrowed from a bare `except`
                        sys.exit('unknown flag: -%s' % fl)
                    # BUGFIX: was `if flag in passed` -- `passed` holds flag
                    # *characters*, so the Flag object never matched and
                    # duplicate flags were silently accepted.
                    if fl in passed:
                        sys.exit('duplicate flag: -%s' % flag.flag)
                    if flag.value:
                        if i+1 == len(piece):
                            if flag.opt:
                                # optional value omitted
                                yield ('flag', fl, None)
                            else:
                                # value must come from the next argument
                                get_value = flag.flag
                        else:
                            # rest of this chunk is the value: -ovalue
                            yield ('flag', fl, piece[i+1:])
                        break
                    else:
                        yield ('flag', fl, None)
                    if not flag.coll:
                        passed.add(fl)
            else:
                if get_value is not None:
                    yield ('flag', get_value, piece)
                    get_value = None
                else:
                    # BUGFIX: guard the no-positionals case; the original
                    # indexed self.pos[-1] and crashed with IndexError
                    # instead of reporting the error.
                    if poscount >= len(self.pos) and (
                            not self.pos or not self.pos[-1].coll):
                        sys.exit('too many positional arguments')
                    yield ('positional', self.pos[poscount].name, piece)
                    if not self.pos[poscount].coll:
                        poscount += 1
                    else:
                        gotcoll = True
        if get_value is not None:
            self._err_need_value(get_value)
        # BUGFIX: same empty-positionals guard as above.
        if self.pos and self.pos[-1].coll and gotcoll:
            poscount += 1
        if poscount < req:
            sys.exit('too few positional arguments')

    def parse_dict(self, *args, **kw):
        '''
        Parse an argument list into a dictionary. Takes the same arguments as
        ``parse``.

        Returns a dictionary of arguments, formatted ``{flag: flaginfo}``. See
        the included examples for more info.
        '''
        res = {}
        # Seed defaults: lists/counters for collective flags, None for
        # value flags, False for booleans.
        for flag in self.flags.values():
            if flag.coll:
                if flag.value:
                    res[flag.flag] = []
                else:
                    res[flag.flag] = 0
            elif flag.value:
                res[flag.flag] = None
            else:
                res[flag.flag] = False
        for pos in self.pos:
            if pos.coll:
                res[pos.name] = []
            else:
                res[pos.name] = None
        posi = 0
        for tp, x, value in self.parse(*args, **kw):
            if tp == 'flag':
                if self.flags[x].coll:
                    if self.flags[x].value:
                        res[x].append(value)
                    else:
                        res[x] += 1
                else:
                    res[x] = value if self.flags[x].value else True
            else:
                if self.pos[posi].coll:
                    res[x].append(value)
                else:
                    res[x] = value
                    posi += 1
        if self.help:
            res.pop('h')
        return res
| {
"repo_name": "kirbyfan64/pyflags",
"path": "flags.py",
"copies": "1",
"size": "8872",
"license": "mit",
"hash": 3515175403680843000,
"line_mean": 32.6060606061,
"line_max": 82,
"alpha_frac": 0.4570559062,
"autogenerated": false,
"ratio": 4.394254581475979,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009812278553911753,
"num_lines": 264
} |
"""All Flask blueprints for the entire application.
All blueprints for all views go here. They shall be imported by the views themselves and by application.py. Blueprint
URL paths are defined here as well.
"""
from flask import Blueprint
def _factory(partial_module_string, url_prefix):
    """Generates blueprint objects for view modules.

    Positional arguments:
    partial_module_string -- string representing a view module without the
        absolute path (e.g. 'home.index' for pypi_portal.views.home.index).
    url_prefix -- URL prefix passed to the blueprint.

    Returns:
    Blueprint instance for a view module.
    """
    return Blueprint(
        partial_module_string,
        'pypi_portal.views.{}'.format(partial_module_string),
        template_folder='templates',
        url_prefix=url_prefix,
    )
# One blueprint per view module, mounted at its URL prefix.
examples_alerts = _factory('examples.alerts', '/examples/alerts')
examples_exception = _factory('examples.exception', '/examples/exception')
home_index = _factory('home.index', '/')
pypi_packages = _factory('pypi.packages', '/pypi')
# Imported by application.py to register every blueprint in one pass.
all_blueprints = (examples_alerts, examples_exception, home_index, pypi_packages,)
| {
"repo_name": "Robpol86/Flask-Large-Application-Example",
"path": "pypi_portal/blueprints.py",
"copies": "1",
"size": "1220",
"license": "mit",
"hash": 2091000013973038000,
"line_mean": 34.8823529412,
"line_max": 117,
"alpha_frac": 0.7295081967,
"autogenerated": false,
"ratio": 3.973941368078176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5203449564778175,
"avg_score": null,
"num_lines": null
} |
"""All Flask blueprints for the entire application.
All blueprints for all views go here. They shall be imported by the views themselves and by application.py. Blueprint
URL paths are defined here as well.
"""
from flask import Blueprint
def blueprint_factory(partial_module_string, url_prefix):
    """Generates blueprint objects for view modules.

    Positional arguments:
    partial_module_string -- string representing a view module without the
        absolute path (e.g. 'home.index' for imp_flask.views.home.index).
    url_prefix -- URL prefix passed to the blueprint.

    Returns:
    Blueprint instance for a view module.
    """
    return Blueprint(
        partial_module_string,
        'imp_flask.views.{}'.format(partial_module_string),
        template_folder='templates',
        url_prefix=url_prefix,
    )
# One blueprint per view module, mounted at its URL prefix.
home_index = blueprint_factory('home.index', '')
relations = blueprint_factory('imp_flask.relations', '/relations')
transactions = blueprint_factory('imp_flask.transactions', '/transactions')
products = blueprint_factory('imp_flask.products', '/products')
mods = blueprint_factory('imp_flask.mods', '/mods')
# Views kept here but not wired up yet:
#pos = blueprint_factory('imp_flask.pos', '/pos')
#conscribo = blueprint_factory('imp_flask.conscribo', '/conscribo')
#settings = blueprint_factory('imp_flask.settings', '/settings')
# Registered in bulk by the application factory.
all_blueprints = [home_index, products, relations, transactions, mods] # conscribo, settings,)
| {
"repo_name": "thijsmie/imp_flask",
"path": "imp_flask/blueprints.py",
"copies": "1",
"size": "1532",
"license": "mit",
"hash": 6226727953987790000,
"line_mean": 39.4054054054,
"line_max": 117,
"alpha_frac": 0.7154046997,
"autogenerated": false,
"ratio": 3.9081632653061225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5123567965006123,
"avg_score": null,
"num_lines": null
} |
__all__ = ['flatten_dict', 'unflatten_dict']
import collections
def flatten_dict(dct):
    """
    Take a nested dictionary and "flatten" it::

        {
            'aaa': {'A1': 'Foo', 'A2': 'Bar'},
            'bbb': {'B1': {'B11': 'foo'}, 'B2': 'bar'},
            'ccc': 'BAZ',
        }

    Becomes::

        {
            ('aaa', 'A1'): 'Foo',
            ('aaa', 'A2'): 'Bar',
            ('bbb', 'B1', 'B11'): 'foo',
            ('bbb', 'B2'): 'bar',
            ('ccc',): 'BAZ',
        }
    """
    # PORTABILITY: ``Mapping`` lives in ``collections.abc`` on Python 3
    # (the bare ``collections.Mapping`` alias was removed in 3.10); fall
    # back to the old location on Python 2.
    _mapping = getattr(collections, 'abc', collections).Mapping
    output = {}

    def _flatten(obj, trail):
        # ``items()`` exists on both Python 2 and 3, unlike ``iteritems``.
        for key, value in obj.items():
            _trail = trail + (key,)
            if isinstance(value, _mapping):
                # We want to keep digging
                _flatten(value, _trail)
            else:
                # We reached an end -> write the value
                output[_trail] = value

    _flatten(dct, tuple())
    return output
def unflatten_dict(dct):
    """
    Opposite of ``flatten_dict``::

        {
            ('aaa', 'A1'): 'Foo',
            ('aaa', 'A2'): 'Bar',
            ('bbb', 'B1', 'B11'): 'foo',
            ('bbb', 'B2'): 'bar',
            ('ccc',): 'BAZ',
        }

    Becomes::

        {
            'aaa': {'A1': 'Foo', 'A2': 'Bar'},
            'bbb': {'B1': {'B11': 'foo'}, 'B2': 'bar'},
            'ccc': 'BAZ',
        }

    Raises:
        ValueError: if any key is the empty tuple.
    """
    output = {}

    def _put_value(obj, path, value):
        # Walk/create intermediate dicts until the last path component.
        if len(path) == 0:
            raise ValueError("Invalid empty key")
        if len(path) == 1:
            # Found!
            obj[path[0]] = value
            return
        if path[0] not in obj:
            obj[path[0]] = {}
        _put_value(obj[path[0]], path[1:], value)

    # PORTABILITY: ``items()`` works on both Python 2 and 3, unlike
    # ``iteritems``.
    for key, value in dct.items():
        _put_value(output, key, value)
    return output
| {
"repo_name": "opendatatrentino/opendata-harvester",
"path": "harvester/utils/flattening.py",
"copies": "1",
"size": "2005",
"license": "bsd-2-clause",
"hash": -9062755230239045000,
"line_mean": 19.6701030928,
"line_max": 54,
"alpha_frac": 0.3581047382,
"autogenerated": false,
"ratio": 3.768796992481203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9626901730681203,
"avg_score": 0,
"num_lines": 97
} |
__all__ = [ 'FlatTextInput', 'FlatButton', 'RaisedButton', 'FloatingAction' ]
"""
Please refer to Google's Material UI guidelines :
http://www.google.com/design
Guidelines for buttons :
http://www.google.com/design/spec/components/buttons.html
"""
import sys
sys.path.append( '..' )
from kivy.animation import Animation
from kivy.adapters.listadapter import ListAdapter
from kivy.base import EventLoop
from kivy.config import Config
from kivy.core.window import Window
from kivy.event import EventDispatcher
from kivy.graphics import Color, Rectangle
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import *
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.listview import ListItemButton, ListView
from kivy.uix.modalview import ModalView
from kivy.uix.textinput import TextInput
from . import labels
from pkg_resources import resource_filename
#KV Files
# Load the widget rules shipped next to this module so that importing the
# package is enough to style the widgets defined below.
path = resource_filename( __name__, 'flatui.kv' )
Builder.load_file( path )
class FlatTextInput( TextInput ) :
    '''
    Flat version of the standard TextInput.
    '''

    show_underline = BooleanProperty( True )
    '''
    If true a line of the same color of the cursor will be drawn under the text.
    '''

    cursor_color = ListProperty( [ 1, 0, 0, .8 ] )
    '''Represents the rgba color used to render the cursor.

    .. versionadded:: 1.0

    :attr:`cursor_color` is a :class:`~kivy.properties.ListProperty` and
    defaults to [ 1, 0, 0, .8 ].
    '''

    def __init__( self, **kargs ) :
        # Default to a fully transparent background unless the caller
        # provided one.
        kargs.setdefault( 'background_color', [0,0,0,0] )
        super( FlatTextInput, self ).__init__( **kargs )
class _MaterialButton( ButtonBehavior, Label ) : #labels.BindedLabel ) :
    '''
    Replacement for Button class, just more flexible...

    Shared base for FlatButton/RaisedButton/FloatingAction: a Label with
    button behavior plus the color/shadow/radius properties below.
    '''
    background_color = ListProperty( [ 1, 1, 1, 1 ] )
    '''Represents the rgba color used to render the frame in the normal state.
    .. versionadded:: 1.0
    The :attr:`background_color` is a
    :class:`~kivy.properties.ListProperty` and defaults to [ 1, 1, 1, 1 ].
    '''
    background_color_down = ListProperty( [ 0, 0, 0, .2 ] )
    '''Represents the rgba color used to render the frame in the down state.
    .. versionadded:: 1.0
    :attr:`background_color_down` is a :class:`~kivy.properties.ListProperty`
    and defaults to [ 0, 0, 0, .2 ].
    '''
    color_down = ListProperty( [ 0, 0, 0, .8 ] )
    '''Represents the rgba color used to render the button text in the down state.
    .. versionadded:: 1.0
    :attr:`color_down` is a :class:`~kivy.properties.ListProperty` and
    defaults to [ 0, 0, 0, .8 ].
    '''
    background_color_disabled = ListProperty( [ 0, 0, 0, .1 ] )
    '''Represents the rgba color used to render the button when disabled.
    .. versionadded:: 1.0
    :attr:`background_color_disabled` is a :class:`~kivy.properties.ListProperty`
    and defaults to [ 0, 0, 0, .1 ].
    '''
    icon = StringProperty( '' )
    '''Icon image file.
    .. versionadded:: 1.0
    :attr:`icon` is a :class:`~kivy.properties.StringProperty`, default to ''.
    '''
    shadow_alpha = NumericProperty( 0.05 )
    '''Alpha channel used to render the rgba shadow.
    .. versionadded:: 1.0
    :attr:`shadow_alpha` is a :class:`~kivy.properties.NumericProperty`, default to 0.05.
    '''
    corner_radius = NumericProperty( dp(2) )
    '''Button corner radius.
    .. versionadded:: 1.0
    :attr:`corner_radius` is a :class:`~kivy.properties.NumericProperty`,
    default to dp(2).
    '''
    def __init__( self, **kargs ) :
        # Center the text unless the caller chose an alignment.
        if not 'valign' in kargs.keys() : kargs['valign'] = 'middle'
        if not 'halign' in kargs.keys() : kargs['halign'] = 'center'
        super( _MaterialButton, self ).__init__( **kargs )
        # NOTE(review): every kwarg is re-assigned as an attribute after
        # super().__init__; this looks redundant for declared properties --
        # confirm whether any caller relies on it for plain attributes.
        for key in kargs.keys() :
            self.__setattr__( key, kargs[key] )
class FlatButton( _MaterialButton ) :
    '''
    Material UI flat button.

    Adds no Python-side behavior over _MaterialButton; presumably its
    look comes from the rules in flatui.kv loaded by this module.
    '''
    pass
class RaisedButton( _MaterialButton ) :
    '''
    Material UI raised button.

    Adds no Python-side behavior over _MaterialButton; presumably its
    look comes from the rules in flatui.kv loaded by this module.
    '''
    pass
class FloatingAction( _MaterialButton ) :
    '''
    Round button with frame.

    Can slide itself in/out of a parent widget via ``add_to_bottom_right``
    and ``remove_from_parent``, animated along the ``entrance`` direction.
    '''
    diameter = NumericProperty( dp(1) )
    '''Represents the diameter of the button.
    Will update widget size.
    .. versionadded:: 1.0
    :attr:`diameter` is a :class:`~kivy.properties.NumericProperty`.
    (``__init__`` raises this to dp(56) when the caller does not set it.)
    '''
    shadow_offset_x = NumericProperty( 0 )
    '''Use this to move the shadow.
    .. versionadded:: 1.0
    :attr:`shadow_offset_x` is a :class:`~kivy.properties.NumericProperty`, default to 0.
    '''
    shadow_offset_y = NumericProperty( dp(1) )
    '''Use this to move the shadow.
    .. versionadded:: 1.0
    :attr:`shadow_offset_y` is a :class:`~kivy.properties.NumericProperty`, default to dp(1).
    '''
    animation_duracy = NumericProperty( .1 )
    '''Used to move button when loading a new view
    .. versionadded:: 1.0
    :attr:`animation_duracy` is a :class:`~kivy.properties.NumericProperty`, default to 0.1.
    '''
    entrance = OptionProperty('', options=['', 'down', 'up', 'left','right'])
    '''Direction the button will come from.
    :attr:`entrance` is a :class:`~kivy.properties.OptionProperty` and
    defaults to ''. Available options are '', down, up, left, right
    '''
    def __init__( self, **kargs ) :
        # Material-style defaults: 56dp red round button with white text.
        if not 'diameter' in kargs.keys() :
            kargs[ 'diameter' ] = dp(56)
        if not 'color' in kargs.keys() :
            kargs[ 'color' ] = [ 1, 1, 1, 1 ]
        if not 'background_color' in kargs.keys() :
            kargs[ 'background_color' ] = [ 0.88, 0.2, 0.15, 1 ]
        if not 'background_color_down' in kargs.keys() :
            kargs[ 'background_color_down' ] = [ 0.88, 0.3, 0.2, 1 ]
        super( FloatingAction, self ).__init__( **kargs )
    def add_to_bottom_right( self, parent ) :
        # Target position: bottom-right corner with a diameter-based margin.
        nx = parent.width-self.diameter*1.2
        ny = self.diameter*0.3
        # Follow parent resizes so the button stays anchored.
        parent.bind( size=self._repose )
        parent.add_widget( self )
        duracy = self.animation_duracy if self.entrance != '' else 0
        if duracy > 0 :
            # Start off-screen on the chosen side, then animate in.
            if self.entrance == 'down'  : self.pos = [ nx, -self.height ]
            if self.entrance == 'up'    : self.pos = [ nx, self.pos[1]+self.height ]
            if self.entrance == 'left'  : self.pos = [ -self.width, ny ]
            if self.entrance == 'right' : self.pos = [ +self.width, ny ]
            animation = Animation( x=nx, y=ny, duration=duracy )
            animation.start( self )
        else :
            self.pos = nx, ny
        # NOTE(review): add_widget() normally sets parent already -- confirm
        # whether this explicit assignment is needed.
        self.parent = parent
    def remove_from_parent( self ) :
        # Animate back out along the entrance direction, then detach.
        duracy = self.animation_duracy if self.entrance != '' else 0
        nx, ny = self.pos
        if self.entrance == 'down'  : ny = 0
        if self.entrance == 'up'    : ny = self.parent.height+self.height
        if self.entrance == 'left'  : nx = -self.width
        if self.entrance == 'right' : nx = self.parent.width
        animation = Animation( x=nx, y=ny, duration=duracy )
        animation.bind( on_complete=self._remove_from_parent )
        animation.start( self )
    def _remove_from_parent( self, *args ) :
        # Animation callback: drop the resize binding and leave the parent.
        self.parent.unbind( size=self._repose )
        self.parent.remove_widget( self )
    def _repose( self, i, v ) :
        # Parent size changed (v is the new size): re-anchor bottom-right.
        self.pos = [ v[0]-self.diameter*1.2, self.diameter*0.3 ]
| {
"repo_name": "Cuuuurzel/kivy-material-ui",
"path": "material_ui/flatui/flatui.py",
"copies": "1",
"size": "7419",
"license": "mit",
"hash": -6400910947235387000,
"line_mean": 26.4777777778,
"line_max": 92,
"alpha_frac": 0.6140989352,
"autogenerated": false,
"ratio": 3.5979631425800194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47120620777800193,
"avg_score": null,
"num_lines": null
} |
__all__ = ['FlowStation']
from os.path import dirname, join
from openmdao.main.api import VariableTree
from openmdao.lib.datatypes.api import Float, VarTree, Enum
from Cantera import *
import pycycle #used to find file paths
GAS_CONSTANT = 0.0685592 #BTU/lbm-R
#secant solver with a limit on overall step size
def secant(func, x0, TOL=1e-7, x_min=-1e15, x_max=1e15):
    """Find a root of ``func`` with the secant method, clamping each step.

    Arguments:
        func: scalar function whose root is sought.
        x0: initial guess.
        TOL: relative tolerance on the step size used to stop.
        x_min, x_max: bounds on the iterates; a secant step that would
            leave [x_min, x_max] is replaced by a bisection toward the
            violated bound.
            BUGFIX: ``x_min`` previously defaulted to +1e15, which made the
            bound check ``x0 - dx < x_min`` almost always true and drove
            the iterate toward 1e15 whenever the defaults were used.

    Returns:
        The approximate root.
    """
    # Second starting point: a small relative+absolute perturbation of x0.
    if x0 >= 0:
        x1 = x0*(1 + 1e-4) + 1e-4
    else:
        x1 = x0*(1 + 1e-4) - 1e-4
    f1, f = func(x1), func(x0)
    if abs(f) > abs(f1):
        # Keep the point with the smaller residual as the current iterate.
        x1, x0 = x0, x1
        f1, f = f, f1
    dx = f * (x0 - x1) / float(f - f1)
    while 1:
        if abs(dx) < TOL * (1 + abs(x0)):
            return x0 - dx
        dx = f * (x0 - x1) / float(f - f1)
        if x0 - dx < x_min:
            # Full secant step leaves the bracket: bisect toward x_min.
            x1, x0 = x0, (x_min + x0)/2
        elif x0 - dx > x_max:
            x1, x0 = x0, (x_max + x0)/2
        else:
            x1, x0 = x0, x0 - dx
        f1, f = f, func(x0)
class FlowStation(VariableTree):
    # Class-level reactant tables: 6 registered reactants x 6 species each,
    # filled in through the add_reactant() staticmethod and shared by all
    # FlowStation instances (instances also get shadowing copies in __init__).
    reactants = []
    reactantNames = [[0 for x in xrange(6)] for x in xrange(6)]
    reactantSplits =[[0 for x in xrange(6)] for x in xrange(6)]
    numreacts = 0

    # --- total (stagnation) flow properties ---
    ht=Float(0.0, desc='total enthalpy', units='Btu/lbm')
    Tt=Float(0.0, desc='total temperature', units='degR')
    Pt=Float(0.0, desc='total pressure', units='lbf/inch**2')
    rhot=Float(0.0, desc='total density', units='lbm/ft**3')
    gamt=Float(0.0, desc='total gamma')
    Cp = Float(0.0, desc='Specific heat at constant pressure', units='Btu/(lbm*degR)')
    Cv = Float(0.0, desc='Specific heat at constant volume', units='Btu/(lbm*degR)')
    s =Float(0.0, desc='entropy', units='Btu/(lbm*R)')
    W =Float(0.0, desc='weight flow', units='lbm/s')
    FAR =Float(0.0, desc='fuel to air ratio')
    WAR =Float(0.0, desc='water to air ratio')

    # --- static flow properties ---
    hs=Float(0.0, desc='static enthalpy', units='Btu/lbm')
    Ts=Float(0.0, desc='static temperature', units='degR')
    Ps=Float(0.0, desc='static pressure', units='lbf/inch**2')
    rhos=Float(0.0, desc='static density', units='lbm/ft**3')
    gams=Float(0.0, desc='static gamma')
    Vflow =Float(0.0, desc='Velocity', units='ft/s')
    Vsonic=Float(0.0, desc='Speed of sound', units='ft/s')
    Mach=Float(0.0, desc='Mach number')
    area =Float(0.0, desc='flow area', units='inch**2')
    #mu = Float(0.0, desc='dynamic viscosity', units='lbm/(s*ft)')
    sub_or_super = Enum(('sub','super'), desc="selects preference for subsonic or supersonice solution when setting area")
    Wc = Float(0.0, desc='corrected weight flow', units='lbm/s')

    #initialize station
    def __init__(self,*args,**kwargs):
        super(FlowStation, self).__init__(*args,**kwargs)
        #properties file path
        _dir = dirname(pycycle.__file__)
        _prop_file = join(_dir,'gri1000.cti')
        # Per-instance reactant tables and solver state.
        self.reactantNames=[[0 for x in xrange(6)] for x in xrange(6)]
        self.reactantSplits=[[0 for x in xrange(6)] for x in xrange(6)]
        self.numreacts = 0
        # _trigger guards against the *_changed handlers re-entering while
        # a set* method is mid-update.
        self._trigger = 0
        # mass fraction of each registered reactant in the mixture
        self._species=[1.0, 0, 0, 0, 0, 0, 0, 0]
        # which quantity drives setStatic(): 0=default, 1=Mach, 2=area, 3=Ps
        self._mach_or_area=0
        # two Cantera phases: one for total, one for static conditions
        self._flow=importPhase(_prop_file)
        self._flowS=importPhase(_prop_file)
#add a reactant that can be mixed in
@staticmethod
def add_reactant(reactants, splits ):
FlowStation.reactantNames[FlowStation.numreacts][0] = reactants[0]
FlowStation.reactantNames[FlowStation.numreacts][1] = reactants[1]
FlowStation.reactantNames[FlowStation.numreacts][2] = reactants[2]
FlowStation.reactantNames[FlowStation.numreacts][3] = reactants[3]
FlowStation.reactantNames[FlowStation.numreacts][4] = reactants[4]
FlowStation.reactantNames[FlowStation.numreacts][5] = reactants[5]
FlowStation.reactantSplits[FlowStation.numreacts][0] = splits[0]
FlowStation.reactantSplits[FlowStation.numreacts][1] = splits[1]
FlowStation.reactantSplits[FlowStation.numreacts][2] = splits[2]
FlowStation.reactantSplits[FlowStation.numreacts][3] = splits[3]
FlowStation.reactantSplits[FlowStation.numreacts][4] = splits[4]
FlowStation.reactantSplits[FlowStation.numreacts][5] = splits[5]
FlowStation.numreacts = FlowStation.numreacts + 1
    # The *_changed handlers below fire when the corresponding variable is
    # assigned; the _trigger guard prevents re-entry while setStatic() is
    # itself updating variables.
    def _W_changed(self):
        if self._trigger == 0:
            self._trigger=1
            self.setStatic()
            self._trigger=0
    #trigger action on Mach
    def _Mach_changed(self):
        if self._trigger == 0:
            self._trigger=1
            self._mach_or_area=1
            self.setStatic()
            self._trigger=0
    #trigger action on area
    def _area_changed(self):
        if self._trigger == 0:
            self._trigger=1
            self._mach_or_area=2
            self.setStatic()
            self._trigger=0
    #trigger action on static pressure
    def _Ps_changed(self):
        if self._trigger == 0:
            self._trigger=1
            self._mach_or_area=3
            self.setStatic()
            self._trigger=0
def _setComp(self):
global reactantNames
global reactantSplits
global numreacts
tempcomp = ''
compname = ['', '', '', '', '', '', '', '', '', '', '', '']
fract = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
numcurrent = 0;
for cName in range ( 0, FlowStation.numreacts ):
for cSpecies in range( 0, 6 ):
if FlowStation.reactantSplits[cName][cSpecies]*self._species[cName] > 0.00001:
fract[numcurrent]=FlowStation.reactantSplits[cName][cSpecies]*self._species[cName];
compname[numcurrent] = FlowStation.reactantNames[cName][cSpecies];
numcurrent = numcurrent+1;
count1 = numcurrent-1
while count1 > -1:
count2 = numcurrent-1
while count2 > -1:
if compname[count2] == compname[count1] and count1 != count2:
fract[count1]=fract[count1]+fract[count2]
fract[count2] = 0
count2 = count2 - 1
count1 = count1 - 1
count1 = numcurrent-1
while count1 > -1:
if fract[count1] > .000001:
tempcomp = tempcomp + str(compname[count1])+':' +str(fract[count1])+ ' '
count1 = count1 - 1
self._flow.setMassFractions( tempcomp )
    #set the composition to dry air
    def setDryAir(self):
        """Reset the working fluid to dry air (reactant 0) with zero WAR/FAR.

        NOTE(review): only _species[0] is overwritten; any non-zero fractions
        left in _species[1:] by a previous setWAR/burn call are not cleared
        before _setComp() runs -- confirm this is intended.
        """
        self._species[0]=1
        self.WAR=0
        self.FAR=0
        self._setComp()
        self._trigger=0
    #set the composition to pure mixture of one of the reactants
    def setReactant(self, i):
        """Set the composition to 100% of reactant *i* (1-based index).

        NOTE(review): this rebinds _species to a 6-element list while __init__
        creates an 8-element one, and _setComp() is not called here -- verify
        callers invoke a setTotal* method afterwards to sync the flow object.
        """
        self._species= [0,0,0,0,0,0]
        self._species[i-1] = 1
    #set the composition to air with water
    def setWAR(self, WAR):
        """Set a humid-air composition from water-to-air ratio *WAR*, then
        re-sync the flow object and recompute statics."""
        self._trigger=1
        self.WAR=WAR
        self.FAR=0
        # mass fractions: dry air = 1/(1+WAR), water = WAR/(1+WAR)
        self._species[0]=(1)/(1+WAR)
        self._species[1]=(WAR)/(1+WAR)
        self._setComp()
        self.setStatic()
        self._trigger=0
    def _total_calcs(self):
        """Pull total-state properties out of the Cantera flow object,
        converting SI to engineering units.

        Conversion factors used below: J/kg -> Btu/lbm (4.3021e-4),
        J/(kg*K) -> Btu/(lbm*R) (2.3885e-4), kg/m^3 -> lbm/ft^3 (0.0624),
        K -> degR (9/5), m/s -> ft/s (3.28084).
        """
        self.ht=self._flow.enthalpy_mass()*0.0004302099943161011
        self.s=self._flow.entropy_mass()*0.000238845896627
        self.rhot=self._flow.density()*.0624
        self.Tt=self._flow.temperature()*9./5.
        self.Cp = self._flow.cp_mass()*2.388459e-4
        self.Cv = self._flow.cv_mass()*2.388459e-4
        self.gamt=self.Cp/self.Cv
        # NOTE(review): this aliases the static-state phase to the total-state
        # phase object (no copy is made) -- confirm that is intended
        self._flowS=self._flow
        self.setStatic()
        # corrected flow: W*sqrt(theta)/delta (std day 518.67 degR, 14.696 psia)
        self.Wc = self.W*(self.Tt/518.67)**.5/(self.Pt/14.696)
        self.Vsonic=math.sqrt(self.gams*GasConstant*self._flowS.temperature()/self._flowS.meanMolecularWeight())*3.28084
        #self.mu = self._flow.viscosity()*0.671968975
        self._trigger=0
    #set total conditions based on T and P
    def setTotalTP(self, Tin, Pin):
        """Set total conditions from temperature *Tin* (degR) and pressure
        *Pin* (psia), equilibrating the mixture at constant T,P."""
        self._setComp()
        self._trigger=1
        self.Tt=Tin
        self.Pt=Pin
        # Cantera works in SI: degR -> K (5/9), psia -> Pa (6894.757)
        self._flow.set(T=Tin*5./9., P=Pin*6894.75729)
        self._flow.equilibrate('TP')
        self._total_calcs()
    #set total conditions based on h and P
    def setTotal_hP(self, hin, Pin):
        """Set total conditions from enthalpy *hin* (Btu/lbm) and pressure
        *Pin* (psia), equilibrating the mixture at constant H,P."""
        self._setComp()
        self._trigger=1
        self.ht=hin
        self.Pt=Pin
        # Btu/lbm -> J/kg, psia -> Pa
        self._flow.set(H=hin/.0004302099943161011, P=Pin*6894.75729)
        self._flow.equilibrate('HP')
        self._total_calcs()
    #set total condition based on S and P
    def setTotalSP(self, sin, Pin):
        """Set total conditions from entropy *sin* (Btu/(lbm*R)) and pressure
        *Pin* (psia), equilibrating the mixture at constant S,P."""
        self._setComp()
        self._trigger=1
        self.s=sin
        self.Pt=Pin
        # Btu/(lbm*R) -> J/(kg*K), psia -> Pa
        self._flow.set(S=sin/0.000238845896627, P=Pin*6894.75729)
        self._flow.equilibrate('SP', loglevel=1)
        self._total_calcs()
        self._trigger=0
    #add another station to this one
    #mix enthalpies and keep pressure at this station's value
    def add(self, FS2):
        """Mix flow station *FS2* into this one: mass-weighted composition,
        FAR/WAR and enthalpy; total pressure stays at this station's Pt."""
        temp =""
        # mass-weighted average of the per-reactant species fractions
        for i in range(0, len(self._species)):
            self._species[i]=(self.W*self._species[i]+FS2.W*FS2._species[i])/(self.W + FS2.W)
        self._setComp()
        # air-mass-weighted FAR/WAR bookkeeping (air = total/(1+FAR+WAR))
        air1 = self.W * ( 1. / ( 1. + self.FAR + self.WAR ))
        air2 = FS2.W *( 1. / ( 1 + FS2.WAR + FS2.FAR ))
        self.FAR = ( air1 * self.FAR + air2*FS2.FAR )/( air1 + air2 )
        self.WAR = ( air1 * self.WAR + air2*FS2.WAR )/( air1 + air2 )
        self.ht=(self.W*self.ht+FS2.W*FS2.ht)/(self.W+FS2.W)
        self.W=self.W +(FS2.W)
        # seed the solver at the old Tt, then re-equilibrate at the mixed ht
        self._flow.set(T=self.Tt*5./9., P=self.Pt*6894.75729)
        self._flow.equilibrate('TP')
        self._flow.set(H=self.ht/0.0004302099943161011, P=self.Pt*6894.75729)
        self._flow.equilibrate('HP')
        self.Tt=self._flow.temperature()*9./5.
        self.s=self._flow.entropy_mass()* 0.000238845896627
        self.rhot=self._flow.density()*.0624
        self.gamt=self._flow.cp_mass()/self._flow.cv_mass()
    def copy_from(self, FS2):
        """Duplicate total properties and composition from flow station *FS2*.

        NOTE(review): `self.reactants` is not defined anywhere in this file
        (the tables are FlowStation.reactantNames); this loop appears to
        predate that refactor and would raise AttributeError -- verify
        against callers before relying on this method.
        """
        self.ht=FS2.ht
        self.Tt=FS2.Tt
        self.Pt=FS2.Pt
        self.rhot=FS2.rhot
        self.gamt=FS2.gamt
        self.s =FS2.s
        self.W =FS2.W
        self.FAR =FS2.FAR
        self.WAR =FS2.WAR
        temp =""
        for i in range(0, len(self.reactants)):
            self._species[i]=FS2._species[i]
            temp=temp+self.reactants[i]+":"+str(self._species[i])+" "
        self._flow.setMassFractions(temp)
        self._flow.set(T=self.Tt*5./9., P=self.Pt*6894.75729)
        self._flow.equilibrate('TP')
    #burn a fuel with this station
    def burn(self, fuel, Wfuel, hfuel):
        """Burn *Wfuel* lbm/s of reactant index *fuel* (1-based) with enthalpy
        *hfuel* (Btu/lbm) into this stream at constant Pt, then re-equilibrate."""
        flow_1=self.W
        self.W=self.W + Wfuel
        # redistribute species mass fractions to account for the added fuel
        for i in range(0, len(self._species)):
            if ( fuel - 1 ) == i:
                self._species[i]=(flow_1*self._species[i]+Wfuel)/ self.W
            else:
                self._species[i]=(flow_1*self._species[i])/ self.W
        # mass-weighted enthalpy of the mixed stream
        self.ht= (flow_1 * self.ht + Wfuel * hfuel)/ self.W
        air1=flow_1 * (1. / (1. + self.FAR + self.WAR))
        self.FAR=(air1 * self.FAR + Wfuel)/(air1)
        self._setComp()
        # seed near a flame temperature so the constant-H,P equilibrate converges
        # (NOTE(review): 2660*5/9 floor-divides under Python 2 -- seed value only)
        self._flow.set(T=2660*5/9, P=self.Pt*6894.75729)
        self._flow.equilibrate('TP')
        self._flow.set(H=self.ht/0.0004302099943161011, P=self.Pt*6894.75729)
        self._flow.equilibrate('HP')
        self.Tt=self._flow.temperature()*9./5.
        self.s=self._flow.entropy_mass()*0.000238845896627
        self.rhot=self._flow.density()*.0624
        self.gamt=self._flow.cp_mass()/self._flow.cv_mass()
    #set the statics based on Mach
    def setStaticMach(self):
        """Solve (secant method on Ps) for the static pressure that yields the
        requested self.Mach."""
        mach_target = self.Mach
        def f(Ps):
            # residual: Mach computed from a trial Ps minus the target
            self.Ps=Ps
            self.setStaticPs()
            return self.Mach - mach_target
        # isentropic-relation initial guess, nudged 10% low
        Ps_guess = self.Pt*(1 + (self.gamt-1)/2*mach_target**2)**(self.gamt/(1-self.gamt))*.9
        secant(f, Ps_guess, x_min=0, x_max=self.Pt)
    #set the statics based on pressure
    def setStaticPs(self):
        """Compute the static state from self.Ps via a constant-(S,P)
        equilibrate, then derive velocity, Mach number and flow area."""
        # NOTE(review): this aliases _flowS to _flow (no copy), so the total
        # phase object is mutated by the (S,P) solve below -- confirm intended
        self._flowS=self._flow
        self._flowS.set(S=self.s/0.000238845896627, P=self.Ps*6894.75729)
        self._flowS.equilibrate('SP')
        self.Ts=self._flowS.temperature()*9./5.
        self.rhos=self._flowS.density()*.0624
        self.gams=self._flowS.cp_mass()/self._flowS.cv_mass()
        self.hs=self._flowS.enthalpy_mass()*0.0004302099943161011
        # V = sqrt(2*g*J*(ht-hs)): Btu/lbm -> ft^2/s^2 via 778.169*32.174
        self.Vflow=(778.169*32.1740*2*(self.ht-self.hs))**.5
        self.Vsonic=math.sqrt(self.gams*GasConstant*self._flowS.temperature()/self._flowS.meanMolecularWeight())*3.28084
        self.Mach=self.Vflow / self.Vsonic
        # continuity: A = W/(rho*V), ft^2 -> in^2
        self.area= self.W / (self.rhos*self.Vflow)*144.
    def setStaticArea(self):
        """Solve for the static pressure that produces the requested flow area.

        First finds the Mach-1 static pressure, then searches the subsonic
        branch between Ps(M=1) and Pt; if self.sub_or_super == "super",
        continues the search on the supersonic branch below Ps(M=1).
        """
        target_area = self.area
        Ps_guess=self.Pt*(1 + (self.gamt-1)/2)**(self.gamt/(1-self.gamt)) #at mach 1
        def f(Ps):
            # residual: distance from sonic condition
            self.Ps = Ps
            self.setStaticPs()
            return 1-self.Mach
        Ps_M1 = secant(f,Ps_guess,x_min=0,x_max=self.Pt)
        #find the subsonic solution first
        guess = (self.Pt+Ps_M1)/2
        def f(Ps):
            # residual: computed area minus the requested area
            self.Ps = Ps
            self.setStaticPs()
            return self.W/(self.rhos*self.Vflow)*144.-target_area
        secant(f, guess, x_min=Ps_M1, x_max=self.Pt)
        #if you want the supersonic one, just keep going with a little lower initial guess
        if self.sub_or_super == "super":
            #jsg: wild guess of 1/M_subsonic
            mach_guess = 1/self.Mach
            Ps_guess=self.Pt*(1 + (self.gamt-1)/2*mach_guess**2)**(self.gamt/(1-self.gamt))
            secant(f, Ps_guess, x_min=0, x_max=Ps_M1)
    #determine which static calc to use
    def setStatic(self):
        """Dispatch to the static-state calculation selected by
        _mach_or_area (0=none, 1=Mach, 2=area, 3=Ps)."""
        if (self.Tt and self.Pt): # only once totals are non-zero
            # corrected flow: W*sqrt(theta)/delta
            self.Wc = self.W*(self.Tt/518.67)**.5/(self.Pt/14.696)
            if self._mach_or_area == 0:
                # no driver specified: statics equal totals (zero velocity)
                self.Ps = self.Pt
                self.Ts = self.Tt
                self.rhos = self.rhot
                self.gams = self.gamt
                self.hs = self.ht
                self.Vflow = 0
                self.Mach = 0
            elif self._mach_or_area == 1:
                self.setStaticMach()
            elif self._mach_or_area ==2:
                self.setStaticArea()
            elif self._mach_or_area == 3:
                self.setStaticPs()
    #set the statics based on Ts, Ps, and MN
    #TODO: upgrade to use loops
    def setStaticTsPsMN(self, Ts, Ps, MN):
        """Set the state from static temperature *Ts* (degR), static pressure
        *Ps* (psia) and Mach number *MN* by back-computing the totals from the
        isentropic relations, iterating twice because gamt changes."""
        self._trigger=1
        self.Tt=Ts*(1+(self.gamt - 1) /2.* MN**2)
        self.Pt=Ps*(1+(self.gamt - 1) /2.* MN**2)**(self.gamt /(self.gamt -1))
        self.setTotalTP(self.Tt, self.Pt)
        #do this once more because gamt changed... very crude iteration
        self.Tt=Ts*(1+(self.gamt - 1) /2.* MN**2)
        self.Pt=Ps*(1+(self.gamt - 1) /2.* MN**2)**(self.gamt /(self.gamt -1))
        self.setTotalTP(self.Tt, self.Pt)
        self._trigger=1
        self.Mach=MN
        self.setStaticMach()
        self.area= self.W / (self.rhos * self.Vflow)*144.
        self._trigger=0
#For right now, all FlowStations are Air/Fuel FlowStations
#Register the built-in reactants: dry air (1), water (2), and four fuel
#models: CH2/CH hydrocarbon (3), elemental C/H (4), Jet-A (5), hydrogen (6).
FlowStation.add_reactant( ['N2', 'O2', 'AR', 'CO2', '', ''],[.755184, .231416, .012916, 0.000485, 0., 0. ] )
FlowStation.add_reactant( ['H2O', '', '', '', '', ''], [1., 0., 0., 0., 0., 0. ] )
FlowStation.add_reactant( ['CH2', 'CH', '', '', '', ''], [.922189, 0.07781, 0., 0., 0., 0. ] )
FlowStation.add_reactant( ['C', 'H', '', '', '', ''], [.86144,.13856, 0., 0., 0., 0. ] )
FlowStation.add_reactant( ['Jet-A(g)', '', '', '', '', ''], [1., 0., 0., 0., 0., 0. ] )
FlowStation.add_reactant( ['H2', '', '', '', '', ''], [1., 0., 0., 0., 0., 0. ] )
#variable class used in components
class FlowStationVar(VarTree):
    """VarTree wrapper so components can declare FlowStation-typed variables."""
    def __init__(self,*args,**metadata):
        # default value is a freshly constructed FlowStation
        super(FlowStationVar,self).__init__(FlowStation(), *args, **metadata)
| {
"repo_name": "kishenr12/pyCycle",
"path": "src/pycycle/flowstation.py",
"copies": "3",
"size": "16038",
"license": "apache-2.0",
"hash": 3962038984062622000,
"line_mean": 36.6478873239,
"line_max": 122,
"alpha_frac": 0.5505674024,
"autogenerated": false,
"ratio": 2.9661549842796373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5016722386679637,
"avg_score": null,
"num_lines": null
} |
__all__ = ["FontBuilder"]
"""
This module is *experimental*, meaning it still may evolve and change.
The `FontBuilder` class is a convenient helper to construct working TTF or
OTF fonts from scratch.
Note that the various setup methods cannot be called in arbitrary order,
due to various interdependencies between OpenType tables. Here is an order
that works:
fb = FontBuilder(...)
fb.setupGlyphOrder(...)
fb.setupCharacterMap(...)
fb.setupGlyf(...) --or-- fb.setupCFF(...)
fb.setupHorizontalMetrics(...)
fb.setupHorizontalHeader()
fb.setupNameTable(...)
fb.setupOS2()
fb.addOpenTypeFeatures(...)
fb.setupPost()
fb.save(...)
Here is how to build a minimal TTF:
```python
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
def drawTestGlyph(pen):
pen.moveTo((100, 100))
pen.lineTo((100, 1000))
pen.qCurveTo((200, 900), (400, 900), (500, 1000))
pen.lineTo((500, 100))
pen.closePath()
fb = FontBuilder(1024, isTTF=True)
fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"])
fb.setupCharacterMap({32: "space", 65: "A", 97: "a"})
advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0}
familyName = "HelloTestFont"
styleName = "TotallyNormal"
version = "0.1"
nameStrings = dict(
familyName=dict(en=familyName, nl="HalloTestFont"),
styleName=dict(en=styleName, nl="TotaalNormaal"),
uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName,
fullName=familyName + "-" + styleName,
psName=familyName + "-" + styleName,
version="Version " + version,
)
pen = TTGlyphPen(None)
drawTestGlyph(pen)
glyph = pen.glyph()
glyphs = {".notdef": glyph, "space": glyph, "A": glyph, "a": glyph, ".null": glyph}
fb.setupGlyf(glyphs)
metrics = {}
glyphTable = fb.font["glyf"]
for gn, advanceWidth in advanceWidths.items():
metrics[gn] = (advanceWidth, glyphTable[gn].xMin)
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=-200)
fb.setupNameTable(nameStrings)
fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200)
fb.setupPost()
fb.save("test.ttf")
```
And here's how to build a minimal OTF:
```python
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.t2CharStringPen import T2CharStringPen
def drawTestGlyph(pen):
pen.moveTo((100, 100))
pen.lineTo((100, 1000))
pen.curveTo((200, 900), (400, 900), (500, 1000))
pen.lineTo((500, 100))
pen.closePath()
fb = FontBuilder(1024, isTTF=False)
fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"])
fb.setupCharacterMap({32: "space", 65: "A", 97: "a"})
advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0}
familyName = "HelloTestFont"
styleName = "TotallyNormal"
version = "0.1"
nameStrings = dict(
familyName=dict(en=familyName, nl="HalloTestFont"),
styleName=dict(en=styleName, nl="TotaalNormaal"),
uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName,
fullName=familyName + "-" + styleName,
psName=familyName + "-" + styleName,
version="Version " + version,
)
pen = T2CharStringPen(600, None)
drawTestGlyph(pen)
charString = pen.getCharString()
charStrings = {
".notdef": charString,
"space": charString,
"A": charString,
"a": charString,
".null": charString,
}
fb.setupCFF(nameStrings["psName"], {"FullName": nameStrings["psName"]}, charStrings, {})
lsb = {gn: cs.calcBounds(None)[0] for gn, cs in charStrings.items()}
metrics = {}
for gn, advanceWidth in advanceWidths.items():
metrics[gn] = (advanceWidth, lsb[gn])
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=-200)
fb.setupNameTable(nameStrings)
fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200)
fb.setupPost()
fb.save("test.otf")
```
"""
from .ttLib import TTFont, newTable
from .ttLib.tables._c_m_a_p import cmap_classes
from .misc.timeTools import timestampNow
import struct
from collections import OrderedDict
# Default field values for the 'head' table (see FontBuilder.setupHead).
_headDefaults = dict(
    tableVersion=1.0,
    fontRevision=1.0,
    checkSumAdjustment=0,
    magicNumber=0x5F0F3CF5,
    flags=0x0003,
    unitsPerEm=1000,
    created=0,
    modified=0,
    xMin=0,
    yMin=0,
    xMax=0,
    yMax=0,
    macStyle=0,
    lowestRecPPEM=3,
    fontDirectionHint=2,
    indexToLocFormat=0,
    glyphDataFormat=0,
)

# Default field values for a glyf-flavored (TTF) 'maxp' table.
_maxpDefaultsTTF = dict(
    tableVersion=0x00010000,
    numGlyphs=0,
    maxPoints=0,
    maxContours=0,
    maxCompositePoints=0,
    maxCompositeContours=0,
    maxZones=2,
    maxTwilightPoints=0,
    maxStorage=0,
    maxFunctionDefs=0,
    maxInstructionDefs=0,
    maxStackElements=0,
    maxSizeOfInstructions=0,
    maxComponentElements=0,
    maxComponentDepth=0,
)
# Default field values for a CFF-flavored (OTF) 'maxp' table (version 0.5).
_maxpDefaultsOTF = dict(
    tableVersion=0x00005000,
    numGlyphs=0,
)

# Default field values for the 'post' table (see FontBuilder.setupPost).
_postDefaults = dict(
    formatType=3.0,
    italicAngle=0,
    underlinePosition=0,
    underlineThickness=0,
    isFixedPitch=0,
    minMemType42=0,
    maxMemType42=0,
    minMemType1=0,
    maxMemType1=0,
)

# Default field values for the 'hhea' table (see setupHorizontalHeader).
_hheaDefaults = dict(
    tableVersion=0x00010000,
    ascent=0,
    descent=0,
    lineGap=0,
    advanceWidthMax=0,
    minLeftSideBearing=0,
    minRightSideBearing=0,
    xMaxExtent=0,
    caretSlopeRise=1,
    caretSlopeRun=0,
    caretOffset=0,
    reserved0=0,
    reserved1=0,
    reserved2=0,
    reserved3=0,
    metricDataFormat=0,
    numberOfHMetrics=0,
)

# Default field values for the 'vhea' table (see setupVerticalHeader).
_vheaDefaults = dict(
    tableVersion=0x00010000,
    ascent=0,
    descent=0,
    lineGap=0,
    advanceHeightMax=0,
    minTopSideBearing=0,
    minBottomSideBearing=0,
    yMaxExtent=0,
    caretSlopeRise=0,
    caretSlopeRun=0,
    reserved0=0,
    reserved1=0,
    reserved2=0,
    reserved3=0,
    reserved4=0,
    metricDataFormat=0,
    numberOfVMetrics=0,
)

# Mapping of descriptive key names accepted by setupNameTable to the
# OpenType 'name' table nameIDs.
_nameIDs = dict(
    copyright=0,
    familyName=1,
    styleName=2,
    uniqueFontIdentifier=3,
    fullName=4,
    version=5,
    psName=6,
    trademark=7,
    manufacturer=8,
    designer=9,
    description=10,
    vendorURL=11,
    designerURL=12,
    licenseDescription=13,
    licenseInfoURL=14,
    # reserved = 15,
    typographicFamily=16,
    typographicSubfamily=17,
    compatibleFullName=18,
    sampleText=19,
    postScriptCIDFindfontName=20,
    wwsFamilyName=21,
    wwsSubfamilyName=22,
    lightBackgroundPalette=23,
    darkBackgroundPalette=24,
    variationsPostScriptNamePrefix=25,
)

# to insert in setupNameTable doc string:
# print("\n".join(("%s (nameID %s)" % (k, v)) for k, v in sorted(_nameIDs.items(), key=lambda x: x[1])))

# All-zero PANOSE classification, embedded in the OS/2 defaults below.
_panoseDefaults = dict(
    bFamilyType=0,
    bSerifStyle=0,
    bWeight=0,
    bProportion=0,
    bContrast=0,
    bStrokeVariation=0,
    bArmStyle=0,
    bLetterForm=0,
    bMidline=0,
    bXHeight=0,
)

# Default field values for the 'OS/2' table (see FontBuilder.setupOS2).
_OS2Defaults = dict(
    version=3,
    xAvgCharWidth=0,
    usWeightClass=400,
    usWidthClass=5,
    fsType=0x0004, # default: Preview & Print embedding
    ySubscriptXSize=0,
    ySubscriptYSize=0,
    ySubscriptXOffset=0,
    ySubscriptYOffset=0,
    ySuperscriptXSize=0,
    ySuperscriptYSize=0,
    ySuperscriptXOffset=0,
    ySuperscriptYOffset=0,
    yStrikeoutSize=0,
    yStrikeoutPosition=0,
    sFamilyClass=0,
    panose=_panoseDefaults,
    ulUnicodeRange1=0,
    ulUnicodeRange2=0,
    ulUnicodeRange3=0,
    ulUnicodeRange4=0,
    achVendID="????",
    fsSelection=0,
    usFirstCharIndex=0,
    usLastCharIndex=0,
    sTypoAscender=0,
    sTypoDescender=0,
    sTypoLineGap=0,
    usWinAscent=0,
    usWinDescent=0,
    ulCodePageRange1=0,
    ulCodePageRange2=0,
    sxHeight=0,
    sCapHeight=0,
    usDefaultChar=0, # .notdef
    usBreakChar=32, # space
    usMaxContext=0,
    usLowerOpticalPointSize=0,
    usUpperOpticalPointSize=0,
)
class FontBuilder(object):
def __init__(self, unitsPerEm=None, font=None, isTTF=True):
"""Initialize a FontBuilder instance.
If the `font` argument is not given, a new `TTFont` will be
constructed, and `unitsPerEm` must be given. If `isTTF` is True,
the font will be a glyf-based TTF; if `isTTF` is False it will be
a CFF-based OTF.
If `font` is given, it must be a `TTFont` instance and `unitsPerEm`
must _not_ be given. The `isTTF` argument will be ignored.
"""
if font is None:
self.font = TTFont(recalcTimestamp=False)
self.isTTF = isTTF
now = timestampNow()
assert unitsPerEm is not None
self.setupHead(unitsPerEm=unitsPerEm, created=now, modified=now)
self.setupMaxp()
else:
assert unitsPerEm is None
self.font = font
self.isTTF = "glyf" in font
def save(self, file):
"""Save the font. The 'file' argument can be either a pathname or a
writable file object.
"""
self.font.save(file)
def _initTableWithValues(self, tableTag, defaults, values):
table = self.font[tableTag] = newTable(tableTag)
for k, v in defaults.items():
setattr(table, k, v)
for k, v in values.items():
setattr(table, k, v)
return table
def _updateTableWithValues(self, tableTag, values):
table = self.font[tableTag]
for k, v in values.items():
setattr(table, k, v)
def setupHead(self, **values):
"""Create a new `head` table and initialize it with default values,
which can be overridden by keyword arguments.
"""
self._initTableWithValues("head", _headDefaults, values)
def updateHead(self, **values):
"""Update the head table with the fields and values passed as
keyword arguments.
"""
self._updateTableWithValues("head", values)
def setupGlyphOrder(self, glyphOrder):
"""Set the glyph order for the font."""
self.font.setGlyphOrder(glyphOrder)
def setupCharacterMap(self, cmapping, uvs=None, allowFallback=False):
"""Build the `cmap` table for the font. The `cmapping` argument should
be a dict mapping unicode code points as integers to glyph names.
The `uvs` argument, when passed, must be a list of tuples, describing
Unicode Variation Sequences. These tuples have three elements:
(unicodeValue, variationSelector, glyphName)
`unicodeValue` and `variationSelector` are integer code points.
`glyphName` may be None, to indicate this is the default variation.
Text processors will then use the cmap to find the glyph name.
Each Unicode Variation Sequence should be an officially supported
sequence, but this is not policed.
"""
subTables = []
highestUnicode = max(cmapping)
if highestUnicode > 0xFFFF:
cmapping_3_1 = dict((k, v) for k, v in cmapping.items() if k < 0x10000)
subTable_3_10 = buildCmapSubTable(cmapping, 12, 3, 10)
subTables.append(subTable_3_10)
else:
cmapping_3_1 = cmapping
format = 4
subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1)
try:
subTable_3_1.compile(self.font)
except struct.error:
# format 4 overflowed, fall back to format 12
if not allowFallback:
raise ValueError(
"cmap format 4 subtable overflowed; sort glyph order by unicode to fix."
)
format = 12
subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1)
subTables.append(subTable_3_1)
subTable_0_3 = buildCmapSubTable(cmapping_3_1, format, 0, 3)
subTables.append(subTable_0_3)
if uvs is not None:
uvsDict = {}
for unicodeValue, variationSelector, glyphName in uvs:
if cmapping.get(unicodeValue) == glyphName:
# this is a default variation
glyphName = None
if variationSelector not in uvsDict:
uvsDict[variationSelector] = []
uvsDict[variationSelector].append((unicodeValue, glyphName))
uvsSubTable = buildCmapSubTable({}, 14, 0, 5)
uvsSubTable.uvsDict = uvsDict
subTables.append(uvsSubTable)
self.font["cmap"] = newTable("cmap")
self.font["cmap"].tableVersion = 0
self.font["cmap"].tables = subTables
def setupNameTable(self, nameStrings, windows=True, mac=True):
"""Create the `name` table for the font. The `nameStrings` argument must
be a dict, mapping nameIDs or descriptive names for the nameIDs to name
record values. A value is either a string, or a dict, mapping language codes
to strings, to allow localized name table entries.
By default, both Windows (platformID=3) and Macintosh (platformID=1) name
records are added, unless any of `windows` or `mac` arguments is False.
The following descriptive names are available for nameIDs:
copyright (nameID 0)
familyName (nameID 1)
styleName (nameID 2)
uniqueFontIdentifier (nameID 3)
fullName (nameID 4)
version (nameID 5)
psName (nameID 6)
trademark (nameID 7)
manufacturer (nameID 8)
designer (nameID 9)
description (nameID 10)
vendorURL (nameID 11)
designerURL (nameID 12)
licenseDescription (nameID 13)
licenseInfoURL (nameID 14)
typographicFamily (nameID 16)
typographicSubfamily (nameID 17)
compatibleFullName (nameID 18)
sampleText (nameID 19)
postScriptCIDFindfontName (nameID 20)
wwsFamilyName (nameID 21)
wwsSubfamilyName (nameID 22)
lightBackgroundPalette (nameID 23)
darkBackgroundPalette (nameID 24)
variationsPostScriptNamePrefix (nameID 25)
"""
nameTable = self.font["name"] = newTable("name")
nameTable.names = []
for nameName, nameValue in nameStrings.items():
if isinstance(nameName, int):
nameID = nameName
else:
nameID = _nameIDs[nameName]
if isinstance(nameValue, str):
nameValue = dict(en=nameValue)
nameTable.addMultilingualName(
nameValue, ttFont=self.font, nameID=nameID, windows=windows, mac=mac
)
def setupOS2(self, **values):
"""Create a new `OS/2` table and initialize it with default values,
which can be overridden by keyword arguments.
"""
if "xAvgCharWidth" not in values:
gs = self.font.getGlyphSet()
widths = [
gs[glyphName].width
for glyphName in gs.keys()
if gs[glyphName].width > 0
]
values["xAvgCharWidth"] = int(round(sum(widths) / float(len(widths))))
self._initTableWithValues("OS/2", _OS2Defaults, values)
if not (
"ulUnicodeRange1" in values
or "ulUnicodeRange2" in values
or "ulUnicodeRange3" in values
or "ulUnicodeRange3" in values
):
assert (
"cmap" in self.font
), "the 'cmap' table must be setup before the 'OS/2' table"
self.font["OS/2"].recalcUnicodeRanges(self.font)
def setupCFF(self, psName, fontInfo, charStringsDict, privateDict):
from .cffLib import (
CFFFontSet,
TopDictIndex,
TopDict,
CharStrings,
GlobalSubrsIndex,
PrivateDict,
)
assert not self.isTTF
self.font.sfntVersion = "OTTO"
fontSet = CFFFontSet()
fontSet.major = 1
fontSet.minor = 0
fontSet.otFont = self.font
fontSet.fontNames = [psName]
fontSet.topDictIndex = TopDictIndex()
globalSubrs = GlobalSubrsIndex()
fontSet.GlobalSubrs = globalSubrs
private = PrivateDict()
for key, value in privateDict.items():
setattr(private, key, value)
fdSelect = None
fdArray = None
topDict = TopDict()
topDict.charset = self.font.getGlyphOrder()
topDict.Private = private
topDict.GlobalSubrs = fontSet.GlobalSubrs
for key, value in fontInfo.items():
setattr(topDict, key, value)
if "FontMatrix" not in fontInfo:
scale = 1 / self.font["head"].unitsPerEm
topDict.FontMatrix = [scale, 0, 0, scale, 0, 0]
charStrings = CharStrings(
None, topDict.charset, globalSubrs, private, fdSelect, fdArray
)
for glyphName, charString in charStringsDict.items():
charString.private = private
charString.globalSubrs = globalSubrs
charStrings[glyphName] = charString
topDict.CharStrings = charStrings
fontSet.topDictIndex.append(topDict)
self.font["CFF "] = newTable("CFF ")
self.font["CFF "].cff = fontSet
def setupCFF2(self, charStringsDict, fdArrayList=None, regions=None):
from .cffLib import (
CFFFontSet,
TopDictIndex,
TopDict,
CharStrings,
GlobalSubrsIndex,
PrivateDict,
FDArrayIndex,
FontDict,
)
assert not self.isTTF
self.font.sfntVersion = "OTTO"
fontSet = CFFFontSet()
fontSet.major = 2
fontSet.minor = 0
cff2GetGlyphOrder = self.font.getGlyphOrder
fontSet.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None)
globalSubrs = GlobalSubrsIndex()
fontSet.GlobalSubrs = globalSubrs
if fdArrayList is None:
fdArrayList = [{}]
fdSelect = None
fdArray = FDArrayIndex()
fdArray.strings = None
fdArray.GlobalSubrs = globalSubrs
for privateDict in fdArrayList:
fontDict = FontDict()
fontDict.setCFF2(True)
private = PrivateDict()
for key, value in privateDict.items():
setattr(private, key, value)
fontDict.Private = private
fdArray.append(fontDict)
topDict = TopDict()
topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
topDict.FDArray = fdArray
scale = 1 / self.font["head"].unitsPerEm
topDict.FontMatrix = [scale, 0, 0, scale, 0, 0]
private = fdArray[0].Private
charStrings = CharStrings(None, None, globalSubrs, private, fdSelect, fdArray)
for glyphName, charString in charStringsDict.items():
charString.private = private
charString.globalSubrs = globalSubrs
charStrings[glyphName] = charString
topDict.CharStrings = charStrings
fontSet.topDictIndex.append(topDict)
self.font["CFF2"] = newTable("CFF2")
self.font["CFF2"].cff = fontSet
if regions:
self.setupCFF2Regions(regions)
def setupCFF2Regions(self, regions):
from .varLib.builder import buildVarRegionList, buildVarData, buildVarStore
from .cffLib import VarStoreData
assert "fvar" in self.font, "fvar must to be set up first"
assert "CFF2" in self.font, "CFF2 must to be set up first"
axisTags = [a.axisTag for a in self.font["fvar"].axes]
varRegionList = buildVarRegionList(regions, axisTags)
varData = buildVarData(list(range(len(regions))), None, optimize=False)
varStore = buildVarStore(varRegionList, [varData])
vstore = VarStoreData(otVarStore=varStore)
topDict = self.font["CFF2"].cff.topDictIndex[0]
topDict.VarStore = vstore
for fontDict in topDict.FDArray:
fontDict.Private.vstore = vstore
def setupGlyf(self, glyphs, calcGlyphBounds=True):
"""Create the `glyf` table from a dict, that maps glyph names
to `fontTools.ttLib.tables._g_l_y_f.Glyph` objects, for example
as made by `fontTools.pens.ttGlyphPen.TTGlyphPen`.
If `calcGlyphBounds` is True, the bounds of all glyphs will be
calculated. Only pass False if your glyph objects already have
their bounding box values set.
"""
assert self.isTTF
self.font["loca"] = newTable("loca")
self.font["glyf"] = newTable("glyf")
self.font["glyf"].glyphs = glyphs
if hasattr(self.font, "glyphOrder"):
self.font["glyf"].glyphOrder = self.font.glyphOrder
if calcGlyphBounds:
self.calcGlyphBounds()
def setupFvar(self, axes, instances):
"""Adds an font variations table to the font.
Args:
axes (list): See below.
instances (list): See below.
``axes`` should be a list of axes, with each axis either supplied as
a py:class:`.designspaceLib.AxisDescriptor` object, or a tuple in the
format ```tupletag, minValue, defaultValue, maxValue, name``.
The ``name`` is either a string, or a dict, mapping language codes
to strings, to allow localized name table entries.
```instances`` should be a list of instances, with each instance either
supplied as a py:class:`.designspaceLib.InstanceDescriptor` object, or a
dict with keys ``location`` (mapping of axis tags to float values),
``stylename`` and (optionally) ``postscriptfontname``.
The ``stylename`` is either a string, or a dict, mapping language codes
to strings, to allow localized name table entries.
"""
addFvar(self.font, axes, instances)
def setupAvar(self, axes):
"""Adds an axis variations table to the font.
Args:
axes (list): A list of py:class:`.designspaceLib.AxisDescriptor` objects.
"""
from .varLib import _add_avar
_add_avar(self.font, OrderedDict(enumerate(axes))) # Only values are used
def setupGvar(self, variations):
gvar = self.font["gvar"] = newTable("gvar")
gvar.version = 1
gvar.reserved = 0
gvar.variations = variations
def calcGlyphBounds(self):
"""Calculate the bounding boxes of all glyphs in the `glyf` table.
This is usually not called explicitly by client code.
"""
glyphTable = self.font["glyf"]
for glyph in glyphTable.glyphs.values():
glyph.recalcBounds(glyphTable)
def setupHorizontalMetrics(self, metrics):
"""Create a new `hmtx` table, for horizontal metrics.
The `metrics` argument must be a dict, mapping glyph names to
`(width, leftSidebearing)` tuples.
"""
self.setupMetrics("hmtx", metrics)
def setupVerticalMetrics(self, metrics):
"""Create a new `vmtx` table, for horizontal metrics.
The `metrics` argument must be a dict, mapping glyph names to
`(height, topSidebearing)` tuples.
"""
self.setupMetrics("vmtx", metrics)
def setupMetrics(self, tableTag, metrics):
"""See `setupHorizontalMetrics()` and `setupVerticalMetrics()`."""
assert tableTag in ("hmtx", "vmtx")
mtxTable = self.font[tableTag] = newTable(tableTag)
roundedMetrics = {}
for gn in metrics:
w, lsb = metrics[gn]
roundedMetrics[gn] = int(round(w)), int(round(lsb))
mtxTable.metrics = roundedMetrics
def setupHorizontalHeader(self, **values):
"""Create a new `hhea` table initialize it with default values,
which can be overridden by keyword arguments.
"""
self._initTableWithValues("hhea", _hheaDefaults, values)
def setupVerticalHeader(self, **values):
"""Create a new `vhea` table initialize it with default values,
which can be overridden by keyword arguments.
"""
self._initTableWithValues("vhea", _vheaDefaults, values)
def setupVerticalOrigins(self, verticalOrigins, defaultVerticalOrigin=None):
"""Create a new `VORG` table. The `verticalOrigins` argument must be
a dict, mapping glyph names to vertical origin values.
The `defaultVerticalOrigin` argument should be the most common vertical
origin value. If omitted, this value will be derived from the actual
values in the `verticalOrigins` argument.
"""
if defaultVerticalOrigin is None:
# find the most frequent vorg value
bag = {}
for gn in verticalOrigins:
vorg = verticalOrigins[gn]
if vorg not in bag:
bag[vorg] = 1
else:
bag[vorg] += 1
defaultVerticalOrigin = sorted(
bag, key=lambda vorg: bag[vorg], reverse=True
)[0]
self._initTableWithValues(
"VORG",
{},
dict(VOriginRecords={}, defaultVertOriginY=defaultVerticalOrigin),
)
vorgTable = self.font["VORG"]
vorgTable.majorVersion = 1
vorgTable.minorVersion = 0
for gn in verticalOrigins:
vorgTable[gn] = verticalOrigins[gn]
def setupPost(self, keepGlyphNames=True, **values):
"""Create a new `post` table and initialize it with default values,
which can be overridden by keyword arguments.
"""
isCFF2 = "CFF2" in self.font
postTable = self._initTableWithValues("post", _postDefaults, values)
if (self.isTTF or isCFF2) and keepGlyphNames:
postTable.formatType = 2.0
postTable.extraNames = []
postTable.mapping = {}
else:
postTable.formatType = 3.0
def setupMaxp(self):
"""Create a new `maxp` table. This is called implicitly by FontBuilder
itself and is usually not called by client code.
"""
if self.isTTF:
defaults = _maxpDefaultsTTF
else:
defaults = _maxpDefaultsOTF
self._initTableWithValues("maxp", defaults, {})
def setupDummyDSIG(self):
"""This adds an empty DSIG table to the font to make some MS applications
happy. This does not properly sign the font.
"""
values = dict(
ulVersion=1,
usFlag=0,
usNumSigs=0,
signatureRecords=[],
)
self._initTableWithValues("DSIG", {}, values)
def addOpenTypeFeatures(self, features, filename=None, tables=None):
"""Add OpenType features to the font from a string containing
Feature File syntax.
The `filename` argument is used in error messages and to determine
where to look for "include" files.
The optional `tables` argument can be a list of OTL tables tags to
build, allowing the caller to only build selected OTL tables. See
`fontTools.feaLib` for details.
"""
from .feaLib.builder import addOpenTypeFeaturesFromString
addOpenTypeFeaturesFromString(
self.font, features, filename=filename, tables=tables
)
def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"):
"""Add conditional substitutions to a Variable Font.
See `fontTools.varLib.featureVars.addFeatureVariations`.
"""
from .varLib import featureVars
if "fvar" not in self.font:
raise KeyError("'fvar' table is missing; can't add FeatureVariations.")
featureVars.addFeatureVariations(
self.font, conditionalSubstitutions, featureTag=featureTag
)
def setupCOLR(self, colorLayers, version=None, varStore=None):
"""Build new COLR table using color layers dictionary.
Cf. `fontTools.colorLib.builder.buildCOLR`.
"""
from fontTools.colorLib.builder import buildCOLR
glyphMap = self.font.getReverseGlyphMap()
self.font["COLR"] = buildCOLR(
colorLayers, version=version, glyphMap=glyphMap, varStore=varStore
)
def setupCPAL(self, palettes, paletteTypes=None, paletteLabels=None,
              paletteEntryLabels=None):
    """Build new CPAL table using list of palettes.

    Optionally build a CPAL v1 table using paletteTypes, paletteLabels and
    paletteEntryLabels.
    Cf. `fontTools.colorLib.builder.buildCPAL`.
    """
    from fontTools.colorLib.builder import buildCPAL

    # The name table (if present) is used to store palette (entry) labels.
    nameTable = self.font.get("name")
    self.font["CPAL"] = buildCPAL(palettes,
                                  paletteTypes=paletteTypes,
                                  paletteLabels=paletteLabels,
                                  paletteEntryLabels=paletteEntryLabels,
                                  nameTable=nameTable)
def setupStat(self, axes, locations=None, elidedFallbackName=2):
    """Build a new 'STAT' table for this font.

    See `fontTools.otlLib.builder.buildStatTable` for details about the
    arguments.
    """
    from .otlLib.builder import buildStatTable

    buildStatTable(self.font, axes, locations, elidedFallbackName)
def buildCmapSubTable(cmapping, format, platformID, platEncID):
    """Create a cmap subtable of the given ``format`` for the given
    platform/encoding IDs, initialized with the ``cmapping`` dict.

    The subtable language is always set to 0.
    """
    subtable = cmap_classes[format](format)
    subtable.cmap = cmapping
    subtable.platformID = platformID
    subtable.platEncID = platEncID
    subtable.language = 0
    return subtable
def addFvar(font, axes, instances):
    """Build an 'fvar' table for *font* from axis and instance definitions.

    *axes* items are either ``(tag, minValue, defaultValue, maxValue, name)``
    tuples or objects with ``tag``/``minimum``/``default``/``maximum``/``name``
    attributes. *instances* items are either dicts with ``"location"``,
    ``"stylename"`` and optional ``"postscriptfontname"`` keys, or objects
    with the corresponding attributes. Names may be plain strings (treated as
    English) or language dicts.
    """
    from .ttLib.tables._f_v_a_r import Axis, NamedInstance

    # An fvar table without axes makes no sense.
    assert axes

    fvar = newTable("fvar")
    nameTable = font["name"]

    for axis_def in axes:
        axis = Axis()

        if isinstance(axis_def, tuple):
            (
                axis.axisTag,
                axis.minValue,
                axis.defaultValue,
                axis.maxValue,
                name,
            ) = axis_def
        else:
            (axis.axisTag, axis.minValue, axis.defaultValue, axis.maxValue, name) = (
                axis_def.tag,
                axis_def.minimum,
                axis_def.default,
                axis_def.maximum,
                axis_def.name,
            )

        # A bare string becomes an English-only multilingual name.
        if isinstance(name, str):
            name = dict(en=name)

        axis.axisNameID = nameTable.addMultilingualName(name, ttFont=font)
        fvar.axes.append(axis)

    for instance in instances:
        if isinstance(instance, dict):
            coordinates = instance["location"]
            name = instance["stylename"]
            psname = instance.get("postscriptfontname")
        else:
            coordinates = instance.location
            name = instance.localisedStyleName or instance.styleName
            psname = instance.postScriptFontName

        if isinstance(name, str):
            name = dict(en=name)

        inst = NamedInstance()
        inst.subfamilyNameID = nameTable.addMultilingualName(name, ttFont=font)
        # The PostScript name ID is optional and only added when provided.
        if psname is not None:
            inst.postscriptNameID = nameTable.addName(psname)
        inst.coordinates = coordinates
        fvar.instances.append(inst)

    font["fvar"] = fvar
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/fontBuilder.py",
"copies": "5",
"size": "31000",
"license": "apache-2.0",
"hash": -1183972799543810800,
"line_mean": 31.7349524815,
"line_max": 104,
"alpha_frac": 0.623,
"autogenerated": false,
"ratio": 3.7349397590361444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6857939759036145,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Formatter', 'NamespacedFormatter', 'FormatterMixin']
class Formatter(object):
    """\
    Base formatter: renders the internal representation of a key or token.

    Storage backends such as Redis or SQL databases frequently need to
    prefix keys and columns to avoid name clashes.

    .. admonition:: Subclassing

       Subclasses should implement ``format_key(key)`` and
       ``format_token(token)``.
    """

    def format_key(self, key):
        """Return the formatted representation of ``key`` (identity here)."""
        return key

    def format_token(self, token):
        """Return the formatted representation of ``token`` (identity here)."""
        return token
class FormatterMixin(object):
    """Delegates key/token formatting to ``self.formatter``."""

    def format_key(self, key):
        return self.formatter.format_key(key)

    def format_token(self, token):
        return self.formatter.format_token(token)

    def format_pair(self, pair):
        # Format both halves of a (key, token) pair in one go.
        return (self.formatter.format_key(pair.key),
                self.formatter.format_token(pair.token))
class NamespacedFormatter(object):
    """\
    Prefixes keys and tokens with a `namespace` string.

    :param namespace: a string to prefix to keys and tokens.
    """
    separator = ':'

    def __init__(self, namespace):
        self.ns = namespace

    def _join(self, kind, value):
        # Produces ns<sep>kind<sep>value, e.g. "app:keys:user".
        sep = self.separator
        return '{0}{1}{2}{3}{4}'.format(self.ns, sep, kind, sep, value)

    def format_key(self, key):
        return self._join('keys', key)

    def format_token(self, token):
        return self._join('tokens', token)
| {
"repo_name": "clibc/shorten",
"path": "shorten/formatter.py",
"copies": "1",
"size": "1489",
"license": "mit",
"hash": 4141860844120873000,
"line_mean": 24.2372881356,
"line_max": 80,
"alpha_frac": 0.6272666219,
"autogenerated": false,
"ratio": 3.7888040712468194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9776810587140397,
"avg_score": 0.027852021201284504,
"num_lines": 59
} |
"""All formatters from this pacakge should be easily mixed whith default ones using this pattern:
>>> from code_formatter.base import formatters
>>> from code_formatter import extras
>>> custom_formatters = formatters.copy()
>>> custom_formatters.register(extras.UnbreakableTupleFormatter,
extras.ListOfExpressionsWithSingleLineContinuationsFormatter)
"""
import ast
from .. import base
from ..code import CodeBlock, CodeLine
from ..exceptions import NotEnoughSpace
__all__ = ['UnbreakableListOfExpressionFormatter', 'LinebreakingListOfExpressionFormatter',
'UnbreakableTupleFormatter', 'LinebreakingAttributeFormatter']
class ListOfExpressionsWithSingleLineContinuationsFormatter(base.ListOfExpressionsFormatter):
    """List-of-expressions formatter whose continuations stay on single lines."""
    # Overrides the base class flag that allows continuations to span lines.
    multiline_continuation = False
class UnbreakableListOfExpressionFormatter(base.ListOfExpressionsFormatter):
    """Always lays the expression list out as a single-line continuation."""

    def _format_code(self, width, continuation, suffix, line_width=None):
        if not line_width:
            line_width = width
        return self._format_line_continuation(width, continuation, suffix,
                                              line_width)
class LinebreakingListOfExpressionFormatter(base.ListOfExpressionsFormatter):
    """Always lays the expression list out with line breaking."""

    def _format_code(self, width, continuation, suffix, line_width=None):
        effective_width = line_width or width
        return self._format_line_break(width, continuation, suffix,
                                       effective_width)
class UnbreakableTupleFormatter(base.TupleFormatter):
    """Keep tuples in one line - for example:

        [('Alternative', 'Alternative'),
         ('Blues', 'Blues'),
         ('Classical', 'Classical')]
    """
    # Delegate element layout to the single-line-only list formatter.
    ListOfExpressionsFormatter = UnbreakableListOfExpressionFormatter
# FIXME: we should refactor this so "fallback" behaviour will be provided
# by generic Formatter aggregator
class CallFormatterWithLinebreakingFallback(base.CallFormatter):
    """Call formatter that first tries the standard layout and, when it does
    not fit, retries with the argument list broken onto an indented line."""

    def _format_code(self, width, continuation, suffix):
        try:
            return super(CallFormatterWithLinebreakingFallback, self)._format_code(width, continuation, suffix)
        except NotEnoughSpace:
            # Without arguments there is nothing to break, so give up.
            if not self._arguments_formatters:
                raise
            suffix = self._append_to_suffix(suffix, ')')
            # Progressively narrow the function part until the broken-out
            # argument block fits.
            for i in range(width + 1):
                curr_width = width - i
                block = self._func_formatter.format_code(curr_width)
                block.append_tokens('(')
                try:
                    subblock = self._arguments_formatter.format_code(width -
                                                                     len(CodeLine.INDENT),
                                                                     suffix=suffix)
                except NotEnoughSpace:
                    continue
                else:
                    # FIXME: this is really ugly way to detect last method access subexpression
                    indent = max(unicode(block.last_line).rfind('.'), 0) + len(CodeLine.INDENT)
                    if indent + 1 >= block.last_line.width:
                        continue
                    block.extend(subblock, indent=indent)
                    break
            return block
class LinebreakingAttributeFormatter(base.AttributeFormatter):
    """This is really experimental (as its API requires cleanup and it hacks
    `ast` structure in many places) formatter.

    It handles line breaking on attribute references, and aligns indentation to
    the first attribute reference in the expression. For example this piece:

        instance.method().attribute

    can be formatted into:

        (instance.method()
                 .attribute)

    During registration this formatter replaces `AttributeFormatter` (which is quite obvious) but also
    `CallFormatter` and `SubscriptionFormatter` by formatters derived from the current ones - so simple
    `formatters.register(LinebreakingAttributeFormatter)` follows below logic:

    >>> from ast import Attribute, Call, Subscript
    >>> from code_formatter import base, format_code
    >>> from code_formatter.extras import LinebreakingAttributeFormatter
    >>> formatters = dict(base.formatters,
    ...                   **{Call: LinebreakingAttributeFormatter.call_formatter_factory(base.formatters[ast.Call]),
    ...                      Attribute: LinebreakingAttributeFormatter,
    ...                      Subscript: LinebreakingAttributeFormatter.subscription_formatter_factory(base.formatters[ast.Subscript])})
    >>> print format_code('instance.identifier.identifier()',
    ...                   formatters_register=formatters, width=3, force=True)
    (instance.identifier
             .identifier())
    """

    class AttrsRefsListFormatter(base.ListOfExpressionsFormatter):
        # Attribute references are joined with dots instead of commas.
        separator = '.'

    class _IdentifierFormatter(base.CodeFormatter):
        """Formats a bare identifier (an attribute or method name)."""

        def __init__(self, identifier, formatters_register, parent):
            self.identifier = identifier
            self.parent = parent
            super(LinebreakingAttributeFormatter._IdentifierFormatter,
                  self).__init__(formatters_register)

        def _format_code(self, width, continuation, suffix):
            block = CodeBlock.from_tokens(self.identifier)
            if suffix is not None:
                block.merge(suffix)
            return block

    @classmethod
    def call_formatter_factory(cls, CallFormatter):
        class RedirectingCallFormatter(CallFormatter):
            def __new__(cls, expr, formatters_register, parent=None, func_formatter=None):
                # if func_formatter is not provided check whether we are not part of method call
                if func_formatter is None and isinstance(expr.func, ast.Attribute):
                    return LinebreakingAttributeFormatter(expr, formatters_register, parent)
                return super(RedirectingCallFormatter, cls).__new__(cls, expr=expr,
                                                                    formatters_register=formatters_register,
                                                                    parent=parent, func_formatter=func_formatter)

            def __init__(self, expr, formatters_register, parent=None, func_formatter=None):
                super(RedirectingCallFormatter, self).__init__(expr, formatters_register, parent)
                if func_formatter:
                    self._func_formatter = func_formatter
        return RedirectingCallFormatter

    @classmethod
    def subscription_formatter_factory(cls, SubscriptionFormatter):
        class RedirectingSubsriptionFormatter(SubscriptionFormatter):
            def __new__(cls, expr, formatters_register, parent=None, value_formatter=None):
                # if value_formatter is not provided check whether we are not part of attribute ref
                if value_formatter is None and isinstance(expr.value, ast.Attribute):
                    return LinebreakingAttributeFormatter(expr, formatters_register, parent)
                return super(RedirectingSubsriptionFormatter, cls).__new__(cls,
                                                                           expr=expr,
                                                                           formatters_register=formatters_register,
                                                                           parent=parent, value_formatter=value_formatter)

            def __init__(self, expr, formatters_register, parent=None, value_formatter=None):
                super(RedirectingSubsriptionFormatter, self).__init__(expr, formatters_register, parent)
                if value_formatter:
                    self._value_formatter = value_formatter
        return RedirectingSubsriptionFormatter

    @classmethod
    def register(cls, formatters_register):
        """Install this formatter plus redirecting Call/Subscript wrappers
        into ``formatters_register`` and return the register."""
        formatters_register[ast.Attribute] = cls
        formatters_register[ast.Subscript] = cls.subscription_formatter_factory(formatters_register[ast.Subscript])
        formatters_register[ast.Call] = cls.call_formatter_factory(formatters_register[ast.Call])
        return formatters_register

    def __init__(self, *args, **kwargs):
        super(base.AttributeFormatter, self).__init__(*args, **kwargs)
        self._attrs_formatters = []
        expr = self.expr
        # Walk down the attribute-reference chain (possibly interleaved with
        # calls and subscriptions), collecting a formatter for each step.
        while (isinstance(expr, ast.Attribute) or
               isinstance(expr, ast.Call) and
               isinstance(expr.func, ast.Attribute) or
               isinstance(expr, ast.Subscript) and
               isinstance(expr.value, ast.Attribute)):
            if isinstance(expr, ast.Attribute):
                self._attrs_formatters.insert(0,
                                              LinebreakingAttributeFormatter._IdentifierFormatter(expr.attr,
                                                                                                  self.formatters_register,
                                                                                                  parent=self))
                expr = expr.value
            elif isinstance(expr, ast.Call):
                # FIXME: how to fix parent?? should we change type of parent to ast type?
                func_formatter = LinebreakingAttributeFormatter._IdentifierFormatter(
                    expr.func.attr,
                    self.formatters_register,
                    parent=self)
                CallFormatter = self.get_formatter_class(expr)
                call_formater = CallFormatter(func_formatter=func_formatter, expr=expr,
                                              formatters_register=self.formatters_register, parent=self)
                self._attrs_formatters.insert(0, call_formater)
                expr = expr.func.value
            elif isinstance(expr, ast.Subscript):
                # FIXME: how to fix parent?? should we change type of parent to ast type?
                value_formatter = LinebreakingAttributeFormatter._IdentifierFormatter(
                    expr.value.attr,
                    self.formatters_register,
                    parent=self)
                SubscriptionFormatter = self.get_formatter_class(expr)
                subscription_formatter = SubscriptionFormatter(value_formatter=value_formatter, expr=expr,
                                                               formatters_register=self.formatters_register,
                                                               parent=self)
                self._attrs_formatters.insert(0, subscription_formatter)
                expr = expr.value.value
        self.value_formatter = self.get_formatter(expr)

    def _format_code(self, width, continuation, suffix):
        def _format(continuation, prefix=None):
            block = CodeBlock.from_tokens(prefix) if prefix else CodeBlock()
            # Try successively narrower widths for the value part until the
            # attribute chain fits after it.
            for i in range(0, width - block.width + 1):
                block.merge(self.value_formatter.format_code(width - block.width - i))
                separator = CodeBlock.from_tokens('.')
                attr_ref_indent = block.width
                block.merge(separator.copy())
                try:
                    block.merge(self._attrs_formatters[0]
                                    .format_code(width - block.last_line.width, False,
                                                 suffix=(suffix if len(self._attrs_formatters) == 1
                                                         else None)))
                    for attr_formatter in self._attrs_formatters[1:]:
                        # Only the last attribute in the chain carries the suffix.
                        s = suffix if self._attrs_formatters[-1] == attr_formatter else None
                        try:
                            attr_block = attr_formatter.format_code(width - block.last_line.width -
                                                                    separator.width,
                                                                    False, suffix=s)
                        except NotEnoughSpace:
                            if not continuation:
                                raise
                            # Break the line and align the dot under the first
                            # attribute reference.
                            block.extend(separator, indent=attr_ref_indent)
                            block.merge(attr_formatter.format_code(width - attr_ref_indent, continuation, suffix=s))
                        else:
                            block.merge(separator)
                            block.merge(attr_block)
                except NotEnoughSpace:
                    # Start over with a narrower value part.
                    block = CodeBlock.from_tokens(prefix) if prefix else CodeBlock()
                    continue
                return block
        try:
            return _format(continuation)
        except NotEnoughSpace:
            if continuation:
                raise
            # Wrap the whole expression in parentheses and retry with line
            # continuations allowed.
            suffix = self._append_to_suffix(suffix, ')')
            return _format(True, '(')
| {
"repo_name": "paluh/code-formatter",
"path": "code_formatter/extras/__init__.py",
"copies": "1",
"size": "12919",
"license": "bsd-3-clause",
"hash": 5538142410787569000,
"line_mean": 51.0927419355,
"line_max": 139,
"alpha_frac": 0.5620404056,
"autogenerated": false,
"ratio": 5.3494824016563145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6411522807256316,
"avg_score": null,
"num_lines": null
} |
__all__ = ('ForwardRefPolicy', 'TypeHintWarning', 'typechecked', 'check_return_type',
'check_argument_types', 'check_type', 'TypeWarning', 'TypeChecker',
'typeguard_ignore')
import collections.abc
import gc
import inspect
import sys
import threading
from collections import OrderedDict
from enum import Enum
from functools import wraps, partial
from inspect import Parameter, isclass, isfunction, isgeneratorfunction
from io import TextIOBase, RawIOBase, IOBase, BufferedIOBase
from traceback import extract_stack, print_stack
from types import CodeType, FunctionType
from typing import (
Callable, Any, Union, Dict, List, TypeVar, Tuple, Set, Sequence, get_type_hints, TextIO,
Optional, IO, BinaryIO, Type, Generator, overload, Iterable, AsyncIterable, Iterator,
AsyncIterator, AbstractSet, TYPE_CHECKING)
from unittest.mock import Mock
from warnings import warn
from weakref import WeakKeyDictionary, WeakValueDictionary
# Python 3.8+
try:
from typing_extensions import Literal
except ImportError:
try:
from typing import Literal
except ImportError:
Literal = None
# Python 3.5.4+ / 3.6.2+
try:
from typing_extensions import NoReturn
except ImportError:
try:
from typing import NoReturn
except ImportError:
NoReturn = None
# Python 3.6+
try:
from inspect import isasyncgenfunction, isasyncgen
from typing import AsyncGenerator
except ImportError:
AsyncGenerator = None
def isasyncgen(obj):
return False
def isasyncgenfunction(func):
return False
# Python 3.8+
try:
from typing import ForwardRef
evaluate_forwardref = ForwardRef._evaluate
except ImportError:
from typing import _ForwardRef as ForwardRef
evaluate_forwardref = ForwardRef._eval_type
if TYPE_CHECKING:
    _F = TypeVar("_F")

    def typeguard_ignore(f: _F) -> _F:
        """This decorator is a noop during static type-checking."""
        return f
else:
    # At runtime the decorator is simply typing.no_type_check.
    from typing import no_type_check as typeguard_ignore

# Per-function cache of resolved type hints, and a code-object -> function
# cache used by find_function(); both weak so they don't keep functions alive.
_type_hints_map = WeakKeyDictionary()  # type: Dict[FunctionType, Dict[str, Any]]
_functions_map = WeakValueDictionary()  # type: Dict[CodeType, FunctionType]

# Sentinel distinguishing "value not supplied" from an explicit None.
_missing = object()

T_CallableOrType = TypeVar('T_CallableOrType', bound=Callable[..., Any])
class ForwardRefPolicy(Enum):
    """Defines how unresolved forward references are handled."""

    ERROR = 1  #: propagate the :exc:`NameError` from :func:`~typing.get_type_hints`
    WARN = 2  #: remove the annotation and emit a TypeHintWarning
    #: replace the annotation with the argument's class if the qualified name matches, else remove
    #: the annotation
    GUESS = 3
class TypeHintWarning(UserWarning):
    """
    A warning that is emitted when a type hint in string form could not be resolved to an actual
    type.
    """
class _TypeCheckMemo:
    """Holds the global/local namespaces used for forward reference
    resolution, plus the TypeVar bindings accumulated during a check."""

    __slots__ = 'globals', 'locals', 'typevars'

    def __init__(self, globals: Dict[str, Any], locals: Dict[str, Any]):
        self.globals = globals
        self.locals = locals
        self.typevars = {}  # type: Dict[Any, type]
def _strip_annotation(annotation):
if isinstance(annotation, str):
return annotation.strip("'")
else:
return annotation
class _CallMemo(_TypeCheckMemo):
    """Memo for a single function call: captures the bound arguments and the
    (cached) type hints of the function, resolving forward references
    according to ``forward_refs_policy``."""

    __slots__ = 'func', 'func_name', 'arguments', 'is_generator', 'type_hints'

    def __init__(self, func: Callable, frame_locals: Optional[Dict[str, Any]] = None,
                 args: tuple = None, kwargs: Dict[str, Any] = None,
                 forward_refs_policy=ForwardRefPolicy.ERROR):
        super().__init__(func.__globals__, frame_locals)
        self.func = func
        self.func_name = function_name(func)
        self.is_generator = isgeneratorfunction(func)
        signature = inspect.signature(func)

        if args is not None and kwargs is not None:
            self.arguments = signature.bind(*args, **kwargs).arguments
        else:
            assert frame_locals is not None, 'frame must be specified if args or kwargs is None'
            self.arguments = frame_locals

        self.type_hints = _type_hints_map.get(func)
        if self.type_hints is None:
            # Retry loop: each failed forward reference may remove one
            # annotation (WARN/GUESS policy) and try again.
            while True:
                if sys.version_info < (3, 5, 3):
                    frame_locals = dict(frame_locals)

                try:
                    hints = get_type_hints(func, localns=frame_locals)
                except NameError as exc:
                    if forward_refs_policy is ForwardRefPolicy.ERROR:
                        raise

                    # Extract the unresolved name from the NameError message
                    # and locate the parameter annotated with it.
                    typename = str(exc).split("'", 2)[1]
                    for param in signature.parameters.values():
                        if _strip_annotation(param.annotation) == typename:
                            break
                    else:
                        raise

                    func_name = function_name(func)
                    if forward_refs_policy is ForwardRefPolicy.GUESS:
                        if param.name in self.arguments:
                            argtype = self.arguments[param.name].__class__
                            stripped = _strip_annotation(param.annotation)
                            if stripped == argtype.__qualname__:
                                # The argument's class matches the annotation
                                # string, so substitute it for the annotation.
                                func.__annotations__[param.name] = argtype
                                msg = ('Replaced forward declaration {!r} in {} with {!r}'
                                       .format(stripped, func_name, argtype))
                                warn(TypeHintWarning(msg))
                                continue

                    msg = 'Could not resolve type hint {!r} on {}: {}'.format(
                        param.annotation, function_name(func), exc)
                    warn(TypeHintWarning(msg))
                    del func.__annotations__[param.name]
                else:
                    break

            self.type_hints = OrderedDict()
            for name, parameter in signature.parameters.items():
                if name in hints:
                    annotated_type = hints[name]

                    # Promote a None default to an implicit Optional
                    # (PEP 484 discourages it, but mypy does not complain)
                    if parameter.default is None:
                        annotated_type = Optional[annotated_type]

                    # *args / **kwargs annotations describe each element, so
                    # wrap them in the matching container type.
                    if parameter.kind == Parameter.VAR_POSITIONAL:
                        self.type_hints[name] = Tuple[annotated_type, ...]
                    elif parameter.kind == Parameter.VAR_KEYWORD:
                        self.type_hints[name] = Dict[str, annotated_type]
                    else:
                        self.type_hints[name] = annotated_type

            if 'return' in hints:
                self.type_hints['return'] = hints['return']

            _type_hints_map[func] = self.type_hints
def resolve_forwardref(maybe_ref, memo: _TypeCheckMemo):
    """Evaluate ``maybe_ref`` against the memo's namespaces if it is a
    ForwardRef; otherwise return it unchanged."""
    if not isinstance(maybe_ref, ForwardRef):
        return maybe_ref
    # ForwardRef._evaluate grew an extra (recursion guard) argument in 3.9.
    if sys.version_info < (3, 9, 0):
        return evaluate_forwardref(maybe_ref, memo.globals, memo.locals)
    return evaluate_forwardref(maybe_ref, memo.globals, memo.locals, frozenset())
def get_type_name(type_):
    """Return a display name for a type; typing.* generics don't have a
    __name__ on Python 3.7+, so fall back to _name and finally str()."""
    for attr in ('__name__', '_name'):
        name = getattr(type_, attr, None)
        if name:
            return name
    return str(type_)
def find_function(frame) -> Optional[Callable]:
    """
    Return a function object from the garbage collector that matches the frame's code object.

    This process is unreliable as several function objects could use the same code object.
    Fortunately the likelihood of this happening with the combination of the function objects
    having different type annotations is a very rare occurrence.

    :param frame: a frame object
    :return: a function object if exactly one match was found, ``None`` if the
        match was ambiguous (several functions share the code object)
    :raises LookupError: if no function using this code object was found
    """
    func = _functions_map.get(frame.f_code)
    if func is None:
        for obj in gc.get_referrers(frame.f_code):
            if inspect.isfunction(obj):
                if func is None:
                    # The first match was found
                    func = obj
                else:
                    # A second match was found -- ambiguous, so give up
                    return None

        # Cache the result for future lookups
        if func is not None:
            _functions_map[frame.f_code] = func
        else:
            raise LookupError('target function not found')

    return func
def qualified_name(obj) -> str:
    """
    Return the qualified name (e.g. package.module.Type) for the given object.

    Builtins and types from the :mod:`typing` package get special treatment by having the module
    name stripped from the generated name.
    """
    cls = obj if inspect.isclass(obj) else type(obj)
    if cls.__module__ in ('typing', 'builtins'):
        return cls.__qualname__
    return '{}.{}'.format(cls.__module__, cls.__qualname__)
def function_name(func: Callable) -> str:
"""
Return the qualified name of the given function.
Builtins and types from the :mod:`typing` package get special treatment by having the module
name stripped from the generated name.
"""
# For partial functions and objects with __call__ defined, __qualname__ does not exist
module = func.__module__
qualname = getattr(func, '__qualname__', repr(func))
return qualname if module == 'builtins' else '{}.{}'.format(module, qualname)
def check_callable(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
    """Check that ``value`` is callable and, for a parametrized
    ``Callable[...]``, that its declared signature can accept the expected
    number of positional arguments."""
    if not callable(value):
        raise TypeError('{} must be a callable'.format(argname))

    if getattr(expected_type, "__args__", None):
        try:
            signature = inspect.signature(value)
        except (TypeError, ValueError):
            # Callables without an introspectable signature get a free pass.
            return

        if hasattr(expected_type, '__result__'):
            # Python 3.5
            argument_types = expected_type.__args__
            check_args = argument_types is not Ellipsis
        else:
            # Python 3.6
            argument_types = expected_type.__args__[:-1]
            check_args = argument_types != (Ellipsis,)

        if check_args:
            # The callable must not have keyword-only arguments without defaults
            unfulfilled_kwonlyargs = [
                param.name for param in signature.parameters.values() if
                param.kind == Parameter.KEYWORD_ONLY and param.default == Parameter.empty]
            if unfulfilled_kwonlyargs:
                raise TypeError(
                    'callable passed as {} has mandatory keyword-only arguments in its '
                    'declaration: {}'.format(argname, ', '.join(unfulfilled_kwonlyargs)))

            # Count positional parameters without defaults; *args absorbs any
            # surplus expected arguments.
            num_mandatory_args = len([
                param.name for param in signature.parameters.values()
                if param.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD) and
                param.default is Parameter.empty])
            has_varargs = any(param for param in signature.parameters.values()
                              if param.kind == Parameter.VAR_POSITIONAL)

            if num_mandatory_args > len(argument_types):
                raise TypeError(
                    'callable passed as {} has too many arguments in its declaration; expected {} '
                    'but {} argument(s) declared'.format(argname, len(argument_types),
                                                         num_mandatory_args))
            elif not has_varargs and num_mandatory_args < len(argument_types):
                raise TypeError(
                    'callable passed as {} has too few arguments in its declaration; expected {} '
                    'but {} argument(s) declared'.format(argname, len(argument_types),
                                                         num_mandatory_args))
def check_dict(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
    """Check that ``value`` is a dict; for a parametrized Dict[K, V], also
    check every key against K and every value against V."""
    if not isinstance(value, dict):
        raise TypeError('type of {} must be a dict; got {} instead'.
                        format(argname, qualified_name(value)))

    if expected_type is dict:
        return
    args = getattr(expected_type, "__args__", None)
    if args is None or args == expected_type.__parameters__:
        # Unparametrized Dict -- nothing more to check.
        return

    key_type, value_type = args
    if key_type is Any and value_type is Any:
        return
    for k, v in value.items():
        check_type('keys of {}'.format(argname), k, key_type, memo)
        check_type('{}[{!r}]'.format(argname, k), v, value_type, memo)
def check_typed_dict(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
    """Validate a dict against a TypedDict: no extra keys, all required keys
    present, and every present value matching its declared type."""
    declared_keys = frozenset(expected_type.__annotations__)
    if hasattr(expected_type, '__required_keys__'):
        required_keys = expected_type.__required_keys__
    else:  # py3.8 and lower
        required_keys = declared_keys if expected_type.__total__ else frozenset()

    existing_keys = frozenset(value)
    extra_keys = existing_keys - declared_keys
    if extra_keys:
        keys_formatted = ', '.join('"{}"'.format(key) for key in sorted(extra_keys))
        raise TypeError('extra key(s) ({}) in {}'.format(keys_formatted, argname))

    missing_keys = required_keys - existing_keys
    if missing_keys:
        keys_formatted = ', '.join('"{}"'.format(key) for key in sorted(missing_keys))
        raise TypeError('required key(s) ({}) missing from {}'.format(keys_formatted, argname))

    # Only keys actually present are type checked (_missing is the sentinel
    # for absent optional keys).
    for key, argtype in get_type_hints(expected_type).items():
        argvalue = value.get(key, _missing)
        if argvalue is not _missing:
            check_type('dict item "{}" for {}'.format(key, argname), argvalue, argtype, memo)
def check_list(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
    """Check that ``value`` is a list; for List[T], check each element
    against T."""
    if not isinstance(value, list):
        raise TypeError('type of {} must be a list; got {} instead'.
                        format(argname, qualified_name(value)))

    if expected_type is list:
        return
    if not hasattr(expected_type, "__args__"):
        return
    if expected_type.__args__ in (None, expected_type.__parameters__):
        # Unparametrized List -- nothing more to check.
        return

    element_type = expected_type.__args__[0]
    if element_type is Any:
        return
    for index, element in enumerate(value):
        check_type('{}[{}]'.format(argname, index), element, element_type, memo)
def check_sequence(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
    """Check that ``value`` is a Sequence; for Sequence[T], check each
    element against T."""
    if not isinstance(value, collections.abc.Sequence):
        raise TypeError('type of {} must be a sequence; got {} instead'.
                        format(argname, qualified_name(value)))

    if not hasattr(expected_type, "__args__"):
        return
    if expected_type.__args__ in (None, expected_type.__parameters__):
        # Unparametrized Sequence -- nothing more to check.
        return

    element_type = expected_type.__args__[0]
    if element_type is Any:
        return
    for index, element in enumerate(value):
        check_type('{}[{}]'.format(argname, index), element, element_type, memo)
def check_set(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
    """Check that ``value`` is a set; for Set[T], check every element
    against T."""
    if not isinstance(value, AbstractSet):
        raise TypeError('type of {} must be a set; got {} instead'.
                        format(argname, qualified_name(value)))

    if expected_type is set:
        return
    if not hasattr(expected_type, "__args__"):
        return
    if expected_type.__args__ in (None, expected_type.__parameters__):
        # Unparametrized Set -- nothing more to check.
        return

    element_type = expected_type.__args__[0]
    if element_type is Any:
        return
    for element in value:
        check_type('elements of {}'.format(argname), element, element_type, memo)
def check_tuple(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
    """Check a tuple value: NamedTuples field by field, Tuple[X, ...]
    uniformly, fixed-length Tuple[...] element by element."""
    # Specialized check for NamedTuples
    is_named_tuple = False
    if sys.version_info < (3, 8, 0):
        is_named_tuple = hasattr(expected_type, '_field_types')  # deprecated since python 3.8
    else:
        is_named_tuple = hasattr(expected_type, '__annotations__')

    if is_named_tuple:
        if not isinstance(value, expected_type):
            raise TypeError('type of {} must be a named tuple of type {}; got {} instead'.
                            format(argname, qualified_name(expected_type), qualified_name(value)))

        if sys.version_info < (3, 8, 0):
            field_types = expected_type._field_types
        else:
            field_types = expected_type.__annotations__

        for name, field_type in field_types.items():
            check_type('{}.{}'.format(argname, name), getattr(value, name), field_type, memo)

        return
    elif not isinstance(value, tuple):
        raise TypeError('type of {} must be a tuple; got {} instead'.
                        format(argname, qualified_name(value)))

    if getattr(expected_type, '__tuple_params__', None):
        # Python 3.5
        use_ellipsis = expected_type.__tuple_use_ellipsis__
        tuple_params = expected_type.__tuple_params__
    elif getattr(expected_type, '__args__', None):
        # Python 3.6+
        use_ellipsis = expected_type.__args__[-1] is Ellipsis
        tuple_params = expected_type.__args__[:-1 if use_ellipsis else None]
    else:
        # Unparametrized Tuple or plain tuple
        return

    if use_ellipsis:
        # Homogeneous, arbitrary-length tuple: Tuple[X, ...]
        element_type = tuple_params[0]
        for i, element in enumerate(value):
            check_type('{}[{}]'.format(argname, i), element, element_type, memo)
    elif tuple_params == ((),):
        # Tuple[()] -- the empty tuple type
        if value != ():
            raise TypeError('{} is not an empty tuple but one was expected'.format(argname))
    else:
        if len(value) != len(tuple_params):
            raise TypeError('{} has wrong number of elements (expected {}, got {} instead)'
                            .format(argname, len(tuple_params), len(value)))

        for i, (element, element_type) in enumerate(zip(value, tuple_params)):
            check_type('{}[{}]'.format(argname, i), element, element_type, memo)
def check_union(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
    """Accept ``value`` if it matches any member of the Union; otherwise
    raise a TypeError naming all members."""
    if hasattr(expected_type, '__union_params__'):
        # Python 3.5
        union_params = expected_type.__union_params__
    else:
        # Python 3.6+
        union_params = expected_type.__args__

    for candidate_type in union_params:
        try:
            check_type(argname, value, candidate_type, memo)
        except TypeError:
            continue
        else:
            return

    typelist = ', '.join(get_type_name(t) for t in union_params)
    raise TypeError('type of {} must be one of ({}); got {} instead'.
                    format(argname, typelist, qualified_name(value)))
def check_class(argname: str, value, expected_type, memo: _TypeCheckMemo) -> None:
    """Check a value annotated as Type[...]: it must be a class, and for a
    parametrized Type[C] also a subclass of C (or satisfy the TypeVar)."""
    if not isclass(value):
        raise TypeError('type of {} must be a type; got {} instead'.format(
            argname, qualified_name(value)))

    # Needed on Python 3.7+
    if expected_type is Type:
        return

    expected_class = None
    if hasattr(expected_type, "__args__") and expected_type.__args__:
        expected_class = expected_type.__args__[0]

    if expected_class:
        if expected_class is Any:
            return
        elif isinstance(expected_class, TypeVar):
            # subclass_check=True: compare the class itself, not an instance
            check_typevar(argname, value, expected_class, memo, True)
        elif not issubclass(value, expected_class):
            raise TypeError('{} must be a subclass of {}; got {} instead'.format(
                argname, qualified_name(expected_class), qualified_name(value)))
def check_typevar(argname: str, value, typevar: TypeVar, memo: _TypeCheckMemo,
                  subclass_check: bool = False) -> None:
    """Check ``value`` against a TypeVar, honoring its constraints or bound
    and its variance, then bind the TypeVar to the value's type on first use
    so later occurrences in the same call must agree."""
    bound_type = resolve_forwardref(memo.typevars.get(typevar, typevar.__bound__), memo)
    value_type = value if subclass_check else type(value)
    subject = argname if subclass_check else 'type of ' + argname
    if bound_type is None:
        # The type variable hasn't been bound yet -- check that the given value matches the
        # constraints of the type variable, if any
        if typevar.__constraints__:
            constraints = [resolve_forwardref(c, memo) for c in typevar.__constraints__]
            if value_type not in constraints:
                typelist = ', '.join(get_type_name(t) for t in constraints if t is not object)
                raise TypeError('{} must be one of ({}); got {} instead'.
                                format(subject, typelist, qualified_name(value_type)))
    elif typevar.__covariant__ or typevar.__bound__:
        # Covariant (or explicitly bounded): subclasses are acceptable
        if not issubclass(value_type, bound_type):
            raise TypeError(
                '{} must be {} or one of its subclasses; got {} instead'.
                format(subject, qualified_name(bound_type), qualified_name(value_type)))
    elif typevar.__contravariant__:
        # Contravariant: superclasses are acceptable
        if not issubclass(bound_type, value_type):
            raise TypeError(
                '{} must be {} or one of its superclasses; got {} instead'.
                format(subject, qualified_name(bound_type), qualified_name(value_type)))
    else:  # invariant
        if value_type is not bound_type:
            raise TypeError(
                '{} must be exactly {}; got {} instead'.
                format(subject, qualified_name(bound_type), qualified_name(value_type)))

    if typevar not in memo.typevars:
        # Bind the type variable to a concrete type
        memo.typevars[typevar] = value_type
def check_literal(argname: str, value, expected_type, memo: _TypeCheckMemo):
    """Check that ``value`` equals one of the values declared by a Literal
    type; nested Literals are flattened recursively."""
    def get_args(literal):
        try:
            args = literal.__args__
        except AttributeError:
            # Instance of Literal from typing_extensions
            args = literal.__values__

        retval = []
        for arg in args:
            if isinstance(arg, Literal.__class__) or getattr(arg, '__origin__', None) is Literal:
                # The first check works on py3.6 and lower, the second one on py3.7+
                retval.extend(get_args(arg))
            elif isinstance(arg, (int, str, bytes, bool, type(None), Enum)):
                retval.append(arg)
            else:
                raise TypeError('Illegal literal value: {}'.format(arg))

        return retval

    final_args = tuple(get_args(expected_type))
    if value not in final_args:
        raise TypeError('the value of {} must be one of {}; got {} instead'.
                        format(argname, final_args, value))
def check_number(argname: str, value, expected_type):
    """Apply the numeric tower: accept int/float where complex is expected,
    and int where float is expected."""
    if expected_type is complex:
        if not isinstance(value, (complex, float, int)):
            raise TypeError('type of {} must be either complex, float or int; got {} instead'.
                            format(argname, qualified_name(value.__class__)))
    elif expected_type is float:
        if not isinstance(value, (float, int)):
            raise TypeError('type of {} must be either float or int; got {} instead'.
                            format(argname, qualified_name(value.__class__)))
def check_io(argname: str, value, expected_type):
    """Check that ``value`` is an I/O object compatible with ``expected_type``.

    ``TextIO`` requires a text-mode stream, ``BinaryIO`` a binary-mode
    stream, and plain ``IO`` accepts any :class:`io.IOBase` instance.
    """
    if expected_type is TextIO:
        if isinstance(value, TextIOBase):
            return
        raise TypeError('type of {} must be a text based I/O object; got {} instead'.
                        format(argname, qualified_name(value.__class__)))
    if expected_type is BinaryIO:
        if isinstance(value, (RawIOBase, BufferedIOBase)):
            return
        raise TypeError('type of {} must be a binary I/O object; got {} instead'.
                        format(argname, qualified_name(value.__class__)))
    if not isinstance(value, IOBase):
        raise TypeError('type of {} must be an I/O object; got {} instead'.
                        format(argname, qualified_name(value.__class__)))
def check_protocol(argname: str, value, expected_type):
    """Check ``value`` against a :class:`typing.Protocol` type.

    Only runtime-checkable protocols can be verified with ``isinstance()``;
    non-runtime protocols are silently accepted.
    """
    # TODO: implement proper compatibility checking and support non-runtime protocols
    is_runtime = getattr(expected_type, '_is_runtime_protocol', False)
    if is_runtime and not isinstance(value, expected_type):
        raise TypeError('type of {} ({}) is not compatible with the {} protocol'.
                        format(argname, type(value).__qualname__, expected_type.__qualname__))
# Equality checks are applied to these
# Maps a generic alias's __origin__ (or a bare generic type) to the checker
# function that knows how to validate values against that kind of type.
origin_type_checkers = {
    AbstractSet: check_set,
    Callable: check_callable,
    collections.abc.Callable: check_callable,
    dict: check_dict,
    Dict: check_dict,
    list: check_list,
    List: check_list,
    Sequence: check_sequence,
    collections.abc.Sequence: check_sequence,
    collections.abc.Set: check_set,
    set: check_set,
    Set: check_set,
    tuple: check_tuple,
    Tuple: check_tuple,
    type: check_class,
    Type: check_class,
    Union: check_union
}
# Very old typing releases exposed __union_set_params__ and allowed
# issubclass() checks against Union; detect that capability once at import.
_subclass_check_unions = hasattr(Union, '__union_set_params__')
# Literal is presumably bound to None when unavailable on this Python;
# only register its checker when present (TODO confirm against the imports)
if Literal is not None:
    origin_type_checkers[Literal] = check_literal
# Return-annotation origins for which a sync generator's yields get checked
generator_origin_types = (Generator, collections.abc.Generator,
                          Iterator, collections.abc.Iterator,
                          Iterable, collections.abc.Iterable)
# Origins recognised as async generator/iterator return annotations
asyncgen_origin_types = (AsyncIterator, collections.abc.AsyncIterator,
                         AsyncIterable, collections.abc.AsyncIterable)
# AsyncGenerator may be None (typing) or absent (collections.abc) on older
# Pythons, so extend the tuple conditionally.
if AsyncGenerator is not None:
    asyncgen_origin_types += (AsyncGenerator,)
if hasattr(collections.abc, 'AsyncGenerator'):
    asyncgen_origin_types += (collections.abc.AsyncGenerator,)
def check_type(argname: str, value, expected_type, memo: Optional[_TypeCheckMemo] = None, *,
               globals: Optional[Dict[str, Any]] = None,
               locals: Optional[Dict[str, Any]] = None) -> None:
    """
    Ensure that ``value`` matches ``expected_type``.
    The types from the :mod:`typing` module do not support :func:`isinstance` or :func:`issubclass`
    so a number of type specific checks are required. This function knows which checker to call
    for which type.
    :param argname: name of the argument to check; used for error messages
    :param value: value to be checked against ``expected_type``
    :param expected_type: a class or generic type instance
    :param globals: dictionary of global variables to use for resolving forward references
        (defaults to the calling frame's globals)
    :param locals: dictionary of local variables to use for resolving forward references
        (defaults to the calling frame's locals)
    :raises TypeError: if ``value`` is not compatible with ``expected_type``
    """
    # Any matches everything; Mock objects impersonate any type, so skip both
    if expected_type is Any or isinstance(value, Mock):
        return
    if expected_type is None:
        # Only happens on < 3.6
        expected_type = type(None)
    if memo is None:
        # Capture the caller's namespaces so forward references can resolve
        frame = sys._getframe(1)
        if globals is None:
            globals = frame.f_globals
        if locals is None:
            locals = frame.f_locals
        memo = _TypeCheckMemo(globals, locals)
    expected_type = resolve_forwardref(expected_type, memo)
    # Parametrized generics carry their base type in __origin__; dispatch on it
    origin_type = getattr(expected_type, '__origin__', None)
    if origin_type is not None:
        checker_func = origin_type_checkers.get(origin_type)
        if checker_func:
            checker_func(argname, value, expected_type, memo)
        else:
            # No dedicated checker: fall back to checking against the bare origin
            check_type(argname, value, origin_type, memo)
    elif isclass(expected_type):
        # Plain classes and old-style (pre-3.7) typing generics
        if issubclass(expected_type, Tuple):
            check_tuple(argname, value, expected_type, memo)
        elif issubclass(expected_type, (float, complex)):
            check_number(argname, value, expected_type)
        elif _subclass_check_unions and issubclass(expected_type, Union):
            check_union(argname, value, expected_type, memo)
        elif isinstance(expected_type, TypeVar):
            check_typevar(argname, value, expected_type, memo)
        elif issubclass(expected_type, IO):
            check_io(argname, value, expected_type)
        elif issubclass(expected_type, dict) and hasattr(expected_type, '__annotations__'):
            # A dict subclass with __annotations__ is treated as a TypedDict
            check_typed_dict(argname, value, expected_type, memo)
        elif getattr(expected_type, '_is_protocol', False):
            check_protocol(argname, value, expected_type)
        else:
            # Reduce a legacy generic alias to its concrete runtime class
            expected_type = (getattr(expected_type, '__extra__', None) or origin_type or
                             expected_type)
            if expected_type is bytes:
                # As per https://github.com/python/typing/issues/552
                if not isinstance(value, (bytearray, bytes, memoryview)):
                    raise TypeError('type of {} must be bytes-like; got {} instead'
                                    .format(argname, qualified_name(value)))
            elif not isinstance(value, expected_type):
                raise TypeError(
                    'type of {} must be {}; got {} instead'.
                    format(argname, qualified_name(expected_type), qualified_name(value)))
    elif isinstance(expected_type, TypeVar):
        # Only happens on < 3.6
        check_typevar(argname, value, expected_type, memo)
    elif isinstance(expected_type, Literal.__class__):
        # Only happens on < 3.7 when using Literal from typing_extensions
        check_literal(argname, value, expected_type, memo)
    elif (isfunction(expected_type) and
            getattr(expected_type, "__module__", None) == "typing" and
            getattr(expected_type, "__qualname__", None).startswith("NewType.") and
            hasattr(expected_type, "__supertype__")):
        # typing.NewType, should check against supertype (recursively)
        return check_type(argname, value, expected_type.__supertype__, memo)
def check_return_type(retval, memo: Optional[_CallMemo] = None) -> bool:
    """
    Check that the return value is compatible with the return value annotation in the function.
    :param retval: the value about to be returned from the call
    :return: ``True``
    :raises TypeError: if there is a type mismatch
    """
    if memo is None:
        # sys._getframe() is faster than inspect.currentframe(), though not
        # officially supported by every Python implementation
        caller = sys._getframe(1)
        try:
            func = find_function(caller)
        except LookupError:
            # Happens with the Pydev/PyCharm debugger extension installed
            return True
        memo = _CallMemo(func, caller.f_locals)
    if 'return' not in memo.type_hints:
        return True
    annotation = memo.type_hints['return']
    if annotation is NoReturn:
        raise TypeError('{}() was declared never to return but it did'.format(memo.func_name))
    try:
        check_type('the return value', retval, annotation, memo)
    except TypeError as exc:  # suppress unnecessarily long tracebacks
        raise TypeError(*exc.args) from None
    return True
def check_argument_types(memo: Optional[_CallMemo] = None) -> bool:
    """
    Check that the argument values match the annotated types.
    Unless both ``args`` and ``kwargs`` are provided, the information will be retrieved from
    the previous stack frame (ie. from the function that called this).
    :return: ``True``
    :raises TypeError: if there is an argument type mismatch
    """
    if memo is None:
        # sys._getframe() is faster than inspect.currentframe(), though not
        # officially supported by every Python implementation
        caller = sys._getframe(1)
        try:
            func = find_function(caller)
        except LookupError:
            # Happens with the Pydev/PyCharm debugger extension installed
            return True
        memo = _CallMemo(func, caller.f_locals)
    for argname, annotation in memo.type_hints.items():
        if argname == 'return' or argname not in memo.arguments:
            continue
        description = 'argument "{}"'.format(argname)
        try:
            check_type(description, memo.arguments[argname], annotation, memo)
        except TypeError as exc:  # suppress unnecessarily long tracebacks
            raise TypeError(*exc.args) from None
    return True
class TypeCheckedGenerator:
    """Wrapper that type checks the values a generator yields, receives and returns."""

    def __init__(self, wrapped: Generator, memo: _CallMemo):
        return_hint = memo.type_hints['return']
        # Generator[Y, S, R].__args__ == (yield type, send type, return type);
        # missing positions default to Any.
        hint_args = getattr(return_hint, "__args__", None) or []
        self.__wrapped = wrapped
        self.__memo = memo
        self.__yield_type = hint_args[0] if hint_args else Any
        self.__send_type = hint_args[1] if len(hint_args) > 1 else Any
        self.__return_type = hint_args[2] if len(hint_args) > 2 else Any
        self.__initialized = False

    def __iter__(self):
        return self

    def __next__(self):
        return self.send(None)

    def __getattr__(self, name: str) -> Any:
        # Delegate any unknown attribute to the wrapped generator
        return getattr(self.__wrapped, name)

    def throw(self, *args):
        return self.__wrapped.throw(*args)

    def close(self):
        self.__wrapped.close()

    def send(self, obj):
        if not self.__initialized:
            # The priming send(None) is not type checked
            self.__initialized = True
        else:
            check_type('value sent to generator', obj, self.__send_type, memo=self.__memo)
        try:
            value = self.__wrapped.send(obj)
        except StopIteration as exc:
            # The generator finished: validate its return value before re-raising
            check_type('return value', exc.value, self.__return_type, memo=self.__memo)
            raise
        check_type('value yielded from generator', value, self.__yield_type, memo=self.__memo)
        return value
class TypeCheckedAsyncGenerator:
    """Wrapper that type checks the values an async generator yields and receives."""
    def __init__(self, wrapped: AsyncGenerator, memo: _CallMemo):
        # AsyncGenerator[Y, S].__args__ == (yield type, send type)
        rtype_args = memo.type_hints['return'].__args__
        self.__wrapped = wrapped
        self.__memo = memo
        self.__yield_type = rtype_args[0]
        self.__send_type = rtype_args[1] if len(rtype_args) > 1 else Any
        self.__initialized = False
    async def __aiter__(self):
        # NOTE(review): async __aiter__ is the legacy (pre-3.5.2) protocol
        # form; callers must await it before iterating -- confirm intended
        return self
    def __anext__(self):
        return self.asend(None)
    def __getattr__(self, name: str) -> Any:
        # Delegate any unknown attribute to the wrapped async generator
        return getattr(self.__wrapped, name)
    def athrow(self, *args):
        return self.__wrapped.athrow(*args)
    def aclose(self):
        return self.__wrapped.aclose()
    async def asend(self, obj):
        if self.__initialized:
            check_type('value sent to generator', obj, self.__send_type, memo=self.__memo)
        else:
            # The priming asend(None) is not type checked
            self.__initialized = True
        value = await self.__wrapped.asend(obj)
        check_type('value yielded from generator', value, self.__yield_type, memo=self.__memo)
        return value
@overload
def typechecked(*, always: bool = False) -> Callable[[T_CallableOrType], T_CallableOrType]:
    ...


@overload
def typechecked(func: T_CallableOrType, *, always: bool = False) -> T_CallableOrType:
    ...


def typechecked(func=None, *, always=False, _localns: Optional[Dict[str, Any]] = None):
    """
    Perform runtime type checking on the arguments that are passed to the wrapped function.
    The return value is also checked against the return annotation if any.
    If the ``__debug__`` global variable is set to ``False``, no wrapping and therefore no type
    checking is done, unless ``always`` is ``True``.
    This can also be used as a class decorator. This will wrap all type annotated methods,
    including ``@classmethod``, ``@staticmethod``, and ``@property`` decorated methods,
    in the class with the ``@typechecked`` decorator.
    :param func: the function or class to enable type checking for
    :param always: ``True`` to enable type checks even in optimized mode
    """
    if func is None:
        # Called with parentheses (@typechecked(...)): return a decorator
        return partial(typechecked, always=always, _localns=_localns)
    if not __debug__ and not always:  # pragma: no cover
        return func
    if isclass(func):
        # Class decorator mode: wrap every annotated member defined on this class
        prefix = func.__qualname__ + '.'
        for key, attr in func.__dict__.items():
            if inspect.isfunction(attr) or inspect.ismethod(attr) or inspect.isclass(attr):
                # The qualname prefix check skips members defined elsewhere
                if attr.__qualname__.startswith(prefix) and getattr(attr, '__annotations__', None):
                    setattr(func, key, typechecked(attr, always=always, _localns=func.__dict__))
            elif isinstance(attr, (classmethod, staticmethod)):
                if getattr(attr.__func__, '__annotations__', None):
                    # Wrap the underlying function, then re-apply the descriptor type
                    wrapped = typechecked(attr.__func__, always=always, _localns=func.__dict__)
                    setattr(func, key, type(attr)(wrapped))
            elif isinstance(attr, property):
                # Rebuild the property from individually wrapped accessors
                kwargs = dict(doc=attr.__doc__)
                for name in ("fset", "fget", "fdel"):
                    property_func = getattr(attr, name)
                    if property_func is None:
                        continue
                    kwargs[name] = typechecked(
                        property_func, always=always, _localns=func.__dict__
                    )
                setattr(func, key, property(**kwargs))
        return func
    # Find the frame in which the function was declared, for resolving forward references later
    if _localns is None:
        _localns = sys._getframe(1).f_locals
    # Find either the first Python wrapper or the actual function
    python_func = inspect.unwrap(func, stop=lambda f: hasattr(f, '__code__'))
    if not getattr(func, '__annotations__', None):
        warn('no type annotations present -- not typechecking {}'.format(function_name(func)))
        return func

    def wrapper(*args, **kwargs):
        # Check arguments before the call and the return value after it
        memo = _CallMemo(python_func, _localns, args=args, kwargs=kwargs)
        check_argument_types(memo)
        retval = func(*args, **kwargs)
        try:
            check_return_type(retval, memo)
        except TypeError as exc:  # suppress unnecessarily long tracebacks
            raise TypeError(*exc.args) from None
        # If a generator is returned, wrap it if its yield/send/return types can be checked
        if inspect.isgenerator(retval) or isasyncgen(retval):
            return_type = memo.type_hints.get('return')
            if return_type:
                origin = getattr(return_type, '__origin__', None)
                if origin in generator_origin_types:
                    return TypeCheckedGenerator(retval, memo)
                elif origin is not None and origin in asyncgen_origin_types:
                    return TypeCheckedAsyncGenerator(retval, memo)
        return retval

    async def async_wrapper(*args, **kwargs):
        # Coroutine variant: await the call before checking the return value
        memo = _CallMemo(python_func, _localns, args=args, kwargs=kwargs)
        check_argument_types(memo)
        retval = await func(*args, **kwargs)
        check_return_type(retval, memo)
        return retval

    if inspect.iscoroutinefunction(func):
        # Comparing code objects guards against wrapping our own wrapper twice
        if python_func.__code__ is not async_wrapper.__code__:
            return wraps(func)(async_wrapper)
    else:
        if python_func.__code__ is not wrapper.__code__:
            return wraps(func)(wrapper)
    # the target callable was already wrapped
    return func
class TypeWarning(UserWarning):
    """
    A warning that is emitted when a type check fails.
    :ivar str event: ``call`` or ``return``
    :ivar Callable func: the function in which the violation occurred (the called function if event
        is ``call``, or the function where a value of the wrong type was returned from if event is
        ``return``)
    :ivar str error: the error message contained by the caught :class:`TypeError`
    :ivar frame: the frame in which the violation occurred
    """
    __slots__ = ('func', 'event', 'message', 'frame')

    def __init__(self, memo: Optional[_CallMemo], event: str, frame,
                 exception: Union[str, TypeError]):  # pragma: no cover
        self.func = memo.func
        self.event = event
        self.error = str(exception)
        self.frame = frame
        if event == 'call':
            # Describe where the offending call came from
            caller = frame.f_back
            description = 'call to {}() from {}:{}'.format(
                function_name(self.func), caller.f_code.co_filename, caller.f_lineno)
        else:
            # Describe where the offending value was returned from
            description = 'return from {}() at {}:{}'.format(
                function_name(self.func), self.frame.f_code.co_filename, self.frame.f_lineno)
        super().__init__('[{thread_name}] {event}: {self.error}'.format(
            thread_name=threading.current_thread().name, event=description, self=self))

    @property
    def stack(self):
        """Return the stack where the last frame is from the target function."""
        return extract_stack(self.frame)

    def print_stack(self, file: TextIO = None, limit: int = None) -> None:
        """
        Print the traceback from the stack frame where the target function was run.
        :param file: an open file to print to (prints to stdout if omitted)
        :param limit: the maximum number of stack frames to print
        """
        print_stack(self.frame, limit, file)
class TypeChecker:
    """
    A type checker that collects type violations by hooking into :func:`sys.setprofile`.
    :param packages: list of top level modules and packages or modules to include for type checking
    :param all_threads: ``True`` to check types in all threads created while the checker is
        running, ``False`` to only check in the current one
    :param forward_refs_policy: how to handle unresolvable forward references in annotations
    .. deprecated:: 2.6
       Use :func:`~.importhook.install_import_hook` instead. This class will be removed in v3.0.
    """
    def __init__(self, packages: Union[str, Sequence[str]], *, all_threads: bool = True,
                 forward_refs_policy: ForwardRefPolicy = ForwardRefPolicy.ERROR):
        assert check_argument_types()
        warn('TypeChecker has been deprecated and will be removed in v3.0. '
             'Use install_import_hook() or the pytest plugin instead.', DeprecationWarning)
        self.all_threads = all_threads
        self.annotation_policy = forward_refs_policy
        # Maps a live call's frame object to its _CallMemo until it returns
        self._call_memos = {}  # type: Dict[Any, _CallMemo]
        self._previous_profiler = None
        self._previous_thread_profiler = None
        self._active = False
        # Accept either one package name or a sequence of them
        if isinstance(packages, str):
            self._packages = (packages,)
        else:
            self._packages = tuple(packages)
    @property
    def active(self) -> bool:
        """Return ``True`` if currently collecting type violations."""
        return self._active
    def should_check_type(self, func: Callable) -> bool:
        # Decide whether the given function is in scope for type checking.
        if not func.__annotations__:
            # No point in checking if there are no type hints
            return False
        elif isasyncgenfunction(func):
            # Async generators cannot be supported because the return arg is of an opaque builtin
            # type (async_generator_wrapped_value)
            return False
        else:
            # Check types if the module matches any of the package prefixes
            return any(func.__module__ == package or func.__module__.startswith(package + '.')
                       for package in self._packages)
    def start(self):
        # Begin collecting violations; raises if already running.
        if self._active:
            raise RuntimeError('type checker already running')
        self._active = True
        # Install this instance as the current profiler
        self._previous_profiler = sys.getprofile()
        sys.setprofile(self)
        # If requested, set this instance as the default profiler for all future threads
        # (does not affect existing threads)
        if self.all_threads:
            self._previous_thread_profiler = threading._profile_hook
            threading.setprofile(self)
    def stop(self):
        # Restore the previous profiler hooks; warns if someone else replaced them.
        if self._active:
            if sys.getprofile() is self:
                sys.setprofile(self._previous_profiler)
            else:  # pragma: no cover
                warn('the system profiling hook has changed unexpectedly')
            if self.all_threads:
                if threading._profile_hook is self:
                    threading.setprofile(self._previous_thread_profiler)
                else:  # pragma: no cover
                    warn('the threading profiling hook has changed unexpectedly')
            self._active = False
    def __enter__(self):
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
    def __call__(self, frame, event: str, arg) -> None:  # pragma: no cover
        # Profiler callback: invoked by the interpreter for call/return events.
        if not self._active:
            # This happens if all_threads was enabled and a thread was created when the checker was
            # running but was then stopped. The thread's profiler callback can't be reset any other
            # way but this.
            sys.setprofile(self._previous_thread_profiler)
            return
        # If an actual profiler is running, don't include the type checking times in its results
        if event == 'call':
            try:
                func = find_function(frame)
            except Exception:
                func = None
            if func is not None and self.should_check_type(func):
                memo = self._call_memos[frame] = _CallMemo(
                    func, frame.f_locals, forward_refs_policy=self.annotation_policy)
                if memo.is_generator:
                    return_type_hint = memo.type_hints['return']
                    if return_type_hint is not None:
                        origin = getattr(return_type_hint, '__origin__', None)
                        if origin in generator_origin_types:
                            # Check the types of the yielded values
                            memo.type_hints['return'] = return_type_hint.__args__[0]
                else:
                    try:
                        check_argument_types(memo)
                    except TypeError as exc:
                        warn(TypeWarning(memo, event, frame, exc))
            if self._previous_profiler is not None:
                self._previous_profiler(frame, event, arg)
        elif event == 'return':
            if self._previous_profiler is not None:
                self._previous_profiler(frame, event, arg)
            if arg is None:
                # a None return value might mean an exception is being raised but we have no way of
                # checking
                return
            memo = self._call_memos.get(frame)
            if memo is not None:
                try:
                    if memo.is_generator:
                        # For generators each 'return' event carries a yielded value
                        check_type('yielded value', arg, memo.type_hints['return'], memo)
                    else:
                        check_return_type(arg, memo)
                except TypeError as exc:
                    warn(TypeWarning(memo, event, frame, exc))
                # Generator frames are kept until exhausted; plain calls are done
                if not memo.is_generator:
                    del self._call_memos[frame]
        elif self._previous_profiler is not None:
            self._previous_profiler(frame, event, arg)
| {
"repo_name": "glenngillen/dotfiles",
"path": ".vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/jedilsp/typeguard/__init__.py",
"copies": "1",
"size": "46364",
"license": "mit",
"hash": 1142207847428083500,
"line_mean": 39.3867595819,
"line_max": 99,
"alpha_frac": 0.5997541196,
"autogenerated": false,
"ratio": 4.2830484988452655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013572772793528096,
"num_lines": 1148
} |
"""All foxx related methods."""
from .action import DatabaseAction
class Foxx:
    """A generic foxx function executor."""

    def __init__(self, database):
        """Initialise database and its services."""
        self.database = database
        self.services = []
        # Set of known mount points; populated lazily by reload().
        # (Previously initialised as a dict literal, inconsistent with the
        # set comprehension built in reload().)
        self.mounts = set()

    def service(self, mount):
        """Return a service so that only route after the mount.

        Parameters
        ----------
        mount : str
            mount point.

        Returns
        -------
        FoxxService
            A mounted service

        Raises
        ------
        ValueError
            If the mount point is unknown even after refreshing.
        """
        if mount not in self.mounts:
            # The service may have been installed after the last refresh
            self.reload()
        if mount not in self.mounts:
            # BUG FIX: the message was previously passed printf-style
            # (ValueError("...'%s'", mount)) and never interpolated.
            raise ValueError("Unable to find the mount: '%s'" % mount)
        return FoxxService(self.database, mount)

    def get_available_services(self):
        """Fetch all installed services (including system ones) from the server."""
        response = self.database.action.get('/_api/foxx', params={'excludeSystem': False})
        response.raise_for_status()
        return response.json()

    def reload(self):
        """Refresh the cached service list and the set of known mount points."""
        self.services = self.get_available_services()
        self.mounts = {service['mount'] for service in self.services}
class FoxxService(DatabaseAction):
    """Executor for requests routed beneath a single Foxx mount point."""

    def __init__(self, database, mount):
        """Remember the owning database and the service's mount point."""
        self.database = database
        self.mount = mount

    @property
    def end_point_url(self):
        """End point url for foxx service."""
        endpoint = self.database.connection.getEndpointURL()
        return '%s/_db/%s%s' % (
            endpoint, self.database.name,
            self.mount
        )
| {
"repo_name": "tariqdaouda/pyArango",
"path": "pyArango/foxx.py",
"copies": "1",
"size": "1593",
"license": "apache-2.0",
"hash": -8721384319168592000,
"line_mean": 26,
"line_max": 90,
"alpha_frac": 0.5756434401,
"autogenerated": false,
"ratio": 4.412742382271468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5488385822371469,
"avg_score": null,
"num_lines": null
} |
__all__ = ["FrameTracker"]
from devtools_event_listener import DevToolsEventListener
from status import *
import json
# Tracks execution context creation.
class FrameTracker(DevToolsEventListener):
    """Tracks execution context creation, mapping DevTools frame ids to
    JavaScript execution context ids."""

    def __init__(self, client):
        DevToolsEventListener.__init__(self)
        self.frame_to_context_map = {}
        client.AddListener(self)

    # return status and context_id<int>
    def GetContextIdForFrame(self, frame_id):
        """Return (Status, context_id) for frame_id; (-1 on failure)."""
        context_id = self.frame_to_context_map.get(frame_id, None)
        if not isinstance(context_id, int):
            return (Status(kNoSuchFrame, "context_id does not have frame"), -1)
        return (Status(kOk), context_id)

    # Overridden from DevToolsEventListener:
    def OnConnected(self, client):
        # Any previously tracked contexts are stale on a fresh connection.
        self.frame_to_context_map.clear()
        params = {}
        status = client.SendCommand("Runtime.enable", params)
        if status.IsError():
            return status
        return client.SendCommand("Page.enable", params)

    def OnEvent(self, client, method, params):
        if method == "Runtime.executionContextCreated":
            context = params.get("context", None)
            if not isinstance(context, dict):
                return Status(kUnknownError, "Runtime.executionContextCreated missing dict 'context'")
            context_id = context.get("id")
            frame_id = context.get("frameId")
            if not isinstance(context_id, int) or not isinstance(frame_id, str):
                js = json.dumps(context)
                return Status(kUnknownError, "Runtime.executionContextCreated has invalid 'context': " + js)
            self.frame_to_context_map[frame_id] = context_id
        elif method == "Page.frameNavigated":
            # BUG FIX: dict.has_key() was removed in Python 3; use `in`.
            if "parentId" not in params["frame"]:
                # Top-level navigation invalidates every execution context.
                self.frame_to_context_map.clear()
        return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/frame_tracker.py",
"copies": "1",
"size": "1666",
"license": "bsd-3-clause",
"hash": -4042786569207896600,
"line_mean": 35.2173913043,
"line_max": 100,
"alpha_frac": 0.6836734694,
"autogenerated": false,
"ratio": 3.6455142231947484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48291876925947486,
"avg_score": null,
"num_lines": null
} |
__all__ = ['fread_csv', 'read_csv', 'fwrite_csv', 'write_csv']
import csv
import collections
import autos.constants as constants
def iterlists(fp, **kwargs):
    """Yield each CSV row from *fp* as a list of strings."""
    for row in csv.reader(fp, **kwargs):
        yield row
def itertuples(fp, **kwargs):
    '''Yield each CSV data row as a namedtuple, using the header row as field names.
    Fieldnames need to be valid Python identifier except for names starting with underscore.
    Read more: https://docs.python.org/3/library/collections.html#collections.namedtuple
    :type fp: file object
    :param fp: File object that points to a CSV file.
    :type kwargs: keyword arguments
    :param kwargs: extra arguments to be passed to csv.reader.
    :rtype: iterator
    :returns: Namedtuple rows.
    '''
    rows = iterlists(fp, **kwargs)
    # The first row is consumed as the header and defines the field names.
    Row = collections.namedtuple('Row', next(rows))
    for record in rows:
        yield Row(*record)
def iterdicts(fp, **kwargs):
    """Yield each CSV row from *fp* as a dict keyed by the header row."""
    for row in csv.DictReader(fp, **kwargs):
        yield row
def fread_csv(fp, as_namedtuple=False, as_dict=False, **kwargs):
    """Read CSV rows from an open file object.

    Rows come back as namedtuples when *as_namedtuple* is true, as dicts
    when *as_dict* is true, and as plain lists otherwise.
    """
    delimiter = kwargs.pop('delimiter', constants.DEFAULT_DELIMITER)
    if as_namedtuple:
        reader = itertuples
    elif as_dict:
        reader = iterdicts
    else:
        reader = iterlists
    yield from reader(fp, delimiter=delimiter, **kwargs)
def read_csv(path, as_namedtuple=False, as_dict=False, **kwargs):
    """Read CSV rows from *path*.

    :type path: str
    :param path: Source CSV file path.
    :type as_namedtuple: bool
    :param as_namedtuple: If true, yield namedtuple rows.
    :type as_dict: bool
    :param as_dict: If true, yield dict rows.
    :type kwargs: keyword arguments
    :param kwargs: extra arguments to be passed to open() and fread_csv().
    :rtype: iterator
    :returns: CSV rows.
    """
    encoding = kwargs.pop('encoding', constants.DEFAULT_ENCODING)
    newline = kwargs.pop('newline', constants.DEFAULT_NEWLINE)
    # BUG FIX: the file object was previously opened but never closed,
    # leaking the handle. The `with` closes it once the generator is
    # exhausted (or closed / garbage collected).
    with open(path, encoding=encoding, newline=newline) as fp:
        yield from fread_csv(
            fp,
            as_namedtuple=as_namedtuple,
            as_dict=as_dict,
            **kwargs
        )
def fwrite_csv(fp, rows, from_dict=False, header=True, **kwargs):
    '''Write rows to file-object.
    Note: *fp* is closed when writing finishes (it is used as a context manager).
    :type fp: file object
    :param fp: File object that points to a CSV file.
    :type rows: iterable
    :param rows: Iterable of iterables or dicts.
    :type from_dict: bool
    :param from_dict: If from_dict is True, use csv.DictWriter.
    :type header: bool
    :param header: If header is true, write header. Only applicable if from_dict is true.
    :type kwargs: keyword arguments
    :param kwargs: extra arguments to be passed to csv.writer() or csv.DictWriter().
    '''
    delimiter = kwargs.pop('delimiter', constants.DEFAULT_DELIMITER)
    with fp:
        if not from_dict:
            csv.writer(fp, delimiter=delimiter, **kwargs).writerows(rows)
            return
        writer = csv.DictWriter(fp, delimiter=delimiter, **kwargs)
        if header:
            writer.writeheader()
        writer.writerows(rows)
def write_csv(path, rows, from_dict=False, header=True, **kwargs):
    '''Write rows to path.
    :type path: str
    :param path: Destination CSV file path.
    :type kwargs: keyword arguments
    :param kwargs: extra arguments to be passed to open() and csv.writer().
    '''
    encoding = kwargs.pop('encoding', constants.DEFAULT_ENCODING)
    newline = kwargs.pop('newline', constants.DEFAULT_NEWLINE)
    out = open(path, mode='w', encoding=encoding, newline=newline)
    # fwrite_csv takes ownership of the file object and closes it.
    fwrite_csv(out, rows, from_dict=from_dict, header=header, **kwargs)
| {
"repo_name": "hans-t/autos",
"path": "autos/utils/csv.py",
"copies": "1",
"size": "3211",
"license": "mit",
"hash": -7683370219736241000,
"line_mean": 29.2924528302,
"line_max": 92,
"alpha_frac": 0.6645904703,
"autogenerated": false,
"ratio": 3.791027154663518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4955617624963518,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Fridge']
__version__ = '0.2'
import json
import errno
class Fridge(dict):
    """
    Fridge is a subclass of :class:`dict` and thus fully conforms to its interface.
    Fridge keeps an open file until it's closed, so you have to call :meth:`close`
    when you're done using it.
    Fridge implements :meth:`__enter__` and :meth:`__exit__` so you can use
    `with` statement.
    :param path: a path to a file that will be used to load and save the data
    :param file: a file object that will be used to load and save the data.
        This file object in not closed by fridge automatically.
    :param dump_args: dictionary of arguments that are passed to :func:`json.dump`.
    :param load_args: dictionary of arguments that are passed to :func:`json.load`.
    `path` and `file` arguments are mutually exclusive.
    """
    # Class-level fallback for constructor arguments; subclasses may
    # override with e.g. {'path': ...} to set a default backing file.
    default_args = {}
    @classmethod
    def readonly(cls, *args, **kwargs):
        """
        Return an already closed read-only instance of Fridge.
        Arguments are the same as for the constructor.
        """
        fridge = cls(*args, **kwargs)
        fridge.close()
        return fridge
    @classmethod
    def _getdefault(cls):
        # Pull (path, file) from default_args; either may be absent (None).
        default = cls.default_args
        path = default.get('path')
        file = default.get('file')
        return path, file
    def __new__(cls, path=None, file=None, *args, **kwargs):
        # Validate the path/file arguments before the instance is created so
        # an invalid combination never produces a half-constructed object.
        if path is None and file is None:
            path, file = cls._getdefault()
        if path is None and file is None:
            raise ValueError('No path or file specified')
        elif path is not None and file is not None:
            raise ValueError('Only path or only file can be passed')
        fridge = super(Fridge, cls).__new__(cls)
        return fridge
    def __init__(self, path=None, file=None, dump_args=None, load_args=None):
        """Open (or adopt) the backing file and load its JSON contents."""
        if path is None and file is None:
            path, file = self._getdefault()
        self.dump_args = dump_args or {}
        self.load_args = load_args or {}
        # so that __del__ doesn't try to close the file if its opening fails
        self.closed = True
        if file is not None:
            # Caller-supplied file object: the caller retains ownership.
            self.file = file
            self.close_file = False
        else:
            try:
                self.file = open(path, 'r+')
            except IOError as e:
                if e.errno == errno.ENOENT:
                    # File doesn't exist yet: create it empty.
                    self.file = open(path, 'w+')
                else:
                    raise
            self.close_file = True
        #: True after :meth:`close` is called, False otherwise.
        self.closed = False
        self.load()
    def _check_open(self):
        # Guard for load()/save(): both are invalid on a closed fridge.
        if self.closed:
            raise ValueError('Operation on a closed fridge object')
    def load(self):
        """
        Force reloading the data from the file.
        All data in the in-memory dictionary is discarded.
        This method is called automatically by the constructor, normally you
        don't need to call it.
        """
        self._check_open()
        try:
            data = json.load(self.file, **self.load_args)
        except ValueError:
            # Empty or malformed file: start from an empty dictionary.
            data = {}
        if not isinstance(data, dict):
            raise ValueError('Root JSON type must be dictionary')
        self.clear()
        self.update(data)
    def save(self):
        """
        Force saving the dictionary to the file.
        All data in the file is discarded.
        This method is called automatically by :meth:`close`.
        """
        self._check_open()
        # Wipe the previous contents and rewind before dumping fresh JSON.
        self.file.truncate(0)
        self.file.seek(0)
        json.dump(self, self.file, **self.dump_args)
    def close(self):
        """
        Close the fridge.
        Calls :meth:`save` and closes the underlying file object unless
        an already open file was passed to the constructor.
        This method has no effect if the object is already closed.
        After the fridge is closed :meth:`save` and :meth:`load` will raise an exception
        but you will still be able to use it as an ordinary dictionary.
        """
        if not self.closed:
            self.save()
            if self.close_file:
                self.file.close()
            self.closed = True
    def __enter__(self):
        return self
    def __exit__(self, *_):
        # Persist and close on context exit; never suppress exceptions.
        self.close()
        return False
    def __del__(self):
        # Best-effort persistence if the user forgot to close explicitly.
        self.close()
| {
"repo_name": "swarmer/fridge",
"path": "fridge.py",
"copies": "1",
"size": "4372",
"license": "mit",
"hash": -7045466559556542000,
"line_mean": 29.7887323944,
"line_max": 88,
"alpha_frac": 0.5725068618,
"autogenerated": false,
"ratio": 4.236434108527132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001973767811874864,
"num_lines": 142
} |
__all__ = ["from_databroker"]
from ._data import Data
def from_databroker(run, dataset="primary"):
    """Import a dataset from a bluesky run into the WrightTools Data format.

    Parameters
    ----------
    run: BlueskyRun
        The bluesky run as returned by e.g. catalog["<uid>"]
    dataset: str
        The string identifier of the stream to import from the bluesky run.
        By default "primary" is used, but e.g. "baseline" is also common
    """
    start = run.describe()["metadata"]["start"]
    ds = run[dataset].read()
    # Fall back to a flat shape when the start document has none recorded.
    shape = start.get("shape", (len(ds.time),))
    detector_names = tuple(start.get("detectors", []))
    data = Data(name=start["uid"])
    for var in ds:
        if var == "uid" or var.endswith("_busy"):
            continue
        values = ds[var].data.reshape(shape)
        # Detector readings become channels; everything else is a variable.
        if detector_names and var.startswith(detector_names):
            data.create_channel(var, values=values)
        else:
            # TODO units, once they are in the dataset metadata
            data.create_variable(var, values=values)
    axes = [x[0] for x, ds_name in start["hints"]["dimensions"] if ds_name == dataset]
    data.transform(*axes)
    return data
| {
"repo_name": "wright-group/WrightTools",
"path": "WrightTools/data/_databroker.py",
"copies": "1",
"size": "1245",
"license": "mit",
"hash": 3515474857955063300,
"line_mean": 30.9230769231,
"line_max": 91,
"alpha_frac": 0.6024096386,
"autogenerated": false,
"ratio": 3.8190184049079754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49214280435079755,
"avg_score": null,
"num_lines": null
} |
__all__ = ['fromMaybe', 'Maybe', 'Just', 'Nothing']
def fromMaybe( default, m ):
if( m.isJust( ) ):
return m.get( )
else:
return default
class Maybe:
    """Optional container: wraps either a value ("Just") or nothing ("Nothing").

    NOTE(review): written for Python 2 — relies on ``unicode`` and
    ``__nonzero__``, neither of which exists under Python 3.
    """
    def __init__( self, m ):
        # Copy-construct from another Maybe-like object; a Nothing collapses
        # the stored value to the placeholder 0 (via fromMaybe).
        self.isjust = m.isJust( )
        self.value = fromMaybe( 0, m )
    def get( self ):
        # Raw wrapped value; the placeholder 0 when this is a Nothing.
        return self.value
    def isJust( self ):
        # True when a value is present.
        return self.isjust
    def isNothing( self ):
        # True when no value is present.
        return not self.isjust
    def __repr__( self ):
        return 'Nothing' if self.isNothing( ) else 'Just( %s )' % repr( self.get( ) )
    def __str__( self ):
        # Nothing renders as the empty string.
        return '' if self.isNothing( ) else repr( self.get( ) )
    def __unicode__( self ):
        # Python 2 only: `unicode` is undefined on Python 3.
        return u'' if self.isNothing( ) else unicode( self.get( ) )
    def __nonzero__( self ):
        # Python 2 truthiness hook (named __bool__ on Python 3):
        # Nothing is falsy, a Just is as truthy as its payload.
        return False if self.isNothing( ) else bool( self.get( ) )
    def __call__( self, *args, **kwargs ):
        # Calling a Just calls the wrapped callable and rewraps the result.
        return Nothing( ) if self.isNothing( ) \
            else Just( self.get( )( *args, **kwargs ) )
    def __getattr__( self, name ):
        # Attribute access propagates through the Maybe; any failure yields
        # Nothing.  NOTE(review): the bare `except:` also swallows unrelated
        # errors (even KeyboardInterrupt).
        try:
            return Nothing( ) if self.isNothing( ) \
                else Just( getattr( self.get( ), name ) )
        except:
            return Nothing( )
    def __getitem__( self, key_or_slice ):
        # Indexing/slicing propagates through the Maybe; failures yield
        # Nothing (same bare-except caveat as __getattr__).
        try:
            return Nothing( ) if self.isNothing( ) \
                else Just( self.get( ).__getitem__( key_or_slice ) )
        except:
            return Nothing( )
    def __rshift__( self, fn ):
        # Monadic bind: `m >> fn` applies fn to the payload and wraps the
        # result in a Maybe again; Nothing short-circuits.
        return Nothing( ) if self.isNothing( ) \
            else Maybe( fn( self.get( ) ) )
    def __or__( self, other ):
        # `m | fallback`: the payload when Just, otherwise the fallback.
        return other if self.isNothing( ) \
            else self.get( )
class Just( Maybe ):
    """A Maybe that definitely carries a value.

    Bypasses Maybe.__init__ and sets the two state fields directly.
    """
    def __init__( self, value ):
        self.value = value
        self.isjust = True
class Nothing( Maybe ):
    """A Maybe carrying no value; get() yields the placeholder 0.

    Bypasses Maybe.__init__ and sets the two state fields directly.
    """
    def __init__( self ):
        self.value = 0
        self.isjust = False
| {
"repo_name": "DweebsUnited/CodeMonkey",
"path": "QualityBreeze/Maybe.py",
"copies": "1",
"size": "1859",
"license": "bsd-3-clause",
"hash": -3116613851039125000,
"line_mean": 25.1830985915,
"line_max": 85,
"alpha_frac": 0.5072619688,
"autogenerated": false,
"ratio": 3.6379647749510764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46452267437510764,
"avg_score": null,
"num_lines": null
} |
__all__ = ['fsvd']
import numpy as np
def fsvd(A, k, i, usePowerMethod=False):
    """
    FSVD Fast Singular Value Decomposition

    U, S, V = fsvd(A, k, i, usePowerMethod) computes the truncated singular
    value decomposition of the input matrix A up to rank k using i levels of
    Krylov method as given in [1], p. 3.

    If usePowerMethod is given as True, then only exponent i is used (i.e.
    as power method). See [2] p.9, Randomized PCA algorithm for details.

    [1] Halko, N., Martinsson, P. G., Shkolnisky, Y., & Tygert, M. (2010).
    An algorithm for the principal component analysis of large data sets.
    Arxiv preprint arXiv:1007.5510, 0526. Retrieved April 1, 2011, from
    http://arxiv.org/abs/1007.5510.

    [2] Halko, N., Martinsson, P. G., & Tropp, J. A. (2009). Finding
    structure with randomness: Probabilistic algorithms for constructing
    approximate matrix decompositions. Arxiv preprint arXiv:0909.4061.
    Retrieved April 1, 2011, from http://arxiv.org/abs/0909.4061.

    Copyright 2011 Ismail Ari, http://ismailari.com.

    Args:
        A (ndarray): matrix to be decomposed
        k (int): target (truncation) rank of the decomposition
        i (int): number of Krylov levels (or the power-method exponent)
        usePowerMethod (bool, optional): use the power method instead of
            stacked Krylov blocks

    Returns:
        U, S, V: truncated factors with U of shape (m, k), S of shape (k,)
        and V of shape (n, k), so that A ~= U @ diag(S) @ V.T

    Bug fixes relative to the earlier port:
        * the Krylov branch called ``np.append(A * G)`` (wrong call
          signature and elementwise product) and always crashed; the
          blocks are now stacked side by side with ``np.hstack``,
        * ``i`` was silently forced to 1 whenever usePowerMethod was False
          (a mistranslation of MATLAB's ``nargin`` default handling),
        * the final slices used ``0:k-1`` and returned only k-1 components,
        * numpy's svd returns W^T, so the left factor Q @ W needs a
          transpose of the returned matrix.
    """
    A = np.asarray(A)
    # Work on the (conjugate) transpose if A is "wide": H is then smaller,
    # which makes the QR and SVD steps cheaper.
    isTransposed = False
    if A.shape[0] < A.shape[1]:
        A = A.T
        isTransposed = True
    n = A.shape[1]
    # Oversample slightly beyond the requested rank (see [1]).
    l = k + 2
    # Form a real nxl matrix G whose entries are iid Gaussian r.v.s of zero
    # mean and unit variance.
    G = np.random.randn(n, l)
    if usePowerMethod:
        # Power method: H = (A A^T)^(i-1) A G
        H = np.dot(A, G)
        for _ in range(2, i + 1):
            H = np.dot(A, np.dot(A.T, H))
    else:
        # Krylov blocks H^{(0)} = A G, H^{(j)} = A A^T H^{(j-1)},
        # stacked side by side into an m x (i*l) matrix.
        blocks = [np.dot(A, G)]
        for _ in range(1, i):
            blocks.append(np.dot(A, np.dot(A.T, blocks[-1])))
        H = np.hstack(blocks)
    # Using the QR-decomposition, form a matrix Q whose columns are
    # orthonormal, s.t. there exists a matrix R for which H = QR.
    Q, _ = np.linalg.qr(H)
    # Compute the small product matrix T = A^T Q
    T = np.dot(A.T, Q)
    # Form an SVD of T: T = Vt * diag(St) * Wh  (numpy returns W^T as Wh)
    Vt, St, Wh = np.linalg.svd(T, full_matrices=False)
    # A ~= Q Q^T A = Q T^T = (Q W) diag(St) Vt^T, so the left factor is Q W.
    Ut = np.dot(Q, Wh.T)
    # Retrieve the leading k columns / singular values; the product
    # U diag(S) V^T then approximates A.  Swap U and V back if we
    # decomposed the transpose.
    if isTransposed:
        V = Ut[:, :k]
        U = Vt[:, :k]
    else:
        U = Ut[:, :k]
        V = Vt[:, :k]
    S = St[:k]
    return U, S, V
| {
"repo_name": "aasensio/pyiacsun",
"path": "pyiacsun/linalg/fsvd.py",
"copies": "1",
"size": "3055",
"license": "mit",
"hash": 1943865829431899100,
"line_mean": 29.2475247525,
"line_max": 76,
"alpha_frac": 0.5934533552,
"autogenerated": false,
"ratio": 3.0039331366764994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4097386491876499,
"avg_score": null,
"num_lines": null
} |
__all__ = ['fsvd']
import numpy as np
def fsvd(A, k, i, usePowerMethod=False):
    # FSVD Fast Singular Value Decomposition
    #
    # U, S, V = fsvd(A, k, i, usePowerMethod) computes the truncated singular
    # value decomposition of the input matrix A upto rank k using i levels of
    # Krylov method as given in [1], p. 3.
    #
    # If usePowerMethod is given as True, then only exponent i is used (i.e.
    # as power method). See [2] p.9, Randomized PCA algorithm for details.
    #
    # [1] Halko, N., Martinsson, P. G., Shkolnisky, Y., & Tygert, M. (2010).
    # An algorithm for the principal component analysis of large data sets.
    # Arxiv preprint arXiv:1007.5510, 0526. Retrieved April 1, 2011, from
    # http://arxiv.org/abs/1007.5510.
    #
    # [2] Halko, N., Martinsson, P. G., & Tropp, J. A. (2009). Finding
    # structure with randomness: Probabilistic algorithms for constructing
    # approximate matrix decompositions. Arxiv preprint arXiv:0909.4061.
    # Retrieved April 1, 2011, from http://arxiv.org/abs/0909.4061.
    #
    # See also SVD.
    #
    # Copyright 2011 Ismail Ari, http://ismailari.com.
    #
    # Bug fixes relative to the earlier port:
    #   * the Krylov branch called np.append(A * G) (wrong call signature and
    #     elementwise product) and always crashed; the blocks are now stacked
    #     side by side with np.hstack,
    #   * i was silently forced to 1 whenever usePowerMethod was False
    #     (a mistranslation of MATLAB's nargin default handling),
    #   * the final slices used 0:k-1 and returned only k-1 components,
    #   * numpy's svd returns W^T, so the left factor Q @ W needs a transpose
    #     of the returned matrix.
    A = np.asarray(A)
    # Take the (conjugate) transpose if necessary. It makes H smaller thus
    # leading the computations to be faster.
    isTransposed = False
    if A.shape[0] < A.shape[1]:
        A = A.T
        isTransposed = True
    n = A.shape[1]
    # Oversample slightly beyond the requested rank (see [1]).
    l = k + 2
    # Form a real nxl matrix G whose entries are iid Gaussian r.v.s of zero
    # mean and unit variance.
    G = np.random.randn(n, l)
    if usePowerMethod:
        # Power method: H = (A A^T)^(i-1) A G
        H = np.dot(A, G)
        for _ in range(2, i + 1):
            H = np.dot(A, np.dot(A.T, H))
    else:
        # Krylov blocks H^{(0)} = A G, H^{(j)} = A A^T H^{(j-1)},
        # stacked side by side into an m x (i*l) matrix.
        blocks = [np.dot(A, G)]
        for _ in range(1, i):
            blocks.append(np.dot(A, np.dot(A.T, blocks[-1])))
        H = np.hstack(blocks)
    # Using the QR-decomposition, form a matrix Q whose columns are
    # orthonormal, s.t. there exists a matrix R for which H = QR.
    Q, _ = np.linalg.qr(H)
    # Compute the small product matrix T = A^T Q
    T = np.dot(A.T, Q)
    # Form an SVD of T: T = Vt * diag(St) * Wh  (numpy returns W^T as Wh)
    Vt, St, Wh = np.linalg.svd(T, full_matrices=False)
    # A ~= Q Q^T A = Q T^T = (Q W) diag(St) Vt^T, so the left factor is Q W.
    Ut = np.dot(Q, Wh.T)
    # Retrieve the leading k columns / singular values; the product
    # U diag(S) V^T then approximates A.  Swap U and V back if we
    # decomposed the transpose.
    if isTransposed:
        V = Ut[:, :k]
        U = Vt[:, :k]
    else:
        U = Ut[:, :k]
        V = Vt[:, :k]
    S = St[:k]
    return U, S, V
#A = np.array([[1,2,3,4],[3,5,6,7],[2,8,3,1]])
#A = np.random.randn(2000,2000)
#start = time.clock()
#U2, S2, V2 = np.linalg.svd(A)
#print (time.clock() - start)
#start = time.clock()
#U, S, V = fsvd(A, 9, 3, usePowerMethod=True)
#print (time.clock() - start)
| {
"repo_name": "aasensio/pyAndres",
"path": "fsvd.py",
"copies": "1",
"size": "2897",
"license": "mit",
"hash": 4012535678350259700,
"line_mean": 26.5904761905,
"line_max": 75,
"alpha_frac": 0.6344494304,
"autogenerated": false,
"ratio": 2.4447257383966243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7980883621288448,
"avg_score": 0.11965830950163539,
"num_lines": 105
} |
"""All fuel shares of the base year for the
different technologies are defined in this file.
"""
from energy_demand.initalisations import helpers
def assign_by_fuel_tech_p(
        enduses,
        sectors,
        fueltypes,
        fueltypes_nr
):
    """Assign fuel share per enduse for different technologies
    for the base year.

    Arguments
    ----------
    enduses : dict
        Enduses
    sectors : dict
        Sectors per submodel
    fueltypes : dict
        Fueltypes lookup
    fueltypes_nr : int
        Number of fueltypes

    Returns
    -------
    fuel_tech_p_by : dict
        Fuel share percentages per enduse, fueltype and technology

    Note
    ----
    - In an enduse, either all fueltypes with assigned fuels need to be
      assigned with technologies or none. No mixing possible
    - Technologies can be defined for the following fueltypes:
        'solid_fuel': 0,
        'gas': 1,
        'electricity': 2,
        'oil': 3,
        'biomass': 4,
        'hydrogen': 5,
        'heat': 6
    - Not defined fueltypes will be assigned placeholder technologies
    """
    # Start from empty share dictionaries for every enduse of all submodels
    fuel_tech_p_by = {}
    _fuel_tech_p_by = helpers.init_fuel_tech_p_by(
        enduses['residential'], fueltypes_nr)
    fuel_tech_p_by.update(_fuel_tech_p_by)
    _fuel_tech_p_by = helpers.init_fuel_tech_p_by(
        enduses['service'], fueltypes_nr)
    fuel_tech_p_by.update(_fuel_tech_p_by)
    _fuel_tech_p_by = helpers.init_fuel_tech_p_by(
        enduses['industry'], fueltypes_nr)
    fuel_tech_p_by.update(_fuel_tech_p_by)

    # ====================
    # Residential Submodel
    # ====================

    # ---------------
    # rs_lighting
    # Calculated on the basis of ECUK Table 3.08
    # ---------------
    fuel_tech_p_by['rs_lighting'][fueltypes['electricity']] = {
        'standard_lighting_bulb': 0.04,
        'halogen': 0.56,
        'fluorescent_strip_lighting': 0.07,
        'energy_saving_lighting_bulb': 0.32,
        'LED': 0.01}

    # ---------------
    # rs_cold
    # Calculated on the basis of ECUK Table 3.08
    # ---------------
    fuel_tech_p_by['rs_cold'][fueltypes['electricity']] = {
        'chest_freezer': 0.087,
        'fridge_freezer': 0.588,
        'refrigerator': 0.143,
        'upright_freezer': 0.182}

    # ---------------
    # rs_cooking
    # Calculated on the basis of ECUK Table 3.08
    # Calculated on the assumption that 5 to 10%
    # of all households have induction hobs (https://productspy.co.uk/are-induction-hobs-safe/ (5-10%))
    # ---------------
    fuel_tech_p_by['rs_cooking'][fueltypes['electricity']] = {
        'hob_electricity': 0.95,
        'hob_induction_electricity': 0.05}
    fuel_tech_p_by['rs_cooking'][fueltypes['gas']] = {
        'hob_gas': 1.0}
    fuel_tech_p_by['rs_cooking'][fueltypes['hydrogen']] = {
        'hob_hydrogen': 1.0}
    fuel_tech_p_by['rs_cooking'][fueltypes['biomass']] = {
        'hob_biomass': 1.0}

    # ---------------
    # rs_wet
    # calculated on the basis of ECUK Table 3.08
    # ---------------
    fuel_tech_p_by['rs_wet'][fueltypes['electricity']] = {
        'washing_machine': 0.305,
        'washer_dryer': 0.157,
        'dishwasher': 0.220,
        'tumble_dryer': 0.318}

    # ---------------
    # rs_space_heating
    #
    # According to the DCLG (2014) English Housing Survey. Energy Report. doi: 10.1017/CBO9781107415324.004.
    # Annex Table 3.1, the following number of electric heating technologies can be found in the UK:
    #
    #   storage heaters             5.5 % of all houses
    #   electric room heaters       2.0 % of all houses
    #   electric central heating    0.65 % of all houses
    #
    # As heat pumps were not accounted for, they are taken from OFGEM (2015),
    # which states that there are about 0.1m heat pumps of about in total 27m
    # households in the UK. This corresponds to about 0.4 %. (see also Hannon 2015).
    # According to Hannon (2015), heat pumps account only for a tiny fraction of the UK
    # heat supply for buildings (approximately 0.2%). This percentage is subtracted from
    # the storage heaters.
    #
    #   storage heaters             5.1 % of all houses  --> ~ 62%  (100.0 / 8.15) * 5.1
    #       secondary_heater_electricity
    #   electric room heaters       2.0 % of all houses  --> ~ 25%  (100.0 / 8.15) * 2.0
    #   electric central heating    0.65 % of all houses --> ~ 8%   (100.0 / 8.15) * 0.65
    #   heat pumps                  0.4 % of all houses  --> ~ 0.5% (100.0 / 8.15) * 0.4
    #
    # OFGEM (2015); Insights paper on households with electric and other non-gas heating,
    # (December), 1–84.
    #
    # Hannon, M. J. (2015). Raising the temperature of the UK heat pump market:
    # Learning lessons from Finland. Energy Policy, 85, 369–375.
    # https://doi.org/10.1016/j.enpol.2015.06.016
    # ---------------
    fuel_tech_p_by['rs_space_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}
    fuel_tech_p_by['rs_space_heating'][fueltypes['oil']] = {
        'boiler_condensing_oil': 0.6,
        'boiler_oil': 0.4}
    # ---
    # According to table 3.19, 59.7% (43.5% + 14.3%) have some form of condensing boiler.
    # Today's share of district heating is about 2% of UK non-industrial demand
    # http://fes.nationalgrid.com/media/1215/160712-national-grid-dh-summary-report.pdf
    # ---
    fuel_tech_p_by['rs_space_heating'][fueltypes['gas']] = {
        'boiler_condensing_gas': 0.60,
        'boiler_gas': 0.37,
        'district_heating_CHP_gas': 0.03}
    fuel_tech_p_by['rs_space_heating'][fueltypes['electricity']] = {
        'district_heating_electricity' : 0,
        'storage_heater_electricity': 0.62,
        'secondary_heater_electricity':0.33,
        'heat_pumps_electricity': 0.05}
    fuel_tech_p_by['rs_space_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0,
        'district_heating_biomass': 0.0}
    fuel_tech_p_by['rs_space_heating'][fueltypes['hydrogen']] = {
        'fuel_cell_hydrogen': 0,
        'district_heating_fuel_cell': 0,
        'boiler_hydrogen': 1.0,
        'heat_pumps_hydrogen': 0.0}

    # -------------
    # Residential water heating
    # -------------
    fuel_tech_p_by['rs_water_heating'][fueltypes['gas']] = {
        'boiler_condensing_gas': 0.60,
        'boiler_gas': 0.37,
        'district_heating_CHP_gas': 0.03}
    fuel_tech_p_by['rs_water_heating'][fueltypes['electricity']] = {
        'storage_heater_electricity': 0.62,
        'secondary_heater_electricity':0.33,
        'heat_pumps_electricity': 0.05}
    fuel_tech_p_by['rs_water_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0,
        'district_heating_biomass': 0.0}
    fuel_tech_p_by['rs_water_heating'][fueltypes['hydrogen']] = {
        'boiler_hydrogen': 1.0}
    fuel_tech_p_by['rs_water_heating'][fueltypes['oil']] = {
        'boiler_oil': 1.0}
    fuel_tech_p_by['rs_water_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}

    # ===================
    # Service subModel
    # ===================

    # ss_lighting Simplified based on Table 5.09 (Office lighting)
    fuel_tech_p_by['ss_lighting'][fueltypes['electricity']] = {
        'halogen': 0.45,
        'fluorescent_strip_lighting': 0.07,
        'energy_saving_lighting_bulb': 0.47, #All different lighting next to halogen are summarised here ("non-halogen lighting")
        'LED': 0.01}

    # ----------------
    # Service space heating (ss_space_heating)
    # For ss_space heating the load profile is the same for all technologies
    # ----------------
    fuel_tech_p_by['ss_space_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}
    fuel_tech_p_by['ss_space_heating'][fueltypes['gas']] = {
        'district_heating_CHP_gas': 0.02,
        'boiler_condensing_gas': 0.6,
        'boiler_gas': 0.38}
    fuel_tech_p_by['ss_space_heating'][fueltypes['electricity']] = {
        'district_heating_electricity' : 0,
        'secondary_heater_electricity': 0.95,
        'heat_pumps_electricity': 0.05}
    fuel_tech_p_by['ss_space_heating'][fueltypes['oil']] = {
        'boiler_condensing_oil': 0.6,
        'boiler_oil': 0.4}
    fuel_tech_p_by['ss_space_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0}
    fuel_tech_p_by['ss_space_heating'][fueltypes['hydrogen']] = {
        'fuel_cell_hydrogen': 0,
        'boiler_hydrogen': 1.0,
        'heat_pumps_hydrogen': 0.0,
        'district_heating_fuel_cell': 0.0}

    # -------------
    # Service water heating
    # -------------
    fuel_tech_p_by['ss_water_heating'][fueltypes['gas']] = {
        'boiler_condensing_gas': 0.60,
        'boiler_gas': 0.37,
        'district_heating_CHP_gas': 0.03}
    fuel_tech_p_by['ss_water_heating'][fueltypes['electricity']] = {
        'storage_heater_electricity': 0.62,
        'secondary_heater_electricity':0.33,
        'heat_pumps_electricity': 0.05}
    fuel_tech_p_by['ss_water_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0,
        'district_heating_biomass': 0.0}
    fuel_tech_p_by['ss_water_heating'][fueltypes['hydrogen']] = {
        'boiler_hydrogen': 1.0}
    fuel_tech_p_by['ss_water_heating'][fueltypes['oil']] = {
        'boiler_oil': 1.0}
    fuel_tech_p_by['ss_water_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}

    # ------------------------------
    # Cooling
    # ECUK Table 5.09
    # ------------------------------
    fuel_tech_p_by['ss_cooling_humidification'][fueltypes['electricity']] = {
        'central_air_conditioner_electricity': 0.64,
        'decentral_air_conditioner_electricity': 0.36}
    fuel_tech_p_by['ss_cooling_humidification'][fueltypes['gas']] = {
        'central_air_conditioner_gas': 0.64,
        'decentral_air_conditioner_gas': 0.36}
    fuel_tech_p_by['ss_cooling_humidification'][fueltypes['oil']] = {
        'central_air_conditioner_oil': 0.64,
        'decentral_air_conditioner_oil': 0.36}

    # Helper: Transfer all defined shares for every enduse to every sector
    fuel_tech_p_by = helpers.copy_fractions_all_sectors(
        fuel_tech_p_by,
        sectors['service'],
        affected_enduses=enduses['service'])

    # ===================
    # Industry subModel - Fuel shares of technologies in enduse
    # ===================

    # ----------------
    # Industrial space heating (is_space_heating)
    # ----------------
    fuel_tech_p_by['is_space_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}
    fuel_tech_p_by['is_space_heating'][fueltypes['gas']] = {
        'district_heating_CHP_gas': 0.02,
        'boiler_condensing_gas': 0.6,
        'boiler_gas': 0.38}
    fuel_tech_p_by['is_space_heating'][fueltypes['electricity']] = {
        'district_heating_electricity' : 0,
        'secondary_heater_electricity': 0.95,
        'heat_pumps_electricity': 0.05,
        'storage_heater_electricity': 0}
    fuel_tech_p_by['is_space_heating'][fueltypes['oil']] = {
        'boiler_condensing_oil': 0.6,
        'boiler_oil': 0.4}
    fuel_tech_p_by['is_space_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0}
    fuel_tech_p_by['is_space_heating'][fueltypes['hydrogen']] = {
        'fuel_cell_hydrogen': 0,
        'boiler_hydrogen': 1.0,
        'heat_pumps_hydrogen': 0.0,
        'district_heating_fuel_cell': 0.0}

    # Helper: Transfer all defined shares for every enduse to every sector
    fuel_tech_p_by = helpers.copy_fractions_all_sectors(
        fuel_tech_p_by,
        sectors=sectors['industry'],
        affected_enduses=enduses['industry'])

    # ----------------
    # Industrial High temporal processes (is_high_temp_process)
    # ----------------
    # Today's share is about: 17% electric furnace, 82% basic oxygen (Key Statistics 2016, appea, EnergyQuest)
    #-- basic_metals (sector)
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['solid_fuel']] = {
        'basic_oxygen_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['electricity']] = {
        'electric_arc_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['gas']] = {
        'SNG_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['biomass']] = {
        'biomass_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['hydrogen']] = {
        'hydrogen_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['oil']] = {
        'oil_furnace': 1.0}

    #-- non_metallic_mineral_products
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['solid_fuel']] = {
        'dry_kiln_coal': 0.9,
        'wet_kiln_coal': 0.1}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['oil']] = {
        'dry_kiln_oil': 0.9,
        'wet_kiln_oil': 0.1}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['gas']] = {
        'dry_kiln_gas': 0.9,
        'wet_kiln_gas': 0.1}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['electricity']] = {
        'dry_kiln_electricity': 0.9,
        'wet_kiln_electricity': 0.1}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['biomass']] = {
        'dry_kiln_biomass': 1.0}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['hydrogen']] = {
        'dry_kiln_hydrogen': 1.0}

    return dict(fuel_tech_p_by)
| {
"repo_name": "nismod/energy_demand",
"path": "energy_demand/assumptions/fuel_shares.py",
"copies": "1",
"size": "13596",
"license": "mit",
"hash": -7506671172259957000,
"line_mean": 36.2383561644,
"line_max": 129,
"alpha_frac": 0.5760741613,
"autogenerated": false,
"ratio": 2.8381708081019004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39142449694019,
"avg_score": null,
"num_lines": null
} |
''' All functions acting on the hits of one DUT are listed here'''
from __future__ import division
import logging
import os.path
import re
import tables as tb
import numpy as np
from scipy.ndimage import median_filter
from pixel_clusterizer.clusterizer import HitClusterizer
from testbeam_analysis.tools import smc
from testbeam_analysis.tools import analysis_utils, plot_utils
from testbeam_analysis.tools.plot_utils import plot_masked_pixels, plot_cluster_size
def check_file(input_hits_file, n_pixel, output_check_file=None,
               event_range=1, plot=True, chunk_size=1000000):
    '''Checks the hit table to have proper data.

    The checks include:
      - hit definitions:
          - position has to start at 1 (not 0)
          - position should not exceed number of pixels (n_pixel)
      - event building
          - event number has to be monotone
          - hit position correlations of consecutive events are
            created. Should be zero for distinctly built events.

    Parameters
    ----------
    input_hits_file : string
        File name of the hit table.
    n_pixel : tuple
        Tuple of the total number of pixels (column/row).
    output_check_file : string
        Filename of the output file with the correlation histograms.
        If None, it is derived from input_hits_file.
    event_range : integer
        The range of events to correlate.
        E.g.: event_range = 2 correlates to the preceding event hits.
    plot : bool
        If True, create additional output plots.
    chunk_size : int
        Chunk size of the data when reading from file.
    '''
    logging.info('=== Check data of hit file %s ===', input_hits_file)

    if output_check_file is None:
        output_check_file = input_hits_file[:-3] + '_check.h5'

    # One compression filter instance shared by all output nodes
    out_filters = tb.Filters(complib='blosc', complevel=5, fletcher32=False)

    with tb.open_file(output_check_file, mode="w") as out_file_h5:
        with tb.open_file(input_hits_file, 'r') as input_file_h5:
            shape_column = (n_pixel[0], n_pixel[0])
            shape_row = (n_pixel[1], n_pixel[1])
            col_corr = np.zeros(shape_column, dtype=np.int32)
            row_corr = np.zeros(shape_row, dtype=np.int32)
            last_event = None  # last event number of the previous chunk
            out_dE = out_file_h5.create_earray(out_file_h5.root, name='EventDelta',
                                               title='Change of event number per non empty event',
                                               shape=(0, ),
                                               atom=tb.Atom.from_dtype(np.dtype(np.uint64)),
                                               filters=out_filters)
            out_E = out_file_h5.create_earray(out_file_h5.root, name='EventNumber',
                                              title='Event number of non empty event',
                                              shape=(0, ),
                                              atom=tb.Atom.from_dtype(np.dtype(np.uint64)),
                                              filters=out_filters)
            for hits, _ in analysis_utils.data_aligned_at_events(
                    input_file_h5.root.Hits,
                    chunk_size=chunk_size):
                if not np.all(np.diff(hits['event_number']) >= 0):
                    raise RuntimeError('The event number does not always increase. The hits cannot be used like this!')
                if np.any(hits['column'] < 1) or np.any(hits['row'] < 1):
                    raise RuntimeError('The column/row definition does not start at 1!')
                if (np.any(hits['column'] > n_pixel[0])
                        or np.any(hits['row'] > n_pixel[1])):
                    # BUG FIX: the message was previously passed logging-style
                    # (extra positional args to RuntimeError are never
                    # interpolated); format explicitly instead.
                    raise RuntimeError('The column/row definition exceeds the number '
                                       'of pixels (%s/%s)!' % (n_pixel[0], n_pixel[1]))
                analysis_utils.correlate_hits_on_event_range(hits,
                                                             col_corr,
                                                             row_corr,
                                                             event_range)
                event_numbers = np.unique(hits['event_number'])
                event_delta = np.diff(event_numbers)
                # BUG FIX: compare against None; a valid last event number 0
                # is falsy and used to skip the chunk-boundary delta.
                if last_event is not None:
                    event_delta = np.concatenate((np.array([event_numbers[0] - last_event]),
                                                  event_delta))
                last_event = event_numbers[-1]
                out_dE.append(event_delta)
                out_E.append(event_numbers)

            # Persist the accumulated correlation histograms
            out_col = out_file_h5.create_carray(out_file_h5.root, name='CorrelationColumns',
                                                title='Column Correlation with event range=%s' % event_range,
                                                atom=tb.Atom.from_dtype(col_corr.dtype),
                                                shape=col_corr.shape,
                                                filters=out_filters)
            out_row = out_file_h5.create_carray(out_file_h5.root, name='CorrelationRows',
                                                title='Row Correlation with event range=%s' % event_range,
                                                atom=tb.Atom.from_dtype(row_corr.dtype),
                                                shape=row_corr.shape,
                                                filters=out_filters)
            out_col[:] = col_corr
            out_row[:] = row_corr

    if plot:
        plot_utils.plot_checks(input_corr_file=output_check_file)
def generate_pixel_mask(input_hits_file, n_pixel, pixel_mask_name="NoisyPixelMask", output_mask_file=None, pixel_size=None, threshold=10.0, filter_size=3, dut_name=None, plot=True, chunk_size=1000000):
    '''Generating pixel mask from the hit table.

    Parameters
    ----------
    input_hits_file : string
        File name of the hit table.
    n_pixel : tuple
        Tuple of the total number of pixels (column/row).
    pixel_mask_name : string
        Name of the node containing the mask inside the output file.
    output_mask_file : string
        File name of the output mask file. If None, it is derived from
        input_hits_file and pixel_mask_name.
    pixel_size : tuple
        Tuple of the pixel size (column/row). If None, assuming square pixels.
    threshold : float
        The threshold for pixel masking. The threshold is given in units of
        sigma of the pixel noise (background subtracted). The lower the value
        the more pixels are masked.
    filter_size : scalar or tuple
        Adjust the median filter size by giving the number of columns and rows.
        The higher the value the more the background is smoothed and more
        pixels are masked.
    dut_name : string
        Name of the DUT. If None, file name of the hit table will be printed.
    plot : bool
        If True, create additional output plots.
    chunk_size : int
        Chunk size of the data when reading from file.

    Returns
    -------
    output_mask_file : string
        File name of the written mask file.
    '''
    # "NoisyPixelMask" -> "noisy pixel mask" for the log message
    logging.info('=== Generating %s for %s ===', ' '.join(item.lower() for item in re.findall('[A-Z][^A-Z]*', pixel_mask_name)), input_hits_file)

    if output_mask_file is None:
        output_mask_file = os.path.splitext(input_hits_file)[0] + '_' + '_'.join(item.lower() for item in re.findall('[A-Z][^A-Z]*', pixel_mask_name)) + '.h5'

    # Create occupancy histogram
    def work(hit_chunk):
        # Histogram hits of one chunk into a (n_columns, n_rows) occupancy
        # map; hit indices are 1-based, the histogram is 0-based.
        col, row = hit_chunk['column'], hit_chunk['row']
        return analysis_utils.hist_2d_index(col - 1, row - 1, shape=n_pixel)

    smc.SMC(table_file_in=input_hits_file,
            file_out=output_mask_file,
            func=work,
            node_desc={'name': 'HistOcc'},
            chunk_size=chunk_size)

    # Create mask from occupancy histogram
    with tb.open_file(output_mask_file, 'r+') as out_file_h5:
        occupancy = out_file_h5.root.HistOcc[:]
        # Run median filter across data, assuming 0 filling past the edges to get expected occupancy
        blurred = median_filter(occupancy.astype(np.int32), size=filter_size, mode='constant', cval=0.0)
        # Spot noisy pixels maxima by subtracting expected occupancy
        difference = np.ma.masked_array(occupancy - blurred)
        std = np.ma.std(difference)
        abs_occ_threshold = threshold * std
        occupancy = np.ma.masked_where(difference > abs_occ_threshold, occupancy)
        logging.info('Masked %d pixels at threshold %.1f in %s', np.ma.count_masked(occupancy), threshold, input_hits_file)
        # Generate tuple col / row array of hot pixels, do not use getmask()
        pixel_mask = np.ma.getmaskarray(occupancy)
        # Create masked pixels array
        masked_pixel_table = out_file_h5.create_carray(out_file_h5.root, name=pixel_mask_name, title='Pixel Mask', atom=tb.Atom.from_dtype(pixel_mask.dtype), shape=pixel_mask.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        masked_pixel_table[:] = pixel_mask

    if plot:
        plot_masked_pixels(input_mask_file=output_mask_file, pixel_size=pixel_size, dut_name=dut_name)

    return output_mask_file
def cluster_hits(input_hits_file, output_cluster_file=None, input_disabled_pixel_mask_file=None, input_noisy_pixel_mask_file=None, min_hit_charge=0, max_hit_charge=None, column_cluster_distance=1, row_cluster_distance=1, frame_cluster_distance=1, dut_name=None, plot=True, chunk_size=1000000):
'''Clusters the hits in the data file containing the hit table.
Parameters
----------
input_hits_file : string
Filename of the input hits file.
output_cluster_file : string
Filename of the output cluster file. If None, the filename will be derived from the input hits file.
input_disabled_pixel_mask_file : string
Filename of the input disabled mask file.
input_noisy_pixel_mask_file : string
Filename of the input disabled mask file.
min_hit_charge : uint
Minimum hit charge. Minimum possible hit charge must be given in order to correcly calculate the cluster coordinates.
max_hit_charge : uint
Maximum hit charge. Hits wit charge above the limit will be ignored.
column_cluster_distance : uint
Maximum column distance between hist so that they are assigned to the same cluster. Value of 0 effectively disables the clusterizer in column direction.
row_cluster_distance : uint
Maximum row distance between hist so that they are assigned to the same cluster. Value of 0 effectively disables the clusterizer in row direction.
frame_cluster_distance : uint
Sometimes an event has additional timing information (e.g. bunch crossing ID, frame ID). Value of 0 effectively disables the clusterization in time.
dut_name : string
Name of the DUT. If None, filename of the output cluster file will be used.
plot : bool
If True, create additional output plots.
chunk_size : int
Chunk size of the data when reading from file.
'''
logging.info('=== Clustering hits in %s ===', input_hits_file)
if output_cluster_file is None:
output_cluster_file = os.path.splitext(input_hits_file)[0] + '_clustered.h5'
# Get noisy and disabled pixel, they are excluded for clusters
if input_disabled_pixel_mask_file is not None:
with tb.open_file(input_disabled_pixel_mask_file, 'r') as input_mask_file_h5:
disabled_pixels = np.dstack(np.nonzero(input_mask_file_h5.root.DisabledPixelMask[:]))[0] + 1
else:
disabled_pixels = None
if input_noisy_pixel_mask_file is not None:
with tb.open_file(input_noisy_pixel_mask_file, 'r') as input_mask_file_h5:
noisy_pixels = np.dstack(np.nonzero(input_mask_file_h5.root.NoisyPixelMask[:]))[0] + 1
else:
noisy_pixels = None
# Prepare clusterizer
# Define end of cluster function to
# calculate the size in col/row for each cluster
def calc_cluster_dimensions(hits, clusters, cluster_size,
cluster_hit_indices, cluster_index, cluster_id,
charge_correction, noisy_pixels,
disabled_pixels, seed_hit_index):
min_col = hits[cluster_hit_indices[0]].column
max_col = hits[cluster_hit_indices[0]].column
min_row = hits[cluster_hit_indices[0]].row
max_row = hits[cluster_hit_indices[0]].row
for i in cluster_hit_indices[1:]:
if i < 0: # Not used indeces = -1
break
if hits[i].column < min_col:
min_col = hits[i].column
if hits[i].column > max_col:
max_col = hits[i].column
if hits[i].row < min_row:
min_row = hits[i].row
if hits[i].row > max_row:
max_row = hits[i].row
clusters[cluster_index].err_cols = max_col - min_col + 1
clusters[cluster_index].err_rows = max_row - min_row + 1
# Create clusterizer object with parameters
clz = HitClusterizer(column_cluster_distance=column_cluster_distance,
row_cluster_distance=row_cluster_distance,
frame_cluster_distance=frame_cluster_distance,
min_hit_charge=min_hit_charge,
max_hit_charge=max_hit_charge)
# Add an additional fields to hold the cluster size in x/y
clz.add_cluster_field(description=('err_cols', '<f4'))
clz.add_cluster_field(description=('err_rows', '<f4'))
clz.set_end_of_cluster_function(calc_cluster_dimensions)
# Run clusterizer on hit table in parallel on all cores
def cluster_func(hits, clz, noisy_pixels, disabled_pixels):
_, cl = clz.cluster_hits(hits,
noisy_pixels=noisy_pixels,
disabled_pixels=disabled_pixels)
return cl
smc.SMC(table_file_in=input_hits_file,
file_out=output_cluster_file,
func=cluster_func,
func_kwargs={'clz': clz,
'noisy_pixels': noisy_pixels,
'disabled_pixels': disabled_pixels},
node_desc={'name': 'Cluster'},
align_at='event_number',
chunk_size=chunk_size)
# Calculate cluster size histogram
def hist_func(cluster):
n_hits = cluster['n_hits']
hist = analysis_utils.hist_1d_index(n_hits,
shape=(np.max(n_hits) + 1,))
return hist
smc.SMC(table_file_in=output_cluster_file,
file_out=output_cluster_file[:-3] + '_hist.h5',
func=hist_func,
node_desc={'name': 'HistClusterSize'},
chunk_size=chunk_size)
# Load infos from cluster size for error determination and plotting
with tb.open_file(output_cluster_file[:-3] + '_hist.h5', 'r') as input_file_h5:
hight = input_file_h5.root.HistClusterSize[:]
n_clusters = hight.sum()
n_hits = (hight * np.arange(0, hight.shape[0])).sum()
max_cluster_size = hight.shape[0] - 1
# Calculate position error from cluster size
def get_eff_pitch(hist, cluster_size):
''' Effective pitch to describe the cluster
size propability distribution
hist : array like
Histogram with cluster size distribution
cluster_size : Cluster size to calculate the pitch for
'''
return np.sqrt(hight[int(cluster_size)].astype(np.float) / hight.sum())
def pos_error_func(clusters):
# Check if end_of_cluster function was called
# Under unknown and rare circumstances this might not be the case
if not np.any(clusters['err_cols']):
raise RuntimeError('Clustering failed, please report bug at:'
'https://github.com/SiLab-Bonn/testbeam_analysis/issues')
# Set errors for small clusters, where charge sharing enhances
# resolution
for css in [(1, 1), (1, 2), (2, 1), (2, 2)]:
sel = np.logical_and(clusters['err_cols'] == css[0],
clusters['err_rows'] == css[1])
clusters['err_cols'][sel] = get_eff_pitch(hist=hight,
cluster_size=css[0]) / np.sqrt(12)
clusters['err_rows'][sel] = get_eff_pitch(hist=hight,
cluster_size=css[1]) / np.sqrt(12)
# Set errors for big clusters, where delta electrons reduce resolution
sel = np.logical_or(clusters['err_cols'] > 2, clusters['err_rows'] > 2)
clusters['err_cols'][sel] = clusters['err_cols'][sel] / np.sqrt(12)
clusters['err_rows'][sel] = clusters['err_rows'][sel] / np.sqrt(12)
return clusters
smc.SMC(table_file_in=output_cluster_file,
file_out=output_cluster_file,
func=pos_error_func,
chunk_size=chunk_size)
# Copy masks to result cluster file
with tb.open_file(output_cluster_file, 'r+') as output_file_h5:
# Copy nodes to result file
if input_disabled_pixel_mask_file is not None:
with tb.open_file(input_disabled_pixel_mask_file, 'r') as input_mask_file_h5:
input_mask_file_h5.root.DisabledPixelMask._f_copy(newparent=output_file_h5.root)
if input_noisy_pixel_mask_file is not None:
with tb.open_file(input_noisy_pixel_mask_file, 'r') as input_mask_file_h5:
input_mask_file_h5.root.NoisyPixelMask._f_copy(newparent=output_file_h5.root)
if plot:
plot_cluster_size(output_cluster_file, dut_name=os.path.split(output_cluster_file)[1],
output_pdf_file=os.path.splitext(output_cluster_file)[0] + '_cluster_size.pdf',
chunk_size=chunk_size, gui=False)
return output_cluster_file
if __name__ == '__main__':
pass
| {
"repo_name": "SiLab-Bonn/testbeam_analysis",
"path": "testbeam_analysis/hit_analysis.py",
"copies": "2",
"size": "18424",
"license": "mit",
"hash": -3716936009864800000,
"line_mean": 48.3941018767,
"line_max": 293,
"alpha_frac": 0.5716456795,
"autogenerated": false,
"ratio": 3.98701579744644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.555866147694644,
"avg_score": null,
"num_lines": null
} |
# all functions are based on http://aerosols.wustl.edu/AAARworkshop08/software/AEROCALC-11-3-03.xls
import numpy as np
import warnings
from atmPy.aerosols import tools
def loss_in_a_T_junction(temperature=293.15,
                         pressure=65.3,
                         particle_diameter=2.5,
                         particle_velocity=30,
                         particle_density=1000,
                         pick_of_tube_diameter=2.15 * 1e-3,
                         verbose=False):
    """Returns the fraction of particles which make it from a main tubing into a
    T-like pick-of, based on the particle stopping distance.

    Arguments
    ---------
    temperature: float.
        Temperature in Kelvin.
    pressure: float.
        pressure in kPa.
    particle_diameter: float.
        in meter
    particle_velocity: float.
        in meter/second.
    particle_density: float.
        kg/m^3
    pick_of_tube_diameter: float.
        Diameter of the pick-of tube in meter.
    verbose: bool.

    Returns
    -------
    Penetrating fraction, clamped to >= 0.
    """
    pl = tools.stopping_distance(temperature=temperature,
                                 pressure=pressure,
                                 particle_diameter=particle_diameter,
                                 particle_velocity=particle_velocity,
                                 particle_density=particle_density,
                                 verbose=verbose)
    # Bug fix: when the stopping distance exceeds the pick-of tube diameter the
    # raw expression goes negative, which is not a physical fraction. Clamp to
    # 0 (the newer implementation of this function clamps the same way).
    out = np.maximum(1. - pl / pick_of_tube_diameter, 0.)
    if verbose:
        print('loss_in_a_T_junction: %s' % out)
    return out
def loss_at_an_abrupt_contraction_in_circular_tubing(temperature=293.15,  # Kelvin
                                                     pressure=101.3,  # kPa
                                                     particle_diameter=1,  # µm
                                                     particle_density=1000,  # kg/m^3
                                                     tube_air_velocity=False,  # m/s
                                                     flow_rate_in_inlet=3,  # cc/s
                                                     tube_diameter=0.0025,  # m
                                                     contraction_diameter=0.00125,  # m
                                                     contraction_angle=90,  # degrees
                                                     verbose=False,
                                                     ):
    """Penetrating fraction through an abrupt contraction in circular tubing.

    (B&W 8-69 to 8-71; W&B 6-54, 17-25)

    If ``tube_air_velocity`` is falsy it is derived from ``flow_rate_in_inlet``
    (cc/s) and ``tube_diameter``. Pressure is in kPa, temperature in K,
    particle diameter in µm, density in kg/m^3, geometry in m, angle in deg.
    """
    if not tube_air_velocity:
        tube_air_velocity = tools.flow_rate2flow_velocity(flow_rate_in_inlet, tube_diameter, verbose=verbose)

    stk = tools.stokes_number(particle_density, particle_diameter, pressure, temperature, tube_air_velocity, 1,
                              contraction_diameter, verbose=verbose)
    # Ratio of the contracted to the full cross-sectional area.
    area_ratio = (contraction_diameter / tube_diameter) ** 2
    impaction_term = (2 * stk * (1 - area_ratio)) / (3.14 * np.exp(-0.0185 * contraction_angle))
    return 1 - (1 / (1 + impaction_term ** -1.24))
def aspiration_efficiency_all_forward_angles(temperature=293.15,  # Kelvin
                                             pressure=101.3,  # kPa
                                             particle_diameter=10,  # µm
                                             particle_density=1000,  # kg/m^3
                                             inlet_diameter=0.025,  # m
                                             sampling_angle=46,  # degrees between 0 to 90°
                                             flow_rate_in_inlet=3,  # cc/s
                                             air_velocity_in_inlet=False,  # m/s
                                             velocity_ratio=5,  # R is 1 for isokinetic, > 1 for subisokinetic
                                             force=False,
                                             verbose=False):
    """Aspiration efficiency for forward sampling angles.

    (B&W 8-20, 8-21, 8-22; W&B 6-20, 6-21, 6-22)
    Hangal and Willeke Eviron. Sci. Tech. 24:688-691 (1990)

    If the sampling angle, velocity ratio or Stokes number fall outside the
    parametrization's validated regime a ValueError is raised; with
    ``force=True`` only a warning is emitted.
    """
    if not air_velocity_in_inlet:
        air_velocity_in_inlet = tools.flow_rate2flow_velocity(flow_rate_in_inlet, inlet_diameter, verbose=verbose)

    st_num = tools.stokes_number(particle_density, particle_diameter, pressure, temperature, air_velocity_in_inlet,
                                 velocity_ratio, inlet_diameter, verbose=verbose)
    # Validity regime of the Hangal & Willeke parametrization.
    in_regime = (45 < sampling_angle <= 90) and (1.25 < velocity_ratio < 6.25) and (0.003 < st_num < 0.2)
    if not in_regime:
        txt = """sampling angle, velocity ratio, or stokes number is not in valid regime!
        Sampling angle: %s (45 < angle < 90)
        Velocity ratio: %s (1.25 < ratio < 6.25)
        Stokes number: %s (0.003 < st_num < 0.2)""" % (sampling_angle, velocity_ratio, st_num)
        if force:
            warnings.warn(txt)
        else:
            raise ValueError(txt)

    if sampling_angle == 0:
        inertial_term = 1 - 1 / (1 + (2 + 0.617 / velocity_ratio) * st_num)
    elif sampling_angle > 45:
        inertial_term = 3 * st_num ** (velocity_ratio ** -0.5)
    else:
        # Angle-corrected Stokes number for shallow angles.
        st_corr = st_num * np.exp(0.022 * sampling_angle)
        inertial_term = (1 - 1 / (1 + (2 + 0.617 / velocity_ratio) * st_corr)) * (
            1 - 1 / (1 + 0.55 * st_corr * np.exp(0.25 * st_corr))) / (1 - 1 / (1 + 2.617 * st_corr))
    return 1 + (velocity_ratio * np.cos(sampling_angle * np.pi / 180) - 1) * inertial_term
def loss_in_a_bent_section_of_circular_tubing(temperature=293.15,  # Kelvin
                                              pressure=101.3,  # kPa
                                              particle_diameter=3.5,  # µm
                                              particle_density=1000,  # kg/m^3
                                              tube_air_flow_rate=3,  # cc/s
                                              tube_air_velocity=False,  # m/s
                                              tube_diameter=0.0025,  # m
                                              angle_of_bend=90,  # degrees
                                              flow_type='auto',
                                              verbose=False
                                              ):
    """Penetrating fraction through a bend in circular tubing.

    (B&W 8-66 to 8-68; W&B 6-52, 6-53)

    With ``flow_type='auto'`` the regime is classified from the Reynolds
    number; laminar and turbulent bends use different parametrizations.
    Raises ValueError for an unknown ``flow_type``.
    """
    if not tube_air_velocity:
        tube_air_velocity = tools.flow_rate2flow_velocity(tube_air_flow_rate, tube_diameter, verbose=verbose)
    if flow_type == 'auto':
        flow_type = tools.test_flow_type_in_tube(tube_diameter, tube_air_velocity, temperature, pressure, verbose=verbose)

    # Velocity ratio is 1 inside a continuous piece of tubing.
    stk = tools.stokes_number(particle_density, particle_diameter, pressure, temperature, tube_air_velocity,
                              1, tube_diameter, verbose=verbose)
    if flow_type == 'laminar':
        return 1 - stk * angle_of_bend * np.pi / 180.
    if flow_type == 'turbulent':
        return np.exp(-2.823 * stk * angle_of_bend * np.pi / 180)
    raise ValueError('Unknown flow type: %s' % flow_type)
def gravitational_loss_in_circular_tube(temperature=293.15,  # Kelvin
                                        pressure=101.3,  # kPa
                                        particle_diameter=10,  # µm
                                        particle_density=1000,  # kg/m^3
                                        tube_diameter=0.01,  # m
                                        tube_length=0.1,  # m
                                        incline_angle=60,  # degrees from horizontal (0-90)
                                        flow_rate=3,  # cc/s
                                        mean_flow_velocity=False,  # 0.1061 # m/s)
                                        flow_type='auto',
                                        verbose=False):
    """Fraction of particles penetrating an (inclined) circular tube despite
    gravitational settling. Only laminar flow is implemented.

    Arguments
    ---------
    temperature = 293.15, # Kelvin
    pressure = 101.3, # kPa
    particle_diameter = 10, # µm
    particle_density = 1000, # kg/m^3
    tube_diameter = 0.01, # m
    tube_length = 0.1, # m
    incline_angle = 60, # degrees from horizontal (0-90)
    flow_rate = 3, # cc/s
    mean_flow_velocity = False #0.1061 # m/s)

    Raises
    ------
    ValueError
        For turbulent flow (not implemented) or an unknown ``flow_type``.
    """
    if not mean_flow_velocity:
        mean_flow_velocity = tools.flow_rate2flow_velocity(flow_rate, tube_diameter, verbose=verbose)
    if flow_type == 'auto':
        flow_type = tools.test_flow_type_in_tube(tube_diameter, mean_flow_velocity, temperature, pressure, verbose=verbose)
    if flow_type == 'laminar':
        sv = tools.settling_velocity(temperature, particle_density, particle_diameter, pressure, verbose=verbose)
        k770 = np.cos(np.pi * incline_angle / 180) * 3 * sv * tube_length / (4 * tube_diameter * mean_flow_velocity)
        # Bug fix: k771 was left undefined on the full-deposition branch, so
        # the verbose print below raised a NameError. Initialize it up front.
        # (The redundant inner re-check of the same condition is removed.)
        k771 = 0
        if np.any((k770 ** (2. / 3)) > 1):
            # Settling parameter too large: every particle deposits.
            fract = 0
        else:
            k771 = np.arcsin(k770 ** (1 / 3.))
            fract = (1 - (2 / np.pi) * (
                2 * k770 * np.sqrt(1 - k770 ** (2 / 3)) + k771 - (k770 ** (1 / 3) * np.sqrt(1 - k770 ** (2 / 3)))))
            if np.any(fract < 0):
                fract = 0
    elif flow_type == 'turbulent':
        raise ValueError('Sorry this loss mechanism has not been implemented for turbulent flow')
    else:
        raise ValueError('Unknown flow type: %s' % flow_type)
    if verbose:
        print('k770: %s' % k770)
        print('k771: %s' % k771)
        print('fraction penetrating: %s' % fract)
    return fract
def gravitational_loss_in_an_inlet(temperature=293.15,  # K
                                   pressure=101.3,  # hPa
                                   particle_diameter=15,  # um
                                   particle_density=1000,  # kg/m^3
                                   inlet_diameter=0.0127,  # m
                                   inlet_length=1.,  # m
                                   sampling_angle=45,  # deg; keep between 0 to 90°
                                   air_velocity_in_inlet=30,  # m/s;
                                   velocity_ratio=1.5,
                                   # R is 1 for isokinetic, > 1 for subisokinetic, < 1 for superisokinetic
                                   verbose=False):
    """Gravitational losses in an inlet (B&W 8-23, 8-24; W&B 6-23, 6-24)
    Not to be mixed up with gravitational loss in a circular tube

    Arguments
    ---------
    temperature: float.
        Temperature in K.
    pressure: float.
        Pressure in kPa.
    particle_diameter: float.
        Aerosol particle diameter in micro meter.
    particle_density: float.
        Density of the particle material in kg/m^3.
    inlet_diameter: float.
        Inlet diameter in m.
    inlet_length: float.
        Inlent length in m.
    sampling_angle: float.
        Angle of the inlet in deg. 0 is horizontal; keep between 0 to 90°.
    air_velocity_in_inlet: float.
        Velocity of the air in inlet in m/s.
    velocity_ratio: float.
        Ratio between velocity outside and inside the inlet. R is 1 for isokinetic, > 1 for subisokinetic, < 1 for superisokinetic
    verbose: bool.
        if results are printed.
    """
    # Combined deposition parameter from the tools module, then the
    # exponential loss parametrization.
    dep_param = tools.K(sampling_angle, inlet_length, air_velocity_in_inlet, inlet_diameter, particle_density,
                        particle_diameter, pressure, temperature, velocity_ratio, verbose=verbose)
    out = np.exp(-4.7 * dep_param ** 0.75)
    if verbose:
        print('Fraction lost due to gravitation: %s' % out)
    return out
| {
"repo_name": "mtat76/atm-py",
"path": "build/lib/atmPy/aerosols/sampling_efficiency.py",
"copies": "3",
"size": "12616",
"license": "mit",
"hash": -2773366266132271600,
"line_mean": 43.8540925267,
"line_max": 162,
"alpha_frac": 0.4973817836,
"autogenerated": false,
"ratio": 3.606294706723891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5603676490323891,
"avg_score": null,
"num_lines": null
} |
# all functions are based on http://aerosols.wustl.edu/AAARworkshop08/software/AEROCALC-11-3-03.xls
import numpy as np
import warnings
def test_flow_type_in_tube(tube_diameter, tube_air_velocity, temperature, pressure, verbose=False):
    """Classify tube flow as 'laminar' (Re < 2000) or 'turbulent' (Re > 4000).

    Raises ValueError for Reynolds numbers in the ambiguous transition range.
    """
    reynolds = flow_reynolds_number(tube_diameter, tube_air_velocity, temperature, pressure, verbose=verbose)
    if np.all(reynolds < 2000):
        regime = 'laminar'
    elif np.all(reynolds > 4000):
        regime = 'turbulent'
    else:
        raise ValueError("""Flowtype can not be detected. Flow type is ambigues.""")
    if verbose:
        print("""Flow type: %s""" % regime)
    return regime
def air_density(temperature, pressure, verbose=False):
    """Air density in kg/m^3, scaled from 1.293 kg/m^3 at 273.15 K and 101.3 kPa.

    temperature in K, pressure in kPa.
    """
    rho = 1.293 * (273.15 / temperature) * (pressure / 101.3)
    if verbose:
        print('air density: %s' % rho)
    return rho
def particle_reynolds_number(temperature,  # Kelvin
                             pressure,  # kPa
                             particle_diameter,  # µm
                             particle_velocity,  # m/s (B&W 4-1; W&B 3-1; Hinds 2-41)
                             verbose=False
                             ):
    """Reynolds number of a particle moving relative to air.

    (B&W 4-1; W&B 3-1; Hinds 2-41)
    Particle diameter in µm (the 1e-6 factor converts to m).
    """
    rho = air_density(temperature, pressure, verbose=verbose)
    mu = air_viscosity(temperature, verbose=verbose)
    re_p = 0.000001 * rho * particle_diameter * particle_velocity / mu
    if verbose:
        print('Particle reynolds number: %s' % re_p)
    return re_p
def flow_reynolds_number(inlet_diameter, air_velocity_in_inlet, temperature, pressure, verbose=False):
    """Reynolds number of the air flow in an inlet: rho * d * v / mu."""
    rho = air_density(temperature, pressure, verbose=verbose)
    mu = air_viscosity(temperature, verbose=verbose)
    out = rho * inlet_diameter * air_velocity_in_inlet / mu
    if verbose:
        print('flow reynolds number: %s' % out)
    return out
def gravitational_dep_parameter(inlet_length, temperature, particle_density, particle_diameter, air_velocity_in_inlet,
                                inlet_diameter, verbose=False, pressure=101.3):
    """Gravitational deposition parameter: L * v_ts / (U * d).

    NOTE(review): this definition is shadowed by a second
    ``gravitational_dep_parameter`` further down in this module and is
    therefore dead code; it is repaired here only for safety.

    Bug fix: the original body called ``settling_velocity`` without its
    required ``pressure`` argument and raised a TypeError whenever invoked.
    ``pressure`` (kPa) is now an optional trailing parameter (default
    101.3 kPa), keeping existing call signatures valid.
    """
    out = inlet_length * settling_velocity(temperature, particle_density, particle_diameter, pressure,
                                           verbose=verbose) / (air_velocity_in_inlet * inlet_diameter)
    if verbose:
        # Fixed copy-paste label (previously printed 'flow reynolds number').
        print('gravitational_dep_parameter: %s' % out)
    return out
def stokes_number(particle_density, particle_diameter, pressure, temperature, air_velocity_in_inlet, velocity_ratio,
                  inlet_diameter, verbose=False):
    """Stokes number of a particle in an inlet flow.

    Particle relaxation time (from density, diameter in µm, slip correction
    and air viscosity) times the characteristic velocity, divided by the
    inlet diameter.
    """
    scf = slip_correction_factor(pressure, particle_diameter, verbose=verbose)
    mu = air_viscosity(temperature, verbose=verbose)
    # 1e-12 converts the squared µm diameter to m^2.
    relaxation_time = particle_density * particle_diameter ** 2 * 0.000000000001 * scf / (18 * mu)
    out = relaxation_time * air_velocity_in_inlet * velocity_ratio / inlet_diameter
    if verbose:
        print('stokes number: %s' % out)
    return out
def slip_correction_factor(pressure, particle_diameter, verbose=False):
    """Cunningham slip correction factor.

    pressure in kPa, particle diameter in µm.
    """
    knudsen_term = 6.32 + 2.01 * np.exp(-0.1095 * pressure * 0.752 * particle_diameter)
    scf = 1 + (2 / (pressure * particle_diameter * 0.752)) * knudsen_term
    if verbose:
        print('slip_correction_factor: %s' % scf)
    return scf
def air_viscosity(temperature, verbose=False):
    """Dynamic viscosity of air (Pa*s) via a Sutherland-type temperature law.

    temperature in K; reference value 1.708e-5 Pa*s at 273.15 K.
    """
    sutherland_factor = (393.396) / (temperature + 120.246)
    mu = 0.00001708 * ((temperature / 273.15) ** 1.5) * sutherland_factor
    if verbose:
        print('air_viscosity: %s' % mu)
    return mu
def settling_velocity(temperature, particle_density, particle_diameter, pressure, verbose=False):
    """Terminal gravitational settling velocity of a particle (Stokes regime).

    particle diameter in µm (1e-12 converts the squared diameter to m^2),
    density in kg/m^3, temperature in K, pressure in kPa.
    """
    scf = slip_correction_factor(pressure, particle_diameter, verbose=verbose)
    mu = air_viscosity(temperature, verbose=verbose)
    v_ts = particle_density * particle_diameter ** 2 * 0.000000000001 * 9.81 * scf / (18 * mu)
    if verbose:
        print('settling_velocity: %s' % v_ts)
    return v_ts
def gravitational_dep_parameter(inlet_length, air_velocity_in_inlet, inlet_diameter, temperature, particle_density,
                                particle_diameter, pressure, verbose=False):
    """Gravitational deposition parameter: L * v_ts / (U * d)."""
    v_ts = settling_velocity(temperature, particle_density, particle_diameter, pressure, verbose=verbose)
    out = inlet_length * v_ts / (air_velocity_in_inlet * inlet_diameter)
    if verbose:
        print('gravitational_dep_parameter: %s' % out)
    return out
def K(sampling_angle, inlet_length, air_velocity_in_inlet, inlet_diameter, particle_density, particle_diameter,
      pressure, temperature, velocity_ratio, verbose=False):
    """Combined deposition parameter for gravitational inlet losses:
    sqrt(Z * Stk) * Re^-0.25 * sqrt(cos(angle))."""
    gdp = gravitational_dep_parameter(inlet_length, air_velocity_in_inlet, inlet_diameter, temperature,
                                      particle_density, particle_diameter, pressure, verbose=verbose)
    stk = stokes_number(particle_density, particle_diameter, pressure, temperature, air_velocity_in_inlet,
                        velocity_ratio, inlet_diameter, verbose=verbose)
    re_flow = flow_reynolds_number(inlet_diameter, air_velocity_in_inlet, temperature, pressure, verbose=verbose)
    out = (np.sqrt(gdp * stk) * re_flow ** -0.25) * np.sqrt(np.cos(np.deg2rad(sampling_angle)))
    if verbose:
        print('K: %s' % out)
    return out
def stopping_distance(temperature=293.15,  # Kelvin
                      pressure=65.3,  # kPa
                      particle_diameter=2.5,  # µm
                      particle_velocity=30,  # m/s
                      particle_density=1000,  # kg/m^3
                      verbose=False, ):
    """Stopping distance of a particle injected into still air, in meter.

    (B&W 4-34, 36; W&B 3-34, 3-36; Hinds 5-19, 20, 21)
    Uses the Stokes-regime formula for particle Reynolds numbers below 1 and
    the turbulent-drag formula otherwise.
    """
    re_p = particle_reynolds_number(temperature, pressure, particle_diameter, particle_velocity, verbose=False)
    scf = slip_correction_factor(pressure, particle_diameter, verbose=verbose)
    mu = air_viscosity(temperature, verbose=verbose)
    rho_air = air_density(temperature, pressure, verbose=verbose)
    # NOTE(review): np.any picks one formula for the whole input; arrays mixing
    # Re < 1 and Re >= 1 are not handled element-wise -- confirm this is only
    # used with scalar inputs.
    if np.any(re_p < 1):
        out = particle_density * particle_velocity * particle_diameter ** 2 * 0.000000000001 * scf / (mu * 18)
    else:
        cube_root = (re_p) ** (1 / 3)
        out = particle_density * particle_diameter * 0.000001 * (
            cube_root - np.arctan(cube_root / np.sqrt(6)) * np.sqrt(6)) / rho_air
    if verbose:
        print('stopping distance: %s m' % out)
    return out
##########################
def flow_rate2flow_velocity(flow_rate, diameter_tubing, verbose=False):
    """Convert a volumetric flow rate into the mean flow velocity in a round tube.

    Parameters
    ----------
    flow_rate: float.
        in cc/s
    diameter_tubing: float.
        in m

    Returns
    -------
    flow velocity in m/s
    """
    rate_m3_per_s = flow_rate * 1e-6  # cc/s -> m^3/s
    velocity = 4 * rate_m3_per_s / (np.pi * diameter_tubing ** 2)
    if verbose:
        print('mean flow velocity: %s' % velocity)
    return velocity
| {
"repo_name": "mtat76/atm-py",
"path": "build/lib/atmPy/aerosols/tools.py",
"copies": "4",
"size": "7294",
"license": "mit",
"hash": -2197643134375301600,
"line_mean": 39.2762430939,
"line_max": 143,
"alpha_frac": 0.5998628258,
"autogenerated": false,
"ratio": 3.4747378455672067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6074600671367207,
"avg_score": null,
"num_lines": null
} |
# all functions are based on http://aerosols.wustl.edu/AAARworkshop08/software/AEROCALC-11-3-03.xls
import warnings
import numpy as np
import pandas as pd
from atmPy.aerosols.physics import _tools_sampling_efficiency
def loss_in_a_T_junction(temperature=293.15,
                         pressure=65.3,
                         particle_diameter=2.5,
                         particle_velocity=30,
                         particle_density=1000,
                         pick_of_tube_diameter=2.15 * 1e-3,
                         verbose=False):
    """Returns the fraction of particles which make it from a main tubing into a
    T-like pick-of, based on the particle stopping distance.

    Accepts a scalar or an iterable of particle diameters; the result is a
    DataFrame with one column per diameter, with negative (unphysical)
    fractions set to 0.

    Arguments
    ---------
    temperature: float.
        Temperature in Kelvin.
    pressure: float.
        pressure in kPa.
    particle_diameter: float.
        in meter
    particle_velocity: float.
        in meter/second.
    particle_density: float.
        kg/m^3
    verbose: bool.
    """
    # Effective diameter: average of the square duct's inner width and its
    # diagonal, mapped back through the sqrt(2) diagonal factor.
    outer_diag = np.sqrt(2) * pick_of_tube_diameter
    inner_diag = pick_of_tube_diameter
    mean_diag = (outer_diag + inner_diag) / 2
    effective_diameter = mean_diag / np.sqrt(2)

    diameters = particle_diameter if hasattr(particle_diameter, '__iter__') else [particle_diameter]
    result = pd.DataFrame()
    for diameter in diameters:
        stop_dist = _tools_sampling_efficiency.stopping_distance(temperature=temperature,
                                                                 pressure=pressure,
                                                                 particle_diameter=diameter,
                                                                 particle_velocity=particle_velocity,
                                                                 particle_density=particle_density,
                                                                 verbose=verbose)
        penetration = 1. - stop_dist / effective_diameter
        result[diameter] = penetration if hasattr(penetration, '__iter__') else [penetration]
    # A negative fraction is unphysical -> total loss.
    result[result < 0] = 0
    if verbose:
        print('loss_in_a_T_junction: %s' % result)
    return result
def loss_at_an_abrupt_contraction_in_circular_tubing(temperature=293.15,  # Kelvin
                                                     pressure=101.3,  # kPa
                                                     particle_diameter=1,  # µm
                                                     particle_density=1000,  # kg/m^3
                                                     tube_air_velocity=False,  # m/s
                                                     flow_rate_in_inlet=3,  # cc/s
                                                     tube_diameter=0.0025,  # m
                                                     contraction_diameter=0.00125,  # m
                                                     contraction_angle=90,  # degrees
                                                     verbose=False,
                                                     ):
    """Penetrating fraction through an abrupt contraction in circular tubing.

    (B&W 8-69 to 8-71; W&B 6-54, 17-25)

    If ``tube_air_velocity`` is falsy it is derived from ``flow_rate_in_inlet``
    (cc/s) and ``tube_diameter``. Pressure in kPa, temperature in K, particle
    diameter in µm, density in kg/m^3, geometry in m, angle in deg.
    """
    if not tube_air_velocity:
        tube_air_velocity = _tools_sampling_efficiency.flow_rate2flow_velocity(flow_rate_in_inlet, tube_diameter, verbose=verbose)

    stk = _tools_sampling_efficiency.stokes_number(particle_density, particle_diameter, pressure, temperature,
                                                   tube_air_velocity, 1, contraction_diameter, verbose=verbose)
    # Ratio of the contracted to the full cross-sectional area.
    area_ratio = (contraction_diameter / tube_diameter) ** 2
    impaction_term = (2 * stk * (1 - area_ratio)) / (3.14 * np.exp(-0.0185 * contraction_angle))
    return 1 - (1 / (1 + impaction_term ** -1.24))
# def aspiration_efficiency_all_forward_angles(temperature=293.15, # Kelvin
# pressure=101.3, # kPa
# particle_diameter=10, # µm
# particle_density=1000, # kg/m^3
# inlet_diameter=0.025, # m
# sampling_angle=46, # degrees between 0 to 90°
# flow_rate_in_inlet=3, # cc/s
# air_velocity_in_inlet=False, # m/s
# velocity_ratio=5, # R is 1 for isokinetic, > 1 for subisokinetic
# force=False,
# verbose=False):
# """
# (B&W 8-20, 8-21, 8-22; W&B 6-20, 6-21, 6-22)
# Hangal and Willeke Eviron. Sci. Tech. 24:688-691 (1990)
# Temperature 293.15 Kelvin
# Pressure 101.3 kPa
# Particle diameter 10 µm
# Particle density 1000 kg/m^3
# Inlet diameter 0.025 m
# Sampling angle 46 degrees between 0 to 90°
# Air velocity in inlet (Vi) 0.34 m/s
# Velocity ratio (Vw/Vi) 5 R is 1 for isokinetic, > 1 for subisokinetic
# """
# if not air_velocity_in_inlet:
# air_velocity_in_inlet = _tools_sampling_efficiency.flow_rate2flow_velocity(flow_rate_in_inlet, inlet_diameter, verbose=verbose)
#
# st_num = _tools_sampling_efficiency.stokes_number(particle_density, particle_diameter, pressure, temperature, air_velocity_in_inlet,
# velocity_ratio, inlet_diameter, verbose=verbose)
# # rey_num = tools.flow_reynolds_number(inlet_diameter, air_velocity_in_inlet, temperature, pressure, verbose=verbose)
# if np.any(45 < sampling_angle) and np.any(sampling_angle <= 90) \
# and np.any(1.25 < velocity_ratio) and np.any(velocity_ratio < 6.25) \
# and np.any(0.003 < st_num) and np.any(st_num < 0.2):
# pass
# else:
# txt = """sampling angle, velocity ratio, or stokes number is not in valid regime!
# Sampling angle: %s (45 < angle < 90)
# Velocity ratio: %s (1.25 < ratio < 6.25)
# Stokes number: %s (0.003 < st_num < 0.2)""" % (sampling_angle, velocity_ratio, st_num)
# if force:
# warnings.warn(txt)
# else:
# raise ValueError(txt)
#
# if sampling_angle == 0:
# inert_param = 1 - 1 / (1 + (2 + 0.617 / velocity_ratio) * st_num)
# elif sampling_angle > 45:
# inert_param = 3 * st_num ** (velocity_ratio ** -0.5)
# else:
# f619 = st_num * np.exp(0.022 * sampling_angle)
# inert_param = (1 - 1 / (1 + (2 + 0.617 / velocity_ratio) * f619)) * (
# 1 - 1 / (1 + 0.55 * f619 * np.exp(0.25 * f619))) / (1 - 1 / (1 + 2.617 * f619))
# # =IF(B609=0,1-1/(1+(2+0.617/B611)*B618),IF(B609>45,3*B618^(B611^-0.5),(1-1/(1+(2+0.617/B611)*B619))*(1-1/(1+0.55*B619*EXP(0.25*B619)))/(1-1/(1+2.617*B619))))
#
#
# asp_eff = 1 + (velocity_ratio * np.cos(sampling_angle * np.pi / 180) - 1) * inert_param
# return asp_eff
def loss_in_a_bent_section_of_circular_tubing(temperature=293.15,  # Kelvin
                                              pressure=101.3,  # kPa
                                              particle_diameter=3.5,  # µm
                                              particle_density=1000,  # kg/m^3
                                              tube_air_flow_rate=3,  # cc/s
                                              tube_air_velocity=False,  # m/s
                                              tube_diameter=0.0025,  # m
                                              angle_of_bend=90,  # degrees
                                              flow_type='auto',
                                              verbose=False
                                              ):
    """Penetrating fraction through a bend in circular tubing.

    (B&W 8-66 to 8-68; W&B 6-52, 6-53)

    With ``flow_type='auto'`` the regime is classified from the Reynolds
    number; laminar and turbulent bends use different parametrizations.
    Raises ValueError for an unknown ``flow_type``.
    """
    if not tube_air_velocity:
        tube_air_velocity = _tools_sampling_efficiency.flow_rate2flow_velocity(tube_air_flow_rate, tube_diameter, verbose=verbose)
    if flow_type == 'auto':
        flow_type = _tools_sampling_efficiency.test_flow_type_in_tube(tube_diameter, tube_air_velocity, temperature, pressure, verbose=verbose)

    # Velocity ratio is 1 inside a continuous piece of tubing.
    stk = _tools_sampling_efficiency.stokes_number(particle_density, particle_diameter, pressure, temperature,
                                                   tube_air_velocity, 1, tube_diameter, verbose=verbose)
    if flow_type == 'laminar':
        return 1 - stk * angle_of_bend * np.pi / 180.
    if flow_type == 'turbulent':
        return np.exp(-2.823 * stk * angle_of_bend * np.pi / 180)
    raise ValueError('Unknown flow type: %s' % flow_type)
def gravitational_loss_in_circular_tube(temperature=293.15,  # Kelvin
                                        pressure=101.3,  # kPa
                                        particle_diameter=10,  # µm
                                        particle_density=1000,  # kg/m^3
                                        tube_diameter=0.01,  # m
                                        tube_length=0.1,  # m
                                        incline_angle=60,  # degrees from horizontal (0-90)
                                        flow_rate=3,  # cc/s
                                        mean_flow_velocity=False,  # 0.1061 # m/s)
                                        flow_type='auto',
                                        verbose=False):
    """Fraction of particles penetrating an (inclined) circular tube despite
    gravitational settling. Only laminar flow is implemented.

    Arguments
    ---------
    temperature = 293.15, # Kelvin
    pressure = 101.3, # kPa
    particle_diameter = 10, # µm
    particle_density = 1000, # kg/m^3
    tube_diameter = 0.01, # m
    tube_length = 0.1, # m
    incline_angle = 60, # degrees from horizontal (0-90)
    flow_rate = 3, # cc/s
    mean_flow_velocity = False #0.1061 # m/s)

    Raises
    ------
    ValueError
        For turbulent flow (not implemented) or an unknown ``flow_type``.
    """
    if not mean_flow_velocity:
        mean_flow_velocity = _tools_sampling_efficiency.flow_rate2flow_velocity(flow_rate, tube_diameter, verbose=verbose)
    if flow_type == 'auto':
        flow_type = _tools_sampling_efficiency.test_flow_type_in_tube(tube_diameter, mean_flow_velocity, temperature, pressure, verbose=verbose)
    if flow_type == 'laminar':
        sv = _tools_sampling_efficiency.settling_velocity(temperature, particle_density, particle_diameter, pressure, verbose=verbose)
        k770 = np.cos(np.pi * incline_angle / 180) * 3 * sv * tube_length / (4 * tube_diameter * mean_flow_velocity)
        # Bug fix: k771 was left undefined on the full-deposition branch, so
        # the verbose print below raised a NameError. Initialize it up front.
        # (The redundant inner re-check of the same condition is removed.)
        k771 = 0
        if np.any((k770 ** (2. / 3)) > 1):
            # Settling parameter too large: every particle deposits.
            fract = 0
        else:
            k771 = np.arcsin(k770 ** (1 / 3.))
            fract = (1 - (2 / np.pi) * (
                2 * k770 * np.sqrt(1 - k770 ** (2 / 3)) + k771 - (k770 ** (1 / 3) * np.sqrt(1 - k770 ** (2 / 3)))))
            if np.any(fract < 0):
                fract = 0
    elif flow_type == 'turbulent':
        raise ValueError('Sorry this loss mechanism has not been implemented for turbulent flow')
    else:
        raise ValueError('Unknown flow type: %s' % flow_type)
    if verbose:
        print('k770: %s' % k770)
        print('k771: %s' % k771)
        print('fraction penetrating: %s' % fract)
    return fract
# def gravitational_loss_in_an_inlet(temperature=293.15, # K
# pressure=101.3, # hPa ... kPa?!?
# particle_diameter=15, # um
# particle_density=1000, # kg/m^3
# inlet_diameter=0.0127, # m
# inlet_length=1., # m
# sampling_angle=45, # deg; keep between 0 to 90°
# air_velocity_in_inlet=30, # m/s;
# velocity_ratio=1.5,
# # R is 1 for isokinetic, > 1 for subisokinetic, < 1 for superisokinetic
# verbose=False):
# """Gravitational losses in an inlet (B&W 8-23, 8-24; W&B 6-23, 6-24)
# Not to be mixed up with gravitational loss in a circular tube
#
# Arguments
# ---------
# temperature: float.
# Temperature in K.
# pressure: float.
# Pressure in kPa.
# particle_diameter: float.
# Aerosol particle diameter in micro meter.
# particle_density: float.
# Density of the particle material in kg/m^3.
# inlet_diameter: float.
# Inlet diameter in m.
# inlet_length: float.
# Inlent length in m.
# sampling_angle: float.
# Angle of the inlet in deg. 0 is horizontal; keep between 0 to 90°.
# air_velocity_in_inlet: float.
# Velocity of the air in inlet in m/s.
# velocity_ratio: float.
# Ratio between velocity outside and inside the inlet. R is 1 for isokinetic, > 1 for subisokinetic, < 1 for superisokinetic
# verbose: bool.
# if results are printed.
# """
#
# out = np.exp(-4.7 * _tools_sampling_efficiency.K(sampling_angle, inlet_length, air_velocity_in_inlet, inlet_diameter, particle_density,
# particle_diameter, pressure, temperature, velocity_ratio, verbose=verbose) ** 0.75)
# if verbose:
# print('Fraction lost due to gravitation: %s' % out)
# return out
def inlet_efficiency_isoaxial_horizontal_sharp_edged(temperature = 293.15, # Kelvin
                                                     pressure = 101.3, # kPa
                                                     particle_diameter = 15, # µm
                                                     particle_density = 1000, # kg/m^3
                                                     inlet_diameter = 0.0127, # m
                                                     inlet_length = 0.1, # m
                                                     sampling_flow_rate = 3, # cc/s
                                                     air_velocity_inlet = False,# m/s
                                                     ambient_air_speed = 25.5, # m/s
                                                     velocity_ratio = False, # R is 1 for isokinetic, > 1 for subisokinetic
                                                     verbose = False
                                                     ):
    """
    Inlet efficiency for an isoaxial horizontal sharp-edged inlet:
    aspiration * gravitational settling * initial transmission * vena
    contracta (B&W 8-8, 8-14, 8-16, 8-18; W&B 6-8, 6-14, 6-16, 6-18, Hinds 10-7).

    If ``air_velocity_inlet`` is falsy it is derived from
    ``sampling_flow_rate`` and ``inlet_diameter``; if ``velocity_ratio`` is
    falsy it is derived as ``ambient_air_speed / air_velocity_inlet``.

    Returns
    -------
    Combined sampling efficiency (product of the four factors).
    """
    if not air_velocity_inlet:
        air_velocity_inlet = _tools_sampling_efficiency.flow_rate2flow_velocity(sampling_flow_rate, inlet_diameter, verbose=verbose)
    if not velocity_ratio:
        velocity_ratio = ambient_air_speed / air_velocity_inlet

    st_no = _tools_sampling_efficiency.stokes_number(particle_density, particle_diameter, pressure, temperature, air_velocity_inlet, velocity_ratio, inlet_diameter, verbose=verbose)

    # Aspiration efficiency.
    asp_eff = 1 + (velocity_ratio - 1) * (1 - (1 / (1 + (2 + (0.617 / velocity_ratio)) * st_no)))

    # Gravitational settling inside the inlet.
    set_vel = _tools_sampling_efficiency.settling_velocity(temperature, particle_density, particle_diameter, pressure, verbose=verbose)
    grav_param = (inlet_length / air_velocity_inlet) / (inlet_diameter / set_vel)
    flow_reyno = _tools_sampling_efficiency.flow_reynolds_number(inlet_diameter, air_velocity_inlet, temperature, pressure, verbose=verbose)
    grav_eff = np.exp(-4.7 * (np.sqrt(grav_param * st_no / np.sqrt(flow_reyno)))**0.75)

    # Initial transmission: only reduced for sub-isokinetic sampling (R > 1).
    if velocity_ratio > 1:
        init_trans_eff = (1 + (velocity_ratio - 1) / (1 + 2.66 / st_no**(2/3))) / (1 + (velocity_ratio - 1) / (1 + 0.418 / st_no))
    else:
        init_trans_eff = 1

    # Vena contracta: only reduced for super-isokinetic sampling (R < 1).
    if velocity_ratio < 1:
        vena_eff = np.exp(-75 * (0.09 * (st_no * (air_velocity_inlet - air_velocity_inlet * velocity_ratio) / (air_velocity_inlet * velocity_ratio))**0.3)**2)
    else:
        vena_eff = 1

    return asp_eff * grav_eff * init_trans_eff * vena_eff
def test_inlet_efficiency_isoaxial_horizontal_sharp_edged():
    """Regression test for ``inlet_efficiency_isoaxial_horizontal_sharp_edged``.

    Returns True on success; raises ValueError on mismatch with the
    reference value.
    """
    calc = inlet_efficiency_isoaxial_horizontal_sharp_edged(temperature=293.15,
                                                            pressure=101.3,
                                                            particle_diameter=15,
                                                            particle_density=1000,
                                                            inlet_diameter=0.0127,
                                                            inlet_length=0.1,
                                                            air_velocity_inlet=0.05,
                                                            velocity_ratio=15,
                                                            verbose=False)
    # Bug fix: the original compared floats with exact inequality
    # (calc != 0.71686004968714068), which is fragile across platforms and
    # numpy versions. Compare against the reference with a tight tolerance.
    if abs(calc - 0.71686004968714068) > 1e-12:
        txt = "Test failed for inlet_efficiency_isoaxial_horizontal_sharp_edged"
        raise ValueError(txt)
    else:
        return True
| {
"repo_name": "hagne/atm-py",
"path": "atmPy/aerosols/physics/sampling_efficiency.py",
"copies": "1",
"size": "18101",
"license": "mit",
"hash": 6516389514747485000,
"line_mean": 45.0229007634,
"line_max": 181,
"alpha_frac": 0.4925637198,
"autogenerated": false,
"ratio": 3.574505928853755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4567069648653755,
"avg_score": null,
"num_lines": null
} |
''' All functions creating results (e.g. efficiency, residuals, track density) from fitted tracks are listed here.'''
from __future__ import division
import logging
import re
from collections import Iterable
import os.path
import tables as tb
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from scipy.stats import binned_statistic_2d
from scipy.optimize import curve_fit
from testbeam_analysis.tools import plot_utils
from testbeam_analysis.tools import geometry_utils
from testbeam_analysis.tools import analysis_utils
def calculate_residuals(input_tracks_file, input_alignment_file, n_pixels, pixel_size, output_residuals_file=None, dut_names=None, use_duts=None, max_chi2=None, nbins_per_pixel=None, npixels_per_bin=None, force_prealignment=False, use_fit_limits=True, cluster_size_selection=None, plot=True, gui=False, chunk_size=1000000):
    '''Takes the tracks and calculates residuals for selected DUTs in col, row direction.

    For every selected DUT the residual (hit position minus track intersection)
    is histogrammed in the global (x, y) and the local (column, row) coordinate
    system, fitted, and stored together with the bin edges and fit results in
    the output HDF5 file. Tracks are processed in chunks so arbitrarily large
    input files can be handled.

    Parameters
    ----------
    input_tracks_file : string
        Filename of the input tracks file.
    input_alignment_file : string
        Filename of the input alignment file.
    n_pixels : iterable of tuples
        One tuple per DUT describing the number of pixels in column, row direction
        e.g. for 2 DUTs: n_pixels = [(80, 336), (80, 336)]
    pixel_size : iterable of tuples
        One tuple per DUT describing the pixel dimension in um in column, row direction
        e.g. for 2 DUTs: pixel_size = [(250, 50), (250, 50)]
    output_residuals_file : string
        Filename of the output residuals file. If None, the filename will be derived from the input hits file.
    dut_names : iterable
        Name of the DUTs. If None, DUT numbers will be used.
    use_duts : iterable
        The duts to calculate residuals for. If None all duts in the input_tracks_file are used
    max_chi2 : uint, iterable
        Use only not heavily scattered tracks to increase track pointing resolution (cut on chi2).
        Cut can be a number and is used then for all DUTS or a list with a chi 2 cut for each DUT.
        If None, no cut is applied.
    nbins_per_pixel : int
        Number of bins per pixel along the residual axis. Number is a positive integer or None to automatically set the binning.
    npixels_per_bin : int
        Number of pixels per bin along the position axis. Number is a positive integer or None to automatically set the binning.
    force_prealignment : bool
        Take the prealignment, although if a coarse alignment is available.
    use_fit_limits : bool
        NOTE(review): currently unused in this function body — kept for
        interface compatibility; confirm against callers before removing.
    cluster_size_selection : uint
        Select which cluster sizes should be included for residual calculation. If None all cluster sizes are taken.
    plot : bool
        If True, create additional output plots.
    gui : bool
        If True, use GUI for plotting.
    chunk_size : int
        Chunk size of the data when reading from file.

    Returns
    -------
    figs : list or None
        List of matplotlib figures when ``gui`` is True, otherwise None
        (results are written to ``output_residuals_file`` / PDF instead).
    '''
    logging.info('=== Calculating residuals ===')

    use_prealignment = True if force_prealignment else False

    # Load the (pre-)alignment table; its length defines the number of DUTs.
    with tb.open_file(input_alignment_file, mode="r") as in_file_h5:  # Open file with alignment data
        if use_prealignment:
            logging.info('Use pre-alignment data')
            prealignment = in_file_h5.root.PreAlignment[:]
            n_duts = prealignment.shape[0]
        else:
            logging.info('Use alignment data')
            alignment = in_file_h5.root.Alignment[:]
            n_duts = alignment.shape[0]

    if output_residuals_file is None:
        output_residuals_file = os.path.splitext(input_tracks_file)[0] + '_residuals.h5'

    if plot is True and not gui:
        output_pdf = PdfPages(os.path.splitext(output_residuals_file)[0] + '.pdf', keep_empty=False)
    else:
        output_pdf = None

    figs = [] if gui else None

    # Broadcast a scalar chi2 cut to one entry per DUT.
    if not isinstance(max_chi2, Iterable):
        max_chi2 = [max_chi2] * n_duts

    with tb.open_file(input_tracks_file, mode='r') as in_file_h5:
        with tb.open_file(output_residuals_file, mode='w') as out_file_h5:
            # One node per fitted DUT; the DUT index is encoded in the node name.
            for node in in_file_h5.root:
                actual_dut = int(re.findall(r'\d+', node.name)[-1])
                if use_duts and actual_dut not in use_duts:
                    continue
                logging.debug('Calculate residuals for DUT%d', actual_dut)

                initialize = True  # initialize the histograms
                for tracks_chunk, _ in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size):

                    # select good hits and tracks
                    selection = np.logical_and(~np.isnan(tracks_chunk['x_dut_%d' % actual_dut]), ~np.isnan(tracks_chunk['track_chi2']))
                    tracks_chunk = tracks_chunk[selection]  # Take only tracks where actual dut has a hit, otherwise residual wrong

                    if cluster_size_selection is not None:
                        tracks_chunk = tracks_chunk[tracks_chunk['n_hits_dut_%d' % actual_dut] == cluster_size_selection]

                    if max_chi2[actual_dut] is not None:
                        tracks_chunk = tracks_chunk[tracks_chunk['track_chi2'] <= max_chi2[actual_dut]]

                    # Coordinates in global coordinate system (x, y, z)
                    hit_x, hit_y, hit_z = tracks_chunk['x_dut_%d' % actual_dut], tracks_chunk['y_dut_%d' % actual_dut], tracks_chunk['z_dut_%d' % actual_dut]
                    intersection_x, intersection_y, intersection_z = tracks_chunk['offset_0'], tracks_chunk['offset_1'], tracks_chunk['offset_2']

                    # Transform to local coordinate system
                    if use_prealignment:
                        hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
                                                                                               dut_index=actual_dut,
                                                                                               prealignment=prealignment,
                                                                                               inverse=True)
                        intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
                                                                                                                          dut_index=actual_dut,
                                                                                                                          prealignment=prealignment,
                                                                                                                          inverse=True)
                    else:  # Apply transformation from fine alignment information
                        hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
                                                                                               dut_index=actual_dut,
                                                                                               alignment=alignment,
                                                                                               inverse=True)
                        intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
                                                                                                                          dut_index=actual_dut,
                                                                                                                          alignment=alignment,
                                                                                                                          inverse=True)

                    # Sanity check: in the local frame the DUT plane is z = 0 by construction.
                    if not np.allclose(hit_z_local, 0.0) or not np.allclose(intersection_z_local, 0.0):
                        logging.error('Hit z position = %s and z intersection %s', str(hit_z_local[:3]), str(intersection_z_local[:3]))
                        raise RuntimeError('The transformation to the local coordinate system did not give all z = 0. Wrong alignment used?')

                    difference = np.column_stack((hit_x, hit_y, hit_z)) - np.column_stack((intersection_x, intersection_y, intersection_z))
                    difference_local = np.column_stack((hit_x_local, hit_y_local, hit_z_local)) - np.column_stack((intersection_x_local, intersection_y_local, intersection_z_local))

                    # Histogram residuals in different ways
                    if initialize:  # Only true for the first iteration, calculate the binning for the histograms
                        initialize = False
                        plot_n_pixels = 6.0

                        # detect peaks and calculate width to estimate the size of the histograms
                        # (center/FWHM for x, y, col, row — each with a peak-detect
                        # fallback chain ending in a pixel-size based default)
                        if nbins_per_pixel is not None:
                            min_difference, max_difference = np.min(difference[:, 0]), np.max(difference[:, 0])
                            nbins = np.arange(min_difference - (pixel_size[actual_dut][0] / nbins_per_pixel), max_difference + 2 * (pixel_size[actual_dut][0] / nbins_per_pixel), pixel_size[actual_dut][0] / nbins_per_pixel)
                        else:
                            nbins = "auto"
                        hist, edges = np.histogram(difference[:, 0], bins=nbins)
                        edge_center = (edges[1:] + edges[:-1]) / 2.0
                        try:
                            _, center_x, fwhm_x, _ = analysis_utils.peak_detect(edge_center, hist)
                        except RuntimeError:
                            # do some simple FWHM with numpy array
                            try:
                                _, center_x, fwhm_x, _ = analysis_utils.simple_peak_detect(edge_center, hist)
                            except RuntimeError:
                                center_x, fwhm_x = 0.0, pixel_size[actual_dut][0] * plot_n_pixels

                        if nbins_per_pixel is not None:
                            min_difference, max_difference = np.min(difference[:, 1]), np.max(difference[:, 1])
                            nbins = np.arange(min_difference - (pixel_size[actual_dut][1] / nbins_per_pixel), max_difference + 2 * (pixel_size[actual_dut][1] / nbins_per_pixel), pixel_size[actual_dut][1] / nbins_per_pixel)
                        else:
                            nbins = "auto"
                        hist, edges = np.histogram(difference[:, 1], bins=nbins)
                        edge_center = (edges[1:] + edges[:-1]) / 2.0
                        try:
                            _, center_y, fwhm_y, _ = analysis_utils.peak_detect(edge_center, hist)
                        except RuntimeError:
                            # do some simple FWHM with numpy array
                            try:
                                _, center_y, fwhm_y, _ = analysis_utils.simple_peak_detect(edge_center, hist)
                            except RuntimeError:
                                center_y, fwhm_y = 0.0, pixel_size[actual_dut][1] * plot_n_pixels

                        if nbins_per_pixel is not None:
                            min_difference, max_difference = np.min(difference_local[:, 0]), np.max(difference_local[:, 0])
                            nbins = np.arange(min_difference - (pixel_size[actual_dut][0] / nbins_per_pixel), max_difference + 2 * (pixel_size[actual_dut][0] / nbins_per_pixel), pixel_size[actual_dut][0] / nbins_per_pixel)
                        else:
                            nbins = "auto"
                        hist, edges = np.histogram(difference_local[:, 0], bins=nbins)
                        edge_center = (edges[1:] + edges[:-1]) / 2.0
                        try:
                            _, center_col, fwhm_col, _ = analysis_utils.peak_detect(edge_center, hist)
                        except RuntimeError:
                            # do some simple FWHM with numpy array
                            try:
                                _, center_col, fwhm_col, _ = analysis_utils.simple_peak_detect(edge_center, hist)
                            except RuntimeError:
                                center_col, fwhm_col = 0.0, pixel_size[actual_dut][0] * plot_n_pixels

                        if nbins_per_pixel is not None:
                            min_difference, max_difference = np.min(difference_local[:, 1]), np.max(difference_local[:, 1])
                            nbins = np.arange(min_difference - (pixel_size[actual_dut][1] / nbins_per_pixel), max_difference + 2 * (pixel_size[actual_dut][1] / nbins_per_pixel), pixel_size[actual_dut][1] / nbins_per_pixel)
                        else:
                            nbins = "auto"
                        hist, edges = np.histogram(difference_local[:, 1], bins=nbins)
                        edge_center = (edges[1:] + edges[:-1]) / 2.0
                        try:
                            _, center_row, fwhm_row, _ = analysis_utils.peak_detect(edge_center, hist)
                        except RuntimeError:
                            # do some simple FWHM with numpy array
                            try:
                                _, center_row, fwhm_row, _ = analysis_utils.simple_peak_detect(edge_center, hist)
                            except RuntimeError:
                                center_row, fwhm_row = 0.0, pixel_size[actual_dut][1] * plot_n_pixels

                        # calculate the binning of the histograms, the minimum size is given by plot_n_pixels, otherwise FWHM is taken into account
                        if nbins_per_pixel is not None:
                            width = max(plot_n_pixels * pixel_size[actual_dut][0], pixel_size[actual_dut][0] * np.ceil(plot_n_pixels * fwhm_x / pixel_size[actual_dut][0]))
                            if np.mod(width / pixel_size[actual_dut][0], 2) != 0:
                                width += pixel_size[actual_dut][0]
                            nbins = int(nbins_per_pixel * width / pixel_size[actual_dut][0])
                            x_range = (center_x - 0.5 * width, center_x + 0.5 * width)
                        else:
                            nbins = "auto"
                            width = pixel_size[actual_dut][0] * np.ceil(plot_n_pixels * fwhm_x / pixel_size[actual_dut][0])
                            x_range = (center_x - width, center_x + width)
                        hist_residual_x_hist, hist_residual_x_xedges = np.histogram(difference[:, 0], range=x_range, bins=nbins)

                        if npixels_per_bin is not None:
                            min_intersection, max_intersection = np.min(intersection_x), np.max(intersection_x)
                            nbins = np.arange(min_intersection, max_intersection + npixels_per_bin * pixel_size[actual_dut][0], npixels_per_bin * pixel_size[actual_dut][0])
                        else:
                            nbins = "auto"
                        _, hist_residual_x_yedges = np.histogram(intersection_x, bins=nbins)

                        if nbins_per_pixel is not None:
                            width = max(plot_n_pixels * pixel_size[actual_dut][1], pixel_size[actual_dut][1] * np.ceil(plot_n_pixels * fwhm_y / pixel_size[actual_dut][1]))
                            if np.mod(width / pixel_size[actual_dut][1], 2) != 0:
                                width += pixel_size[actual_dut][1]
                            nbins = int(nbins_per_pixel * width / pixel_size[actual_dut][1])
                            y_range = (center_y - 0.5 * width, center_y + 0.5 * width)
                        else:
                            nbins = "auto"
                            width = pixel_size[actual_dut][1] * np.ceil(plot_n_pixels * fwhm_y / pixel_size[actual_dut][1])
                            y_range = (center_y - width, center_y + width)
                        hist_residual_y_hist, hist_residual_y_yedges = np.histogram(difference[:, 1], range=y_range, bins=nbins)

                        if npixels_per_bin is not None:
                            min_intersection, max_intersection = np.min(intersection_y), np.max(intersection_y)
                            nbins = np.arange(min_intersection, max_intersection + npixels_per_bin * pixel_size[actual_dut][1], npixels_per_bin * pixel_size[actual_dut][1])
                        else:
                            nbins = "auto"
                        _, hist_residual_y_xedges = np.histogram(intersection_y, bins=nbins)

                        if nbins_per_pixel is not None:
                            width = max(plot_n_pixels * pixel_size[actual_dut][0], pixel_size[actual_dut][0] * np.ceil(plot_n_pixels * fwhm_col / pixel_size[actual_dut][0]))
                            if np.mod(width / pixel_size[actual_dut][0], 2) != 0:
                                width += pixel_size[actual_dut][0]
                            nbins = int(nbins_per_pixel * width / pixel_size[actual_dut][0])
                            col_range = (center_col - 0.5 * width, center_col + 0.5 * width)
                        else:
                            nbins = "auto"
                            width = pixel_size[actual_dut][0] * np.ceil(plot_n_pixels * fwhm_col / pixel_size[actual_dut][0])
                            col_range = (center_col - width, center_col + width)
                        hist_residual_col_hist, hist_residual_col_xedges = np.histogram(difference_local[:, 0], range=col_range, bins=nbins)

                        if npixels_per_bin is not None:
                            min_intersection, max_intersection = np.min(intersection_x_local), np.max(intersection_x_local)
                            nbins = np.arange(min_intersection, max_intersection + npixels_per_bin * pixel_size[actual_dut][0], npixels_per_bin * pixel_size[actual_dut][0])
                        else:
                            nbins = "auto"
                        _, hist_residual_col_yedges = np.histogram(intersection_x_local, bins=nbins)

                        if nbins_per_pixel is not None:
                            width = max(plot_n_pixels * pixel_size[actual_dut][1], pixel_size[actual_dut][1] * np.ceil(plot_n_pixels * fwhm_row / pixel_size[actual_dut][1]))
                            if np.mod(width / pixel_size[actual_dut][1], 2) != 0:
                                width += pixel_size[actual_dut][1]
                            nbins = int(nbins_per_pixel * width / pixel_size[actual_dut][1])
                            row_range = (center_row - 0.5 * width, center_row + 0.5 * width)
                        else:
                            nbins = "auto"
                            width = pixel_size[actual_dut][1] * np.ceil(plot_n_pixels * fwhm_row / pixel_size[actual_dut][1])
                            row_range = (center_row - width, center_row + width)
                        hist_residual_row_hist, hist_residual_row_yedges = np.histogram(difference_local[:, 1], range=row_range, bins=nbins)

                        if npixels_per_bin is not None:
                            min_intersection, max_intersection = np.min(intersection_y_local), np.max(intersection_y_local)
                            nbins = np.arange(min_intersection, max_intersection + npixels_per_bin * pixel_size[actual_dut][1], npixels_per_bin * pixel_size[actual_dut][1])
                        else:
                            nbins = "auto"
                        _, hist_residual_row_xedges = np.histogram(intersection_y_local, bins=nbins)

                        # 2D histograms (residual vs. position) reuse the 1D edges so
                        # later chunks can be accumulated into the same binning.
                        # global x residual against x position
                        hist_x_residual_x_hist, hist_x_residual_x_xedges, hist_x_residual_x_yedges = np.histogram2d(
                            intersection_x,
                            difference[:, 0],
                            bins=(hist_residual_x_yedges, hist_residual_x_xedges))

                        # global y residual against y position
                        hist_y_residual_y_hist, hist_y_residual_y_xedges, hist_y_residual_y_yedges = np.histogram2d(
                            intersection_y,
                            difference[:, 1],
                            bins=(hist_residual_y_xedges, hist_residual_y_yedges))

                        # global y residual against x position
                        hist_x_residual_y_hist, hist_x_residual_y_xedges, hist_x_residual_y_yedges = np.histogram2d(
                            intersection_x,
                            difference[:, 1],
                            bins=(hist_residual_x_yedges, hist_residual_y_yedges))

                        # global x residual against y position
                        hist_y_residual_x_hist, hist_y_residual_x_xedges, hist_y_residual_x_yedges = np.histogram2d(
                            intersection_y,
                            difference[:, 0],
                            bins=(hist_residual_y_xedges, hist_residual_x_xedges))

                        # local column residual against column position
                        hist_col_residual_col_hist, hist_col_residual_col_xedges, hist_col_residual_col_yedges = np.histogram2d(
                            intersection_x_local,
                            difference_local[:, 0],
                            bins=(hist_residual_col_yedges, hist_residual_col_xedges))

                        # local row residual against row position
                        hist_row_residual_row_hist, hist_row_residual_row_xedges, hist_row_residual_row_yedges = np.histogram2d(
                            intersection_y_local,
                            difference_local[:, 1],
                            bins=(hist_residual_row_xedges, hist_residual_row_yedges))

                        # local row residual against column position
                        hist_col_residual_row_hist, hist_col_residual_row_xedges, hist_col_residual_row_yedges = np.histogram2d(
                            intersection_x_local,
                            difference_local[:, 1],
                            bins=(hist_residual_col_yedges, hist_residual_row_yedges))

                        # local column residual against row position
                        hist_row_residual_col_hist, hist_row_residual_col_xedges, hist_row_residual_col_yedges = np.histogram2d(
                            intersection_y_local,
                            difference_local[:, 0],
                            bins=(hist_residual_row_xedges, hist_residual_col_xedges))

                    else:  # adding data to existing histograms
                        hist_residual_x_hist += np.histogram(difference[:, 0], bins=hist_residual_x_xedges)[0]
                        hist_residual_y_hist += np.histogram(difference[:, 1], bins=hist_residual_y_yedges)[0]
                        hist_residual_col_hist += np.histogram(difference_local[:, 0], bins=hist_residual_col_xedges)[0]
                        hist_residual_row_hist += np.histogram(difference_local[:, 1], bins=hist_residual_row_yedges)[0]

                        # global x residual against x position
                        hist_x_residual_x_hist += np.histogram2d(
                            intersection_x,
                            difference[:, 0],
                            bins=(hist_x_residual_x_xedges, hist_x_residual_x_yedges))[0]

                        # global y residual against y position
                        hist_y_residual_y_hist += np.histogram2d(
                            intersection_y,
                            difference[:, 1],
                            bins=(hist_y_residual_y_xedges, hist_y_residual_y_yedges))[0]

                        # global y residual against x position
                        hist_x_residual_y_hist += np.histogram2d(
                            intersection_x,
                            difference[:, 1],
                            bins=(hist_x_residual_y_xedges, hist_x_residual_y_yedges))[0]

                        # global x residual against y position
                        hist_y_residual_x_hist += np.histogram2d(
                            intersection_y,
                            difference[:, 0],
                            bins=(hist_y_residual_x_xedges, hist_y_residual_x_yedges))[0]

                        # local column residual against column position
                        hist_col_residual_col_hist += np.histogram2d(
                            intersection_x_local,
                            difference_local[:, 0],
                            bins=(hist_col_residual_col_xedges, hist_col_residual_col_yedges))[0]

                        # local row residual against row position
                        hist_row_residual_row_hist += np.histogram2d(
                            intersection_y_local,
                            difference_local[:, 1],
                            bins=(hist_row_residual_row_xedges, hist_row_residual_row_yedges))[0]

                        # local row residual against column position
                        hist_col_residual_row_hist += np.histogram2d(
                            intersection_x_local,
                            difference_local[:, 1],
                            bins=(hist_col_residual_row_xedges, hist_col_residual_row_yedges))[0]

                        # local column residual against row position
                        hist_row_residual_col_hist += np.histogram2d(
                            intersection_y_local,
                            difference_local[:, 0],
                            bins=(hist_row_residual_col_xedges, hist_row_residual_col_yedges))[0]

                logging.debug('Storing residual histograms...')

                dut_name = dut_names[actual_dut] if dut_names else ("DUT" + str(actual_dut))

                # Fit each accumulated histogram and persist it as a compressed
                # carray; bin edges and fit results are attached as attributes.
                # Global residuals
                fit_residual_x, cov_residual_x = analysis_utils.fit_residuals(
                    hist=hist_residual_x_hist,
                    edges=hist_residual_x_xedges,
                    label='X residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_res_x = out_file_h5.create_carray(out_file_h5.root,
                                                      name='ResidualsX_DUT%d' % (actual_dut),
                                                      title='Residual distribution in x direction for %s' % (dut_name),
                                                      atom=tb.Atom.from_dtype(hist_residual_x_hist.dtype),
                                                      shape=hist_residual_x_hist.shape,
                                                      filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_res_x.attrs.xedges = hist_residual_x_xedges
                out_res_x.attrs.fit_coeff = fit_residual_x
                out_res_x.attrs.fit_cov = cov_residual_x
                out_res_x[:] = hist_residual_x_hist

                fit_residual_y, cov_residual_y = analysis_utils.fit_residuals(
                    hist=hist_residual_y_hist,
                    edges=hist_residual_y_yedges,
                    label='Y residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_res_y = out_file_h5.create_carray(out_file_h5.root,
                                                      name='ResidualsY_DUT%d' % (actual_dut),
                                                      title='Residual distribution in y direction for %s' % (dut_name),
                                                      atom=tb.Atom.from_dtype(hist_residual_y_hist.dtype),
                                                      shape=hist_residual_y_hist.shape,
                                                      filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_res_y.attrs.yedges = hist_residual_y_yedges
                out_res_y.attrs.fit_coeff = fit_residual_y
                out_res_y.attrs.fit_cov = cov_residual_y
                out_res_y[:] = hist_residual_y_hist

                fit_x_residual_x, cov_x_residual_x = analysis_utils.fit_residuals_vs_position(
                    hist=hist_x_residual_x_hist,
                    xedges=hist_x_residual_x_xedges,
                    yedges=hist_x_residual_x_yedges,
                    xlabel='X position [um]',
                    ylabel='X residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_x_res_x = out_file_h5.create_carray(out_file_h5.root,
                                                        name='XResidualsX_DUT%d' % (actual_dut),
                                                        title='Residual distribution in x direction as a function of the x position for %s' % (dut_name),
                                                        atom=tb.Atom.from_dtype(hist_x_residual_x_hist.dtype),
                                                        shape=hist_x_residual_x_hist.shape,
                                                        filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_x_res_x.attrs.xedges = hist_x_residual_x_xedges
                out_x_res_x.attrs.yedges = hist_x_residual_x_yedges
                out_x_res_x.attrs.fit_coeff = fit_x_residual_x
                out_x_res_x.attrs.fit_cov = cov_x_residual_x
                out_x_res_x[:] = hist_x_residual_x_hist

                fit_y_residual_y, cov_y_residual_y = analysis_utils.fit_residuals_vs_position(
                    hist=hist_y_residual_y_hist,
                    xedges=hist_y_residual_y_xedges,
                    yedges=hist_y_residual_y_yedges,
                    xlabel='Y position [um]',
                    ylabel='Y residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_y_res_y = out_file_h5.create_carray(out_file_h5.root,
                                                        name='YResidualsY_DUT%d' % (actual_dut),
                                                        title='Residual distribution in y direction as a function of the y position for %s' % (dut_name),
                                                        atom=tb.Atom.from_dtype(hist_y_residual_y_hist.dtype),
                                                        shape=hist_y_residual_y_hist.shape,
                                                        filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_y_res_y.attrs.xedges = hist_y_residual_y_xedges
                out_y_res_y.attrs.yedges = hist_y_residual_y_yedges
                out_y_res_y.attrs.fit_coeff = fit_y_residual_y
                out_y_res_y.attrs.fit_cov = cov_y_residual_y
                out_y_res_y[:] = hist_y_residual_y_hist

                fit_x_residual_y, cov_x_residual_y = analysis_utils.fit_residuals_vs_position(
                    hist=hist_x_residual_y_hist,
                    xedges=hist_x_residual_y_xedges,
                    yedges=hist_x_residual_y_yedges,
                    xlabel='X position [um]',
                    ylabel='Y residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_x_res_y = out_file_h5.create_carray(out_file_h5.root,
                                                        name='XResidualsY_DUT%d' % (actual_dut),
                                                        title='Residual distribution in y direction as a function of the x position for %s' % (dut_name),
                                                        atom=tb.Atom.from_dtype(hist_x_residual_y_hist.dtype),
                                                        shape=hist_x_residual_y_hist.shape,
                                                        filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_x_res_y.attrs.xedges = hist_x_residual_y_xedges
                out_x_res_y.attrs.yedges = hist_x_residual_y_yedges
                out_x_res_y.attrs.fit_coeff = fit_x_residual_y
                out_x_res_y.attrs.fit_cov = cov_x_residual_y
                out_x_res_y[:] = hist_x_residual_y_hist

                fit_y_residual_x, cov_y_residual_x = analysis_utils.fit_residuals_vs_position(
                    hist=hist_y_residual_x_hist,
                    xedges=hist_y_residual_x_xedges,
                    yedges=hist_y_residual_x_yedges,
                    xlabel='Y position [um]',
                    ylabel='X residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_y_res_x = out_file_h5.create_carray(out_file_h5.root,
                                                        name='YResidualsX_DUT%d' % (actual_dut),
                                                        title='Residual distribution in x direction as a function of the y position for %s' % (dut_name),
                                                        atom=tb.Atom.from_dtype(hist_y_residual_x_hist.dtype),
                                                        shape=hist_y_residual_x_hist.shape,
                                                        filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_y_res_x.attrs.xedges = hist_y_residual_x_xedges
                out_y_res_x.attrs.yedges = hist_y_residual_x_yedges
                out_y_res_x.attrs.fit_coeff = fit_y_residual_x
                out_y_res_x.attrs.fit_cov = cov_y_residual_x
                out_y_res_x[:] = hist_y_residual_x_hist

                # Local residuals
                fit_residual_col, cov_residual_col = analysis_utils.fit_residuals(
                    hist=hist_residual_col_hist,
                    edges=hist_residual_col_xedges,
                    label='Column residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_res_col = out_file_h5.create_carray(out_file_h5.root,
                                                        name='ResidualsCol_DUT%d' % (actual_dut),
                                                        title='Residual distribution in column direction for %s' % (dut_name),
                                                        atom=tb.Atom.from_dtype(hist_residual_col_hist.dtype),
                                                        shape=hist_residual_col_hist.shape,
                                                        filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_res_col.attrs.xedges = hist_residual_col_xedges
                out_res_col.attrs.fit_coeff = fit_residual_col
                out_res_col.attrs.fit_cov = cov_residual_col
                out_res_col[:] = hist_residual_col_hist

                fit_residual_row, cov_residual_row = analysis_utils.fit_residuals(
                    hist=hist_residual_row_hist,
                    edges=hist_residual_row_yedges,
                    label='Row residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_res_row = out_file_h5.create_carray(out_file_h5.root,
                                                        name='ResidualsRow_DUT%d' % (actual_dut),
                                                        title='Residual distribution in row direction for %s' % (dut_name),
                                                        atom=tb.Atom.from_dtype(hist_residual_row_hist.dtype),
                                                        shape=hist_residual_row_hist.shape,
                                                        filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_res_row.attrs.yedges = hist_residual_row_yedges
                out_res_row.attrs.fit_coeff = fit_residual_row
                out_res_row.attrs.fit_cov = cov_residual_row
                out_res_row[:] = hist_residual_row_hist

                fit_col_residual_col, cov_col_residual_col = analysis_utils.fit_residuals_vs_position(
                    hist=hist_col_residual_col_hist,
                    xedges=hist_col_residual_col_xedges,
                    yedges=hist_col_residual_col_yedges,
                    xlabel='Column position [um]',
                    ylabel='Column residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_col_res_col = out_file_h5.create_carray(out_file_h5.root,
                                                            name='ColResidualsCol_DUT%d' % (actual_dut),
                                                            title='Residual distribution in column direction as a function of the column position for %s' % (dut_name),
                                                            atom=tb.Atom.from_dtype(hist_col_residual_col_hist.dtype),
                                                            shape=hist_col_residual_col_hist.shape,
                                                            filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_col_res_col.attrs.xedges = hist_col_residual_col_xedges
                out_col_res_col.attrs.yedges = hist_col_residual_col_yedges
                out_col_res_col.attrs.fit_coeff = fit_col_residual_col
                out_col_res_col.attrs.fit_cov = cov_col_residual_col
                out_col_res_col[:] = hist_col_residual_col_hist

                fit_row_residual_row, cov_row_residual_row = analysis_utils.fit_residuals_vs_position(
                    hist=hist_row_residual_row_hist,
                    xedges=hist_row_residual_row_xedges,
                    yedges=hist_row_residual_row_yedges,
                    xlabel='Row position [um]',
                    ylabel='Row residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_row_res_row = out_file_h5.create_carray(out_file_h5.root,
                                                            name='RowResidualsRow_DUT%d' % (actual_dut),
                                                            title='Residual distribution in row direction as a function of the row position for %s' % (dut_name),
                                                            atom=tb.Atom.from_dtype(hist_row_residual_row_hist.dtype),
                                                            shape=hist_row_residual_row_hist.shape,
                                                            filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_row_res_row.attrs.xedges = hist_row_residual_row_xedges
                out_row_res_row.attrs.yedges = hist_row_residual_row_yedges
                out_row_res_row.attrs.fit_coeff = fit_row_residual_row
                out_row_res_row.attrs.fit_cov = cov_row_residual_row
                out_row_res_row[:] = hist_row_residual_row_hist

                fit_col_residual_row, cov_col_residual_row = analysis_utils.fit_residuals_vs_position(
                    hist=hist_col_residual_row_hist,
                    xedges=hist_col_residual_row_xedges,
                    yedges=hist_col_residual_row_yedges,
                    xlabel='Column position [um]',
                    ylabel='Row residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_col_res_row = out_file_h5.create_carray(out_file_h5.root,
                                                            name='ColResidualsRow_DUT%d' % (actual_dut),
                                                            title='Residual distribution in row direction as a function of the column position for %s' % (dut_name),
                                                            atom=tb.Atom.from_dtype(hist_col_residual_row_hist.dtype),
                                                            shape=hist_col_residual_row_hist.shape,
                                                            filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_col_res_row.attrs.xedges = hist_col_residual_row_xedges
                out_col_res_row.attrs.yedges = hist_col_residual_row_yedges
                out_col_res_row.attrs.fit_coeff = fit_col_residual_row
                out_col_res_row.attrs.fit_cov = cov_col_residual_row
                out_col_res_row[:] = hist_col_residual_row_hist

                fit_row_residual_col, cov_row_residual_col = analysis_utils.fit_residuals_vs_position(
                    hist=hist_row_residual_col_hist,
                    xedges=hist_row_residual_col_xedges,
                    yedges=hist_row_residual_col_yedges,
                    xlabel='Row position [um]',
                    ylabel='Column residual [um]',
                    title='Residuals for %s' % (dut_name,),
                    output_pdf=output_pdf,
                    gui=gui,
                    figs=figs
                )
                out_row_res_col = out_file_h5.create_carray(out_file_h5.root,
                                                            name='RowResidualsCol_DUT%d' % (actual_dut),
                                                            title='Residual distribution in column direction as a function of the row position for %s' % (dut_name),
                                                            atom=tb.Atom.from_dtype(hist_row_residual_col_hist.dtype),
                                                            shape=hist_row_residual_col_hist.shape,
                                                            filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_row_res_col.attrs.xedges = hist_row_residual_col_xedges
                out_row_res_col.attrs.yedges = hist_row_residual_col_yedges
                out_row_res_col.attrs.fit_coeff = fit_row_residual_col
                out_row_res_col.attrs.fit_cov = cov_row_residual_col
                out_row_res_col[:] = hist_row_residual_col_hist

    if output_pdf is not None:
        output_pdf.close()

    if gui:
        return figs
def calculate_efficiency(input_tracks_file, input_alignment_file, bin_size, sensor_size, output_efficiency_file=None, pixel_size=None, n_pixels=None, minimum_track_density=1, max_distance=500, use_duts=None, max_chi2=None, force_prealignment=False, cut_distance=None, col_range=None, row_range=None, show_inefficient_events=False, plot=True, gui=False, chunk_size=1000000):
    '''Takes the tracks and calculates the hit efficiency and hit/track hit distance for selected DUTs.

    Parameters
    ----------
    input_tracks_file : string
        Filename of the input tracks file.
    input_alignment_file : string
        Filename of the input alignment file.
    bin_size : iterable
        Sizes of bins (i.e. (virtual) pixel size). Give one tuple (x, y) for every plane or list of tuples for different planes.
    sensor_size : Tuple or list of tuples
        Describes the sensor size for each DUT. If one tuple is given it is (size x, size y)
        If several tuples are given it is [(DUT0 size x, DUT0 size y), (DUT1 size x, DUT1 size y), ...]
    output_efficiency_file : string
        Filename of the output efficiency file. If None, the filename will be derived from the input hits file.
    pixel_size : iterable
        Tuple or list of col/row pixel dimension.
    n_pixels : iterable
        Tuple or list of amount of pixel in col/row dimension.
    minimum_track_density : int
        Minimum track density required to consider bin for efficiency calculation.
    max_distance : int
        Defines binnig of distance values.
    use_duts : iterable
        Calculate the efficiency for selected DUTs. If None, all duts are selected.
    max_chi2 : uint
        Only use tracks with a chi2 <= max_chi2. One value per selected DUT or a single value for all.
    force_prealignment : bool
        Take the prealignment, although if a coarse alignment is availale.
    cut_distance : int
        Use only distances (between DUT hit and track hit) smaller than cut_distance.
    col_range : iterable
        Column value to calculate efficiency for (to neglect noisy edge pixels for efficiency calculation).
    row_range : iterable
        Row value to calculate efficiency for (to neglect noisy edge pixels for efficiency calculation).
    show_inefficient_events : bool
        Whether to log inefficient events.
    plot : bool
        If True, create additional output plots.
    gui : bool
        If True, use GUI for plotting.
    chunk_size : int
        Chunk size of the data when reading from file.

    Returns
    -------
    If gui is True, the list of created figures; otherwise a tuple of lists
    (efficiencies, pass_tracks, total_tracks) with one entry per selected DUT.
    '''
    logging.info('=== Calculating efficiency ===')

    if output_efficiency_file is None:
        output_efficiency_file = os.path.splitext(input_tracks_file)[0] + '_efficiency.h5'

    if plot is True and not gui:
        output_pdf = PdfPages(os.path.splitext(output_efficiency_file)[0] + '.pdf', keep_empty=False)
    else:
        output_pdf = None

    use_prealignment = True if force_prealignment else False

    with tb.open_file(input_alignment_file, mode="r") as in_file_h5:  # Open file with alignment data
        if use_prealignment:
            logging.info('Use pre-alignment data')
            prealignment = in_file_h5.root.PreAlignment[:]
            n_duts = prealignment.shape[0]
        else:
            logging.info('Use alignment data')
            alignment = in_file_h5.root.Alignment[:]
            n_duts = alignment.shape[0]

    use_duts = use_duts if use_duts is not None else range(n_duts)  # standard setting: fit tracks for all DUTs

    if not isinstance(max_chi2, Iterable):
        max_chi2 = [max_chi2] * len(use_duts)

    efficiencies = []
    pass_tracks = []
    total_tracks = []
    figs = [] if gui else None
    with tb.open_file(input_tracks_file, mode='r') as in_file_h5:
        with tb.open_file(output_efficiency_file, 'w') as out_file_h5:
            for index, node in enumerate(in_file_h5.root):
                actual_dut = int(re.findall(r'\d+', node.name)[-1])
                if actual_dut not in use_duts:
                    continue
                dut_index = np.where(np.array(use_duts) == actual_dut)[0][0]  # position of this DUT within use_duts
                logging.info('Calculate efficiency for DUT%d', actual_dut)

                # Calculate histogram properties (bins size and number of bins)
                bin_size = [bin_size, ] if not isinstance(bin_size, Iterable) else bin_size
                if len(bin_size) == 1:  # a single bin size is shared by all DUTs
                    actual_bin_size_x = bin_size[0][0]
                    actual_bin_size_y = bin_size[0][1]
                else:
                    actual_bin_size_x = bin_size[dut_index][0]
                    actual_bin_size_y = bin_size[dut_index][1]
                dimensions = [sensor_size, ] if not isinstance(sensor_size, Iterable) else sensor_size  # Sensor dimensions for each DUT
                if len(dimensions) == 1:
                    dimensions = dimensions[0]
                else:
                    dimensions = dimensions[dut_index]
                n_bin_x = int(dimensions[0] / actual_bin_size_x)
                n_bin_y = int(dimensions[1] / actual_bin_size_y)

                # Define result histograms, these are filled for each hit chunk
                total_hit_hist = np.zeros(shape=(n_bin_x, n_bin_y), dtype=np.uint32)
                total_track_density = np.zeros(shape=(n_bin_x, n_bin_y))
                total_track_density_with_DUT_hit = np.zeros(shape=(n_bin_x, n_bin_y))

                actual_max_chi2 = max_chi2[dut_index]

                for tracks_chunk, _ in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size):
                    # Cut in Chi 2 of the track fit
                    if actual_max_chi2:
                        # BUGFIX: compare against the scalar cut for this DUT; the original
                        # compared against max_chi2, which is the per-DUT *list* at this point.
                        tracks_chunk = tracks_chunk[tracks_chunk['track_chi2'] <= actual_max_chi2]

                    # Transform the hits and track intersections into the local coordinate system
                    # Coordinates in global coordinate system (x, y, z)
                    hit_x, hit_y, hit_z = tracks_chunk['x_dut_%d' % actual_dut], tracks_chunk['y_dut_%d' % actual_dut], tracks_chunk['z_dut_%d' % actual_dut]
                    intersection_x, intersection_y, intersection_z = tracks_chunk['offset_0'], tracks_chunk['offset_1'], tracks_chunk['offset_2']

                    # Transform to local coordinate system
                    if use_prealignment:
                        hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
                                                                                               dut_index=actual_dut,
                                                                                               prealignment=prealignment,
                                                                                               inverse=True)
                        intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
                                                                                                                          dut_index=actual_dut,
                                                                                                                          prealignment=prealignment,
                                                                                                                          inverse=True)
                    else:  # Apply transformation from alignment information
                        hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
                                                                                               dut_index=actual_dut,
                                                                                               alignment=alignment,
                                                                                               inverse=True)
                        intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
                                                                                                                          dut_index=actual_dut,
                                                                                                                          alignment=alignment,
                                                                                                                          inverse=True)

                    # Quickfix that center of sensor in local system is in the center and not at the edge
                    hit_x_local, hit_y_local = hit_x_local + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], hit_y_local + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]
                    intersection_x_local, intersection_y_local = intersection_x_local + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], intersection_y_local + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]

                    intersections_local = np.column_stack((intersection_x_local, intersection_y_local, intersection_z_local))
                    hits_local = np.column_stack((hit_x_local, hit_y_local, hit_z_local))

                    if not np.allclose(hits_local[np.isfinite(hits_local[:, 2]), 2], 0.0) or not np.allclose(intersection_z_local, 0.0):
                        raise RuntimeError('The transformation to the local coordinate system did not give all z = 0. Wrong alignment used?')

                    # Usefull for debugging, print some inefficient events that can be cross checked
                    # Select virtual hits (tracks without a hit in this DUT have NaN hit positions)
                    sel_virtual = np.isnan(tracks_chunk['x_dut_%d' % actual_dut])
                    if show_inefficient_events:
                        logging.info('These events are inefficient: %s', str(tracks_chunk['event_number'][sel_virtual]))

                    # Select hits from column, row range (e.g. to supress edge pixels)
                    col_range = [col_range, ] if not isinstance(col_range, Iterable) else col_range
                    if len(col_range) == 1:
                        curr_col_range = col_range[0]
                    else:
                        curr_col_range = col_range[dut_index]
                    if curr_col_range is not None:
                        selection = np.logical_and(intersections_local[:, 0] >= curr_col_range[0], intersections_local[:, 0] <= curr_col_range[1])  # Select real hits
                        hits_local, intersections_local = hits_local[selection], intersections_local[selection]

                    row_range = [row_range, ] if not isinstance(row_range, Iterable) else row_range
                    if len(row_range) == 1:
                        curr_row_range = row_range[0]
                    else:
                        curr_row_range = row_range[dut_index]
                    if curr_row_range is not None:
                        selection = np.logical_and(intersections_local[:, 1] >= curr_row_range[0], intersections_local[:, 1] <= curr_row_range[1])  # Select real hits
                        hits_local, intersections_local = hits_local[selection], intersections_local[selection]

                    # Calculate distance between track hit and DUT hit; the scale vector (1, 1, 0)
                    # ignores the z component (all z are ~0 in the local system)
                    scale = np.square(np.array((1, 1, 0)))
                    distance = np.sqrt(np.dot(np.square(intersections_local - hits_local), scale))  # Array with distances between DUT hit and track hit for each event. Values in um

                    total_hit_hist += (np.histogram2d(hits_local[:, 0], hits_local[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]).astype(np.uint32)

                    # Calculate efficiency
                    selection = ~np.isnan(hits_local[:, 0])
                    if cut_distance:  # Select intersections where hit is in given distance around track intersection
                        intersection_valid_hit = intersections_local[np.logical_and(selection, distance < cut_distance)]
                    else:
                        intersection_valid_hit = intersections_local[selection]

                    total_track_density += np.histogram2d(intersections_local[:, 0], intersections_local[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]
                    total_track_density_with_DUT_hit += np.histogram2d(intersection_valid_hit[:, 0], intersection_valid_hit[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]

                if np.all(total_track_density == 0):
                    logging.warning('No tracks on DUT%d, cannot calculate efficiency', actual_dut)
                    continue

                # Efficiency in percent; only bins with tracks are filled to avoid division by zero
                efficiency = np.zeros_like(total_track_density_with_DUT_hit)
                efficiency[total_track_density != 0] = total_track_density_with_DUT_hit[total_track_density != 0].astype(np.float64) / total_track_density[total_track_density != 0].astype(np.float64) * 100.
                efficiency = np.ma.array(efficiency, mask=total_track_density < minimum_track_density)

                if not np.any(efficiency):
                    # BUGFIX: interpolate the DUT number; RuntimeError does no %-formatting of extra args
                    raise RuntimeError('All efficiencies for DUT%d are zero, consider changing cut values!' % actual_dut)

                plot_utils.efficiency_plots(total_hit_hist, total_track_density, total_track_density_with_DUT_hit, efficiency, actual_dut, minimum_track_density, plot_range=dimensions, cut_distance=cut_distance, output_pdf=output_pdf, gui=gui, figs=figs)

                logging.info('Efficiency = %1.4f +- %1.4f', np.ma.mean(efficiency), np.ma.std(efficiency))
                efficiencies.append(np.ma.mean(efficiency))

                dut_group = out_file_h5.create_group(out_file_h5.root, 'DUT_%d' % actual_dut)

                out_efficiency = out_file_h5.create_carray(dut_group, name='Efficiency', title='Efficiency map of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(efficiency.dtype), shape=efficiency.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_efficiency_mask = out_file_h5.create_carray(dut_group, name='Efficiency_mask', title='Masked pixel map of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(efficiency.mask.dtype), shape=efficiency.mask.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))

                # For correct statistical error calculation the number of detected tracks over total tracks is needed
                out_pass = out_file_h5.create_carray(dut_group, name='Passing_tracks', title='Passing events of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(total_track_density_with_DUT_hit.dtype), shape=total_track_density_with_DUT_hit.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_total = out_file_h5.create_carray(dut_group, name='Total_tracks', title='Total events of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(total_track_density.dtype), shape=total_track_density.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))

                pass_tracks.append(total_track_density_with_DUT_hit.sum())
                total_tracks.append(total_track_density.sum())
                logging.info('Passing / total tracks: %d / %d', total_track_density_with_DUT_hit.sum(), total_track_density.sum())

                # Store parameters used for efficiency calculation
                out_efficiency.attrs.bin_size = bin_size
                out_efficiency.attrs.minimum_track_density = minimum_track_density
                out_efficiency.attrs.sensor_size = sensor_size
                out_efficiency.attrs.use_duts = use_duts
                out_efficiency.attrs.max_chi2 = max_chi2
                out_efficiency.attrs.cut_distance = cut_distance
                out_efficiency.attrs.max_distance = max_distance
                out_efficiency.attrs.col_range = col_range
                out_efficiency.attrs.row_range = row_range
                out_efficiency[:] = efficiency.T
                out_efficiency_mask[:] = efficiency.mask.T
                out_pass[:] = total_track_density_with_DUT_hit.T
                out_total[:] = total_track_density.T

    if output_pdf is not None:
        output_pdf.close()

    if gui:
        return figs

    return efficiencies, pass_tracks, total_tracks
def calculate_purity(input_tracks_file, input_alignment_file, bin_size, sensor_size, output_purity_file=None, pixel_size=None, n_pixels=None, minimum_hit_density=10, max_distance=500, use_duts=None, max_chi2=None, force_prealignment=False, cut_distance=None, col_range=None, row_range=None, show_inefficient_events=False, output_file=None, plot=True, chunk_size=1000000):
    '''Takes the tracks and calculates the hit purity and hit/track hit distance for selected DUTs.

    Parameters
    ----------
    input_tracks_file : string
        Filename with the tracks table.
    input_alignment_file : pytables file
        Filename of the input aligment data.
    bin_size : iterable
        Bins sizes (i.e. (virtual) pixel size). Give one tuple (x, y) for every plane or list of tuples for different planes.
    sensor_size : Tuple or list of tuples
        Describes the sensor size for each DUT. If one tuple is given it is (size x, size y).
        If several tuples are given it is [(DUT0 size x, DUT0 size y), (DUT1 size x, DUT1 size y), ...].
    output_purity_file : string
        Filename of the output purity file. If None, the filename will be derived from the input hits file.
    pixel_size : iterable
        Tuple or list of col/row pixel dimension.
    n_pixels : iterable
        Tuple or list of amount of pixel in col/row dimension.
    minimum_hit_density : int
        Minimum hit density required to consider bin for purity calculation.
    max_distance : int
        Defines binnig of distance values.
    use_duts : iterable
        The DUTs to calculate purity for. If None all duts are used.
    max_chi2 : int
        Only use track with a chi2 <= max_chi2. One value per DUT or a single value for all.
    force_prealignment : bool
        Take the prealignment, although if a coarse alignment is availale.
    cut_distance : int
        Hit - track intersection <= cut_distance = pure hit (hit assigned to track).
        Hit - track intersection > cut_distance = inpure hit (hit without a track).
    col_range, row_range : iterable
        Column / row value to calculate purity for (to neglect noisy edge pixels for purity calculation).
    show_inefficient_events : bool
        Whether to log events with unpure hits.
    output_file : unused
        Kept for backward compatibility of the signature.
    plot : bool
        If True, create additional output plots.
    chunk_size : int
        Chunk size of the data when reading from file.

    Returns
    -------
    (purities, pure_hits, total_hits) : lists with one entry per selected DUT.
    '''
    logging.info('=== Calculate purity ===')

    if output_purity_file is None:
        output_purity_file = os.path.splitext(input_tracks_file)[0] + '_purity.h5'

    if plot is True:
        output_pdf = PdfPages(os.path.splitext(output_purity_file)[0] + '.pdf', keep_empty=False)
    else:
        output_pdf = None

    use_prealignment = True if force_prealignment else False

    with tb.open_file(input_alignment_file, mode="r") as in_file_h5:  # Open file with alignment data
        prealignment = in_file_h5.root.PreAlignment[:]
        n_duts = prealignment.shape[0]
        if not use_prealignment:
            try:
                alignment = in_file_h5.root.Alignment[:]
                logging.info('Use alignment data')
            except tb.exceptions.NodeError:
                # Fall back to prealignment if no fine alignment exists in the file
                use_prealignment = True
                logging.info('Use prealignment data')

    if not isinstance(max_chi2, Iterable):
        max_chi2 = [max_chi2] * n_duts

    purities = []
    pure_hits = []
    total_hits = []
    with tb.open_file(input_tracks_file, mode='r') as in_file_h5:
        with tb.open_file(output_purity_file, 'w') as out_file_h5:
            for index, node in enumerate(in_file_h5.root):
                actual_dut = int(re.findall(r'\d+', node.name)[-1])
                if use_duts and actual_dut not in use_duts:
                    continue
                logging.info('Calculate purity for DUT %d', actual_dut)

                # Calculate histogram properties (bins size and number of bins)
                # NOTE(review): per-DUT settings are indexed by the node position `index`,
                # not by the position within use_duts as in calculate_efficiency -- confirm intended.
                bin_size = [bin_size, ] if not isinstance(bin_size, Iterable) else bin_size
                if len(bin_size) != 1:
                    actual_bin_size_x = bin_size[index][0]
                    actual_bin_size_y = bin_size[index][1]
                else:
                    actual_bin_size_x = bin_size[0][0]
                    actual_bin_size_y = bin_size[0][1]
                dimensions = [sensor_size, ] if not isinstance(sensor_size, Iterable) else sensor_size  # Sensor dimensions for each DUT
                if len(dimensions) == 1:
                    dimensions = dimensions[0]
                else:
                    dimensions = dimensions[index]
                n_bin_x = int(dimensions[0] / actual_bin_size_x)
                n_bin_y = int(dimensions[1] / actual_bin_size_y)

                # Define result histograms, these are filled for each hit chunk
                total_hit_hist = np.zeros(shape=(n_bin_x, n_bin_y), dtype=np.uint32)
                total_pure_hit_hist = np.zeros(shape=(n_bin_x, n_bin_y), dtype=np.uint32)

                actual_max_chi2 = max_chi2[index]

                for tracks_chunk, _ in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size):
                    # Cut in Chi 2 of the track fit
                    # BUGFIX: apply the scalar per-DUT cut (was the whole max_chi2 list) and do it
                    # BEFORE building the selection masks, so the masks match the filtered chunk.
                    if actual_max_chi2:
                        tracks_chunk = tracks_chunk[tracks_chunk['track_chi2'] <= actual_max_chi2]

                    # Take only tracks where actual dut has a hit, otherwise residual wrong
                    selection = np.logical_and(~np.isnan(tracks_chunk['x_dut_%d' % actual_dut]), ~np.isnan(tracks_chunk['track_chi2']))
                    selection_hit = ~np.isnan(tracks_chunk['x_dut_%d' % actual_dut])

                    # Transform the hits and track intersections into the local coordinate system
                    # Coordinates in global coordinate system (x, y, z)
                    hit_x_dut, hit_y_dut, hit_z_dut = tracks_chunk['x_dut_%d' % actual_dut][selection_hit], tracks_chunk['y_dut_%d' % actual_dut][selection_hit], tracks_chunk['z_dut_%d' % actual_dut][selection_hit]
                    hit_x, hit_y, hit_z = tracks_chunk['x_dut_%d' % actual_dut][selection], tracks_chunk['y_dut_%d' % actual_dut][selection], tracks_chunk['z_dut_%d' % actual_dut][selection]
                    intersection_x, intersection_y, intersection_z = tracks_chunk['offset_0'][selection], tracks_chunk['offset_1'][selection], tracks_chunk['offset_2'][selection]

                    # Transform to local coordinate system
                    if use_prealignment:
                        hit_x_local_dut, hit_y_local_dut, hit_z_local_dut = geometry_utils.apply_alignment(hit_x_dut, hit_y_dut, hit_z_dut,
                                                                                                           dut_index=actual_dut,
                                                                                                           prealignment=prealignment,
                                                                                                           inverse=True)
                        hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
                                                                                               dut_index=actual_dut,
                                                                                               prealignment=prealignment,
                                                                                               inverse=True)
                        intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
                                                                                                                          dut_index=actual_dut,
                                                                                                                          prealignment=prealignment,
                                                                                                                          inverse=True)
                    else:  # Apply transformation from alignment information
                        hit_x_local_dut, hit_y_local_dut, hit_z_local_dut = geometry_utils.apply_alignment(hit_x_dut, hit_y_dut, hit_z_dut,
                                                                                                           dut_index=actual_dut,
                                                                                                           alignment=alignment,
                                                                                                           inverse=True)
                        hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hit_x, hit_y, hit_z,
                                                                                               dut_index=actual_dut,
                                                                                               alignment=alignment,
                                                                                               inverse=True)
                        intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_alignment(intersection_x, intersection_y, intersection_z,
                                                                                                                          dut_index=actual_dut,
                                                                                                                          alignment=alignment,
                                                                                                                          inverse=True)

                    # Quickfix that center of sensor in local system is in the center and not at the edge
                    hit_x_local_dut, hit_y_local_dut = hit_x_local_dut + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], hit_y_local_dut + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]
                    hit_x_local, hit_y_local = hit_x_local + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], hit_y_local + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]
                    intersection_x_local, intersection_y_local = intersection_x_local + pixel_size[actual_dut][0] / 2. * n_pixels[actual_dut][0], intersection_y_local + pixel_size[actual_dut][1] / 2. * n_pixels[actual_dut][1]

                    intersections_local = np.column_stack((intersection_x_local, intersection_y_local, intersection_z_local))
                    hits_local = np.column_stack((hit_x_local, hit_y_local, hit_z_local))
                    hits_local_dut = np.column_stack((hit_x_local_dut, hit_y_local_dut, hit_z_local_dut))

                    if not np.allclose(hits_local[np.isfinite(hits_local[:, 2]), 2], 0.0) or not np.allclose(intersection_z_local, 0.0):
                        raise RuntimeError('The transformation to the local coordinate system did not give all z = 0. Wrong alignment used?')

                    # Usefull for debugging, print some inpure events that can be cross checked
                    # Select virtual hits (tracks without a hit in this DUT have NaN hit positions)
                    sel_virtual = np.isnan(tracks_chunk['x_dut_%d' % actual_dut])
                    if show_inefficient_events:
                        logging.info('These events are unpure: %s', str(tracks_chunk['event_number'][sel_virtual]))

                    # Select hits from column, row range (e.g. to supress edge pixels)
                    # BUGFIX: the original reset the loop variable `index` to pick the range,
                    # which chose the wrong per-DUT range when only one of col/row was a singleton.
                    col_range = [col_range, ] if not isinstance(col_range, Iterable) else col_range
                    if len(col_range) == 1:
                        curr_col_range = col_range[0]
                    else:
                        curr_col_range = col_range[index]
                    if curr_col_range is not None:
                        selection = np.logical_and(intersections_local[:, 0] >= curr_col_range[0], intersections_local[:, 0] <= curr_col_range[1])  # Select real hits
                        hits_local, intersections_local = hits_local[selection], intersections_local[selection]

                    row_range = [row_range, ] if not isinstance(row_range, Iterable) else row_range
                    if len(row_range) == 1:
                        curr_row_range = row_range[0]
                    else:
                        curr_row_range = row_range[index]
                    if curr_row_range is not None:
                        selection = np.logical_and(intersections_local[:, 1] >= curr_row_range[0], intersections_local[:, 1] <= curr_row_range[1])  # Select real hits
                        hits_local, intersections_local = hits_local[selection], intersections_local[selection]

                    # Calculate distance between track hit and DUT hit; scale (1, 1, 0) ignores z
                    scale = np.square(np.array((1, 1, 0)))
                    distance = np.sqrt(np.dot(np.square(intersections_local - hits_local), scale))  # Array with distances between DUT hit and track hit for each event. Values in um

                    total_hit_hist += (np.histogram2d(hits_local_dut[:, 0], hits_local_dut[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]).astype(np.uint32)

                    # Calculate purity
                    pure_hits_local = hits_local[distance < cut_distance]

                    if not np.any(pure_hits_local):
                        logging.warning('No pure hits in DUT %d, cannot calculate purity', actual_dut)
                        continue
                    total_pure_hit_hist += (np.histogram2d(pure_hits_local[:, 0], pure_hits_local[:, 1], bins=(n_bin_x, n_bin_y), range=[[0, dimensions[0]], [0, dimensions[1]]])[0]).astype(np.uint32)

                # BUGFIX: use a float array; np.zeros_like(total_hit_hist) inherited uint32
                # and truncated the purity percentages to integers on assignment.
                purity = np.zeros_like(total_hit_hist, dtype=np.float64)
                purity[total_hit_hist != 0] = total_pure_hit_hist[total_hit_hist != 0].astype(np.float64) / total_hit_hist[total_hit_hist != 0].astype(np.float64) * 100.
                purity = np.ma.array(purity, mask=total_hit_hist < minimum_hit_density)

                if not np.any(purity):
                    # BUGFIX: interpolate the DUT number; RuntimeError does no %-formatting of extra args
                    raise RuntimeError('No pure hit for DUT%d, consider changing cut values or check track building!' % actual_dut)

                plot_utils.purity_plots(total_pure_hit_hist, total_hit_hist, purity, actual_dut, minimum_hit_density, plot_range=dimensions, cut_distance=cut_distance, output_pdf=output_pdf)

                logging.info('Purity = %1.4f +- %1.4f', np.ma.mean(purity), np.ma.std(purity))
                purities.append(np.ma.mean(purity))

                dut_group = out_file_h5.create_group(out_file_h5.root, 'DUT_%d' % actual_dut)

                out_purity = out_file_h5.create_carray(dut_group, name='Purity', title='Purity map of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(purity.dtype), shape=purity.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_purity_mask = out_file_h5.create_carray(dut_group, name='Purity_mask', title='Masked pixel map of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(purity.mask.dtype), shape=purity.mask.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))

                # For correct statistical error calculation the number of pure hits over total hits is needed
                out_pure_hits = out_file_h5.create_carray(dut_group, name='Pure_hits', title='Passing events of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(total_pure_hit_hist.dtype), shape=total_pure_hit_hist.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                out_total_total = out_file_h5.create_carray(dut_group, name='Total_hits', title='Total events of DUT%d' % actual_dut, atom=tb.Atom.from_dtype(total_hit_hist.dtype), shape=total_hit_hist.T.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))

                pure_hits.append(total_pure_hit_hist.sum())
                total_hits.append(total_hit_hist.sum())
                # BUGFIX: force float division; uint sums floor-divided under Python 2 semantics
                logging.info('Pure hits / total hits: %d / %d, Purity = %.2f', total_pure_hit_hist.sum(), total_hit_hist.sum(), 100. * total_pure_hit_hist.sum() / float(total_hit_hist.sum()))

                # Store parameters used for purity calculation
                out_purity.attrs.bin_size = bin_size
                out_purity.attrs.minimum_hit_density = minimum_hit_density
                out_purity.attrs.sensor_size = sensor_size
                out_purity.attrs.use_duts = use_duts
                out_purity.attrs.max_chi2 = max_chi2
                out_purity.attrs.cut_distance = cut_distance
                out_purity.attrs.max_distance = max_distance
                out_purity.attrs.col_range = col_range
                out_purity.attrs.row_range = row_range
                out_purity[:] = purity.T
                out_purity_mask[:] = purity.mask.T
                out_pure_hits[:] = total_pure_hit_hist.T
                out_total_total[:] = total_hit_hist.T

    if output_pdf is not None:
        output_pdf.close()

    return purities, pure_hits, total_hits
def histogram_track_angle(input_tracks_file, input_alignment_file=None, output_track_angle_file=None, n_bins="auto", plot_range=(None, None), use_duts=None, dut_names=None, plot=True, chunk_size=499999):
    '''Calculates and histograms the track angle of the fitted tracks for selected DUTs.

    Parameters
    ----------
    input_tracks_file : string
        Filename of the input tracks file.
    input_alignment_file : string
        Filename of the input alignment file.
        If None, the DUT planes are assumed to be perpendicular to the z axis.
    output_track_angle_file: string
        Filename of the output track angle file with track angle histogram and fitted means and sigmas of track angles for selected DUTs.
        If None, deduce filename from input tracks file.
    n_bins : uint
        Number of bins for the histogram.
        If "auto", automatic binning is used.
    plot_range : iterable of tuples
        Tuple of the plot range in rad for alpha and beta angular distribution, e.g. ((-0.01, +0.01), (-0.01, +0.01)).
        If (None, None), plotting from minimum to maximum.
    use_duts : iterable
        Calculate the track angle for given DUTs. If None, all duts are used.
    dut_names : iterable
        Name of the DUTs. If None, DUT numbers will be used.
    plot : bool
        If True, create additional output plots.
    chunk_size : uint
        Chunk size of the data when reading from file.
    '''
    logging.info('=== Calculating track angles ===')

    if input_alignment_file:
        with tb.open_file(input_alignment_file, mode="r") as in_file_h5:  # Open file with alignment data
            logging.info('Use alignment data')
            alignment = in_file_h5.root.Alignment[:]
    else:
        alignment = None

    if output_track_angle_file is None:
        output_track_angle_file = os.path.splitext(input_tracks_file)[0] + '_track_angles.h5'

    with tb.open_file(input_tracks_file, 'r') as in_file_h5:
        with tb.open_file(output_track_angle_file, mode="w") as out_file_h5:
            nodes = in_file_h5.list_nodes("/")
            if not nodes:
                return
            # The first node is visited twice: once as the "global" entry (dut_name is None,
            # plane normal along z) and once as a regular DUT entry.
            extended_nodes = nodes[:1]
            extended_nodes.extend(nodes)
            for index, node in enumerate(extended_nodes):  # loop through all DUTs in track table
                initialize = True  # the first chunk fixes the histogram edges for all later chunks
                actual_dut = int(re.findall(r'\d+', node.name)[-1])
                if index == 0:
                    dut_name = None
                else:
                    dut_name = "DUT%d" % actual_dut
                if use_duts is not None and actual_dut not in use_duts:
                    continue

                # Determine the DUT plane normal from the alignment rotation angles;
                # the global entry (index 0) always uses the z axis.
                if alignment is not None and index != 0:
                    rotation_matrix = geometry_utils.rotation_matrix(alpha=alignment[actual_dut]['alpha'],
                                                                     beta=alignment[actual_dut]['beta'],
                                                                     gamma=alignment[actual_dut]['gamma'])
                    basis_global = rotation_matrix.T.dot(np.eye(3))
                    dut_plane_normal = basis_global[2]
                    if dut_plane_normal[2] < 0:  # keep the normal pointing along +z
                        dut_plane_normal = -dut_plane_normal
                else:
                    dut_plane_normal = np.array([0.0, 0.0, 1.0])

                for tracks_chunk, _ in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size):  # only store track slopes of selected DUTs
                    track_slopes = np.column_stack((tracks_chunk['slope_0'],
                                                    tracks_chunk['slope_1'],
                                                    tracks_chunk['slope_2']))

                    # TODO: alpha/beta wrt DUT col / row
                    total_angles = np.arccos(np.inner(dut_plane_normal, track_slopes))
                    alpha_angles = 0.5 * np.pi - np.arccos(np.inner(track_slopes, np.cross(dut_plane_normal, np.array([1.0, 0.0, 0.0]))))
                    beta_angles = 0.5 * np.pi - np.arccos(np.inner(track_slopes, np.cross(dut_plane_normal, np.array([0.0, 1.0, 0.0]))))

                    if initialize:
                        # NOTE(review): alpha uses plot_range[1] and beta uses plot_range[0],
                        # which looks swapped w.r.t. the docstring ("alpha and beta") -- confirm
                        # the intended ordering before changing.
                        total_angle_hist, total_angle_hist_edges = np.histogram(total_angles, bins=n_bins, range=None)
                        alpha_angle_hist, alpha_angle_hist_edges = np.histogram(alpha_angles, bins=n_bins, range=plot_range[1])
                        beta_angle_hist, beta_angle_hist_edges = np.histogram(beta_angles, bins=n_bins, range=plot_range[0])
                        initialize = False
                    else:
                        # Reuse the edges of the first chunk so the histograms can be summed
                        total_angle_hist += np.histogram(total_angles, bins=total_angle_hist_edges)[0]
                        alpha_angle_hist += np.histogram(alpha_angles, bins=alpha_angle_hist_edges)[0]
                        beta_angle_hist += np.histogram(beta_angles, bins=beta_angle_hist_edges)[0]

                # write results
                track_angle_total = out_file_h5.create_carray(where=out_file_h5.root,
                                                              name='Total_Track_Angle_Hist%s' % (("_%s" % dut_name) if dut_name else ""),
                                                              title='Total track angle distribution%s' % (("_for_%s" % dut_name) if dut_name else ""),
                                                              atom=tb.Atom.from_dtype(total_angle_hist.dtype),
                                                              shape=total_angle_hist.shape,
                                                              filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                track_angle_beta = out_file_h5.create_carray(where=out_file_h5.root,
                                                             name='Beta_Track_Angle_Hist%s' % (("_%s" % dut_name) if dut_name else ""),
                                                             title='Beta track angle distribution%s' % (("_for_%s" % dut_name) if dut_name else ""),
                                                             atom=tb.Atom.from_dtype(beta_angle_hist.dtype),
                                                             shape=beta_angle_hist.shape,
                                                             filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                track_angle_alpha = out_file_h5.create_carray(where=out_file_h5.root,
                                                              name='Alpha_Track_Angle_Hist%s' % (("_%s" % dut_name) if dut_name else ""),
                                                              title='Alpha track angle distribution%s' % (("_for_%s" % dut_name) if dut_name else ""),
                                                              atom=tb.Atom.from_dtype(alpha_angle_hist.dtype),
                                                              shape=alpha_angle_hist.shape,
                                                              filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))

                # fit histograms for x and y direction with a Gaussian seeded from the
                # histogram mean and RMS
                bin_center = (total_angle_hist_edges[1:] + total_angle_hist_edges[:-1]) / 2.0
                mean = analysis_utils.get_mean_from_histogram(total_angle_hist, bin_center)
                rms = analysis_utils.get_rms_from_histogram(total_angle_hist, bin_center)
                fit_total, cov = curve_fit(analysis_utils.gauss, bin_center, total_angle_hist, p0=[np.amax(total_angle_hist), mean, rms])

                bin_center = (beta_angle_hist_edges[1:] + beta_angle_hist_edges[:-1]) / 2.0
                mean = analysis_utils.get_mean_from_histogram(beta_angle_hist, bin_center)
                rms = analysis_utils.get_rms_from_histogram(beta_angle_hist, bin_center)
                fit_beta, cov = curve_fit(analysis_utils.gauss, bin_center, beta_angle_hist, p0=[np.amax(beta_angle_hist), mean, rms])

                bin_center = (alpha_angle_hist_edges[1:] + alpha_angle_hist_edges[:-1]) / 2.0
                mean = analysis_utils.get_mean_from_histogram(alpha_angle_hist, bin_center)
                rms = analysis_utils.get_rms_from_histogram(alpha_angle_hist, bin_center)
                fit_alpha, cov = curve_fit(analysis_utils.gauss, bin_center, alpha_angle_hist, p0=[np.amax(alpha_angle_hist), mean, rms])

                # total (BUGFIX: removed duplicated attrs.edges assignment)
                track_angle_total.attrs.edges = total_angle_hist_edges
                track_angle_total.attrs.amp = fit_total[0]
                track_angle_total.attrs.mean = fit_total[1]
                track_angle_total.attrs.sigma = fit_total[2]
                track_angle_total[:] = total_angle_hist
                # x
                track_angle_beta.attrs.edges = beta_angle_hist_edges
                track_angle_beta.attrs.amp = fit_beta[0]
                track_angle_beta.attrs.mean = fit_beta[1]
                track_angle_beta.attrs.sigma = fit_beta[2]
                track_angle_beta[:] = beta_angle_hist
                # y
                track_angle_alpha.attrs.edges = alpha_angle_hist_edges
                track_angle_alpha.attrs.amp = fit_alpha[0]
                track_angle_alpha.attrs.mean = fit_alpha[1]
                track_angle_alpha.attrs.sigma = fit_alpha[2]
                track_angle_alpha[:] = alpha_angle_hist

    if plot:
        plot_utils.plot_track_angle(input_track_angle_file=output_track_angle_file, output_pdf_file=None, dut_names=dut_names)
def calculate_in_pixel_hit_distribution(input_tracks_file, input_alignment_file, use_duts, pixel_size, n_bins, output_pdf_file=None, plot=True, force_prealignment=False, output_in_pixel_dist_file=None, chunk_size=1000000):
    '''Takes the tracks and calculates in-pixel-hit-distributions for cluster sizes between 1 and 4, additionally the effective CS-1-Pitch is calculated.
    The effective CS-1-pitch 'p_eff' gives the area of one pixel in which CS1 is formed depending on the sensor threshold. It can be used to estimate the
    intrinsic resolution: sigma_int = p_eff / sqrt(12)

    Parameters
    ----------
    input_tracks_file : string
        Filename of the input tracks file.
    input_alignment_file : string
        Filename of the input alignment file.
    use_duts : iterable
        Calculate in-pixel-hit-distribution for selected DUTs. If None, all duts are selected.
    pixel_size : iterable of tuples
        One tuple per DUT describing the pixel dimension in um in column, row direction
        e.g. for 2 DUTs: pixel_size = [(250, 50), (250, 50)]
    n_bins : iterable
        Number of bins for the histogram in x and y direction.
    output_in_pixel_dist_file : string
        Filename of the output in-pixel-hit-distribution file.
    plot : bool
        If True, create output plots. If false, only effective CS-1-Pitch will be calculated
    force_prealignment : boolean
        Take the prealignment, although if a coarse alignment is available.
    chunk_size : uint
        Chunk size of the data when reading from file.

    Returns
    -------
    list of (x, y) effective CS-1 pitch tuples, one entry per processed DUT.
    '''
    logging.info('=== Calculating In-Pixel-Hit-Distribution for CS 1 - 4 ===')

    use_prealignment = True if force_prealignment else False

    def calculate_effective_cs1_pitch(cs_hist, pitch):
        '''Calculates the effective CS-1 pitch p_eff using the CS distribution: p_eff = sqrt(pitch^2 * N_CS1 / N_tot)
        Where N_CS1 is the number of clusters with CS1 and N_tot the total number of clusters.
        '''
        # NOTE(review): np.float is deprecated in modern NumPy -- confirm the
        # pinned NumPy version before upgrading.
        perc_cluster_size_1 = 100. * ((cs_hist[0].astype(np.float) / np.sum(cs_hist).astype(np.float)))
        return pitch[0] * np.sqrt(perc_cluster_size_1 / 100.), pitch[1] * np.sqrt(perc_cluster_size_1 / 100.)

    def normalize_distributions(hists, fit_results):
        '''Normalize CS distributions in order to compare them.
        '''
        hist_normed = [hists[0] - fit_results[0][3] + fit_results[2][0] + fit_results[2][3],  # shift CS 1 distribution (x-axis projection) such that backgrounds of CS 2 and CS 1 is the same
                       hists[1],  # CS 2 distribution (x-axis projection) stays the same
                       hists[2] - fit_results[1][3] + fit_results[3][0] + fit_results[3][3],  # shift CS 1 distribution (y-axis projection) such that backgrounds of CS 2 and CS 1 is the same
                       hists[3]]  # CS 2 distribution (y-axis projection) stays the same
        return hist_normed

    # read alignment for converting the track intersections with the respective dut (global reference system) to local track intersections
    with tb.open_file(input_alignment_file, mode="r") as in_file_h5:  # Open file with alignment data
        if use_prealignment:
            logging.info('Use pre-alignment data')
            prealignment = in_file_h5.root.PreAlignment[:]
            n_duts = prealignment.shape[0]
        else:
            logging.info('Use alignment data')
            alignment = in_file_h5.root.Alignment[:]
            n_duts = alignment.shape[0]

    use_duts = use_duts if use_duts is not None else range(n_duts)  # standard setting: fit tracks for all DUTs

    if plot:
        output_pdf = PdfPages(output_pdf_file + '.pdf')
    else:
        output_pdf = None

    # allow a scalar n_bins as shorthand for the same binning in x and y
    n_bins = [n_bins, n_bins] if not isinstance(n_bins, Iterable) else n_bins

    effective_pitches = []

    with tb.open_file(input_tracks_file, 'r') as in_file_h5:
        for actual_dut in use_duts:
            node = in_file_h5.get_node(in_file_h5.root, 'Tracks_DUT_%d' % actual_dut)
            actual_pixel_size = pixel_size[actual_dut]
            initialize = True
            # create arrays for storing histogrammed intersections for different CS
            projections_x_cs_hist = np.empty(shape=(4, n_bins[0]))
            projections_y_cs_hist = np.empty(shape=(4, n_bins[1]))
            projections_cs_2d_hist = np.empty(shape=(4, n_bins[0], n_bins[1]))
            for tracks_chunk, _ in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size):  # read track file in chunks
                # select only valid track intersection
                tracks_chunk = tracks_chunk[~np.isnan(tracks_chunk[:]['offset_0'])]
                # select different CS in order to histogram them and to deduce from the CS ratio the effective CS-1-pitch
                n_hits = tracks_chunk[~np.isnan(tracks_chunk['n_hits_dut_%d' % actual_dut])]['n_hits_dut_%d' % actual_dut]
                intersections = np.column_stack((tracks_chunk[:]['offset_0'], tracks_chunk[:]['offset_1'], tracks_chunk[:]['offset_2']))
                # arrays for intersections for different CS
                intersections_cs_1 = np.zeros(shape=(3, len(n_hits[n_hits == 1])))
                intersections_cs_2 = np.zeros(shape=(3, len(n_hits[n_hits == 2])))
                intersections_cs_3 = np.zeros(shape=(3, len(n_hits[n_hits == 3])))
                intersections_cs_4 = np.zeros(shape=(3, len(n_hits[n_hits == 4])))
                # read track intersections with actual dut for cluster sizes between 1 and 4
                for dim in range(3):
                    intersections_cs_1[dim, :] = intersections[:, dim][tracks_chunk['n_hits_dut_%i' % actual_dut] == 1]
                    intersections_cs_2[dim, :] = intersections[:, dim][tracks_chunk['n_hits_dut_%i' % actual_dut] == 2]
                    intersections_cs_3[dim, :] = intersections[:, dim][tracks_chunk['n_hits_dut_%i' % actual_dut] == 3]
                    intersections_cs_4[dim, :] = intersections[:, dim][tracks_chunk['n_hits_dut_%i' % actual_dut] == 4]
                # stack intersections of all CS together
                intersections_cs = [intersections_cs_1, intersections_cs_2, intersections_cs_3, intersections_cs_4]
                intersections_cs_local = [np.zeros_like(intersections_cs_1), np.zeros_like(intersections_cs_2),
                                          np.zeros_like(intersections_cs_3), np.zeros_like(intersections_cs_4)]
                # transform to local coordinate system
                for cs in range(4):
                    if use_prealignment:
                        intersections_cs_local[cs][0], intersections_cs_local[cs][1], intersections_cs_local[cs][2] = geometry_utils.apply_alignment(intersections_cs[cs][0], intersections_cs[cs][1], intersections_cs[cs][2],
                                                                                                                                                    dut_index=actual_dut,
                                                                                                                                                    prealignment=prealignment,
                                                                                                                                                    inverse=True)
                    else:
                        intersections_cs_local[cs][0], intersections_cs_local[cs][1], intersections_cs_local[cs][2] = geometry_utils.apply_alignment(intersections_cs[cs][0], intersections_cs[cs][1], intersections_cs[cs][2],
                                                                                                                                                    dut_index=actual_dut,
                                                                                                                                                    alignment=alignment,
                                                                                                                                                    inverse=True)
                    # sanity check: after transforming into the DUT plane the
                    # z-coordinate of every finite intersection must vanish
                    if not np.allclose(intersections_cs_local[cs][2][np.isfinite(intersections_cs_local[cs][2])], 0.0):
                        raise RuntimeError("Transformation into local coordinate system gives z != 0")
                # project track intersections onto one pixel in order to increase statistics
                projections_cs = [np.zeros_like(intersections_cs_1), np.zeros_like(intersections_cs_2),
                                  np.zeros_like(intersections_cs_3), np.zeros_like(intersections_cs_4)]
                for cs in range(4):
                    for dim in range(2):
                        projections_cs[cs][dim] = np.mod(intersections_cs_local[cs][dim],
                                                         np.array([actual_pixel_size[dim]] * len(intersections_cs_local[cs][dim])))
                # histogram intersections and create cluster size histogram (for calculation of effective pitch)
                if initialize:
                    for cs in range(4):
                        if cs == 0:
                            # first CS also fixes the bin edges reused by all later histograms/chunks
                            projections_x_cs_hist[cs], edges_x = np.histogram(projections_cs[cs][0], bins=n_bins[0], range=[0.0, actual_pixel_size[0]])
                            projections_y_cs_hist[cs], edges_y = np.histogram(projections_cs[cs][1], bins=n_bins[1], range=[0.0, actual_pixel_size[1]])
                        else:
                            projections_x_cs_hist[cs], _ = np.histogram(projections_cs[cs][0], bins=n_bins[0], range=[0.0, actual_pixel_size[0]])
                            projections_y_cs_hist[cs], _ = np.histogram(projections_cs[cs][1], bins=n_bins[1], range=[0.0, actual_pixel_size[1]])
                        projections_cs_2d_hist[cs], _, _ = np.histogram2d(x=projections_cs[cs][0], y=projections_cs[cs][1], bins=[edges_x, edges_y])
                    cs_edges = np.arange(1.0 - 0.5, 1000 + 0.5, 1)
                    cs_hist, cs_edges = np.histogram(n_hits, bins=cs_edges, density=False)
                    initialize = False
                else:  # if already read first chunk, add histograms up
                    for cs in range(4):
                        projections_x_cs_hist[cs, :] += np.histogram(projections_cs[cs][0], bins=edges_x)[0]
                        projections_y_cs_hist[cs, :] += np.histogram(projections_cs[cs][1], bins=edges_y)[0]
                        projections_cs_2d_hist[cs] += np.histogram2d(x=projections_cs[cs][0], y=projections_cs[cs][1], bins=[edges_x, edges_y])[0]
                    cs_hist += np.histogram(n_hits, bins=cs_edges, density=False)[0]

            # calculate effective pitch from cluster size ratio
            effective_pitch = calculate_effective_cs1_pitch(cs_hist=cs_hist, pitch=[actual_pixel_size[0], actual_pixel_size[1]])
            effective_pitches.append(effective_pitch)

            if plot:
                # plot histograms for cluster sizes between 1 and 4
                for cs in range(4):
                    plot_utils.plot_in_pixel_hit_hist(x_intersections_hist=projections_x_cs_hist[cs],
                                                      y_intersections_hist=projections_y_cs_hist[cs],
                                                      intersections_2d_hist=projections_cs_2d_hist[cs],
                                                      pixel_pitch=actual_pixel_size,
                                                      output_pdf=output_pdf,
                                                      bins=[edges_x, edges_y],
                                                      plot_title=('In-pixel Cluster Size %i Hit Distribution for DUT%d' % (cs + 1, actual_dut)))

            # take slice out of middle
            slice_width = 5  # set width of slice to 5 um
            slice_start_x = (actual_pixel_size[0] - slice_width) / 2.0
            slice_stop_x = (actual_pixel_size[0] + slice_width) / 2.0
            slice_start_y = (actual_pixel_size[1] - slice_width) / 2.0
            slice_stop_y = (actual_pixel_size[1] + slice_width) / 2.0
            start_x = int(slice_start_x / (actual_pixel_size[0] / n_bins[0]))
            stop_x = int(slice_stop_x / (actual_pixel_size[0] / n_bins[0]))
            # NOTE(review): the y indices below divide by actual_pixel_size[0];
            # by symmetry with the x case this looks like it should be
            # actual_pixel_size[1] -- confirm before changing.
            start_y = int(slice_start_y / (actual_pixel_size[0] / n_bins[1]))
            stop_y = int(slice_stop_y / (actual_pixel_size[0] / n_bins[1]))

            # calculate sliced hists
            sliced_cs_hists_x = np.empty(shape=(2, n_bins[0]))
            sliced_cs_hists_y = np.empty(shape=(2, n_bins[1]))
            for cs in range(2):
                sliced_cs_hists_y[cs] = np.sum(projections_cs_2d_hist[cs][start_x:stop_x, ], axis=0)
                sliced_cs_hists_x[cs] = np.sum(projections_cs_2d_hist[cs][:, start_y:stop_y], axis=1)
            sliced_hist_cs_all = [sliced_cs_hists_x[0], sliced_cs_hists_x[1], sliced_cs_hists_y[0], sliced_cs_hists_y[1]]

            # fit projections onto x and y-axis of CS 1 and CS 2 distributions
            fit_params_cs_sliced = np.zeros(shape=(2, 2, 4))
            for cs in range(2):
                fit_params_cs_sliced[cs][0] = analysis_utils.fit_in_pixel_hit_hist(hist=sliced_cs_hists_x[cs],
                                                                                   edges=(edges_x[1:] + edges_x[:-1]) / 2.0)
                fit_params_cs_sliced[cs][1] = analysis_utils.fit_in_pixel_hit_hist(hist=sliced_cs_hists_y[cs],
                                                                                   edges=(edges_y[1:] + edges_y[:-1]) / 2.0)
            fit_params_all = [fit_params_cs_sliced[0][0], fit_params_cs_sliced[0][1], fit_params_cs_sliced[1][0], fit_params_cs_sliced[1][1]]
            if np.any(np.isnan(fit_params_all)):
                # skip the comparison plot for this DUT if any fit failed
                logging.warning("Some fits could not performed.")
                continue
            # deduce normalization (shift) from fit, norm such that background of CS 1 distribution is the same as background of CS 2 distribution
            x_intersections_hist_cs_1_normed, x_intersections_hist_cs_2_normed, y_intersections_hist_cs_1_normed, y_intersections_hist_cs_2_normed = normalize_distributions(hists=sliced_hist_cs_all, fit_results=fit_params_all)

            if plot:
                # plot effective CS-1 pitch from ratio of CS distribution
                plot_utils.plot_in_pixel_hit_hist_with_eff_pitch(x_intersections_hist_cs_1=x_intersections_hist_cs_1_normed,
                                                                 x_intersections_hist_cs_2=x_intersections_hist_cs_2_normed,
                                                                 y_intersections_hist_cs_1=y_intersections_hist_cs_1_normed,
                                                                 y_intersections_hist_cs_2=y_intersections_hist_cs_2_normed,
                                                                 intersections_2d_hist=projections_cs_2d_hist[0] - projections_cs_2d_hist[1],
                                                                 fit_results=fit_params_all,
                                                                 pixel_pitch=actual_pixel_size,
                                                                 bins=[edges_x, edges_y],
                                                                 effective_pitch=effective_pitch,
                                                                 output_pdf=output_pdf,
                                                                 plot_title=('Effective Cluster Size 1 Pitch for DUT%d' % actual_dut))

    if output_pdf is not None:
        output_pdf.close()

    return effective_pitches
| {
"repo_name": "YannickDieter/testbeam_analysis",
"path": "testbeam_analysis/result_analysis.py",
"copies": "1",
"size": "104146",
"license": "mit",
"hash": 5332964350258174000,
"line_mean": 67.0692810458,
"line_max": 373,
"alpha_frac": 0.5197415167,
"autogenerated": false,
"ratio": 4.065344679522211,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.508508619622221,
"avg_score": null,
"num_lines": null
} |
# all functions related to user accounts
import hashlib
from models import *
from base64 import b64decode, b64encode
import json
import requests
from search_helpers import get_article_object
GET = requests.get
async def create_pmid(handler, user, repo_name, pmid, github_token):
    """Create an empty JSON file for this PMID in the user's collection repo.

    Always returns True; errors surface as exceptions from github_request.
    """
    pmid_int = int(pmid)
    payload = {
        "message": "Add {0}.json".format(pmid_int),
        "content": encode_for_github({}),
    }
    contents_path = "repos/{0}/{1}/contents/{2}.json".format(user, repo_name, pmid_int)
    await handler.github_request(requests.put, contents_path, github_token, payload)
    return True
async def get_or_create_pmid(handler, user, collection_name, pmid, github_token):
    """Fetch the contents of this PMID file, creating the file first when the
    GitHub request indicates it does not exist yet."""
    pmid_int = int(pmid)
    repo_name = get_repo_name_from_collection(collection_name)
    contents_path = "repos/{0}/{1}/contents/{2}.json".format(user, repo_name, pmid_int)
    try:
        return await handler.github_request(GET, contents_path, github_token)
    except OSError:
        # The article didn't already exist: create it, then fetch it again.
        await create_pmid(handler, user, repo_name, pmid_int, github_token)
        return await handler.github_request(requests.get, contents_path, github_token)
def encode_for_github(d):
    """Serialize a dictionary to pretty-printed JSON and base64-encode it
    (UTF-8 both ways), as GitHub's contents API expects."""
    as_json = json.dumps(d, indent=4, sort_keys=True)
    encoded = b64encode(as_json.encode("utf-8"))
    return encoded.decode("utf-8")
def decode_from_github(s):
    """Inverse of encode_for_github: base64-decode, then parse the JSON.
    Returns a dictionary."""
    raw_bytes = b64decode(s)
    return json.loads(raw_bytes.decode("utf-8"))
def get_repo_name_from_collection(name):
    """Map a user-facing collection name to its GitHub repository name
    (spaces become hyphens)."""
    slug = name.replace(" ", "-")
    return "brainspell-neo-collection-" + slug
def get_collection_from_repo_name(name):
    """Strip the repo-name prefix to recover the user-specified collection
    name (hyphens introduced by the forward mapping are kept)."""
    prefix = "brainspell-neo-collection-"
    return name[len(prefix):]
def get_github_username_from_api_key(api_key):
    """Fetch the GitHub username whose stored API key matches.

    Raises StopIteration if no user matches -- callers are expected to have
    validated the key first.
    """
    matching_users = User.select().where((User.password == api_key))
    first_match = next(matching_users.execute())
    return first_match.username
def valid_api_key(api_key):
    """Return whether an API key exists in our database."""
    matching_users = User.select().where((User.password == api_key))
    result = matching_users.execute()
    return result.count >= 1
def get_user_object_from_api_key(api_key):
    """Return the executed PeeWee query of users matching this API key."""
    query = User.select().where(User.password == api_key)
    return query.execute()
def register_github_user(user_dict):
    """Add a GitHub user to our database.

    Returns True when a new row was created, False when the username is
    already registered. The stored password (a.k.a. API key) is the SHA-1
    hex digest of the GitHub numeric ID.
    """
    existing = User.select().where(User.username == user_dict["login"]).execute()
    if existing.count != 0:
        return False  # user already exists
    hasher = hashlib.sha1()
    hasher.update(str(user_dict["id"]).encode('utf-8'))
    User.create(username=user_dict["login"],
                emailaddress=user_dict["email"],
                password=hasher.hexdigest())
    return True
def add_collection_to_brainspell_database(
        collection_name,
        description,
        api_key,
        cold_run=True):
    """Create a collection in our database if it doesn't exist.

    Returns False when the API key is invalid or the collection already
    exists; True when the collection was created (or would be, on a cold run).
    """
    if not valid_api_key(api_key):
        return False
    user = list(get_user_object_from_api_key(api_key))[0]
    # SECURITY NOTE: legacy rows can contain malformatted JSON, so eval()
    # is used here instead of json.loads() -- this matches the original
    # behavior but evaluates database content as Python code.
    user_collections = eval(user.collections) if user.collections else {}
    if collection_name in user_collections:
        return False
    user_collections[collection_name] = {
        "description": str(description),
        "pmids": [],
    }
    if not cold_run:
        q = User.update(
            collections=json.dumps(user_collections)).where(
            User.username == user.username)
        q.execute()
    return True
def bulk_add_articles_to_brainspell_database_collection(
        collection, pmids, api_key, cold_run=True):
    """Add many PMIDs to a collection at once, skipping duplicates.

    Takes collection *without* the "brainspell-collection" prefix. Returns
    False when the user does not exist or has no collections field (violated
    assumption), True otherwise. The collection itself is created on demand.
    """
    users = get_user_object_from_api_key(api_key)
    if users.count == 0:
        return False  # user does not exist
    user = list(users)[0]
    if not user.collections:
        return False  # user has no collections; violates assumptions
    # assumes collections are well-formed JSON
    target = json.loads(user.collections)
    if collection not in target:
        target[collection] = {
            "description": "None",
            "pmids": []
        }
    # a set keeps this O(len(pmids)) and deduplicates automatically
    pmid_set = set(str(p) for p in target[collection]["pmids"])
    pmid_set.update(str(p) for p in pmids)
    target[collection]["pmids"] = list(pmid_set)
    if not cold_run:
        q = User.update(
            collections=json.dumps(target)).where(
            User.password == api_key)
        q.execute()
    return True
def remove_all_brainspell_database_collections(api_key):
    """Dangerous! Drop every Brainspell collection for this user from our
    local database. GitHub repositories are NOT touched.
    Called from CollectionsEndpointHandler."""
    if not valid_api_key(api_key):
        return
    q = User.update(
        collections=json.dumps({})).where(
        User.password == api_key)
    q.execute()
def get_brainspell_collections_from_api_key(api_key):
    """Return the user's collections dict from Brainspell's database.

    May be inconsistent with GitHub. Returns {} for unknown keys or for
    users with an empty collections field.
    """
    if valid_api_key(api_key):
        user = list(get_user_object_from_api_key(api_key))[0]
        if user.collections:
            return json.loads(user.collections)
    return {}
def add_article_to_brainspell_database_collection(
        collection, pmid, api_key, cold_run=True):
    """Add one PMID to a collection in our local database (GitHub untouched).

    Takes collection *without* the "brainspell-collection" prefix. Assumes
    the user exists and has a collections field; the collection itself is
    created on demand. Returns False when the article is already present or
    an assumption is violated.

    O(N) in the collection size; use
    bulk_add_articles_to_brainspell_database_collection for many articles.
    Called by AddToCollectionEndpointHandler.
    """
    users = get_user_object_from_api_key(api_key)
    if users.count == 0:
        return False  # user does not exist
    user = list(users)[0]
    if not user.collections:
        return False  # user has no collections; violates assumptions
    # assumes collections are well-formed JSON
    target = json.loads(user.collections)
    if collection not in target:
        target[collection] = {
            "description": "None",
            "pmids": []
        }
    stored_pmids = set(str(p) for p in target[collection]["pmids"])
    if str(pmid) in stored_pmids:
        return False  # article already in collection
    stored_pmids.add(str(pmid))
    target[collection]["pmids"] = list(stored_pmids)
    if not cold_run:
        q = User.update(
            collections=json.dumps(target)).where(
            User.password == api_key)
        q.execute()
    return True
def remove_article_from_brainspell_database_collection(
        collection, pmid, api_key, cold_run=True):
    """Remove one PMID from a collection in the local database only.

    Takes collection *without* the "brainspell-collection" prefix. GitHub is
    not affected. Returns False when the user, collection or article cannot
    be found; True on (possibly simulated) success.
    """
    matching_users = get_user_object_from_api_key(api_key)
    if matching_users.count == 0:
        return False
    user = list(matching_users)[0]
    if not user.collections:
        return False
    # assumes collections are well-formed JSON
    stored_collections = json.loads(user.collections)
    if collection not in stored_collections:
        return False
    stored_pmids = [str(p) for p in stored_collections[collection]["pmids"]]
    if str(pmid) not in stored_pmids:
        return False
    stored_pmids.remove(str(pmid))
    stored_collections[collection]["pmids"] = stored_pmids
    if not cold_run:
        q = User.update(
            collections=json.dumps(stored_collections)).where(
            User.password == api_key)
        q.execute()
    return True
def cache_user_collections(api_key, collections_obj):
    """Force-overwrite the stored collections field for this user with the
    given object (serialized to JSON)."""
    serialized = json.dumps(collections_obj)
    User.update(collections=serialized).where(
        User.password == api_key).execute()
def add_unmapped_article_to_cached_collections(api_key, pmid, collection_name):
    """Append an article stub (title/pmid/authors/reference) to the
    'unmapped_articles' list of the named cached collection, then persist.

    NOTE: here the cached collections field holds a *list* of dicts, each
    with a 'name' key (unlike the dict shape used elsewhere in this module).
    """
    row = list(
        User.select(
            User.collections).where(
            User.password == api_key).execute())[0]
    cached_collections = json.loads(row.collections)
    article = list(get_article_object(pmid))[0]
    target = [
        c for c in cached_collections if c['name'] == collection_name][0]
    target['unmapped_articles'].append({
        'title': article.title,
        'pmid': article.pmid,
        'authors': article.authors,
        'reference': article.reference,
    })
    cache_user_collections(api_key, cached_collections)
| {
"repo_name": "OpenNeuroLab/brainspell-neo",
"path": "brainspell/user_account_helpers.py",
"copies": "2",
"size": "10928",
"license": "mit",
"hash": -4011764996796839400,
"line_mean": 32.9378881988,
"line_max": 84,
"alpha_frac": 0.6009333821,
"autogenerated": false,
"ratio": 4.022083179977916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00018758351673318885,
"num_lines": 322
} |
# ('GameButton') without a trailing comma is just a parenthesized string,
# which breaks `from module import *`; use a proper sequence.
__all__ = ['GameButton']
import string
from kivy.properties import *
from kivy.uix.image import Image
from kivy.core.image import Image as CoreImage
from kivy.graphics import Rectangle
from kivy.core.window import Window
from kivy.core.text import Label as CoreLabel
from pocketthrone.managers.pipe import L
from pocketthrone.managers.filemanager import FileManager
from pocketthrone.managers.eventmanager import EventManager
from pocketthrone.entities.event import *
from pocketthrone.entities.enum import WidgetState, WidgetAction
class GameButton(Image):
    """Clickable image widget.

    Registers itself with the EventManager and WidgetManager, derives its
    background image from its link name and current action, and fires a
    ButtonTouchedEvent when left-clicked.
    """

    # widget constants
    ID_DEFAULT = "NO_ID"
    # widget state (DEFAULT, PRESSED, DISABLED, INVALID, INVISIBLE)
    state = WidgetState(initial=WidgetState.STATE_DEFAULT)
    # widget action (NONE, ATTACK, MOVE, BUILD, NEXTTURN)
    action = WidgetAction(initial=WidgetAction.ACTION_NONE)
    extra = None
    link = None
    label = None
    text = ""
    _corelabel = None
    _dirty = True
    _tag = "[GameButton] "

    # NOTE(review): the state/action/extra parameters default to the class
    # attributes above and are never assigned to the instance -- confirm
    # whether that is intentional.
    def __init__(self, link=None, state=state, action=action, extra=extra, **kwargs):
        super(GameButton, self).__init__(**kwargs)
        self.link = link
        EventManager.register(self)
        L.WidgetManager.register(link, self)
        # set optional properties
        # FIX: was string.upper(link); string.upper() was removed in Python 3
        # and str.upper() is equivalent on both Python 2 and 3.
        self.button_tag = link.upper()
        self.extra = extra
        self.update()

    def update(self):
        '''Refresh text and background image when the widget is dirty.'''
        if self._dirty:
            self.update_source()
            self.update_label()
            self._dirty = False

    def on_touch_down(self, touch):
        '''Triggered when the button was pressed.'''
        # check if touch collides with button; only the left button fires
        if self.collide_point(*touch.pos):
            if touch.button == "left":
                # fire ButtonTouchedEvent carrying state, action and extra
                ev_button_clicked = ButtonTouchedEvent(self.link, state=self.get_state(), action=self.get_action(), extra=self.get_extra())
                EventManager.fire(ev_button_clicked)

    def set_source(self, icon_source):
        '''Set an image path (relative to the img/ root) as background icon.'''
        # NOTE(review): update_source() recomputes self.source from self.link,
        # so the path set here is immediately overwritten -- confirm intent.
        self.source = L.RootDirectory + "/img/" + icon_source
        self.update_source()

    def set_state(self, state):
        '''Set the GameButton's state ("sub-tag") and refresh the widget.'''
        self.state = state
        self.update()
        print(self._tag + "button state is now "+ repr(state))

    def get_state(self):
        '''Return the GameButton's WidgetState ("sub-tag").'''
        return self.state

    def set_action(self, value):
        '''Set the button's action (what it does) and refresh the widget.'''
        self.action = value
        self.update()

    def get_action(self):
        '''Return the button's action (what it does).'''
        return self.action

    def update_source(self):
        '''Rebuild the background image path from link and action.'''
        image_dir = L.RootDirectory + "/img/"
        action_str = str(self.get_action()).lower()
        # background image name is <link>_bg_<action>.png
        background_src = image_dir + self.link + "_bg_" + action_str + ".png"
        self.source = background_src
        # print icon image path
        print(self._tag + "background image is " + background_src + " for " + str(self.link))

    def get_extra(self):
        '''Return the button's extra information.'''
        return self.extra

    def set_extra(self, value):
        '''Set the button's extra information.'''
        self.extra = value

    def update_label(self):
        '''Lazily build the CoreLabel used for the button text.'''
        if self.label is None:
            # create new label and render its texture once
            label = CoreLabel(text=self.get_text(), font_size=12, color=(0, 0, 1))
            label.refresh()
            self.label = label
        # NOTE(review): the rendered texture is never drawn -- the canvas
        # call below was left commented out in the original.
        # self.canvas.add(Rectangle(texture=self.label.texture, size=(self.width, self.height)))

    def set_text(self, value):
        '''Set the button's text and refresh the widget.'''
        self.text = value
        self.update()

    def get_text(self):
        '''Return the button's text.'''
        return self.text

    def get_source(self):
        '''Return the absolute path of this button's background icon.'''
        return self.source

    def trigger_redraw(self):
        '''Mark the widget for a full redraw on the next update().'''
        self._dirty = True

    def on_event(self, event):
        # redraw the map when required each TickEvent
        if isinstance(event, TickEvent):
            # self.update()
            pass
| {
"repo_name": "herrschr/prey-game",
"path": "pocketthrone/widgets/gamebutton.py",
"copies": "2",
"size": "4047",
"license": "bsd-2-clause",
"hash": -6211700491131788000,
"line_mean": 28.1151079137,
"line_max": 127,
"alpha_frac": 0.7044724487,
"autogenerated": false,
"ratio": 3.255832662912309,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49603051116123087,
"avg_score": null,
"num_lines": null
} |
# ('GameLabel') without a trailing comma is just a parenthesized string,
# which breaks `from module import *`; use a proper sequence.
__all__ = ['GameLabel']
from kivy.graphics import Rectangle, Color
from kivy.core.text import Label as CoreLabel
from kivy.uix.label import Label
from pocketthrone.managers.pipe import L
from pocketthrone.managers.eventmanager import EventManager
from pocketthrone.managers.filemanager import FileManager
from pocketthrone.entities.event import *
class GameLabel(Label):
    # Kivy Label that registers itself with the game's EventManager and
    # WidgetManager and redraws its texture lazily via a dirty flag that is
    # consumed on every TickEvent.
    _tag = "[GameLabel] "
    _dirty = True
    # text size weighting default
    _weight_mod = 1.0
    # default fontsize in px
    _std_fontsize = 12
    # default font color (RGBA)
    _std_fontcolor = (0, 0, 0, 1)
    # CoreLabel kivy class for getting font texture
    _corelabel = None

    def __init__(self, link=None, weight=1, icon_source="none.png", **kwargs):
        """Register the widget under `link` and set its initial state.

        :param link: key this widget is registered under in the WidgetManager
        :param weight: text size modifier (see _weight_mod)
        :param icon_source: icon file name relative to the img/ directory
        """
        # initialize widget
        super(GameLabel, self).__init__(**kwargs)
        EventManager.register(self)
        L.WidgetManager.register(link, self)
        # set initial GameLabel state
        self.link = link
        self.weight = weight
        self.font_color = self._std_fontcolor
        self.icon_source = L.RootDirectory + "img/" + icon_source
        self.halign = "left"
        self.valign = "top"

    # trigger a widget full redraw on the next update()
    def trigger_redraw(self):
        self._dirty = True

    # update all necessary stuff (only when the dirty flag is set)
    def update(self):
        if self._dirty:
            self.update_font()
            self.update_plaintext()
            self.update_layout()
            self._dirty = False

    # update font related calculations; use class variables!
    def update_font(self):
        # update weight & related font size
        # NOTE(review): calculated_weight is computed but never applied to
        # the font size -- looks like unfinished scaling logic; confirm.
        calculated_weight = self._weight_mod * self.weight
        if not self.font_color:
            self.font_color = self._std_fontcolor

    # update layout data; use class variables!
    def update_layout(self):
        if not self._corelabel:
            # create new label and render its texture once
            corelabel = CoreLabel(text=self.text, font_size=self.font_size, color=self.font_color)
            corelabel.refresh();
            self._corelabel = corelabel
        labeltexture = self._corelabel.texture
        # draw the cached texture stretched to the widget's current size
        self.canvas.add(Rectangle(texture=labeltexture, size=(self.width, self.height)))

    # update text content; use class variables!
    def update_plaintext(self):
        if self.text == None:
            self.text = ""

    # get the GameLabels tag
    def get_label_tag(self):
        # NOTE(review): label_tag is only ever assigned in set_label_tag, so
        # calling this first raises AttributeError -- confirm call order.
        return self.label_tag

    # set the GameLabels tag
    def set_label_tag(self, value):
        self.label_tag = value
        self.trigger_redraw()

    # get the GameLabels weight (text size modifier)
    def get_weight(self):
        return self.weight

    # set the GameLabels weight (text size modifier)
    def set_weight(self, value):
        self.weight = value
        self.update()

    # set horizontal and vertical text alignment
    def set_aligns(self, halign, valign):
        self.halign = halign
        self.valign = valign

    # get the absolute path to the icons Image source
    def get_icon_source(self):
        return self.icon_source

    # set the absolute path to the icons Image source
    def set_icon_source(self, value):
        self.icon_source = value
        self.update()

    # get the GameLabels text content as string
    def get_plaintext(self):
        return self.text

    # get_plaintext() alias
    def get_text(self):
        return self.get_plaintext()

    # set_plaintext(value) alias
    def set_text(self, value):
        self.set_plaintext(value)

    # set the GameLabels text content as string
    def set_plaintext(self, value):
        self.text = value
        self.trigger_redraw()

    # handles game-intern Events: refresh lazily on every engine tick
    def on_event(self, event):
        if (isinstance(event, TickEvent)):
            self.update()
| {
"repo_name": "herrschr/prey-game",
"path": "pocketthrone/widgets/gamelabel.py",
"copies": "2",
"size": "3311",
"license": "bsd-2-clause",
"hash": -7531994545574267000,
"line_mean": 25.7016129032,
"line_max": 89,
"alpha_frac": 0.7212322561,
"autogenerated": false,
"ratio": 3.171455938697318,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48926881947973183,
"avg_score": null,
"num_lines": null
} |
# All game related code
import json
import random
import card_parser
class Game:
    """Lobby and round state: the connected players and the cards already
    dealt (shared so new players don't receive duplicates)."""

    def __init__(self):
        self.players = {}       # name -> Player
        self.picked_cards = []  # cards already dealt, shared across players

    def add_player(self, conn, data):
        """Register a new player connection and confirm acceptance."""
        player = Player(conn, data, self.picked_cards)
        self.players[player.get_name()] = player
        conn.sendMessage(json.dumps({'action': 'accepted', 'data': ''}))

    def check_for_start(self):
        # The game starts as soon as exactly three players have joined.
        if len(self.players) == 3:
            self.start_game()

    def attribute_selected(self, data):
        """Resolve one round: compare every player's current card on the
        chosen attribute, hand all cards to the winner and drop any player
        who ran out of cards."""
        # Get all cards with players
        player_cards = self.players.items()
        cards = [(player, player.get_card()) for _, player in player_cards]
        all_card_values = [card.get_values() for player, card in cards]
        # Get current card and compare it
        attr = data['data']['attributeToCompare']
        winner, winner_card = Card.compare(attr, cards)
        data = {'all_cards': all_card_values, 'winner_card': winner_card.get_values()}
        winner.add_cards([card for _, card in cards])
        # Check if game has ended
        has_ended, losers = self.check_game_end()
        if has_ended:
            data['loser'] = losers
            has_next_round = len(self.players) > 1
            self.broadcast(data, 'playerLost', next_card=has_next_round)
            if not has_next_round:
                self.end_connections()
            else:
                # BUG FIX: the original executed `del self.players[losers]`
                # with the whole losers *list* as the key (a guaranteed
                # TypeError); close and delete each losing player instead.
                for loser_name in losers:
                    self.players[loser_name].get_connection().sendClose()
                    del self.players[loser_name]
        # Update after comparison
        data['turn'] = winner.get_name()
        data['attribute_compared'] = attr
        self.broadcast(data, 'next')

    def start_game(self):
        """Randomly pick a starting player and broadcast the first hand."""
        random.seed(None)
        # list() keeps this working whether items() is a list (py2) or a view (py3)
        name, player = random.choice(list(self.players.items()))
        names = [name for name, _ in self.players.items()]
        data = {'turn': name, 'players': names}
        self.broadcast(data, 'start')

    def check_game_end(self):
        """Return (has_ended, losers): losers are all players with no cards."""
        has_ended = False
        losers = []
        for name, player in self.players.items():
            if not player.has_cards():
                has_ended = True
                losers.append(name)
        return has_ended, losers

    def broadcast(self, data, action, next_card=True):
        """Send `data` under `action` to every player; when next_card is
        True, each player's personal payload carries their next card."""
        for name, player in self.players.items():
            if next_card:
                data['card'] = player.next_card()
            conn = player.get_connection()
            conn.sendMessage(json.dumps({'action': action, 'data': data}))

    def end_connections(self):
        """Close every player's connection."""
        for _, player in self.players.items():
            player.get_connection().sendClose()
class Player:
    """One connected participant holding a queue of cards."""

    def __init__(self, conn, data, picked_cards):
        self.name = data['name']
        self.connection = conn
        self.cards = self.receive_cards(data, picked_cards)
        self.current_card = None  # the card currently in play

    def get_name(self):
        return self.name

    def get_card(self):
        """Return the card currently in play (None before the first deal)."""
        return self.current_card

    def get_connection(self):
        return self.connection

    def receive_cards(self, player_data, picked_cards):
        """Fetch card data from the mobile.de API based on the player's
        location and wrap each JSON record in a Card."""
        # renamed from `long`/`lat` locals to avoid shadowing the py2 builtin
        longitude = player_data['data']['long']
        latitude = player_data['data']['lat']
        cards = card_parser.main(latitude, longitude, picked_cards)
        return [Card(json.loads(values)) for values in cards]

    def next_card(self):
        """Pop the next card into play and return its values, or {} when
        the player has no cards left."""
        try:
            self.current_card = self.cards.pop(0)
        except IndexError:  # was a bare except; pop on an empty list raises IndexError
            return {}
        return self.current_card.get_values()

    def add_cards(self, cards):
        """Append won cards to the back of the player's queue."""
        try:
            self.cards.extend(cards)
        except TypeError:  # was a bare except; extend only fails on non-iterables
            print("SOMETHING WENT WRONG!")

    def has_cards(self):
        return len(self.cards) > 0
class Card:
    """A single playing card wrapping one vehicle's attribute values."""

    # Whether a lower or higher value wins for each comparable attribute.
    comparisons = {'price': min,
                   'power': max,
                   'registration': max,
                   'mileage': min,
                   'consumption': min}

    def __init__(self, values):
        self.values = values

    @staticmethod
    def compare(attr, player_cards):
        """Return the winning ``(player, card)`` pair for *attr*.

        The winner is the pair whose card holds the minimal or maximal
        value of *attr*, according to the ``comparisons`` table; the
        value is compared as a float.
        """
        pick_best = Card.comparisons[attr]
        return pick_best(player_cards,
                         key=lambda entry: float(entry[1].get(attr)))

    def get_values(self):
        """Return the raw mapping of attribute values."""
        return self.values

    def get(self, attr):
        """Return the value stored under *attr*."""
        return self.values[attr]
| {
"repo_name": "HPI-Hackathon/cartets",
"path": "backend/game.py",
"copies": "1",
"size": "4587",
"license": "mit",
"hash": 4229776447937971700,
"line_mean": 30.2040816327,
"line_max": 86,
"alpha_frac": 0.5713974275,
"autogenerated": false,
"ratio": 3.995644599303136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5067042026803136,
"avg_score": null,
"num_lines": null
} |
__all__ = [ "gamma_matrix", "gamma_matrix_pass"]
import numpy as np
def gamma_matrix(rm, tm, dta=1.0, dd=0.05):
    '''Compute matrix of gamma indices.

    :param rm: reference matrix (relative values assumed)
    :param tm: tested matrix (relative values assumed)
    :param dta: maximum distance-to-agreement (in voxels)
    :param dd: maximum dose difference (absolute, not percent!)

    :type rm: numpy.ndarray
    :type tm: numpy.ndarray
    :type dta: float
    :type dd: float

    :rtype: numpy.ndarray

    It can be evaluated on matrices of any dimensionality.
    '''
    # Check validity of input
    if rm.shape != tm.shape:
        raise Exception("Cannot compute for matrices of different sizes.")
    # Result matrix
    output = np.ndarray(rm.shape, dtype=np.float64)
    # Squared tolerances: gamma^2 = dd^2/dd_scale + dist^2/dta_scale
    dta_scale = dta ** 2
    dd_scale = dd ** 2
    # Coordinate grid, shape (ndim,) + rm.shape
    indices = np.indices(rm.shape)
    # Broadcasting shape for a single index against `indices`.
    # The original hard-coded reshape(ndim, 1, 1, 1), which only worked
    # for 3-D inputs; (ndim,) + (1,)*ndim works for any dimensionality.
    bcast_shape = (rm.ndim,) + (1,) * rm.ndim
    it = np.nditer(rm, ("multi_index",))
    while not it.finished:
        index = it.multi_index
        # Dose difference to every point (squared)
        dd2 = (tm - it.value) ** 2
        # Distance to every point (squared)
        dist2 = np.sum((indices - np.array(index).reshape(bcast_shape)) ** 2, axis=0)
        # Minimum of the sum
        output[index] = np.sqrt(np.nanmin(dd2 / dd_scale + dist2 / dta_scale))
        it.iternext()
    return output
def gamma_matrix_pass(rm, tm, dta=1.0, dd=0.05, ignore=lambda value: False):
    '''Effectively check which points in a matrix pair pass gamma index test.

    :param rm: reference matrix (relative values assumed)
    :param tm: tested matrix (relative values assumed)
    :param dta: maximum distance-to-agreement (in voxels)
    :param dd: maximum dose difference
    :param ignore: function called on dose values in rm;
        if it returns True, the point is reported as not passing (False)

    :rtype: numpy.ndarray (dtype=bool)

    It can be evaluated on matrices of any dimensionality.

    Optimized in that only surrounding region that can possibly
    pass dta criterion is checked for each point.
    '''
    # Check validity of input
    if rm.shape != tm.shape:
        raise Exception("Cannot compute for matrices of different sizes.")
    shape = rm.shape
    ndim = rm.ndim
    # Result matrix
    output = np.ndarray(rm.shape, dtype=bool)
    # Squared tolerances
    dta_scale = dta ** 2
    dd_scale = dd ** 2
    # Coordinate grid, shape (ndim,) + rm.shape
    indices = np.indices(rm.shape)
    # Half-width of the search window (*2 + 1 points per axis)
    npoints = int(dta)
    # Broadcasting shape for a single index; the original reshape(ndim,1,1,1)
    # only worked for 3-D matrices.
    bcast_shape = (ndim,) + (1,) * ndim
    it = np.nditer(rm, ("multi_index",))
    while not it.finished:
        index = tuple(it.multi_index)
        if ignore(it.value):
            # A boolean array cannot hold NaN: the original assignment of
            # np.nan silently stored True. Ignored points are now False.
            output[index] = False
            it.iternext()
            continue
        # Window around `index`, clipped to the matrix bounds.
        # Tuples are required: indexing with a list of slices is an error
        # in modern NumPy.
        window = tuple(slice(max(0, index[i] - npoints),
                             min(shape[i], index[i] + npoints + 1))
                       for i in range(ndim))
        subtm = tm[window]
        # Dose difference to every point in the window (squared)
        dd2 = (subtm - it.value) ** 2
        # Distance to every point in the window (squared)
        dist2 = np.sum((indices[(slice(None),) + window]
                        - np.array(index).reshape(bcast_shape)) ** 2, axis=0)
        # Minimum of the sum; strict < 1.0 means the point passes.
        output[index] = np.sqrt(np.nanmin(dd2 / dd_scale + dist2 / dta_scale)) < 1.0
        it.iternext()
    return output
def gamma_matrix_c(rm, tm, dta=1.0, dd=0.05):
    '''Compute matrix of gammma indices (faster method using C)

    :param rm: reference matrix (relative values assumed)
    :param tm: tested matrix (relative values assumed)
    :param dta: maximum distance-to-agreement (in voxels)
    :param dd: maximum dose difference (absolute, not percent!)

    :type rm: numpy.ndarray
    :type tm: numpy.ndarray
    :type dta: float
    :type dd: float

    :rtype: numpy.ndarray

    It can be evaluated on matrices of any dimensionality (but of a same shape).

    Requires previously built libgamma.so (see Makefile) and works only in Linux.
    '''
    import ctypes
    import os
    # Load the compiled helper from the same directory as this module.
    so_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "libgamma.so")
    dll = ctypes.CDLL(so_path)
    gamma_index_f = dll.gamma_index
    # Check validity of input
    if rm.shape != tm.shape:
        raise Exception("Cannot compute for matrices of different sizes.")
    # The C routine expects contiguous double buffers.
    matrix1 = np.ascontiguousarray(rm)
    matrix2 = np.ascontiguousarray(tm)
    output = np.ndarray(rm.shape, dtype=np.float64)
    # Raw pointers handed to C; `output` is filled in place.
    p_matrix1 = matrix1.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    p_matrix2 = matrix2.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    p_output = output.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    # Shape passed as a C int array; tolerances as C doubles.
    c_shape = matrix1.ctypes.shape_as(ctypes.c_int)
    c_dd = ctypes.c_double(dd)
    c_dta = ctypes.c_double(dta)
    gamma_index_f(len(matrix1.shape), c_shape, p_matrix1, p_matrix2, p_output, c_dd, c_dta)
return output | {
"repo_name": "janpipek/gamma_index",
"path": "gamma_index/__init__.py",
"copies": "1",
"size": "4857",
"license": "mit",
"hash": 8531389207002892000,
"line_mean": 30.3419354839,
"line_max": 114,
"alpha_frac": 0.6396952852,
"autogenerated": false,
"ratio": 3.481720430107527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4621415715307527,
"avg_score": null,
"num_lines": null
} |
__all__ = ["GatekeeperException", "Report", "Bounce", "Ban", "create_embed"]
import datetime
import discord
from lifesaver.utils import human_delta
class GatekeeperException(RuntimeError):
    """An exception thrown during Gatekeeper processes.

    Base class for all Gatekeeper errors defined in this module.
    """
class CheckFailure(GatekeeperException):
    """An exception thrown due to a check failing to pass.

    The class attributes below identify which check failed.
    """
    #: The name of the check that failed to pass.
    check_name = None
    #: The check function that failed to pass.
    check = None
class Report(GatekeeperException):
    """A Gatekeeper exception that immediately halts all processing and sends
    the specified text to the broadcasting channel.

    Unlike :class:`Bounce`/:class:`Ban`, this is not a check failure.
    """
class Bounce(CheckFailure):
    """A Gatekeeper exception that will prevent a user from joining a guild when raised.

    Inherits the failed-check metadata (``check_name``/``check``) from
    :class:`CheckFailure`.
    """
class Ban(CheckFailure):
    """A Gatekeeper exception that will ban a user from the guild when raised.

    Inherits the failed-check metadata (``check_name``/``check``) from
    :class:`CheckFailure`.
    """
def create_embed(
    member: discord.Member, *, color: discord.Color, title: str, reason: str
) -> discord.Embed:
    """Create a Gatekeeper bounce or ban embed.

    The embed shows *reason* as its description, the member's avatar as a
    thumbnail, and the age of the member's account.
    """
    embed = discord.Embed(color=color, title=title, description=reason)
    # Stamp the embed with the moment of the decision (UTC).
    embed.timestamp = datetime.datetime.utcnow()
    embed.set_thumbnail(url=member.avatar_url)
    # Account age, both as a human-readable delta and an absolute time.
    embed.add_field(
        name="Account Creation",
        value=f"{human_delta(member.created_at)} ago\n{member.created_at}",
    )
    return embed
| {
"repo_name": "sliceofcode/dogbot",
"path": "dog/ext/gatekeeper/core.py",
"copies": "2",
"size": "1390",
"license": "mit",
"hash": -7146194055101655000,
"line_mean": 27.9583333333,
"line_max": 91,
"alpha_frac": 0.7050359712,
"autogenerated": false,
"ratio": 3.904494382022472,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5609530353222472,
"avg_score": null,
"num_lines": null
} |
__all__ = ['GaussianKernel']
import numpy as np
import sympy as sym
from functools import wraps
from gp.ext import gaussian_c
from . import Kernel
# Floating-point type used for all kernel computations.
DTYPE = np.float64
# Smallest positive value a kernel parameter may take (validation threshold).
EPS = np.finfo(DTYPE).eps
class GaussianKernel(Kernel):
    r"""
    Gaussian kernel function.

    Parameters
    ----------
    h : float
        Output scale kernel parameter
    w : float
        Input scale kernel parameter

    Notes
    -----
    The Gaussian kernel is defined as:

    .. math:: K(x_1, x_2) = \frac{h^2}{\sqrt{2\pi w^2}}\exp\left(-\frac{(x_1-x_2)^2}{2w^2}\right),

    where :math:`w` is the input scale parameter (equivalent to the
    standard deviation of the Gaussian) and :math:`h` is the output
    scale parameter.
    """

    def __init__(self, h, w):
        self.h = None  #: Output scale kernel parameter
        self.w = None  #: Input scale kernel parameter
        self.set_param('h', h)
        self.set_param('w', w)

    @property
    def params(self):
        r"""
        Kernel parameters.

        Returns
        -------
        params : numpy.ndarray ``(h, w)``
        """
        return np.array([self.h, self.w], dtype=DTYPE)

    @params.setter
    def params(self, val):
        self.set_param('h', val[0])
        self.set_param('w', val[1])

    def set_param(self, name, val):
        """Set parameter `name` ('h' or 'w'), rejecting values below EPS."""
        if name == 'h':
            if val < EPS:
                raise ValueError("invalid value for h: %s" % val)
            self.h = DTYPE(val)
        elif name == 'w':
            if val < EPS:
                raise ValueError("invalid value for w: %s" % val)
            self.w = DTYPE(val)
        else:
            raise ValueError("unknown parameter: %s" % name)

    @property
    @wraps(Kernel.sym_K)
    def sym_K(self):
        h = sym.Symbol('h')
        w = sym.Symbol('w')
        d = sym.Symbol('d')
        h2 = h ** 2
        w2 = w ** 2
        d2 = d ** 2
        f = h2 * (1. / sym.sqrt(2 * sym.pi * w2)) * sym.exp(-d2 / (2.0 * w2))
        return f

    def _eval(self, cfunc, x1, x2, out, lead=()):
        """Allocate `out` (shape ``lead + (x1.size, x2.size)``) when needed
        and fill it in place via the C implementation `cfunc`.

        Shared by all public evaluation methods below, which previously
        each duplicated this allocate-and-call pattern.
        """
        if out is None:
            out = np.empty(lead + (x1.size, x2.size), dtype=DTYPE)
        cfunc(out, x1, x2, self.h, self.w)
        return out

    @wraps(Kernel.K)
    def K(self, x1, x2, out=None):
        return self._eval(gaussian_c.K, x1, x2, out)

    @wraps(Kernel.jacobian)
    def jacobian(self, x1, x2, out=None):
        # Leading axis of size 2: one slice per parameter (h, w).
        return self._eval(gaussian_c.jacobian, x1, x2, out, lead=(2,))

    @wraps(Kernel.hessian)
    def hessian(self, x1, x2, out=None):
        # Leading (2, 2) axes: second derivatives over (h, w) x (h, w).
        return self._eval(gaussian_c.hessian, x1, x2, out, lead=(2, 2))

    def dK_dh(self, x1, x2, out=None):
        """First derivative of K with respect to h."""
        return self._eval(gaussian_c.dK_dh, x1, x2, out)

    def dK_dw(self, x1, x2, out=None):
        """First derivative of K with respect to w."""
        return self._eval(gaussian_c.dK_dw, x1, x2, out)

    def d2K_dhdh(self, x1, x2, out=None):
        """Second derivative of K with respect to h, h."""
        return self._eval(gaussian_c.d2K_dhdh, x1, x2, out)

    def d2K_dhdw(self, x1, x2, out=None):
        """Second derivative of K with respect to h, w."""
        return self._eval(gaussian_c.d2K_dhdw, x1, x2, out)

    def d2K_dwdh(self, x1, x2, out=None):
        """Second derivative of K with respect to w, h."""
        return self._eval(gaussian_c.d2K_dwdh, x1, x2, out)

    def d2K_dwdw(self, x1, x2, out=None):
        """Second derivative of K with respect to w, w."""
        return self._eval(gaussian_c.d2K_dwdw, x1, x2, out)
| {
"repo_name": "jhamrick/gaussian_processes",
"path": "gp/kernels/gaussian.py",
"copies": "1",
"size": "3867",
"license": "mit",
"hash": -8352020878438381000,
"line_mean": 25.8541666667,
"line_max": 98,
"alpha_frac": 0.5314197052,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9029794658436064,
"avg_score": 0.0003250093527871306,
"num_lines": 144
} |
__all__ = ['GaussianProcess']
import numpy as np
import matplotlib.pyplot as pl
from . import cholInvert
import numpy.core.umath_tests
import scipy.optimize as opt
class GaussianProcess:
    """One-dimensional Gaussian-process regressor.

    Uses a squared-exponential covariance with hyperparameters
    ``lambdaGP`` (inverse squared length scale) and ``sigmaGP`` (signal
    variance), fitted by maximising the marginal likelihood with BFGS.
    """

    def __init__(self, xInput, lambdaGPInitial=1.0, sigmaGPInitial=1.0):
        self.lambdaGP = lambdaGPInitial  # refined later by fit()
        self.sigmaGP = sigmaGPInitial    # refined later by fit()
        self.xInput = xInput             # training input locations
        self.CInv = 0                    # inverse covariance, set by fit()

    def covariance(self, x, pars):
        """
        Covariance matrix. It returns the value of the covariance matrix
        and the derivative of the matrix wrt to the hyperparameters
        """
        lambdaGP, sigmaGP = pars
        expon = np.exp(-0.5 * lambdaGP * x**2)
        # Covariance matrix
        K = sigmaGP * expon
        # Derivatives of the covariance matrix
        dKdsigmaGP = expon
        dKdlambdaGP = -0.5 * K * x**2
        return K, dKdlambdaGP, dKdsigmaGP

    def marginalLikelihood(self, pars, *args):
        """Negative log marginal likelihood and its gradient.

        ``pars`` holds the *log* hyperparameters; ``args`` is
        ``(yInput, sigmaNoise)``.
        """
        xInput = self.xInput
        yInput = args[0]
        sigmaNoise = args[1]
        lambdaGP = np.exp(pars[0])
        sigmaGP = np.exp(pars[1])
        K, dKdl, dKds = self.covariance(xInput[np.newaxis, :] - xInput[:, np.newaxis],
                                        [lambdaGP, sigmaGP])
        C = K + sigmaNoise**2 * np.identity(len(xInput))
        CInv, logD = cholInvert(C)
        likelihood = 0.5 * np.dot(np.dot(yInput.T, CInv), yInput) + 0.5 * logD
        # Jacobian (w.r.t. the log-parameters, hence the chain-rule factors)
        jac = np.zeros(2)
        yInput2 = np.dot(CInv, yInput)
        # einsum('ij,ji->') computes trace(CInv @ dK); it replaces
        # numpy.core.umath_tests.inner1d, which was removed from NumPy.
        jac[0] = -0.5 * np.einsum('ij,ji->', CInv, dKdl) \
            + 0.5 * np.dot(np.dot(yInput2.T, dKdl), yInput2)
        jac[0] = -jac[0] * lambdaGP
        jac[1] = -0.5 * np.einsum('ij,ji->', CInv, dKds) \
            + 0.5 * np.dot(np.dot(yInput2.T, dKds), yInput2)
        jac[1] = -jac[1] * sigmaGP
        return likelihood, jac

    def fit(self, y, sigmaNoise):
        """Fit the hyperparameters to targets ``y`` with noise level
        ``sigmaNoise`` and cache the inverse training covariance."""
        x0 = [np.log(self.lambdaGP), np.log(self.sigmaGP)]
        # `args` must be a tuple: scipy wraps any non-tuple value in a
        # 1-tuple, which would hand marginalLikelihood a single list.
        res = opt.minimize(self.marginalLikelihood, x0, method='BFGS',
                           jac=True, args=(y, sigmaNoise))
        self.lambdaGP, self.sigmaGP = np.exp(res.x)
        self.yNoise = y
        # Final evaluation of the covariance matrix at the fitted parameters
        K, dKdl, dKds = self.covariance(self.xInput[np.newaxis, :] - self.xInput[:, np.newaxis],
                                        [self.lambdaGP, self.sigmaGP])
        C = K + sigmaNoise**2 * np.identity(len(self.xInput))
        self.CInv, logD = cholInvert(C)

    def predict(self, xStar):
        """Return the posterior mean at the points ``xStar``."""
        # Fixed: the original read `len(x)`, a global defined only in the
        # __main__ demo, instead of the actual argument.
        nx = len(xStar)
        predicted = np.zeros(nx)
        for i in range(nx):
            KStar = self.sigmaGP * np.exp(-0.5 * self.lambdaGP * (xStar[i] - self.xInput)**2)
            predicted[i] = np.dot(KStar, np.dot(self.CInv, self.yNoise))
        return predicted
if __name__ == "__main__":
# Example with a simple case
nPoints = 50
sigmaNoise = 0.7
x = np.linspace(0,8,nPoints)
y = np.sin(x)
yNoise = y + sigmaNoise*np.random.randn(nPoints)
gp = GaussianProcess(x)
gp.fit(y, sigmaNoise)
res = gp.predict(x)
pl.plot(x, yNoise)
pl.plot(x, res)
pl.plot(x, y)
| {
"repo_name": "aasensio/pyAndres",
"path": "gpAndres.py",
"copies": "1",
"size": "2810",
"license": "mit",
"hash": -6662630494104640000,
"line_mean": 26.0192307692,
"line_max": 117,
"alpha_frac": 0.6647686833,
"autogenerated": false,
"ratio": 2.5475974614687216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3712366144768721,
"avg_score": null,
"num_lines": null
} |
__all__ = ['GaussianProcess']
import numpy as np
import matplotlib.pyplot as pl
from ..linalg import cholInvert
import numpy.core.umath_tests
import scipy.optimize as opt
class GaussianProcess:
    """
    Gaussian process

    One-dimensional Gaussian-process regressor with a squared-exponential
    covariance; hyperparameters are fitted by maximising the marginal
    likelihood with BFGS (see :meth:`fit`).
    """
    def __init__(self, xInput, lambdaGPInitial=1.0, sigmaGPInitial=1.0):
        # lambdaGP acts as an inverse squared length scale and sigmaGP as
        # the signal variance; both are refined by fit().
        self.lambdaGP = lambdaGPInitial
        self.sigmaGP = sigmaGPInitial
        self.xInput = xInput
        self.CInv = 0
    def covariance(self, x, pars):
        """
        Covariance matrix. It returns the value of the covariance matrix
        and the derivative of the matrix wrt to the hyperparameters
        """
        lambdaGP = pars[0]
        sigmaGP = pars[1]
        expon = np.exp(-0.5 * lambdaGP * x**2)
        # Covariance matrix
        K = sigmaGP * expon
        # Derivatives of the covariance matrix
        dKdsigmaGP = expon
        dKdlambdaGP = -0.5 * K * x**2
        return K, dKdlambdaGP, dKdsigmaGP
    # Returns the marginal likelihood for a Gaussian process
    def marginalLikelihood(self, pars, *args):
        """Negative log marginal likelihood and gradient.

        ``pars`` holds the *log* hyperparameters; ``args`` is
        ``(yInput, sigmaNoise)``.
        """
        xInput = self.xInput
        yInput, sigmaNoise = args
        lambdaGP = np.exp(pars[0])
        sigmaGP = np.exp(pars[1])
        K, dKdl, dKds = self.covariance(xInput[np.newaxis,:]-xInput[:,np.newaxis], [lambdaGP, sigmaGP])
        C = K + sigmaNoise**2 * np.identity(len(xInput))
        CInv, logD = cholInvert(C)
        likelihood = 0.5 * np.dot(np.dot(yInput.T,CInv),yInput) + 0.5 * logD
        # Jacobian (w.r.t. log-parameters, hence the chain-rule factors below)
        jac = np.zeros(2)
        # dLdlambda
        yInput2 = np.dot(CInv, yInput)
        # NOTE(review): numpy.core.umath_tests was removed from modern
        # NumPy; np.einsum('ij,ji->', CInv, dKdl) is the equivalent trace.
        jac[0] = -0.5 * np.sum(numpy.core.umath_tests.inner1d(CInv, dKdl.T)) + 0.5*np.dot(np.dot(yInput2.T,dKdl),yInput2)
        jac[0] = -jac[0] * lambdaGP
        # dLdsigma
        jac[1] = -0.5 * np.sum(numpy.core.umath_tests.inner1d(CInv, dKds.T)) + 0.5*np.dot(np.dot(yInput2.T,dKds),yInput2)
        jac[1] = -jac[1] * sigmaGP
        return likelihood, jac
    def fit(self, y, sigmaNoise):
        """Optimise the hyperparameters against targets ``y`` (noise level
        ``sigmaNoise``) and cache the inverse training covariance."""
        x0 = [np.log(self.lambdaGP), np.log(self.sigmaGP)]
        res = opt.minimize(self.marginalLikelihood, x0, method='BFGS', jac=True, args=(y, sigmaNoise))
        self.lambdaGP, self.sigmaGP = np.exp(res.x)
        self.yNoise = y
        # Final evaluation of the covariance matrix
        K, dKdl, dKds = self.covariance(self.xInput[np.newaxis,:]-self.xInput[:,np.newaxis], [self.lambdaGP, self.sigmaGP])
        C = K + sigmaNoise**2 * np.identity(len(self.xInput))
        self.CInv, logD = cholInvert(C)
    def predict(self, xStar):
        """Return the posterior mean and full posterior covariance matrix
        of the process evaluated at the points ``xStar``."""
        K_XStar_XStar, _, _ = self.covariance(xStar[np.newaxis,:]-xStar[:,np.newaxis], [self.lambdaGP, self.sigmaGP])
        K_XStar_X, _, _ = self.covariance(xStar[:,np.newaxis]-self.xInput[np.newaxis,:], [self.lambdaGP, self.sigmaGP])
        K_X_XStar, _, _ = self.covariance(self.xInput[:,np.newaxis]-xStar[np.newaxis,:], [self.lambdaGP, self.sigmaGP])
        predicted = K_XStar_X.dot(self.CInv).dot(self.yNoise)
        covariance = K_XStar_XStar - K_XStar_X.dot(self.CInv).dot(K_X_XStar)
        return predicted, covariance
if __name__ == "__main__":
# Example with a simple case
nPoints = 50
sigmaNoise = 0.7
x = np.linspace(0,8,nPoints)
y = np.sin(x)
yNoise = y + sigmaNoise*np.random.randn(nPoints)
gp = GaussianProcess(x)
gp.fit(y, sigmaNoise)
res, variance = gp.predict(x)
pl.plot(x, yNoise)
pl.plot(x, res)
pl.plot(x, y)
| {
"repo_name": "aasensio/pyiacsun",
"path": "pyiacsun/machinelearn/gpAndres.py",
"copies": "1",
"size": "3103",
"license": "mit",
"hash": 8192556024779039000,
"line_mean": 28.2735849057,
"line_max": 117,
"alpha_frac": 0.6703190461,
"autogenerated": false,
"ratio": 2.579384871155445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37497039172554447,
"avg_score": null,
"num_lines": null
} |
__all__ = ['gauss_numerical_integration', 'sample_function']
import numpy as np
# coefficients from https://pomax.github.io/bezierinfo/legendre-gauss.html
gauss0 = [
[1.0000000000000000, -0.5773502691896257],
[1.0000000000000000, 0.5773502691896257]
]
gauss10 = [
[0.2955242247147529, -0.1488743389816312],
[0.2955242247147529, 0.1488743389816312],
[0.2692667193099963, -0.4333953941292472],
[0.2692667193099963, 0.4333953941292472],
[0.2190863625159820, -0.6794095682990244],
[0.2190863625159820, 0.6794095682990244],
[0.1494513491505806, -0.8650633666889845],
[0.1494513491505806, 0.8650633666889845],
[0.0666713443086881, -0.9739065285171717],
[0.0666713443086881, 0.9739065285171717]
]
gauss20 = [
[0.1527533871307258, -0.0765265211334973],
[0.1527533871307258, 0.0765265211334973],
[0.1491729864726037, -0.2277858511416451],
[0.1491729864726037, 0.2277858511416451],
[0.1420961093183820, -0.3737060887154195],
[0.1420961093183820, 0.3737060887154195],
[0.1316886384491766, -0.5108670019508271],
[0.1316886384491766, 0.5108670019508271],
[0.1181945319615184, -0.6360536807265150],
[0.1181945319615184, 0.6360536807265150],
[0.1019301198172404, -0.7463319064601508],
[0.1019301198172404, 0.7463319064601508],
[0.0832767415767048, -0.8391169718222188],
[0.0832767415767048, 0.8391169718222188],
[0.0626720483341091, -0.9122344282513259],
[0.0626720483341091, 0.9122344282513259],
[0.0406014298003869, -0.9639719272779138],
[0.0406014298003869, 0.9639719272779138],
[0.0176140071391521, -0.9931285991850949],
[0.0176140071391521, 0.9931285991850949],
]
gauss30 = [
[0.1028526528935588, -0.0514718425553177],
[0.1028526528935588, 0.0514718425553177],
[0.1017623897484055, -0.1538699136085835],
[0.1017623897484055, 0.1538699136085835],
[0.0995934205867953, -0.2546369261678899],
[0.0995934205867953, 0.2546369261678899],
[0.0963687371746443, -0.3527047255308781],
[0.0963687371746443, 0.3527047255308781],
[0.0921225222377861, -0.4470337695380892],
[0.0921225222377861, 0.4470337695380892],
[0.0868997872010830, -0.5366241481420199],
[0.0868997872010830, 0.5366241481420199],
[0.0807558952294202, -0.6205261829892429],
[0.0807558952294202, 0.6205261829892429],
[0.0737559747377052, -0.6978504947933158],
[0.0737559747377052, 0.6978504947933158],
[0.0659742298821805, -0.7677774321048262],
[0.0659742298821805, 0.7677774321048262],
[0.0574931562176191, -0.8295657623827684],
[0.0574931562176191, 0.8295657623827684],
[0.0484026728305941, -0.8825605357920527],
[0.0484026728305941, 0.8825605357920527],
[0.0387991925696271, -0.9262000474292743],
[0.0387991925696271, 0.9262000474292743],
[0.0287847078833234, -0.9600218649683075],
[0.0287847078833234, 0.9600218649683075],
[0.0184664683110910, -0.9836681232797472],
[0.0184664683110910, 0.9836681232797472],
[0.0079681924961666, -0.9968934840746495],
[0.0079681924961666, 0.9968934840746495]
]
gauss40 = [
[0.0775059479784248, -0.0387724175060508],
[0.0775059479784248, 0.0387724175060508],
[0.0770398181642480, -0.1160840706752552],
[0.0770398181642480, 0.1160840706752552],
[0.0761103619006262, -0.1926975807013711],
[0.0761103619006262, 0.1926975807013711],
[0.0747231690579683, -0.2681521850072537],
[0.0747231690579683, 0.2681521850072537],
[0.0728865823958041, -0.3419940908257585],
[0.0728865823958041, 0.3419940908257585],
[0.0706116473912868, -0.4137792043716050],
[0.0706116473912868, 0.4137792043716050],
[0.0679120458152339, -0.4830758016861787],
[0.0679120458152339, 0.4830758016861787],
[0.0648040134566010, -0.5494671250951282],
[0.0648040134566010, 0.5494671250951282],
[0.0613062424929289, -0.6125538896679802],
[0.0613062424929289, 0.6125538896679802],
[0.0574397690993916, -0.6719566846141796],
[0.0574397690993916, 0.6719566846141796],
[0.0532278469839368, -0.7273182551899271],
[0.0532278469839368, 0.7273182551899271],
[0.0486958076350722, -0.7783056514265194],
[0.0486958076350722, 0.7783056514265194],
[0.0438709081856733, -0.8246122308333117],
[0.0438709081856733, 0.8246122308333117],
[0.0387821679744720, -0.8659595032122595],
[0.0387821679744720, 0.8659595032122595],
[0.0334601952825478, -0.9020988069688743],
[0.0334601952825478, 0.9020988069688743],
[0.0279370069800234, -0.9328128082786765],
[0.0279370069800234, 0.9328128082786765],
[0.0222458491941670, -0.9579168192137917],
[0.0222458491941670, 0.9579168192137917],
[0.0164210583819079, -0.9772599499837743],
[0.0164210583819079, 0.9772599499837743],
[0.0104982845311528, -0.9907262386994570],
[0.0104982845311528, 0.9907262386994570],
[0.0045212770985332, -0.9982377097105593],
[0.0045212770985332, 0.9982377097105593],
]
gauss50 = [
[0.0621766166553473, -0.0310983383271889],
[0.0621766166553473, 0.0310983383271889],
[0.0619360674206832, -0.0931747015600861],
[0.0619360674206832, 0.0931747015600861],
[0.0614558995903167, -0.1548905899981459],
[0.0614558995903167, 0.1548905899981459],
[0.0607379708417702, -0.2160072368760418],
[0.0607379708417702, 0.2160072368760418],
[0.0597850587042655, -0.2762881937795320],
[0.0597850587042655, 0.2762881937795320],
[0.0586008498132224, -0.3355002454194373],
[0.0586008498132224, 0.3355002454194373],
[0.0571899256477284, -0.3934143118975651],
[0.0571899256477284, 0.3934143118975651],
[0.0555577448062125, -0.4498063349740388],
[0.0555577448062125, 0.4498063349740388],
[0.0537106218889962, -0.5044581449074642],
[0.0537106218889962, 0.5044581449074642],
[0.0516557030695811, -0.5571583045146501],
[0.0516557030695811, 0.5571583045146501],
[0.0494009384494663, -0.6077029271849502],
[0.0494009384494663, 0.6077029271849502],
[0.0469550513039484, -0.6558964656854394],
[0.0469550513039484, 0.6558964656854394],
[0.0443275043388033, -0.7015524687068222],
[0.0443275043388033, 0.7015524687068222],
[0.0415284630901477, -0.7444943022260685],
[0.0415284630901477, 0.7444943022260685],
[0.0385687566125877, -0.7845558329003993],
[0.0385687566125877, 0.7845558329003993],
[0.0354598356151462, -0.8215820708593360],
[0.0354598356151462, 0.8215820708593360],
[0.0322137282235780, -0.8554297694299461],
[0.0322137282235780, 0.8554297694299461],
[0.0288429935805352, -0.8859679795236131],
[0.0288429935805352, 0.8859679795236131],
[0.0253606735700124, -0.9130785566557919],
[0.0253606735700124, 0.9130785566557919],
[0.0217802431701248, -0.9366566189448780],
[0.0217802431701248, 0.9366566189448780],
[0.0181155607134894, -0.9566109552428079],
[0.0181155607134894, 0.9566109552428079],
[0.0143808227614856, -0.9728643851066920],
[0.0143808227614856, 0.9728643851066920],
[0.0105905483836510, -0.9853540840480058],
[0.0105905483836510, 0.9853540840480058],
[0.0067597991957454, -0.9940319694320907],
[0.0067597991957454, 0.9940319694320907],
[0.0029086225531551, -0.9988664044200710],
[0.0029086225531551, 0.9988664044200710]
]
gauss60 = [
[0.0519078776312206, -0.0259597723012478],
[0.0519078776312206, 0.0259597723012478],
[0.0517679431749102, -0.0778093339495366],
[0.0517679431749102, 0.0778093339495366],
[0.0514884515009809, -0.1294491353969450],
[0.0514884515009809, 0.1294491353969450],
[0.0510701560698556, -0.1807399648734254],
[0.0510701560698556, 0.1807399648734254],
[0.0505141845325094, -0.2315435513760293],
[0.0505141845325094, 0.2315435513760293],
[0.0498220356905502, -0.2817229374232617],
[0.0498220356905502, 0.2817229374232617],
[0.0489955754557568, -0.3311428482684482],
[0.0489955754557568, 0.3311428482684482],
[0.0480370318199712, -0.3796700565767980],
[0.0480370318199712, 0.3796700565767980],
[0.0469489888489122, -0.4271737415830784],
[0.0469489888489122, 0.4271737415830784],
[0.0457343797161145, -0.4735258417617071],
[0.0457343797161145, 0.4735258417617071],
[0.0443964787957871, -0.5186014000585697],
[0.0443964787957871, 0.5186014000585697],
[0.0429388928359356, -0.5622789007539445],
[0.0429388928359356, 0.5622789007539445],
[0.0413655512355848, -0.6044405970485104],
[0.0413655512355848, 0.6044405970485104],
[0.0396806954523808, -0.6449728284894770],
[0.0396806954523808, 0.6449728284894770],
[0.0378888675692434, -0.6837663273813555],
[0.0378888675692434, 0.6837663273813555],
[0.0359948980510845, -0.7207165133557304],
[0.0359948980510845, 0.7207165133557304],
[0.0340038927249464, -0.7557237753065856],
[0.0340038927249464, 0.7557237753065856],
[0.0319212190192963, -0.7886937399322641],
[0.0319212190192963, 0.7886937399322641],
[0.0297524915007889, -0.8195375261621458],
[0.0297524915007889, 0.8195375261621458],
[0.0275035567499248, -0.8481719847859296],
[0.0275035567499248, 0.8481719847859296],
[0.0251804776215212, -0.8745199226468983],
[0.0251804776215212, 0.8745199226468983],
[0.0227895169439978, -0.8985103108100460],
[0.0227895169439978, 0.8985103108100460],
[0.0203371207294573, -0.9200784761776275],
[0.0203371207294573, 0.9200784761776275],
[0.0178299010142077, -0.9391662761164232],
[0.0178299010142077, 0.9391662761164232],
[0.0152746185967848, -0.9557222558399961],
[0.0152746185967848, 0.9557222558399961],
[0.0126781664768160, -0.9697017887650528],
[0.0126781664768160, 0.9697017887650528],
[0.0100475571822880, -0.9810672017525982],
[0.0100475571822880, 0.9810672017525982],
[0.0073899311633455, -0.9897878952222218],
[0.0073899311633455, 0.9897878952222218],
[0.0047127299269536, -0.9958405251188381],
[0.0047127299269536, 0.9958405251188381],
[0.0020268119688738, -0.9992101232274361],
[0.0020268119688738, 0.9992101232274361],
]
# Transpose each (weight, abscissa) pair table to shape (2, n):
# row 0 holds the weights, row 1 the abscissae. The list index is the
# `precision` argument used below (0 -> 2-point rule, 1 -> 10-point,
# ..., 6 -> 60-point).
gauss_table = [np.swapaxes(gauss0, 0, 1), np.swapaxes(gauss10, 0, 1), np.swapaxes(gauss20, 0, 1),
               np.swapaxes(gauss30, 0, 1), np.swapaxes(gauss40, 0, 1), np.swapaxes(gauss50, 0, 1),
               np.swapaxes(gauss60, 0, 1)]
def gauss_numerical_integration(f, x1, x2, precision, *f_args):
    """Integrate ``f`` over each interval ``[x1[i], x2[i]]`` element-wise
    using the Gauss-Legendre rule selected by ``precision`` (an index
    into ``gauss_table``). Extra ``f_args`` are forwarded to ``f``.
    """
    weights, nodes = gauss_table[precision]
    # Map [-1, 1] quadrature nodes onto each interval.
    half_width = (x2 - x1) / 2
    midpoint = (x2 + x1) / 2
    samples = f(half_width[None, :] * nodes[:, None] + midpoint[None, :], *f_args)
    return half_width * np.sum(weights[:, None] * samples, 0)
def sample_function(f, precision=3):
    """Wrap ``f`` so it can be evaluated on interval pairs.

    The returned callable takes ``(x1_array, x2_array)`` and integrates
    ``f`` over each interval with Gauss-Legendre quadrature at the given
    ``precision``.
    """
    def sampled_function(x12_array, *args):
        lower, upper = x12_array
        return gauss_numerical_integration(f, lower, upper, precision, *list(args))
    return sampled_function
| {
"repo_name": "ucl-exoplanets/pylightcurve",
"path": "pylightcurve/analysis/numerical_integration.py",
"copies": "1",
"size": "10872",
"license": "mit",
"hash": -7850884599787367000,
"line_mean": 40.6551724138,
"line_max": 101,
"alpha_frac": 0.7193708609,
"autogenerated": false,
"ratio": 2.146071851559416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3365442712459416,
"avg_score": null,
"num_lines": null
} |
__all__ = ['GbxProtocol']
import asyncio
from xmlrpc.client import dumps, loads, Fault
from .log import logger
class GbxProtocol(asyncio.Protocol):
def __init__(self, loop, handle_callback):
self.loop = loop
self.handle_callback = handle_callback
self.transport = None
self.check_connection = True
self.next_handle = 0x80000000
self.futures = {}
self.calls = {}
self.buffer = b''
self.read_xml = False
self.len_xml = 0
self.handle = 0
def connection_made(self, transport):
self.transport = transport
logger.debug('connection_made .. OK')
def data_received(self, data):
#print('Data received: {!r}'.format(data))
data = self.buffer + data
if self.check_connection:
if len(data) >= 15:
self.connection_check(data[:15])
data = data[15:]
self.buffer = b''
else:
self.buffer = data
return
while len(data):
if not self.read_xml:
if len(data) >= 8:
self.len_xml = int.from_bytes(data[:3], 'little')
self.handle = int.from_bytes(data[4:8], 'little')
#print(self.len_xml)
#print(self.handle)
#print(data)
data = data[8:]
self.buffer = b''
self.read_xml = True
else:
self.buffer = data
return
else:
if len(data) >= self.len_xml:
d = data[:self.len_xml]
#print(data)
#print(len(data))
try:
response, methodname = loads(d)
except Fault as exception:
self.handle_response_exception(self.handle, exception)
else:
if methodname != None:
logger.debug('callback received ' + str((methodname, response)))
self.handle_callback(self.handle, (methodname, response))
else:
self.handle_response(self.handle, (methodname, response))
finally:
data = data[self.len_xml:]
self.buffer = b''
self.read_xml = False
else:
self.buffer = data
return
def handle_response(self, handle, response):
#logger.debug('response to ' + str(self.calls[handle]) + ' is ' + str(response))
future = self.futures[handle]
# unpack response
if len(response[1]) == 1:
response = response[1][0]
else:
response = response[1]
#logger.debug(response)
future.set_result(response)
del self.futures[handle]
del self.calls[handle]
def handle_response_exception(self, handle, exception):
logger.debug('response to ' + str(self.calls[handle]) + ' is ' + str(exception))
future = self.futures[handle]
future.set_exception(exception)
del self.futures[handle]
del self.calls[handle]
def connection_lost(self, exc):
logger.info('The server closed the connection: ' + str(exc))
logger.info('Stop the event lop')
self.loop.stop()
#raise exc
def connection_check(self, data):
size = int.from_bytes(data[:3], 'little')
protocol = data[4:].decode()
if size != 11 or protocol != 'GBXRemote 2':
self.loop.stop()
raise Exception('wrong protocol response')
logger.error(data)
logger.debug('connection_check .. OK')
self.check_connection = False
def rpc(self, name, params):
request = dumps(params, name)
t = request.encode(errors='surrogateescape')
handle = self.send_request(t)
future = asyncio.Future(loop=self.loop)
self.futures[handle] = future
self.calls[handle] = name, params
return future
def send_request(self, request):
datalen = len(request).to_bytes(4, 'little')
if self.next_handle == 0xffffffff:
self.next_handle = 0x80000000
else:
self.next_handle = self.next_handle + 1;
handle = self.next_handle.to_bytes(4, 'little')
data = datalen + handle + request
#print(data)
self.transport.write(data)
return int.from_bytes(handle, 'little')
# # only gets called when the attribute is not defined
# def __getattr__(self, name):
# # magic method dispatcher
# return _Method(self.rpc, name)
# # note: to call a remote object with an non-standard name, use
# # result getattr(server, "strange-python-name")(args) | {
"repo_name": "juergenz/pie",
"path": "src/pie/gbx_protocol.py",
"copies": "1",
"size": "5011",
"license": "mit",
"hash": 2048693090173621200,
"line_mean": 28.6568047337,
"line_max": 92,
"alpha_frac": 0.5142686091,
"autogenerated": false,
"ratio": 4.3422876949740035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5356556304074004,
"avg_score": null,
"num_lines": null
} |
__all__ = ["GenerateId", \
"InitSessionParams", \
"ExecuteInitSession", \
"ExecuteQuit", \
"ExecuteGetStatus"]
import random
import os
from misc.capabilities import Capabilities
from misc.xwalk_launcher import LaunchXwalk
from browser.status import *
from browser.version import kXwalkDriverVersion
from base.log import VLOG
# generate session id
def GenerateId():
    """Return a random session id: two 64-bit random values rendered as hex.

    The digits are not zero-padded, so the id length varies (matching the
    original behaviour).
    """
    # On Python 3 hex() never carries a trailing 'L', so the old Python 2
    # long-suffix stripping was dead code and has been removed.
    msb = format(random.randrange(0, 2 ** 64), 'x')
    lsb = format(random.randrange(0, 2 ** 64), 'x')
    return msb + lsb
class InitSessionParams(object):
    """Immutable bundle of shared resources used while creating a session."""

    def __init__(self, socket_factory, device_manager, port_server, port_manager):
        # Plain value holder; attributes mirror the constructor arguments.
        self.port_manager = port_manager
        self.port_server = port_server
        self.device_manager = device_manager
        self.socket_factory = socket_factory
def _CreateCapabilities(xwalk):
    """Build the WebDriver capabilities dict advertised for *xwalk*.

    Only version information is read from *xwalk*; everything else is
    static for this driver.
    """
    xwalk_caps = {}
    # Desktop builds expose their user-data-dir; webview/Android builds do not.
    if xwalk.GetAsDesktop():
        xwalk_caps["userDataDir"] = \
            xwalk.GetAsDesktop().command().GetSwitchValueNative("user-data-dir")
    caps = {
        "browserName": "xwalk",
        # The original assigned "webview" and immediately overwrote it with
        # GetVersion(); the dead store is removed.
        "version": xwalk.GetVersion(),
        "xwalk.xwalkdriverVersion": kXwalkDriverVersion,
        # Likewise GetOperatingSystemName() was immediately overwritten with
        # "ANDROID"; only the surviving value is kept (the getter is assumed
        # side-effect free — confirm).
        "platform": "ANDROID",
        "javascriptEnabled": True,
        "takesScreenshot": True,
        "takesHeapSnapshot": True,
        "handlesAlerts": True,
        "databaseEnabled": False,
        "locationContextEnabled": True,
        "applicationCacheEnabled": False,
        "browserConnectionEnabled": False,
        "cssSelectorsEnabled": True,
        "webStorageEnabled": True,
        "rotatable": False,
        "acceptSslCerts": True,
        "nativeEvents": True,
        "xwalk": xwalk_caps,
    }
    return caps
def _InitSessionHelper(bound_params, session, params, value):
    """Parse desired capabilities, launch Crosswalk and bind the first window.

    On success fills *value* with the session's capabilities and returns
    Status(kOk); any failure is returned as an error Status.
    """
    #TODO
    #session->driver_log.reset(
    #    new WebDriverLog(WebDriverLog::kDriverType, Log::kAll));
    desired_caps = params.get("desiredCapabilities")
    if type(desired_caps) != dict:
        return Status(kUnknownError, "cannot find dict 'desiredCapabilities'")
    capabilities = Capabilities()
    status = capabilities.Parse(desired_caps)
    if status.IsError():
        return status
    #VLOG(0, "after parse capabilities: " + status.Message())
    #TODO
    #Log::Level driver_level = Log::kWarning;
    #if (capabilities.logging_prefs.count(WebDriverLog::kDriverType))
    #  driver_level = capabilities.logging_prefs[WebDriverLog::kDriverType];
    #session->driver_log->set_min_level(driver_level);
    # Create Log's and DevToolsEventListener's for ones that are DevTools-based.
    # Session will own the Log's, Xwalk will own the listeners.
    devtools_event_listeners = []
    #TODO
    #status = CreateLogs(capabilities,
    #                    &session->devtools_logs,
    #                    &devtools_event_listeners);
    #if (status.IsError())
    #  return status;
    status = LaunchXwalk(bound_params.socket_factory,
                         bound_params.device_manager,
                         bound_params.port_server,
                         bound_params.port_manager,
                         capabilities,
                         devtools_event_listeners,
                         session)
    #VLOG(0, "after launchxwalk: " + status.Message())
    if status.IsError():
        return status
    # GetWebViewIds fills the list in place (C++-style out parameter).
    web_view_ids = []
    status = session.xwalk.GetWebViewIds(web_view_ids)
    if status.IsError():
        VLOG(0, "session.xwalk.GetWebViewIds: " + status.Message())
        return status
    if not web_view_ids:
        VLOG(0, "web_view_ids is []: " + status.Message())
        return Status(kUnknownError, "unable to discover open window in xwalk")
    VLOG(0, "web_view_ids is: " + str(web_view_ids))
    # The first discovered view becomes the session's current window.
    session.window = web_view_ids[0]
    session.detach = capabilities.detach
    session.force_devtools_screenshot = capabilities.force_devtools_screenshot
    session.capabilities = _CreateCapabilities(session.xwalk)
    value.clear()
    value.update(session.capabilities)
    return Status(kOk)
def ExecuteInitSession(bound_params, session, params, value):
    """Create a new session; flag it for teardown when setup fails."""
    status = _InitSessionHelper(bound_params, session, params, value)
    if status.IsError():
        # A half-initialized browser must be quit by the caller.
        session.quit = True
    return status
def ExecuteQuit(allow_detach, session, params, value):
    """Quit the browser unless the session was started in detach mode."""
    should_detach = allow_detach and session.detach
    if not should_detach:
        return session.xwalk.Quit()
    return Status(kOk)
def ExecuteGetStatus(value):
    """Fill *value* with driver build info and host OS details.

    Uses os.uname(), so this is POSIX-only (as was the original).
    """
    uname = os.uname()
    value.clear()
    value.update({
        "build": {"version": "alpha"},
        "os": {
            "name": uname[0],     # operating system name
            "version": uname[2],  # operating system release
            "arch": uname[-1],    # machine architecture
        },
    })
    return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "command/init_session_commands.py",
"copies": "1",
"size": "4884",
"license": "bsd-3-clause",
"hash": 1095098220295913100,
"line_mean": 32.6827586207,
"line_max": 100,
"alpha_frac": 0.674037674,
"autogenerated": false,
"ratio": 3.5701754385964914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47442131125964915,
"avg_score": null,
"num_lines": null
} |
__all__ = ['generate', 'verify']
def decimal_decoder(s):
    """Decode a single base-10 digit character to its integer value."""
    return int(s, 10)


def decimal_encoder(i):
    """Encode an integer digit as its base-10 character."""
    return str(i)


def luhn_sum_mod_base(string, base=10, decoder=decimal_decoder):
    """Return the Luhn sum of *string* modulo *base*.

    Adapted from http://en.wikipedia.org/wiki/Luhn_algorithm
    """
    digits = [decoder(ch) for ch in string]
    # Right-to-left: odd positions (1st, 3rd, ...) are added verbatim,
    # even positions are doubled with the base-digits of the product summed.
    total = sum(digits[::-2])
    for d in digits[-2::-2]:
        q, r = divmod(2 * d, base)
        total += q + r
    return total % base
def generate(string, base=10, encoder=decimal_encoder,
             decoder=decimal_decoder):
    """Return the Luhn mod N check character for *string*.

    Append the returned character to the input to obtain a string that
    passes :func:`verify` in the same base.

    >>> generate('4205092350249')
    '1'

    When operating in a base other than decimal, supply *encoder* and
    *decoder* callables. The encoder maps an integer to its character in
    the operating base; the decoder maps a single character back to its
    integer value. The mapping must be one-to-one.

    >>> hex_alphabet = '0123456789abcdef'
    >>> hex_encoder = lambda i: hex_alphabet[i]
    >>> hex_decoder = lambda s: hex_alphabet.index(s)
    >>> generate('a8b56f', base=16, encoder=hex_encoder, decoder=hex_decoder)
    'b'
    >>> verify('a8b56fb', base=16, decoder=hex_decoder)
    True
    >>> verify('a8b56fc', base=16, decoder=hex_decoder)
    False
    """
    # Compute the sum as if the check character were already present ('0'),
    # then pick the digit that cancels the remainder.
    remainder = luhn_sum_mod_base(string + encoder(0), base=base, decoder=decoder)
    check_digit = (base - remainder) % base
    return encoder(check_digit)
def verify(string, base=10, decoder=decimal_decoder):
    """Return True if *string* (check character included) is Luhn mod N valid.

    >>> verify('5105105105105100')  # MasterCard test number
    True

    For non-decimal bases supply a *decoder* mapping a single character to
    its integer value in the operating base (the mapping must be
    one-to-one). 'b' is the correct check character for 'a8b56f':

    >>> hex_decoder = lambda s: '0123456789abcdef'.index(s)
    >>> verify('a8b56fb', base=16, decoder=hex_decoder)
    True
    >>> verify('a8b56fc', base=16, decoder=hex_decoder)
    False
    """
    checksum = luhn_sum_mod_base(string, base=base, decoder=decoder)
    return checksum == 0
| {
"repo_name": "benhodgson/baluhn",
"path": "src/baluhn.py",
"copies": "1",
"size": "2975",
"license": "unlicense",
"hash": 4172818675821686300,
"line_mean": 34.4166666667,
"line_max": 80,
"alpha_frac": 0.683697479,
"autogenerated": false,
"ratio": 3.8736979166666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5057395395666666,
"avg_score": null,
"num_lines": null
} |
__all__ = [ 'Generator' ]
import numpy as np
import molecules
a0 = 0.52917721092
class Generator( dict ):
"""
Used to create molecules, write dalton .mol files
using -param for study with use_calculator.py
water currently implemented only
plans to implement methanol
"""
def __init__(self, *args, **kwargs):
    """Fill the dict with reference geometries and default scan settings.

    Keys are tuples: (molecule, model, quantity, unit) for geometry
    constants and (parameter, field) for scan-range settings.
    NOTE(review): *args/**kwargs are accepted but ignored (dict.__init__
    is not forwarded them) — confirm that is intended.
    """
    # TIP3P water geometry.
    self[("water", "tip3p", "a_hoh", "degree")] = 104.52
    self[("water", "tip3p", "r_oh", "AA")] = 0.9572
    # SPC water geometry.
    self[("water", "spc", "a_hoh", "degree")] = 109.47
    self[("water", "spc", "r_oh", "AA")] = 1.0
    # Gas-phase optimized methanol geometry.
    self[("methanol", "gas_opt", "r_oh", "AA")] = 0.967
    self[("methanol", "gas_opt", "r_co", "AA")] = 1.428
    self[("methanol", "gas_opt", "r_ch", "AA")] = 1.098
    self[("methanol", "gas_opt", "a_coh", "degree")] = 107.16
    self[("methanol", "gas_opt", "a_hch", "degree")] = 109.6
    self[("methanol", "gas_opt", "a_hco", "degree")] = 109.342
    # Dihedral angles placing the three methyl hydrogens.
    self[("methanol", "gas_opt", "d_hcoh", "h4", "degree")] = 60.0
    self[("methanol", "gas_opt", "d_hcoh", "h5", "degree")] = -60.0
    self[("methanol", "gas_opt", "d_hcoh", "h6", "degree")] = 180.0
    # Default scan ranges: a single point at 0 for every parameter...
    for val in ["r", "tau", "theta", "rho1", "rho2", "rho3", ]:
        self[(val, 'min')] = 0.0
        self[(val, 'max')] = 0.0
        self[(val, 'points')] = 1
    # ...except the intermolecular separation r, which defaults to 5-10.
    self[('r', 'min')] = 5.0
    self[('r', 'max')] = 10.0
    self[('r', 'points')] = 1
    # No parameter is scanned until vary_parameters() activates it.
    for val in ["r", "tau", "theta", "rho1", "rho2", "rho3", ]:
        self[(val, "active")] = False
@staticmethod
def get_pe_b3lyp_dal(co=1.0, AA=True, max_l=2, sites=3):
    """Return a DALTON PE/B3LYP input template.

    co: border cutoff radius; AA selects the unit label written into the
    template; max_l: highest multipole order (template writes max_l + 1);
    sites: redistribution site count.
    """
    # NOTE(review): r_order is computed but never used.
    r_order = max_l + 1
    if AA:
        aa = "AA"
    else:
        aa = "AU"
    # NOTE(review): co is divided by the Bohr radius regardless of the AA
    # flag — verify the intended unit handling.
    co /= a0
    st = """**DALTON INPUT
.RUN WAVE FUNCTION
.DIRECT
.PARALLELL
.PEQM
*PEQM
.BORDER
REDIST -%d %.1f %s %d
**WAVE FUNCTION
.DFT
B3LYP
**END OF DALTON INPUT""" % (max_l+1, co, aa, sites)
    return st
@staticmethod
def get_qmmm_b3lyp_dal(damp=False):
    """Return a DALTON QM/MM B3LYP input template.

    When *damp* is true a .DAMP directive is appended to the QMMM section.
    """
    if damp:
        damp = "\n.DAMP"
    else:
        damp = ""
    st = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
*QMMM
.QMMM%s
**WAVE FUNCTION
.DFT
B3LYP
**END OF DALTON INPUT""" % damp
    return st
@staticmethod
def get_lin_dal(_string):
    """Map a linear-response method keyword to its DALTON input template.

    Unknown keywords yield None, exactly as the original if/elif chain did.
    """
    dispatch = {
        'hflin': Generator.get_hflin_dal,
        'b3lyplin': Generator.get_b3lyplin_dal,
        'camb3lyplin': Generator.get_camb3lyplin_dal,
        'ccsdlin': Generator.get_ccsdlin_dal,
    }
    getter = dispatch.get(_string)
    if getter is None:
        return None
    return getter()
@staticmethod
def get_hf_imag_dal( freq = ("0.0",), functional = 'B3PW91' ):
_string = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.HF
**RESPONSE
*ABSORP
.ALPHA
.IMAG F
.FREQUE
"""
_string += str(len(freq)) + '\n'
freqs = " ".join( map( str, freq ) )
_string += freqs
_string += '\n'
_string += "**END OF DALTON INPUT\n"
return _string
@staticmethod
def get_b3lyplin_dal():
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
B3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_b3lypqua_dal( ):
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
B3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*QUADRATIC
.QLOP
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_hflin_freq_dal( freq = "0.0", au = True, nm = False ):
_string = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.HF
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
.FREQUE
1
%s
""" %( freq )
_string += "**END OF DALTON INPUT\n"
return _string
@staticmethod
def get_hfqua_dal( ):
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.HF
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*QUADRATIC
.QLOP
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_hflin_dal( ):
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.HF
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_b3lyplin_freq_dal( freq = "0.0", au = True, nm = False ):
_string = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
B3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
.FREQUE
1
%s
""" %( freq )
_string += "**END OF DALTON INPUT\n"
return _string
@staticmethod
def get_camb3lyplin_dal():
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
CAMB3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_camb3lypqua_dal( ):
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
CAMB3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*QUADRATIC
.QLOP
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_camb3lyplin_freq_dal( freq = "0.0", au = True, nm = False ):
_string = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
CAMB3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
.FREQUE
1
%s
""" %( freq )
_string += "**END OF DALTON INPUT\n"
return _string
@staticmethod
def get_ccsdlin_dal():
return """**DALTON INPUT
.RUN RESPONSE
**INTEGRALS
.DIPLEN
.SECMOM
**WAVE FUNCTION
.CC
*CC INPUT
.CCSD
*CCFOP
.DIPMOM
*CCLR
.DIPOLE
**END OF DALTON INPUT
"""
@staticmethod
def get_ccsdqua_dal():
return """
**DALTON INPUT
.RUN RESPONSE
**INTEGRALS
.DIPLEN
.SECMOM
**WAVE FUNCTION
.CC
*CC INPUT
.CCSD
*CCFOP
.DIPMOM
*CCLR
.DIPOLE
*CCQR
.DIPOLE
**END OF DALTON INPUT
"""
def get_mol(self,
            center=(0, 0, 0),
            mol="water",
            model="tip3p",
            AA=False):
    """Build a molecule at *center*; each molecule has its own convention.

    Water places O at *center*; methanol places the C=O bond midpoint at
    the origin (NOTE(review): *center* is ignored for methanol — confirm).

    center: 3-sequence of coordinates (now an immutable tuple default —
    fixes the shared mutable-default-argument pitfall, read-only use is
    unchanged). mol: "water" or "methanol". model: geometry key
    ("tip3p"/"spc"). AA: True for Angstrom, False for atomic units.
    """
    if mol == "water":
        # Geometrical parameters depend on the chosen model.
        if model == "tip3p":
            r_oh = self[("water", "tip3p", "r_oh", "AA")]
            a_hoh = self[("water", "tip3p", "a_hoh", "degree")]
        if model == "spc":
            r_oh = self[("water", "spc", "r_oh", "AA")]
            a_hoh = self[("water", "spc", "a_hoh", "degree")]
        if not AA:
            r_oh = r_oh / a0
        # Angle of each O-H bond above the xy-plane (half the HOH opening).
        d = (90 - a_hoh / 2) * np.pi / 180
        xo = center[0]
        yo = center[1]
        zo = center[2]
        xh1 = center[0] + r_oh * np.cos(d)
        yh1 = center[1]
        zh1 = center[2] + r_oh * np.sin(d)
        xh2 = center[0] - r_oh * np.cos(d)
        yh2 = center[1]
        zh2 = center[2] + r_oh * np.sin(d)
        h1 = molecules.Atom(**{"AA": AA, "x": xh1, "y": yh1, "z": zh1,
                               "element": "H"})
        h2 = molecules.Atom(**{"AA": AA, "x": xh2, "y": yh2, "z": zh2,
                               "element": "H"})
        o = molecules.Atom(**{"AA": AA, "x": xo, "y": yo, "z": zo,
                              "element": "O"})
        w = molecules.Water(AA=AA)
        w.append(o)
        w.append(h1)
        w.append(h2)
        return w
    elif mol == "methanol":
        r_co = self[("methanol", "gas_opt", "r_co", "AA")]
        r_oh = self[("methanol", "gas_opt", "r_oh", "AA")]
        r_ch = self[("methanol", "gas_opt", "r_ch", "AA")]
        a_coh = self[("methanol", "gas_opt", "a_coh", "degree")]
        #a_hch = self[ ("methanol","gas_opt", "a_hch", "degree" ) ]
        a_hco = self[("methanol", "gas_opt", "a_hco", "degree")]
        a_coh *= np.pi / 180
        a_hco *= np.pi / 180
        d_hcoh_4 = self[("methanol", "gas_opt", "d_hcoh", "h4", "degree")]
        d_hcoh_4 *= np.pi / 180
        d_hcoh_5 = self[("methanol", "gas_opt", "d_hcoh", "h5", "degree")]
        d_hcoh_5 *= np.pi / 180
        d_hcoh_6 = self[("methanol", "gas_opt", "d_hcoh", "h6", "degree")]
        d_hcoh_6 *= np.pi / 180
        if not AA:
            r_co, r_oh, r_ch = r_co / a0, r_oh / a0, r_ch / a0
        c1 = molecules.Atom(**{"x": 0, "y": 0, "z": -r_co / 2, "AA": AA,
                               "element": "C"})
        o2 = molecules.Atom(**{"x": 0, "y": 0, "z": r_co / 2, "AA": AA,
                               "element": "O"})
        h3 = molecules.Atom(**{"x": r_oh * np.cos(a_coh - np.pi / 2),
                               "y": 0,
                               "z": r_oh * np.sin(a_coh - np.pi / 2) + r_co / 2,
                               "AA": AA, "element": "H"})
        h4 = molecules.Atom(**{"x": r_ch * np.sin(a_hco) * np.cos(d_hcoh_4),
                               "y": r_ch * np.sin(a_hco) * np.sin(d_hcoh_4),
                               "z": r_ch * np.cos(a_hco) - r_co / 2,
                               "AA": AA, "element": "H"})
        h5 = molecules.Atom(**{"x": r_ch * np.sin(a_hco) * np.cos(d_hcoh_5),
                               "y": r_ch * np.sin(a_hco) * np.sin(d_hcoh_5),
                               "z": r_ch * np.cos(a_hco) - r_co / 2,
                               "AA": AA, "element": "H"})
        h6 = molecules.Atom(**{"x": r_ch * np.sin(a_hco) * np.cos(d_hcoh_6),
                               "y": r_ch * np.sin(a_hco) * np.sin(d_hcoh_6),
                               "z": r_ch * np.cos(a_hco) - r_co / 2,
                               "AA": AA, "element": "H"})
        # BUG FIX: `Methanol` was referenced unqualified, a NameError at
        # runtime since this module only does `import molecules`; the class
        # presumably lives there like Water/Atom — confirm.
        m = molecules.Methanol()
        m.append(c1)
        m.append(o2)
        m.append(h3)
        m.append(h4)
        m.append(h5)
        m.append(h6)
        return m
def gen_mols_param(self, mol="water",
                   model='tip3p',
                   basis=["ano-1 2", "ano-1 4 3 1"],
                   AA=True,
                   worst=False):
    """Write one .mol file per point of the configured 6D scan grid.

    For each (r, tau, theta, rho1, rho2, rho3) combination a water dimer
    is built (w1 at the origin, w2 at the scanned position/orientation)
    and written to a file named from the six values. Returns 0.
    NOTE(review): `basis` is a mutable default argument; it is only read
    here, but callers should not mutate it.
    """
    # Scan axes from the (param, min/max/points) settings in the dict.
    r = np.linspace(self[('r', 'min')], self[('r', 'max')],
                    self[('r', 'points')])
    tau = np.linspace(self[('tau', 'min')], self[('tau', 'max')],
                      self[('tau', 'points')])
    theta = np.linspace(self[('theta', 'min')], self[('theta', 'max')],
                        self[('theta', 'points')])
    rho1 = np.linspace(self[('rho1', 'min')], self[('rho1', 'max')],
                       self[('rho1', 'points')])
    rho2 = np.linspace(self[('rho2', 'min')], self[('rho2', 'max')],
                       self[('rho2', 'points')])
    rho3 = np.linspace(self[('rho3', 'min')], self[('rho3', 'max')],
                       self[('rho3', 'points')])
    # NOTE(review): r_oh/a_hoh are looked up but never used below, and the
    # else-branch duplicates the tip3p values — likely leftover code.
    if model == 'tip3p':
        r_oh = self[("water", 'tip3p', "r_oh", "AA")]
        a_hoh = np.pi * self[("water", 'tip3p', "a_hoh", "degree")] / 180.0
    else:
        r_oh = self[("water", 'tip3p', "r_oh", "AA")]
        a_hoh = np.pi * self[("water", 'tip3p', "a_hoh", "degree")] / 180.0
    for i in r:
        for j in tau:
            for k in theta:
                for l in rho1:
                    for m in rho2:
                        for n in rho3:
                            # First monomer: standard water moved to origin.
                            w1 = molecules.Water.get_standard(AA=AA)
                            w1.t(-w1.o.r)
                            if worst:
                                # "Worst-case" distorted monomer: scaled
                                # bonds/angle instead of the standard one.
                                w1 = self.get_mol([0, 0, 0],
                                                  mol=mol,
                                                  model=model, AA=AA)
                                w1.populate_bonds()
                                w1.populate_angles()
                                w1.h1.scale_angle(0.988)
                                w1.h1.scale_bond(0.985)
                                w1.h2.scale_bond(1.015)
                                w1.inv_rotate()
                            # Second monomer: rotated, then translated to
                            # the spherical-grid position.
                            w2 = molecules.Water.get_standard(AA=AA)
                            w2.t(-w2.o.r)
                            x, y, z = self.polar_to_cartesian(i, j, k)
                            w2.rotate(l, m, n)
                            w2.t(np.array([x, y, z]))
                            name = ""
                            name += "-".join(map(str, ["%3.2f" % i, "%3.2f" % j, "%3.2f" % k, "%3.2f" % l, "%3.2f" % m, "%3.2f" % n]))
                            name += ".mol"
                            c = molecules.Cluster(w1, w2)
                            tmp_mol = c.get_mol_string(basis=tuple(basis))
                            # NOTE(review): file handle is never closed
                            # explicitly; relies on GC.
                            f_ = open(name, 'w')
                            f_.write(tmp_mol)
    return 0
def vary_parameters(self, opts):
    """Activate the scan parameters named in *opts* and store their ranges.

    *opts* maps a parameter name ("r", "theta", "rho1", ...) to a dict with
    "min", "max" and "points" entries, e.g.::

        {"r": {"min": 2, "max": 5, "points": 10}, ...}

    All other parameters keep the static defaults set in __init__; these
    entries override them, mirroring the command-line flags
    -r_min / -r_max / -r_points of the driver script.
    """
    for name, spec in opts.items():
        self[(name, 'active')] = True
        self[(name, 'min')] = spec["min"]
        self[(name, 'max')] = spec["max"]
        self[(name, 'points')] = spec["points"]
def polar_to_cartesian(self, r, tau, theta):
    """Convert spherical coordinates to Cartesian (x, y, z).

    r: radius; tau: azimuthal angle; theta: polar angle from the z axis.
    """
    sin_theta = np.sin(theta)
    x = r * sin_theta * np.cos(tau)
    y = r * sin_theta * np.sin(tau)
    z = r * np.cos(theta)
    return x, y, z
def one_mol_gen(self, mol='water', model='tip3p',):
    """Write .mol files for a grid of slightly distorted single molecules.

    Only implemented for water so far. Bond lengths and the HOH angle are
    scaled by +-(max)% over the configured number of points and each
    distorted geometry is written to "<model>_<i>_<j>_<k>.mol".
    NOTE(review): the ("r_oh_dev"/"theta_hoh_dev", ...) keys read here are
    not set by __init__ — they must be supplied externally (KeyError
    otherwise); confirm the expected setup path.
    """
    if mol == "water":
        # Percentage deviations for the two O-H bonds...
        d = self[("r_oh_dev", "max")]
        p = self[("r_oh_dev", "points")]
        r_d = 0.01 * np.linspace(-d, d, p)
        # ...and for the HOH angle.
        d = self[("theta_hoh_dev", "max")]
        p = self[("theta_hoh_dev", "points")]
        theta_d = 0.01 * np.linspace(-d, d, p)
        #a_hoh = self[ ( mol, model, "a_hoh", "degree" ) ] *np.pi/180
        #r_oh = self[ ( mol, model, "r_oh", "AA" ) ]
        for i in r_d:
            for j in r_d:
                for k in theta_d:
                    scale_bond1 = 1 + i
                    scale_bond2 = 1 + j
                    scale_angle = 1 + k
                    names = map(lambda x: "%.3f" % x, [i, j, k])
                    w = self.get_mol(mol=mol, model=model)
                    w.populate_bonds(); w.populate_angles()
                    w.h1.scale_bond(scale_bond1)
                    w.h2.scale_bond(scale_bond2)
                    w.h1.scale_angle(scale_angle)
                    w.inv_rotate()
                    # NOTE(review): file handle closed only by GC.
                    open("_".join([model] + names) + ".mol", 'w').write(w.get_mol_string())
def build_pna(self, xyz="tmp.xyz", waters=0,
              min_r=2.0,
              mult_r=10,
              seed=111):
    """Build a PNA molecule surrounded by *waters* randomly placed waters.

    Writes "pna.mol" (QM region), one .pot file per MM frequency/damping
    combination, and "tmp.xyz". Placement: random orientation plus a random
    spherical position with radius in [min_r, min_r*mult_r]; candidates too
    close to existing molecules are rejected and retried.
    NOTE(review): `Molecule`, `Template` and `Property` are referenced
    unqualified but are not imported in this module's visible header —
    likely NameErrors; confirm the intended imports.
    """
    pna = Molecule.from_xyz(xyz)
    freqs = ["0.0", "0.0238927", "0.0428227", "0.0773571"]
    np.random.seed(seed)
    c = molecules.Cluster()
    c.add_mol(pna, in_qm=True)
    cnt = 0
    while cnt < waters:
        # Random rotation angles
        t1 = np.random.uniform(0, np.pi / 2)
        t2 = np.random.uniform(0, np.pi)
        t3 = np.random.uniform(0, np.pi / 2)
        # random length, rho and tau
        r = np.random.uniform(min_r, min_r * mult_r)
        tau = np.random.uniform(0, np.pi * 2)
        theta = np.random.uniform(0, np.pi)
        center = self.polar_to_cartesian(r, tau, theta)
        wat = self.get_mol(center=pna.com + center,
                           mol="water")
        wat.rotate(t1, t2, t3)
        wat._res_id = cnt
        # Reject overlapping placements and retry with new random values.
        if c.mol_too_close(wat):
            continue
        #We are satisfied with this position, add properties to the water, and rotate them according to t1, t2, t3 so they match the water orientation
        c.add_mol(wat, in_mm=True)
        cnt += 1
    for f_mm in freqs:
        for dist in ["nodist", "dist"]:
            for wat in [m for m in c if m.in_mm]:
                t1, t2, t3 = wat.get_euler()
                kwargs_dict = Template().get(*("TIP3P", "HF", "ANOPVDZ",
                                               dist == "dist", f_mm))
                for at in wat:
                    Property.add_prop_from_template(at, kwargs_dict)
                # Rotate the template properties into each water's frame.
                Property.transform_ut_properties(wat.h1.Property, t1, t2, t3)
                Property.transform_ut_properties(wat.h2.Property, t1, t2, t3)
                Property.transform_ut_properties(wat.o.Property, t1, t2, t3)
            #Write out QM and MM region separately with properties
            open("pna.mol", 'w').write(c.get_qm_mol_string(
                basis=("ano-1 2 1", "ano-1 3 2 1"),
                AA=True))
            open("%dmm_%s_%s.pot" % (waters, f_mm, dist), 'w').write(c.get_qmmm_pot_string(in_AA=True))
            open("tmp.xyz", 'w').write(c.get_xyz_string())
| {
"repo_name": "fishstamp82/moltools",
"path": "moltools/generator.py",
"copies": "1",
"size": "18121",
"license": "mit",
"hash": -5328535197113275000,
"line_mean": 24.8871428571,
"line_max": 142,
"alpha_frac": 0.4625020694,
"autogenerated": false,
"ratio": 2.914281119330975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8767720961465436,
"avg_score": 0.021812445453107725,
"num_lines": 700
} |
__all__ = ["generic", "boinc", "condor", "ganga", "mpi", "multicore"]
from .generic import *
import sys
from optparse import OptionParser
# NOTE(review): this loop looks broken — it imports the literal submodule
# name "interface" on every iteration (shadowing the loop variable) rather
# than importing each module listed in __all__; importlib.import_module
# would be needed for that. Left untouched pending confirmation of intent.
for interface in __all__:
    from pymw.interfaces import interface
def parse_options(parser=None, args=None):
    """Parse the standard options associated with a PyMW application.

    A caller-supplied *parser*/*args* may be given (useful for testing);
    otherwise a fresh OptionParser and sys.argv[1:] are used. Returns
    (options, args); unrecognized positional arguments are returned for
    additional parsing.
    """
    if not parser:
        parser = OptionParser(usage="usage: %prog")
    if not args:
        args = sys.argv[1:]
    # (short flag, long flag, dest, default, help, metavar)
    option_specs = (
        ("-i", "--interface", "interface", "generic",
         "specify the interface (generic/multicore/mpi/condor/boinc)",
         "INTERFACE"),
        ("-n", "--num_workers", "n_workers", "4",
         "number of workers", "N"),
        ("-g", "--ganga_loc", "g_loc", "~/Ganga/bin/ganga",
         "directory of GANGA executable (GANGA interface)", "FILE"),
        ("-p", "--project_home", "p_home", "",
         "directory of the project (BOINC interface)", "DIR"),
        ("-c", "--app_path", "custom_app_dir", "",
         "directory of a custom worker application (BOINC interface)", "DIR"),
        ("-a", "--app_args", "custom_app_args", "",
         "arguments for a custom worker application (BOINC interface)", "DIR"),
    )
    for short_flag, long_flag, dest, default, help_text, metavar in option_specs:
        parser.add_option(short_flag, long_flag, dest=dest, default=default,
                          help=help_text, metavar=metavar)
    return parser.parse_args(args)
def get_interface(options):
    """Return the PyMW interface instance specified in *options*.

    Falls back to the generic interface name check order of the original;
    exits the process on an unknown interface name.
    """
    n_workers = int(options.n_workers)
    if options.interface == "generic":
        interface_obj = generic.GenericInterface(num_workers=n_workers)
    elif options.interface == "multicore":
        interface_obj = multicore.MulticoreInterface(num_workers=n_workers)
    elif options.interface == "mpi":
        interface_obj = mpi.MPIInterface(num_workers=n_workers)
    elif options.interface == "condor":
        interface_obj = condor.CondorInterface()
    elif options.interface == "ganga":
        # BUG FIX: the original referenced undefined name `interfaces.ganga`;
        # the sibling modules are used unqualified everywhere else here.
        interface_obj = ganga.GANGAInterface(ganga_loc=options.g_loc)
    elif options.interface == "boinc":
        interface_obj = boinc.BOINCInterface(project_home=options.p_home,
                                             custom_app_dir=options.custom_app_dir,
                                             custom_args=[options.custom_app_args])
    else:
        # BUG FIX: print(("Interface", ..., "unknown.")) printed a tuple
        # repr (a 2-to-3 conversion artifact); emit a plain message.
        print("Interface %s unknown." % options.interface)
        exit()
    return interface_obj
| {
"repo_name": "auxten/pymw",
"path": "pymw/interfaces/__init__.py",
"copies": "2",
"size": "2511",
"license": "mit",
"hash": -1631532286107788000,
"line_mean": 34.3913043478,
"line_max": 83,
"alpha_frac": 0.6837913182,
"autogenerated": false,
"ratio": 3.402439024390244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9896986087261361,
"avg_score": 0.03784885106577629,
"num_lines": 69
} |
__all__ = ["generic"]
from types import ClassType, InstanceType
classtypes = type, ClassType
# This is version 0.6 of Philip J. Eby's simplegeneric module
# (http://cheeseshop.python.org/pypi/simplegeneric) patched to work
# with Python 2.3 (which doesn't support assigning to __name__)
def generic(func):
    """Create a simple generic function.

    Returns a dispatcher that calls the registered implementation for the
    first argument's type (or identity), falling back to *func*. Methods
    are registered via the attached .when_type / .when_object decorators.
    """
    # Unique marker distinguishing "no registration" from a stored None.
    _sentinel = object()
    def _by_class(*args, **kw):
        # Old-style-instance path: walk a synthesized MRO for the class.
        cls = args[0].__class__
        for t in type(cls.__name__, (cls, object), {}).__mro__:
            f = _gbt(t, _sentinel)
            if f is not _sentinel:
                return f(*args, **kw)
        else:
            # for/else: no registered type matched — use the default.
            return func(*args, **kw)
    _by_type = {object: func, InstanceType: _by_class}
    _gbt = _by_type.get
    def when_type(t):
        """Decorator to add a method that will be called for type `t`"""
        if not isinstance(t, classtypes):
            raise TypeError(
                "%r is not a type or class" % (t,)
            )
        def decorate(f):
            # setdefault guards against double registration for the same type.
            if _by_type.setdefault(t, f) is not f:
                raise TypeError(
                    "%r already has method for type %r" % (func, t)
                )
            return f
        return decorate
    _by_object = {}
    _gbo = _by_object.get
    def when_object(o):
        """Decorator to add a method that will be called for object `o`"""
        def decorate(f):
            # Keyed by id(o); the object itself is kept alive in the tuple.
            if _by_object.setdefault(id(o), (o, f))[1] is not f:
                raise TypeError(
                    "%r already has method for object %r" % (func, o)
                )
            return f
        return decorate
    def dispatch(*args, **kw):
        # Identity registrations take precedence over type registrations.
        f = _gbo(id(args[0]), _sentinel)
        if f is _sentinel:
            for t in type(args[0]).__mro__:
                f = _gbt(t, _sentinel)
                if f is not _sentinel:
                    return f(*args, **kw)
            else:
                # for/else: nothing registered along the MRO — default impl.
                return func(*args, **kw)
        else:
            return f[1](*args, **kw)
    # Make the dispatcher masquerade as the wrapped function.
    # (Py2.3 compatibility: __name__ may not be assignable.)
    try:
        dispatch.__name__ = func.__name__
    except TypeError:
        pass
    dispatch.__dict__ = func.__dict__.copy()
    dispatch.__doc__ = func.__doc__
    dispatch.__module__ = func.__module__
    dispatch.when_type = when_type
    dispatch.when_object = when_object
    dispatch.default = func
    dispatch.has_object = lambda o: id(o) in _by_object
    dispatch.has_type = lambda t: t in _by_type
    return dispatch
def test_suite():
    """Collect the doctest suite from this package's README.txt."""
    import doctest
    flags = doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE
    return doctest.DocFileSuite('README.txt', optionflags=flags)
| {
"repo_name": "santisiri/popego",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/ipython-0.8.2-py2.5.egg/IPython/external/simplegeneric.py",
"copies": "1",
"size": "2669",
"license": "bsd-3-clause",
"hash": -5792710187480624000,
"line_mean": 19.5307692308,
"line_max": 74,
"alpha_frac": 0.5140502061,
"autogenerated": false,
"ratio": 3.8183118741058655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48323620802058653,
"avg_score": null,
"num_lines": null
} |
__all__ = ['gen_marked_value', 'MarkedValue']

# Python 2/3 compatibility: on Python 3 there is no separate `unicode`
# type, so fall back to `str`.
try:
    from __builtin__ import unicode
except ImportError:
    unicode = str
def gen_new(cls):
    """Return a __new__ suitable for Marked* subclasses of *cls*.

    The generated __new__ builds the underlying value via the base type
    and attaches the original value and its source mark as attributes.
    (Note: *cls* itself is unused; the actual class arrives as the first
    argument of __new__ at call time.)
    """
    def __new__(arg_cls, value, mark):
        obj = super(arg_cls, arg_cls).__new__(arg_cls, value)
        obj.value = value
        obj.mark = mark
        return obj
    return __new__
class MarkedUnicode(unicode):
    """Unicode string that carries the source mark it was parsed from."""
    __new__ = gen_new(unicode)

    def _proc_partition(self, part_result):
        """Rewrap a (r)partition result, advancing the mark past each piece."""
        pointdiff = 1
        r = []
        for s in part_result:
            mark = self.mark.copy()
            # XXX Does not work properly with escaped strings, but this requires
            # saving much more information in mark.
            mark.column += pointdiff
            mark.pointer += pointdiff
            r.append(MarkedUnicode(s, mark))
            pointdiff += len(s)
        return tuple(r)

    def rpartition(self, sep):
        # Each returned piece is itself a MarkedUnicode with an offset mark.
        return self._proc_partition(super(MarkedUnicode, self).rpartition(sep))

    def partition(self, sep):
        return self._proc_partition(super(MarkedUnicode, self).partition(sep))
class MarkedInt(int):
    """int subclass carrying .value/.mark, set by the gen_new __new__."""
    __new__ = gen_new(int)
class MarkedFloat(float):
    """float subclass carrying .value/.mark, set by the gen_new __new__."""
    __new__ = gen_new(float)
class MarkedDict(dict):
    """dict subclass carrying .value/.mark, set by the gen_new __new__."""
    __new__ = gen_new(dict)

    def __init__(self, value, mark):
        # mark is deliberately ignored here: __new__ already attached it.
        super(MarkedDict, self).__init__(value)

    def copy(self):
        """Copy the dict contents while preserving the original mark."""
        return MarkedDict(super(MarkedDict, self).copy(), self.mark)
class MarkedValue:
    """Generic wrapper pairing an arbitrary parsed value with its source mark."""

    def __init__(self, value, mark):
        self.value = value
        self.mark = mark
# Builtin types that have hand-written Marked* subclasses.
specialclasses = {
    unicode: MarkedUnicode,
    int: MarkedInt,
    float: MarkedFloat,
    dict: MarkedDict,
}

# Cache of dynamically generated Marked wrapper classes, keyed by value type.
classcache = {}
def gen_marked_value(value, mark, use_special_classes=True):
    """Wrap *value* in a Marked* class that also carries *mark*.

    Uses a hand-written subclass for common builtins, otherwise generates
    (and caches) a MarkedValue subclass that proxies every method of the
    wrapped type to self.value.
    """
    if use_special_classes and value.__class__ in specialclasses:
        Marked = specialclasses[value.__class__]
    elif value.__class__ in classcache:
        Marked = classcache[value.__class__]
    else:
        # Build the proxy class at class-definition time: the loop below runs
        # inside the class body, exec-defining one forwarding method per
        # attribute of the wrapped type.
        class Marked(MarkedValue):
            for func in value.__class__.__dict__:
                if func == 'copy':
                    def copy(self):
                        return self.__class__(self.value.copy(), self.mark)
                elif func not in set(('__init__', '__new__', '__getattribute__')):
                    if func in set(('__eq__',)):
                        # HACK to make marked dictionaries always work
                        exec (('def {0}(self, *args):\n'
                               '    return self.value.{0}(*[arg.value if isinstance(arg, MarkedValue) else arg for arg in args])').format(func))
                    else:
                        exec (('def {0}(self, *args, **kwargs):\n'
                               '    return self.value.{0}(*args, **kwargs)\n').format(func))
        classcache[value.__class__] = Marked
    return Marked(value, mark)
| {
"repo_name": "wezhang/vim-setup",
"path": "bundle/powerline/powerline/lint/markedjson/markedvalue.py",
"copies": "1",
"size": "2387",
"license": "apache-2.0",
"hash": -3459524816231421000,
"line_mean": 23.6082474227,
"line_max": 118,
"alpha_frac": 0.6501885212,
"autogenerated": false,
"ratio": 3.0330368487928845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41832253699928845,
"avg_score": null,
"num_lines": null
} |
__all__ = ["GeolocationOverrideManager"]
from devtools_event_listener import DevToolsEventListener
from misc.geoposition import Geoposition
from status import *
import copy
# Overrides the geolocation, if requested, for the duration of the
# given |DevToolsClient|'s lifetime.
# Overrides the geolocation, if requested, for the duration of the
# given |DevToolsClient|'s lifetime.
class GeolocationOverrideManager(DevToolsEventListener):
    def __init__(self, client):
        DevToolsEventListener.__init__(self)
        self.client = client
        self.client.AddListener(self)
        # BUG FIX: the override must start out unset. The original
        # initialized this to Geoposition() and then tested
        # `if self.overridden_geoposition: return Status(kOk)` in
        # _ApplyOverrideIfNeeded, which skipped applying exactly when an
        # override WAS present (inverted check; upstream chromedriver does
        # `if (!overridden_geoposition_) return Status(kOk);`).
        self.overridden_geoposition = None

    def OverrideGeolocation(self, geoposition):
        """Record a copy of *geoposition* and push it to the renderer."""
        self.overridden_geoposition = copy.deepcopy(geoposition)
        return self._ApplyOverrideIfNeeded()

    # Overridden from DevToolsEventListener:
    def OnConnected(self, client):
        # Re-apply after (re)connecting to DevTools.
        return self._ApplyOverrideIfNeeded()

    def OnEvent(self, client, method, params):
        # Re-apply after top-level navigations.
        # NOTE(review): the original checked `has_key("params")` on the
        # frame; upstream checks for the absence of "parentId" — confirm
        # the intended key. (`has_key` replaced with `in` for Py2/Py3.)
        if method == "Page.frameNavigated":
            if "params" not in params["frame"]:
                return self._ApplyOverrideIfNeeded()
        return Status(kOk)

    def _ApplyOverrideIfNeeded(self):
        # Nothing to do until OverrideGeolocation() has been called.
        if self.overridden_geoposition is None:
            return Status(kOk)
        params = {}
        params["latitude"] = self.overridden_geoposition.latitude
        params["longitude"] = self.overridden_geoposition.longitude
        params["accuracy"] = self.overridden_geoposition.accuracy
        return self.client.SendCommand("Page.setGeolocationOverride", params)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/geolocation_override_manager.py",
"copies": "1",
"size": "1361",
"license": "bsd-3-clause",
"hash": -7070247220797021000,
"line_mean": 33.025,
"line_max": 73,
"alpha_frac": 0.7457751653,
"autogenerated": false,
"ratio": 3.944927536231884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0314460458585106,
"num_lines": 40
} |
__all__ = ('GestureDatabase', 'GestureDatabaseItem')
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import NumericProperty, StringProperty
from kivy.properties import ListProperty, ObjectProperty
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.popup import Popup
from kivy.graphics import Rectangle, Color
from kivy.multistroke import Recognizer
# local libraries
from helpers import InformationPopup
Builder.load_file('gesturedatabase.kv')
class GestureExportPopup(Popup):
    """Modal dialog for exporting gestures (layout defined in the .kv file)."""
    pass
class GestureImportPopup(Popup):
    """Modal dialog for importing gestures (layout defined in the .kv file)."""
    pass
class GestureDatabaseItem(FloatLayout):
    """One row in the gesture-database list: shows a gesture name, the
    total number of templates for that name, and a Select/Deselect
    toggle; a translucent rectangle highlights the selected state."""

    name = StringProperty('(no name)')
    template_count = NumericProperty(0)
    gesture_list = ListProperty([])

    def __init__(self, **kwargs):
        super(GestureDatabaseItem, self).__init__(**kwargs)
        self.rect = None
        self._draw_trigger = Clock.create_trigger(self.draw_item, 0)
        self.update_template_count()
        self.bind(gesture_list=self.update_template_count)
        self.register_event_type('on_select')
        self.register_event_type('on_deselect')

    def toggle_selected(self, *l):
        """Redraw the highlight and dispatch on_select/on_deselect to
        reflect the toggle button's new state."""
        self._draw_rect(clear=True)
        if self.ids.select.state == 'down':
            self.dispatch('on_select')
            self.ids.select.text = 'Deselect'
        else:
            self.dispatch('on_deselect')
            self.ids.select.text = 'Select'

    def update_template_count(self, *l):
        """Recompute template_count as the sum over all gestures sharing
        this name."""
        self.template_count = sum(len(g.templates) for g in self.gesture_list)

    def draw_item(self, *l):
        """Reposition the child labels/button relative to self and redraw
        the highlight rectangle."""
        self.ids.namelbl.pos = self.pos
        self.ids.namelbl.y += 90
        self.ids.stats.pos = self.pos
        self.ids.stats.y += 40
        self.ids.select.pos = self.pos
        self._draw_rect()

    def _draw_rect(self, clear=False, *l):
        # Stronger red when selected, faint otherwise.
        col = 1 if self.ids.select.state == 'down' else .2
        with self.canvas:
            Color(col, 0, 0, .15)
            # Remove any previous rectangle before drawing a new one.
            # Guarding on `self.rect is not None` fixes a latent crash in
            # the original (`if self.rect or clear`), which called
            # canvas.remove(None) when clearing before the first draw.
            if self.rect is not None:
                self.canvas.remove(self.rect)
            self.rect = Rectangle(size=self.size, pos=self.pos)

    # Default handlers for the custom events registered in __init__;
    # explicit `self` added for consistency with normal method style.
    def on_select(self, *l):
        pass

    def on_deselect(self, *l):
        pass
class GestureDatabase(GridLayout):
    """Screen listing the gestures known to the recognizer; supports
    selecting, removing, importing and exporting them."""

    selected_count = NumericProperty(0)
    recognizer = ObjectProperty(None)
    export_popup = ObjectProperty(GestureExportPopup())
    import_popup = ObjectProperty(GestureImportPopup())
    info_popup = ObjectProperty(InformationPopup())

    def __init__(self, **kwargs):
        super(GestureDatabase, self).__init__(**kwargs)
        self.redraw_all = Clock.create_trigger(self._redraw_gesture_list, 0)
        self.export_popup.ids.save_btn.bind(on_press=self.perform_export)
        self.import_popup.ids.filechooser.bind(on_submit=self.perform_import)

    def import_gdb(self):
        """Rebuild the on-screen list from the recognizer database,
        grouping gestures by name."""
        self.gdict = {}
        for gesture in self.recognizer.db:
            self.gdict.setdefault(gesture.name, []).append(gesture)

        self.selected_count = 0
        self.ids.gesture_list.clear_widgets()
        for gname in sorted(self.gdict, key=lambda n: n.lower()):
            item = GestureDatabaseItem(name=gname,
                                       gesture_list=self.gdict[gname])
            item.bind(on_select=self.select_item)
            item.bind(on_deselect=self.deselect_item)
            self.ids.gesture_list.add_widget(item)

    def select_item(self, *l):
        self.selected_count += 1

    def deselect_item(self, *l):
        self.selected_count -= 1

    def mass_select(self, *l):
        """Deselect everything if anything is selected; otherwise select
        all items."""
        if self.selected_count:
            source, target = 'down', 'normal'
        else:
            source, target = 'normal', 'down'
        for item in self.ids.gesture_list.children:
            if item.ids.select.state == source:
                item.ids.select.state = target
                item.draw_item()

    def unload_gestures(self, *l):
        """Remove the selected gestures from the recognizer, or wipe the
        whole database when nothing is selected."""
        if not self.selected_count:
            self.recognizer.db = []
            self.ids.gesture_list.clear_widgets()
            self.selected_count = 0
            return

        # Iterate over a copy; we remove widgets while looping.
        for item in self.ids.gesture_list.children[:]:
            if item.ids.select.state != 'down':
                continue
            self.selected_count -= 1
            for gesture in item.gesture_list:
                self.recognizer.db.remove(gesture)
            self.ids.gesture_list.remove_widget(item)

    def perform_export(self, *l):
        """Save the current selection (or everything) to the filename
        given in the export popup, then confirm via the info popup."""
        path = self.export_popup.ids.filename.text
        if not path:
            self.export_popup.dismiss()
            self.info_popup.text = 'Missing filename'
            self.info_popup.open()
            return
        if not path.lower().endswith('.kg'):
            path += '.kg'
        self.save_selection_to_file(path)
        self.export_popup.dismiss()
        self.info_popup.text = 'Gestures exported!'
        self.info_popup.open()

    def perform_import(self, filechooser, *l):
        """Load every file chosen in the import popup and report how many
        gestures were added."""
        before = len(self.recognizer.db)
        for fname in filechooser.selection:
            self.recognizer.import_gesture(filename=fname)
        self.import_gdb()
        self.info_popup.text = ("Imported %d gestures.\n" %
                                (len(self.recognizer.db) - before))
        self.import_popup.dismiss()
        self.info_popup.open()

    def save_selection_to_file(self, filename, *l):
        """Export either the whole database or, when items are selected,
        only those gestures (via a temporary Recognizer)."""
        if not self.selected_count:
            self.recognizer.export_gesture(filename=filename)
            return
        subset = Recognizer()
        for item in self.ids.gesture_list.children:
            if item.ids.select.state == 'down':
                subset.db.extend(item.gesture_list)
        subset.export_gesture(filename=filename)

    def _redraw_gesture_list(self, *l):
        for child in self.ids.gesture_list.children:
            child._draw_trigger()
| {
"repo_name": "inclement/kivy",
"path": "examples/demo/multistroke/gesturedatabase.py",
"copies": "21",
"size": "6097",
"license": "mit",
"hash": 5288151034312363000,
"line_mean": 32.8722222222,
"line_max": 77,
"alpha_frac": 0.596358865,
"autogenerated": false,
"ratio": 3.633492252681764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022916012416981124,
"num_lines": 180
} |
__all__ = ('GestureHistoryManager', 'GestureVisualizer')
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.graphics import Color, Line
from kivy.properties import ObjectProperty, BooleanProperty
from kivy.compat import PY2
# local libraries
from helpers import InformationPopup
from settings import MultistrokeSettingsContainer
# refuse heap permute for gestures with more strokes than 3
# (you can increase it, but 4 strokes = 384 templates, 5 = 3840)
MAX_PERMUTE_STROKES = 3
Builder.load_file('historymanager.kv')
class GestureHistoryManager(GridLayout):
    """'History' screen: holds a GestureVisualizer for every analyzed
    gesture and swaps in a GestureSettingsForm when one is selected."""

    # Currently selected GestureVisualizer; None hides the settings form.
    selected = ObjectProperty(None, allownone=True)

    def __init__(self, **kwargs):
        super(GestureHistoryManager, self).__init__(**kwargs)
        self.gesturesettingsform = GestureSettingsForm()
        rr = self.gesturesettingsform.rrdetails
        rr.bind(on_reanalyze_selected=self.reanalyze_selected)
        self.infopopup = InformationPopup()
        self.recognizer = App.get_running_app().recognizer

    def reanalyze_selected(self, *l):
        """Re-run recognition of the selected gesture against the current
        database contents."""
        # recognize() can block the UI with max_gpf=100, show a message
        self.infopopup.text = 'Please wait, analyzing ..'
        self.infopopup.auto_dismiss = False
        self.infopopup.open()
        # Get a reference to the original GestureContainer object
        gesture_obj = self.selected._result_obj._gesture_obj
        # Reanalyze the candidate strokes using current database
        res = self.recognizer.recognize(gesture_obj.get_vectors(),
                                        max_gpf=100)
        # Tag the result with the gesture object (it didn't change)
        res._gesture_obj = gesture_obj
        # Tag the selected item with the updated ProgressTracker
        self.selected._result_obj = res
        res.bind(on_complete=self._reanalyze_complete)

    def _reanalyze_complete(self, *l):
        # Refresh the form with the new results and drop the wait popup.
        self.gesturesettingsform.load_visualizer(self.selected)
        self.infopopup.dismiss()

    def add_selected_to_database(self, *l):
        """Validate the form inputs and add the selected gesture to the
        recognizer database, reporting the outcome via the info popup."""
        if self.selected is None:
            raise Exception('add_gesture_to_database before load_visualizer?')
        if self.gesturesettingsform.addsettings is None:
            # (typo 'addsetings' in the original message fixed)
            raise Exception('add_gesture_to_database missing addsettings?')
        ids = self.gesturesettingsform.addsettings.ids
        name = ids.name.value.strip()
        if name == '':
            self.infopopup.auto_dismiss = True
            self.infopopup.text = 'You must specify a name for the gesture'
            self.infopopup.open()
            return
        permute = ids.permute.value
        sensitive = ids.orientation_sens.value
        strokelen = ids.stroke_sens.value
        angle_sim = ids.angle_sim.value
        cand = self.selected._result_obj._gesture_obj.get_vectors()
        if permute and len(cand) > MAX_PERMUTE_STROKES:
            # Heap permutation grows factorially with stroke count; refuse
            # past the module-level limit.
            t = "Can't heap permute %d-stroke gesture " % (len(cand))
            self.infopopup.text = t
            self.infopopup.auto_dismiss = True
            self.infopopup.open()
            return
        self.recognizer.add_gesture(
            name,
            cand,
            use_strokelen=strokelen,
            orientation_sensitive=sensitive,
            angle_similarity=angle_sim,
            permute=permute)
        self.infopopup.text = 'Gesture added to database'
        self.infopopup.auto_dismiss = True
        self.infopopup.open()

    def clear_history(self, *l):
        """Remove all history entries (deselecting first if needed)."""
        if self.selected:
            self.visualizer_deselect()
        self.ids.history.clear_widgets()

    def visualizer_select(self, visualizer, *l):
        """Make *visualizer* the current selection; show the settings form
        on the first selection."""
        if self.selected is not None:
            self.selected.selected = False
        else:
            self.add_widget(self.gesturesettingsform)
        self.gesturesettingsform.load_visualizer(visualizer)
        self.selected = visualizer

    def visualizer_deselect(self, *l):
        """Clear the selection and hide the settings form."""
        self.selected = None
        self.remove_widget(self.gesturesettingsform)

    def add_recognizer_result(self, result, *l):
        '''The result object is a ProgressTracker with additional
        data; in main.py it is tagged with the original GestureContainer
        that was analyzed (._gesture_obj)'''
        # Create a GestureVisualizer that draws the gesture on canvas
        visualizer = GestureVisualizer(result._gesture_obj,
                                       size_hint=(None, None), size=(150, 150))
        # Tag it with the result object so AddGestureForm.load_visualizer
        # has the results to build labels in the scrollview
        visualizer._result_obj = result
        visualizer.bind(on_select=self.visualizer_select)
        visualizer.bind(on_deselect=self.visualizer_deselect)
        # Add the visualizer to the list of gestures in 'history' screen
        self.ids.history.add_widget(visualizer)
        self._trigger_layout()
        self.ids.scrollview.update_from_scroll()
class RecognizerResultLabel(Label):
    """Label showing a single match result (name/score/distance) in the
    scrollable result list built by GestureSettingsForm."""
class RecognizerResultDetails(BoxLayout):
    """Scrollable container of RecognizerResultLabels (the matched
    gestures and their score/distance); registers and dispatches the
    custom ``on_reanalyze_selected`` event."""

    def __init__(self, **kwargs):
        super(RecognizerResultDetails, self).__init__(**kwargs)
        self.register_event_type('on_reanalyze_selected')

    def on_reanalyze_selected(self, *l):
        # Default handler for the custom event; intentionally a no-op.
        pass
class AddGestureSettings(MultistrokeSettingsContainer):
    """Settings container for the add-gesture form; its widgets are
    accessed via ``ids`` by GestureHistoryManager."""
class GestureSettingsForm(BoxLayout):
    '''This is the main content of the GestureHistoryManager, the form for
    adding a new gesture to the recognizer. It is added to the widget tree
    when a GestureVisualizer is selected.'''

    def __init__(self, **kwargs):
        super(GestureSettingsForm, self).__init__(**kwargs)
        self.infopopup = InformationPopup()
        self.rrdetails = RecognizerResultDetails()
        # Created lazily on the first load_visualizer() call.
        self.addsettings = None
        self.app = App.get_running_app()

    def load_visualizer(self, visualizer):
        """Show the add-gesture settings and the match-result list for
        *visualizer* (a GestureVisualizer tagged with ``_result_obj``)."""
        if self.addsettings is None:
            self.addsettings = AddGestureSettings()
            self.ids.settings.add_widget(self.addsettings)
        self.visualizer = visualizer
        analysis = self.ids.analysis
        analysis.clear_widgets()
        analysis.add_widget(self.rrdetails)
        scrollv = self.rrdetails.ids.result_scrollview
        resultlist = self.rrdetails.ids.result_list
        resultlist.clear_widgets()
        r = visualizer._result_obj.results
        # Idiomatic emptiness test (was `if not len(r):`).
        if not r:
            lbl = RecognizerResultLabel(text='[b]No match[/b]')
            resultlist.add_widget(lbl)
            scrollv.scroll_y = 1
            return
        # Dict iteration helper: iteritems() on Python 2, items() on 3.
        if PY2:
            d = r.iteritems
        else:
            d = r.items
        # Best score first.
        for one in sorted(d(), key=lambda x: x[1]['score'],
                          reverse=True):
            data = one[1]
            lbl = RecognizerResultLabel(
                text='Name: [b]' + data['name'] + '[/b]' +
                     '\n Score: ' + str(data['score']) +
                     '\n Distance: ' + str(data['dist']))
            resultlist.add_widget(lbl)
        # Make sure the top is visible
        scrollv.scroll_y = 1
class GestureVisualizer(Widget):
    """Widget that draws a gesture's strokes scaled to fit inside itself;
    a touch toggles selection and dispatches on_select/on_deselect."""

    selected = BooleanProperty(False)

    def __init__(self, gesturecontainer, **kwargs):
        super(GestureVisualizer, self).__init__(**kwargs)
        self._gesture_container = gesturecontainer
        self._trigger_draw = Clock.create_trigger(self._draw_item, 0)
        self.bind(pos=self._trigger_draw, size=self._trigger_draw)
        self._trigger_draw()
        self.register_event_type('on_select')
        self.register_event_type('on_deselect')

    def on_touch_down(self, touch):
        if not self.collide_point(touch.x, touch.y):
            return
        self.selected = not self.selected
        self.dispatch(self.selected and 'on_select' or 'on_deselect')

    # FIXME: This seems inefficient, is there a better way??
    def _draw_item(self, dt):
        """Redraw the gesture scaled to 85% of the widget and centered."""
        g = self._gesture_container
        bb = g.bbox
        minx, miny, maxx, maxy = bb['minx'], bb['miny'], bb['maxx'], bb['maxy']
        width, height = self.size
        xpos, ypos = self.pos

        # Uniform scale: fit the larger gesture dimension into 85% of the
        # corresponding widget dimension.
        if g.height > g.width:
            to_self = (height * 0.85) / g.height
        else:
            to_self = (width * 0.85) / g.width

        self.canvas.remove_group('gesture')

        cand = g.get_vectors()
        col = g.color

        # Scaled extents and centering offsets are identical for every
        # point; hoisted out of the loops (the original recomputed them
        # per vector).
        w = (maxx - minx) * to_self
        h = (maxy - miny) * to_self
        xoff = xpos + (width - w) * .85 / 2
        yoff = ypos + (height - h) * .85 / 2

        for stroke in cand:
            out = []
            append = out.append
            for x, y in stroke:
                append((x - minx) * to_self + xoff)
                append((y - miny) * to_self + yoff)
            with self.canvas:
                Color(col[0], col[1], col[2], mode='rgb')
                Line(points=out, group='gesture', width=2)

    # Default handlers for the custom events registered in __init__.
    def on_select(self, *l):
        pass

    def on_deselect(self, *l):
        pass
| {
"repo_name": "kivy/kivy",
"path": "examples/demo/multistroke/historymanager.py",
"copies": "25",
"size": "9418",
"license": "mit",
"hash": -6461181286350109000,
"line_mean": 33.1231884058,
"line_max": 79,
"alpha_frac": 0.6177532385,
"autogenerated": false,
"ratio": 3.7838489353153877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.