text stringlengths 0 1.05M | meta dict |
|---|---|
# A CAN bus node (or Board unit)
class Node(object):
    """A named node (board unit / ECU) attached to the CAN bus."""

    def __init__(self, name, comment, dbc_specifics=None):
        self._name = name
        self._comment = comment
        self._dbc = dbc_specifics

    @property
    def name(self):
        """The node name as a string."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def comment(self):
        """The node comment, or ``None`` if unavailable."""
        return self._comment

    @comment.setter
    def comment(self, value):
        self._comment = value

    @property
    def dbc(self):
        """An object containing dbc specific properties like e.g. attributes."""
        return self._dbc

    @dbc.setter
    def dbc(self, value):
        self._dbc = value

    def __repr__(self):
        # Quote the comment only when one is present; otherwise render None.
        quoted_comment = (
            "'" + self._comment + "'" if self._comment is not None else None)
        return "node('{}', {})".format(self._name, quoted_comment)
| {
"repo_name": "eerimoq/cantools",
"path": "cantools/database/can/node.py",
"copies": "1",
"size": "1088",
"license": "mit",
"hash": 3054141994942228500,
"line_mean": 18.7818181818,
"line_max": 77,
"alpha_frac": 0.5064338235,
"autogenerated": false,
"ratio": 4.152671755725191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 55
} |
# A CAN message.
import binascii
from copy import deepcopy
from ..utils import format_or
from ..utils import start_bit
from ..utils import encode_data
from ..utils import decode_data
from ..utils import create_encode_decode_formats
from ..errors import Error
from ..errors import EncodeError
from ..errors import DecodeError
class Message(object):
    """A CAN message with frame id, comment, signals and other
    information.

    If `strict` is ``True`` an exception is raised if any signals are
    overlapping or if they don't fit in the message.
    """

    def __init__(self,
                 frame_id,
                 name,
                 length,
                 signals,
                 comment=None,
                 senders=None,
                 send_type=None,
                 cycle_time=None,
                 dbc_specifics=None,
                 is_extended_frame=False,
                 bus_name=None,
                 signal_groups=None,
                 strict=True,
                 protocol=None):
        # Reject frame ids that do not fit the CAN identifier width
        # (29 bits for extended frames, 11 bits for standard frames).
        id_bits = frame_id.bit_length()

        if is_extended_frame:
            if id_bits > 29:
                raise Error(
                    'Extended frame id 0x{:x} is more than 29 bits in '
                    'message {}.'.format(frame_id, name))
        elif id_bits > 11:
            raise Error(
                'Standard frame id 0x{:x} is more than 11 bits in '
                'message {}.'.format(frame_id, name))

        self._frame_id = frame_id
        self._is_extended_frame = is_extended_frame
        self._name = name
        self._length = length
        self._signals = signals
        self._signals.sort(key=start_bit)

        # A plain string is treated as "the" (language-less) comment.
        # Anything else is assumed to be either None or a mapping of
        # language code -> comment text.
        if isinstance(comment, str):
            self._comments = {None: comment}
        else:
            self._comments = comment

        self._senders = senders if senders else []
        self._send_type = send_type
        self._cycle_time = cycle_time
        self._dbc = dbc_specifics
        self._bus_name = bus_name
        self._signal_groups = signal_groups
        self._codecs = None
        self._signal_tree = None
        self._strict = strict
        self._protocol = protocol
        self.refresh()

    def _create_codec(self, parent_signal=None, multiplexer_id=None):
        """Recursively build the codec for all signals belonging to the
        given parent multiplexer signal (both ``None`` at the root).
        """
        matched = []
        multiplexers = {}

        # Collect the signals whose parent signal name and multiplexer
        # id match the requested ones.
        for signal in self._signals:
            if signal.multiplexer_signal != parent_signal:
                continue

            if ((multiplexer_id is not None)
                    and (multiplexer_id not in signal.multiplexer_ids)):
                continue

            if signal.is_multiplexer:
                children_ids = set()

                for candidate in self._signals:
                    if candidate.multiplexer_signal == signal.name:
                        children_ids.update(candidate.multiplexer_ids)

                # Some CAN messages will have muxes containing only
                # the multiplexer and no additional signals. At Tesla
                # these are indicated in advance by assigning them an
                # enumeration. Here we ensure that any named
                # multiplexer is included, even if it has no child
                # signals.
                if signal.choices:
                    children_ids.update(signal.choices.keys())

                for child_id in children_ids:
                    child_codec = self._create_codec(signal.name, child_id)
                    multiplexers.setdefault(signal.name, {})[child_id] = child_codec

            matched.append(signal)

        return {
            'signals': matched,
            'formats': create_encode_decode_formats(matched,
                                                    self._length),
            'multiplexers': multiplexers
        }

    def _create_signal_tree(self, codec):
        """Recursively build a multiplexing tree node of the given
        codec: strings for plain signals, dicts for multiplexers.
        """
        multiplexers = codec['multiplexers']
        nodes = []

        for signal in codec['signals']:
            if signal.name in multiplexers:
                nodes.append({
                    signal.name: {
                        mux: self._create_signal_tree(mux_codec)
                        for mux, mux_codec in multiplexers[signal.name].items()
                    }
                })
            else:
                nodes.append(signal.name)

        return nodes

    @property
    def frame_id(self):
        """The message frame id."""
        return self._frame_id

    @frame_id.setter
    def frame_id(self, value):
        self._frame_id = value

    @property
    def is_extended_frame(self):
        """``True`` if the message is an extended frame, ``False`` otherwise."""
        return self._is_extended_frame

    @is_extended_frame.setter
    def is_extended_frame(self, value):
        self._is_extended_frame = value

    @property
    def name(self):
        """The message name as a string."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def length(self):
        """The message data length in bytes."""
        return self._length

    @length.setter
    def length(self, value):
        self._length = value

    @property
    def signals(self):
        """A list of all signals in the message."""
        return self._signals

    @property
    def signal_groups(self):
        """A list of all signal groups in the message."""
        return self._signal_groups

    @signal_groups.setter
    def signal_groups(self, value):
        self._signal_groups = value

    @property
    def comment(self):
        """The message comment, or ``None`` if unavailable.

        If comments exist in several languages, the language-less
        entry is preferred, then the English ('EN') one.
        """
        if self._comments is None:
            return None

        if self._comments.get(None) is not None:
            return self._comments.get(None)

        return self._comments.get('EN', None)

    @property
    def comments(self):
        """The dictionary of per-language message descriptions, or
        ``None`` if unavailable.
        """
        return self._comments

    @comment.setter
    def comment(self, value):
        self._comments = {None: value}

    @comments.setter
    def comments(self, value):
        self._comments = value

    @property
    def senders(self):
        """A list of all sender nodes of this message."""
        return self._senders

    @property
    def send_type(self):
        """The message send type, or ``None`` if unavailable."""
        return self._send_type

    @property
    def cycle_time(self):
        """The message cycle time, or ``None`` if unavailable."""
        return self._cycle_time

    @property
    def dbc(self):
        """An object containing dbc specific properties like e.g. attributes."""
        return self._dbc

    @dbc.setter
    def dbc(self, value):
        self._dbc = value

    @property
    def bus_name(self):
        """The message bus name, or ``None`` if unavailable."""
        return self._bus_name

    @bus_name.setter
    def bus_name(self, value):
        self._bus_name = value

    @property
    def protocol(self):
        """The message protocol, or ``None`` if unavailable. Only one
        protocol is currently supported; ``'j1939'``.
        """
        return self._protocol

    @protocol.setter
    def protocol(self, value):
        self._protocol = value

    @property
    def signal_tree(self):
        """All signal names and multiplexer ids as a tree. Multiplexer
        signals are dictionaries, while other signals are strings.

        >>> foo = db.get_message_by_name('Foo')
        >>> foo.signal_tree
        ['Bar', 'Fum']
        >>> bar = db.get_message_by_name('Bar')
        >>> bar.signal_tree
        [{'A': {0: ['C', 'D'], 1: ['E']}}, 'B']
        """
        return self._signal_tree

    def _get_mux_number(self, decoded, signal_name):
        """Return the numeric multiplexer id selected by `signal_name`
        in `decoded`, translating a choice string when necessary.
        """
        mux = decoded[signal_name]

        if isinstance(mux, str):
            mux = self.get_signal_by_name(signal_name).choice_string_to_number(mux)

        return mux

    def _check_signals_ranges_scaling(self, signals, data):
        """Raise EncodeError when a numeric value in `data` falls
        outside its signal's minimum/maximum range.
        """
        for signal in signals:
            value = data[signal.name]

            # Choices are checked later.
            if isinstance(value, str):
                continue

            if signal.minimum is not None and value < signal.minimum:
                raise EncodeError(
                    "Expected signal '{}' value greater than or equal to "
                    "{} in message '{}', but got {}.".format(signal.name,
                                                             signal.minimum,
                                                             self._name,
                                                             value))

            if signal.maximum is not None and value > signal.maximum:
                raise EncodeError(
                    "Expected signal '{}' value less than or equal to "
                    "{} in message '{}', but got {}.".format(signal.name,
                                                             signal.maximum,
                                                             self.name,
                                                             value))

    def _check_signals(self, signals, data, scaling):
        """Verify `data` provides a value for every signal and, when
        scaling, that every value is inside its allowed range.
        """
        for signal in signals:
            if signal.name not in data:
                raise EncodeError(
                    "Expected signal value for '{}' in data, but got {}.".format(
                        signal.name,
                        data))

        if scaling:
            self._check_signals_ranges_scaling(signals, data)

    def _encode(self, codec, data, scaling, strict):
        """Encode `data` using `codec`, recursing into the sub-codec
        selected by each multiplexer. Returns (bits, padding mask).
        """
        if strict:
            self._check_signals(codec['signals'], data, scaling)

        encoded = encode_data(data,
                              codec['signals'],
                              codec['formats'],
                              scaling)
        padding_mask = codec['formats'].padding_mask
        multiplexers = codec['multiplexers']

        for signal in multiplexers:
            mux = self._get_mux_number(data, signal)

            try:
                mux_codec = multiplexers[signal][mux]
            except KeyError:
                raise EncodeError('expected multiplexer id {}, but got {}'.format(
                    format_or(multiplexers[signal]),
                    mux))

            mux_encoded, mux_padding_mask = self._encode(mux_codec,
                                                         data,
                                                         scaling,
                                                         strict)
            encoded |= mux_encoded
            padding_mask &= mux_padding_mask

        return encoded, padding_mask

    def encode(self, data, scaling=True, padding=False, strict=True):
        """Encode given data as a message of this type.

        If `scaling` is ``False`` no scaling of signals is performed.

        If `padding` is ``True`` unused bits are encoded as 1.

        If `strict` is ``True`` all signal values must be within their
        allowed ranges, or an exception is raised.

        >>> foo = db.get_message_by_name('Foo')
        >>> foo.encode({'Bar': 1, 'Fum': 5.0})
        b'\\x01\\x45\\x23\\x00\\x11'
        """
        encoded, padding_mask = self._encode(self._codecs,
                                             data,
                                             scaling,
                                             strict)

        if padding:
            encoded |= padding_mask

        # A marker bit above the payload keeps leading zero bytes in
        # the hex string; the [4:] slice strips it off again ('0x8' +
        # the marker's low nibble).
        encoded |= (0x80 << (8 * self._length))
        encoded = hex(encoded)[4:].rstrip('L')

        return binascii.unhexlify(encoded)[:self._length]

    def _decode(self, codec, data, decode_choices, scaling):
        """Decode `data` using `codec`, recursing into the sub-codec
        selected by each multiplexer.
        """
        decoded = decode_data(data,
                              codec['signals'],
                              codec['formats'],
                              decode_choices,
                              scaling)
        multiplexers = codec['multiplexers']

        for signal in multiplexers:
            mux = self._get_mux_number(decoded, signal)

            try:
                mux_codec = multiplexers[signal][mux]
            except KeyError:
                raise DecodeError('expected multiplexer id {}, but got {}'.format(
                    format_or(multiplexers[signal]),
                    mux))

            decoded.update(self._decode(mux_codec,
                                        data,
                                        decode_choices,
                                        scaling))

        return decoded

    def decode(self, data, decode_choices=True, scaling=True):
        """Decode given data as a message of this type.

        If `decode_choices` is ``False`` scaled values are not
        converted to choice strings (if available).

        If `scaling` is ``False`` no scaling of signals is performed.

        >>> foo = db.get_message_by_name('Foo')
        >>> foo.decode(b'\\x01\\x45\\x23\\x00\\x11')
        {'Bar': 1, 'Fum': 5.0}
        """
        return self._decode(self._codecs,
                            data[:self._length],
                            decode_choices,
                            scaling)

    def get_signal_by_name(self, name):
        """Return the signal named `name`, raising KeyError if absent."""
        for signal in self._signals:
            if signal.name == name:
                return signal

        raise KeyError(name)

    def is_multiplexed(self):
        """Returns ``True`` if the message is multiplexed, otherwise
        ``False``.

        >>> foo = db.get_message_by_name('Foo')
        >>> foo.is_multiplexed()
        False
        >>> bar = db.get_message_by_name('Bar')
        >>> bar.is_multiplexed()
        True
        """
        return bool(self._codecs['multiplexers'])

    def _check_signal(self, message_bits, signal):
        """Mark `signal`'s bits in `message_bits`, raising on overflow
        past the message end or overlap with an earlier signal.
        """
        signal_bits = signal.length * [signal.name]

        if signal.byte_order == 'big_endian':
            padding = start_bit(signal) * [None]
            signal_bits = padding + signal_bits
        else:
            signal_bits += signal.start * [None]

            if len(signal_bits) < len(message_bits):
                padding = (len(message_bits) - len(signal_bits)) * [None]
                reversed_signal_bits = padding + signal_bits
            else:
                reversed_signal_bits = signal_bits

            # Little endian: reverse the byte order of the bit map.
            signal_bits = []

            for i in range(0, len(reversed_signal_bits), 8):
                signal_bits = reversed_signal_bits[i:i + 8] + signal_bits

        # Check that the signal fits in the message.
        if len(signal_bits) > len(message_bits):
            raise Error(
                'The signal {} does not fit in message {}.'.format(
                    signal.name,
                    self.name))

        # Check that the signal does not overlap with other
        # signals.
        for offset, signal_bit in enumerate(signal_bits):
            if signal_bit is not None:
                if message_bits[offset] is not None:
                    raise Error(
                        'The signals {} and {} are overlapping in message {}.'.format(
                            signal.name,
                            message_bits[offset],
                            self.name))

                message_bits[offset] = signal.name

    def _check_mux(self, message_bits, mux):
        """Check a multiplexer subtree: the selector signal itself,
        then every multiplexed alternative over a copy of the bit map.
        """
        signal_name, children = list(mux.items())[0]
        self._check_signal(message_bits,
                           self.get_signal_by_name(signal_name))
        children_message_bits = deepcopy(message_bits)

        for multiplexer_id in sorted(children):
            child_tree = children[multiplexer_id]
            child_message_bits = deepcopy(children_message_bits)
            self._check_signal_tree(child_message_bits, child_tree)

            # Fold the child's occupied bits back into the parent map.
            for i, child_bit in enumerate(child_message_bits):
                if child_bit is not None:
                    message_bits[i] = child_bit

    def _check_signal_tree(self, message_bits, signal_tree):
        """Walk a signal tree, checking plain signals and recursing
        into multiplexer dictionaries.
        """
        for signal_name in signal_tree:
            if isinstance(signal_name, dict):
                self._check_mux(message_bits, signal_name)
            else:
                self._check_signal(message_bits,
                                   self.get_signal_by_name(signal_name))

    def _check_signal_lengths(self):
        """Raise if any signal has a non-positive bit length."""
        for signal in self._signals:
            if signal.length <= 0:
                raise Error(
                    'The signal {} length {} is not greater than 0 in '
                    'message {}.'.format(
                        signal.name,
                        signal.length,
                        self.name))

    def refresh(self, strict=None):
        """Refresh the internal message state.

        If `strict` is ``True`` an exception is raised if any signals
        are overlapping or if they don't fit in the message. This
        argument overrides the value of the same argument passed to
        the constructor.
        """
        self._check_signal_lengths()
        self._codecs = self._create_codec()
        self._signal_tree = self._create_signal_tree(self._codecs)

        if strict is None:
            strict = self._strict

        if strict:
            message_bits = 8 * self.length * [None]
            self._check_signal_tree(message_bits, self.signal_tree)

    def __repr__(self):
        return "message('{}', 0x{:x}, {}, {}, {})".format(
            self._name,
            self._frame_id,
            self._is_extended_frame,
            self._length,
            self._comments)
| {
"repo_name": "eerimoq/cantools",
"path": "cantools/database/can/message.py",
"copies": "1",
"size": "18762",
"license": "mit",
"hash": 7071173031594177000,
"line_mean": 29.8078817734,
"line_max": 110,
"alpha_frac": 0.5115126319,
"autogenerated": false,
"ratio": 4.671812749003984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00030757399067008194,
"num_lines": 609
} |
"""ac_annotator URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from music_annotator.views import *
from annotator.views import *
# Route table: maps URL regexes to view callables (order matters in Django).
urlpatterns = [
    # Django admin interface.
    url(r'^admin/', admin.site.urls),
    # Landing page.
    url(r'^$', index, name='index'),
    # Annotation page for one sound; `fsid` is captured from the URL path.
    url(r'^annotate/(?P<fsid>[^\/]+)/$', annotate, name='annotate-sound'),
    url(r'^generate_annotations/', generate_annotations, name='generate-annotations'),
    url(r'^taxonomy_table/', taxonomy_table, name='taxonomy-table'),
    # NOTE(review): 'hierachy' spelling matches the imported view name — verify before renaming.
    url(r'^hierarchy_path/', get_hierachy_paths, name='get_hierachy_paths'),
]
| {
"repo_name": "AudioCommons/ac-annotator",
"path": "ac_annotator/urls.py",
"copies": "1",
"size": "1258",
"license": "apache-2.0",
"hash": -1239458855955330600,
"line_mean": 38.3125,
"line_max": 86,
"alpha_frac": 0.700317965,
"autogenerated": false,
"ratio": 3.5041782729805013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47044962379805016,
"avg_score": null,
"num_lines": null
} |
# A CAN signal.
class Decimal(object):
    """Holds the same values as
    :attr:`~cantools.database.can.Signal.scale`,
    :attr:`~cantools.database.can.Signal.offset`,
    :attr:`~cantools.database.can.Signal.minimum` and
    :attr:`~cantools.database.can.Signal.maximum`, but as
    ``decimal.Decimal`` instead of ``int`` and ``float`` for higher
    precision (no rounding errors).
    """

    def __init__(self, scale=None, offset=None, minimum=None, maximum=None):
        self._scale = scale
        self._offset = offset
        self._minimum = minimum
        self._maximum = maximum

    @property
    def scale(self):
        """The scale factor of the signal value as ``decimal.Decimal``."""
        return self._scale

    @scale.setter
    def scale(self, value):
        self._scale = value

    @property
    def offset(self):
        """The offset of the signal value as ``decimal.Decimal``."""
        return self._offset

    @offset.setter
    def offset(self, value):
        self._offset = value

    @property
    def minimum(self):
        """The minimum signal value as ``decimal.Decimal``, or ``None``."""
        return self._minimum

    @minimum.setter
    def minimum(self, value):
        self._minimum = value

    @property
    def maximum(self):
        """The maximum signal value as ``decimal.Decimal``, or ``None``."""
        return self._maximum

    @maximum.setter
    def maximum(self, value):
        self._maximum = value
class Signal(object):
"""A CAN signal with position, size, unit and other information. A
signal is part of a message.
Signal bit numbering in a message:
.. code:: text
Byte: 0 1 2 3 4 5 6 7
+--------+--------+--------+--------+--------+--------+--------+--------+--- - -
| | | | | | | | |
+--------+--------+--------+--------+--------+--------+--------+--------+--- - -
Bit: 7 0 15 8 23 16 31 24 39 32 47 40 55 48 63 56
Big endian signal with start bit 2 and length 5 (0=LSB, 4=MSB):
.. code:: text
Byte: 0 1 2 3
+--------+--------+--------+--- - -
| |432|10| | |
+--------+--------+--------+--- - -
Bit: 7 0 15 8 23 16 31
Little endian signal with start bit 2 and length 9 (0=LSB, 8=MSB):
.. code:: text
Byte: 0 1 2 3
+--------+--------+--------+--- - -
|543210| | |876| |
+--------+--------+--------+--- - -
Bit: 7 0 15 8 23 16 31
"""
def __init__(self,
name,
start,
length,
byte_order='little_endian',
is_signed=False,
initial=None,
scale=1,
offset=0,
minimum=None,
maximum=None,
unit=None,
choices=None,
dbc_specifics=None,
comment=None,
receivers=None,
is_multiplexer=False,
multiplexer_ids=None,
multiplexer_signal=None,
is_float=False,
decimal=None,
spn=None):
self._name = name
self._start = start
self._length = length
self._byte_order = byte_order
self._is_signed = is_signed
self._initial = initial
self._scale = scale
self._offset = offset
self._minimum = minimum
self._maximum = maximum
self._decimal = Decimal() if decimal is None else decimal
self._unit = unit
self._choices = choices
self._dbc = dbc_specifics
# if the 'comment' argument is a string, we assume that is an
# english comment. this is slightly hacky because the
# function's behavior depends on the type of the passed
# argument, but it is quite convenient...
if isinstance(comment, str):
# use the first comment in the dictionary as "The" comment
self._comments = { None: comment }
else:
# assume that we have either no comment at all or a
# multi-lingual dictionary
self._comments = comment
self._receivers = [] if receivers is None else receivers
self._is_multiplexer = is_multiplexer
self._multiplexer_ids = multiplexer_ids
self._multiplexer_signal = multiplexer_signal
self._is_float = is_float
self._spn = spn
@property
def name(self):
"""The signal name as a string.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def start(self):
"""The start bit position of the signal within its message.
"""
return self._start
@start.setter
def start(self, value):
self._start = value
@property
def length(self):
"""The length of the signal in bits.
"""
return self._length
@length.setter
def length(self, value):
self._length = value
@property
def byte_order(self):
"""Signal byte order as ``'little_endian'`` or ``'big_endian'``.
"""
return self._byte_order
@byte_order.setter
def byte_order(self, value):
self._byte_order = value
@property
def is_signed(self):
"""``True`` if the signal is signed, ``False`` otherwise. Ignore this
attribute if :data:`~cantools.db.Signal.is_float` is
``True``.
"""
return self._is_signed
@is_signed.setter
def is_signed(self, value):
self._is_signed = value
@property
def is_float(self):
"""``True`` if the signal is a float, ``False`` otherwise.
"""
return self._is_float
@is_float.setter
def is_float(self, value):
self._is_float = value
@property
def initial(self):
"""The initial value of the signal, or ``None`` if unavailable.
"""
return self._initial
@initial.setter
def initial(self, value):
self._initial = value
@property
def scale(self):
"""The scale factor of the signal value.
"""
return self._scale
@scale.setter
def scale(self, value):
self._scale = value
@property
def offset(self):
"""The offset of the signal value.
"""
return self._offset
@offset.setter
def offset(self, value):
self._offset = value
@property
def minimum(self):
"""The minimum value of the signal, or ``None`` if unavailable.
"""
return self._minimum
@minimum.setter
def minimum(self, value):
self._minimum = value
@property
def maximum(self):
"""The maximum value of the signal, or ``None`` if unavailable.
"""
return self._maximum
@maximum.setter
def maximum(self, value):
self._maximum = value
@property
def decimal(self):
"""The high precision values of
:attr:`~cantools.database.can.Signal.scale`,
:attr:`~cantools.database.can.Signal.offset`,
:attr:`~cantools.database.can.Signal.minimum` and
:attr:`~cantools.database.can.Signal.maximum`.
See :class:`~cantools.database.can.signal.Decimal` for more
details.
"""
return self._decimal
@property
def unit(self):
"""The unit of the signal as a string, or ``None`` if unavailable.
"""
return self._unit
@unit.setter
def unit(self, value):
self._unit = value
@property
def choices(self):
"""A dictionary mapping signal values to enumerated choices, or
``None`` if unavailable.
"""
return self._choices
@property
def dbc(self):
"""An object containing dbc specific properties like e.g. attributes.
"""
return self._dbc
@dbc.setter
def dbc(self, value):
self._dbc = value
@property
def comment(self):
"""The signal comment, or ``None`` if unavailable.
Note that we implicitly try to return the comment's language
to be English comment if multiple languages were specified.
"""
if self._comments is None:
return None
elif self._comments.get(None) is not None:
return self._comments.get(None)
return self._comments.get('EN', None)
@property
def comments(self):
"""The dictionary with the descriptions of the signal in multiple
languages. ``None`` if unavailable.
"""
return self._comments
@comment.setter
def comment(self, value):
self._comments = { None: value }
@comments.setter
def comments(self, value):
self._comments = value
@property
def receivers(self):
"""A list of all receiver nodes of this signal.
"""
return self._receivers
@property
def is_multiplexer(self):
"""``True`` if this is the multiplexer signal in a message, ``False``
otherwise.
"""
return self._is_multiplexer
@is_multiplexer.setter
def is_multiplexer(self, value):
self._is_multiplexer = value
@property
def multiplexer_ids(self):
"""The multiplexer ids list if the signal is part of a multiplexed
message, ``None`` otherwise.
"""
return self._multiplexer_ids
@multiplexer_ids.setter
def multiplexer_ids(self, value):
self._multiplexer_ids = value
@property
def multiplexer_signal(self):
"""The multiplexer signal if the signal is part of a multiplexed
message, ``None`` otherwise.
"""
return self._multiplexer_signal
@multiplexer_signal.setter
def multiplexer_signal(self, value):
self._multiplexer_signal = value
@property
def spn(self):
"""The J1939 Suspect Parameter Number (SPN) value if the signal
has this attribute, ``None`` otherwise.
"""
return self._spn
@spn.setter
def spn(self, value):
self._spn = value
def choice_string_to_number(self, string):
for choice_number, choice_string in self.choices.items():
if choice_string == string:
return choice_number
def __repr__(self):
if self._choices is None:
choices = None
else:
choices = '{{{}}}'.format(', '.join(
["{}: '{}'".format(value, text)
for value, text in self._choices.items()]))
return "signal('{}', {}, {}, '{}', {}, {}, {}, {}, {}, {}, '{}', {}, {}, {}, {}, {})".format(
self._name,
self._start,
self._length,
self._byte_order,
self._is_signed,
self._initial,
self._scale,
self._offset,
self._minimum,
self._maximum,
self._unit,
self._is_multiplexer,
self._multiplexer_ids,
choices,
self._spn,
self._comments)
| {
"repo_name": "eerimoq/cantools",
"path": "cantools/database/can/signal.py",
"copies": "1",
"size": "11610",
"license": "mit",
"hash": 6895642284732940000,
"line_mean": 23.9677419355,
"line_max": 101,
"alpha_frac": 0.5148148148,
"autogenerated": false,
"ratio": 4.371234939759036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010065892359029831,
"num_lines": 465
} |
# A Canvas fork to include dkim signing.
from django.core.mail.backends.base import BaseEmailBackend
from django.conf import settings
import dkim
import threading
from boto.ses import SESConnection
# Package metadata for this vendored django-ses fork.
__version__ = '0.1'
__author__ = 'Harry Marr'
class SESBackend(BaseEmailBackend):
    """A Django Email backend that uses Amazon's Simple Email Service,
    DKIM-signing each message before sending it.
    """

    def __init__(self, fail_silently=False, *args, **kwargs):
        super(SESBackend, self).__init__(fail_silently=fail_silently, *args,
                                         **kwargs)
        self._access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
        self._access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
        self._api_endpoint = getattr(settings, 'AWS_SES_API_HOST',
                                     SESConnection.DefaultHost)
        self.connection = None
        # Serializes connection setup/teardown across threads.
        self._lock = threading.RLock()

    def open(self):
        """Create a connection to the AWS API server. This can be reused
        for sending multiple emails.

        Returns ``True`` if a new connection was created, ``False`` if
        one was already open, ``None`` on silent failure.
        """
        if self.connection:
            return False

        try:
            self.connection = SESConnection(
                aws_access_key_id=self._access_key_id,
                aws_secret_access_key=self._access_key,
                host=self._api_endpoint,
            )
            # BUG FIX: previously nothing was returned on success, so
            # send_messages() never saw a truthy "new connection
            # created" flag and never closed the connection it opened.
            return True
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed.
            if not self.fail_silently:
                raise

    def close(self):
        """Close any open HTTP connections to the API server."""
        try:
            self.connection.close()
            self.connection = None
        except Exception:
            if not self.fail_silently:
                raise

    def dkim_sign(self, message):
        """Return the raw RFC 2822 text of `message`, prefixed with a
        DKIM-Signature header when ``settings.DKIM_PRIVATE_KEY`` is set.
        """
        # Courtesy of http://djangosnippets.org/snippets/1995/
        raw_message = message.message().as_string()

        if settings.DKIM_PRIVATE_KEY:
            included_headers = ["Content-Type", "MIME-Version",
                                "Content-Transfer-Encoding", "Subject",
                                "From", "To"]
            dkim_header = dkim.sign(raw_message,
                                    settings.DKIM_SELECTOR,
                                    settings.DKIM_DOMAIN,
                                    settings.DKIM_PRIVATE_KEY,
                                    include_headers=included_headers)
            raw_message = dkim_header + raw_message

        return raw_message

    def send_messages(self, email_messages):
        """Sends one or more EmailMessage objects and returns the number
        of email messages sent.
        """
        if not email_messages:
            return

        with self._lock:
            new_conn_created = self.open()

            if not self.connection:
                # open() failed silently.
                return

            num_sent = 0

            for message in email_messages:
                raw_message = self.dkim_sign(message)

                try:
                    self.connection.send_raw_email(
                        source=message.from_email,
                        destinations=message.recipients(),
                        raw_message=raw_message,
                    )
                    num_sent += 1
                except SESConnection.ResponseError:
                    if not self.fail_silently:
                        raise

            # Only close the connection we opened ourselves.
            if new_conn_created:
                self.close()

        return num_sent
| {
"repo_name": "canvasnetworks/canvas",
"path": "common/django_ses/__init__.py",
"copies": "2",
"size": "3303",
"license": "bsd-3-clause",
"hash": -4958418020549362000,
"line_mean": 31.067961165,
"line_max": 155,
"alpha_frac": 0.5464729034,
"autogenerated": false,
"ratio": 4.3981358189081226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011066843035742163,
"num_lines": 103
} |
'''A capitalization action for the Manipulate plugin for Coda'''
import cp_actions as cp
from titlecase import titlecase
from sentencecase import sentencecase
def act(controller, bundle, options):
    '''
    Required action method

    Re-cases the current selection (or line when nothing is selected).
    The target capitalization is chosen with the 'to_case' option.
    '''
    context = cp.get_context(controller)
    target_case = cp.get_option(options, 'to_case', 'upper').lower()
    eol = cp.get_line_ending(context)
    text, sel_range = cp.selection_and_range(context)

    # With an empty selection, operate on the whole current line.
    if sel_range.length == 0:
        text, sel_range = cp.lines_and_range(context)

        # lines_and_range() includes the trailing newline; trim it so
        # the transformation leaves the line ending untouched.
        if text.endswith(eol):
            sel_range = cp.new_range(sel_range.location,
                                     sel_range.length - len(eol))
            text = cp.get_selection(context, sel_range)

    # Dispatch table mapping case names to string transformations.
    transforms = {
        'upper': lambda s: s.upper(),
        'lower': lambda s: s.lower(),
        'title': lambda s: eol.join(titlecase(part) for part in s.split(eol)),
        'sentence': sentencecase,
        'invert': lambda s: s.swapcase(),
    }

    if target_case not in transforms:
        # Unknown case name: leave the document untouched.
        return

    text = transforms[target_case](text)

    # Replace the range and select the resulting insertion.
    cp.insert_text_and_select(context, text, sel_range,
                              cp.new_range(sel_range.location, len(text)))
"repo_name": "bobthecow/ManipulateCoda",
"path": "src/Support/Scripts/Capitalize.py",
"copies": "1",
"size": "1462",
"license": "mit",
"hash": -5602839621924567000,
"line_mean": 33.023255814,
"line_max": 95,
"alpha_frac": 0.633378933,
"autogenerated": false,
"ratio": 3.8072916666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9778563216102736,
"avg_score": 0.03242147671278628,
"num_lines": 43
} |
# Acars data access
# Thanks to Rich Mamrosh @ NOAA for pointing the availability of this data out to me
import re
import netCDF4
import gzip
import io
import tempfile
import os
import numpy as np
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from datetime import datetime
from pymeteo import dynamics, thermo
try:
# For Python 3.0 and later
from urllib import request
except ImportError:
# Fall back to Python 2's urllib2
import urllib2 as request
data_url = "https://madis-data.ncep.noaa.gov/madisPublic1/data/point/acars/netcdf/"
airport_ids = {}
def getAvailableDatasets():
    """Return the list of gzipped ACARS NetCDF resource names linked
    from the MADIS index page at `data_url`, or None if the page
    cannot be fetched.
    """
    # crawl links at https://madis-data.ncep.noaa.gov/madisPublic1/data/point/acars/netcdf/
    req = request.Request(data_url)
    linkMatcher = re.compile(r"\"([0-9_]+\.gz)\"")
    try:
        print("[+] Fetching list of resources available")
        with request.urlopen(req) as f:
            print("[-] Parsing list")
            data = str(f.read())
            return linkMatcher.findall(data)
    except Exception as err:
        # Narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt); report what actually failed
        # instead of a bare "error".
        print("error: {0}".format(err))
def getDataSet(set):
    """Download the gzipped dataset named `set` from `data_url` and
    return a file-like object yielding the decompressed bytes, or
    None if the download fails.
    """
    req = request.Request(data_url + set)
    try:
        print("[+] Fetching dataset {0}".format(set))
        with request.urlopen(req) as f:
            compressedData = io.BytesIO(f.read())  # gzipped data
            print("[-] Decompressing response data")
            return gzip.GzipFile(fileobj=compressedData)
    except Exception as err:
        # Narrowed from a bare `except:`; report the actual failure.
        print("error: {0}".format(err))
def getAirportByCode(airport_id):
    """Translate a numeric MADIS airport id into its code string.

    The id -> code table is loaded lazily from airport_info.dat (next
    to this module) into the module-level `airport_ids` cache on first
    use. Raises KeyError for an unknown id.
    """
    print("[+] Looking up airport id '{0}'".format(airport_id))
    datfile = os.path.join(os.path.dirname(__file__), "airport_info.dat")

    if not airport_ids:
        with open(datfile, "r") as f:
            for line in f:
                fields = line.strip().split()
                airport_ids[int(fields[0])] = fields[1]

    return airport_ids[airport_id]
def processDataSet(data):
    """Convert a decompressed MADIS ACARS NetCDF stream into sounding profiles.

    :param data: file-like object yielding the raw NetCDF bytes (e.g. the
                 ``GzipFile`` returned by :func:`getDataSet`).
    :returns: list of dicts, one per aircraft ascent/descent, with keys
              ``i, n, z, p, th, qv, u, v, lat, lon, airport, time, flag``.
              Descending profiles (flag < 0) have their arrays reversed so
              every profile is ordered bottom-up.  Profiles with no
              timestamps or a zero sounding flag are skipped.
    """
    print("[+] Writing data into temporary file")
    # netCDF4 needs a real file on disk, so spool the stream into a named
    # temporary file.
    tdata = tempfile.NamedTemporaryFile()
    try:
        tdata.write(data.read())
        # Bug fix: without flush() the buffered bytes may not be on disk
        # when the file is reopened by name below.
        tdata.flush()
        print("[-] Data written to {0}".format(tdata.name))
        print("[+] Opening data as NetCDF")
        # (Removed a dead second "data.read()" that consumed the already
        # exhausted stream for nothing.)
        with netCDF4.Dataset(tdata.name, mode='r') as nc:
            print("[-] Dataset open with")
            # [:] materializes (masked) arrays so the dataset can be closed.
            _z = nc["altitude"][:]
            _T = nc["temperature"][:]
            _qv = nc["waterVaporMR"][:]
            windSpeed = nc["windSpeed"][:]
            windDir = nc["windDir"][:]
            _lon = nc["longitude"][:]
            _lat = nc["latitude"][:]
            flag = nc["sounding_flag"][:]
            _airport = nc["sounding_airport_id"][:]
            time = nc["soundingSecs"][:]
    finally:
        tdata.close()  # closing deletes the temporary file
    print("[-] {0} Records".format(len(_z)))
    # conversions
    _p = thermo.p_from_pressure_altitude(_z, _T)
    _u, _v = dynamics.wind_deg_to_uv(windDir, windSpeed)
    _th = thermo.theta(_T, _p)
    # Split into individual soundings wherever the timestamp changes.
    # NOTE(review): the original comment said "when the flag changes sign"
    # but the split key is soundingSecs -- confirm intent.
    splits = np.where(np.diff(time))[0] + 1
    _z = np.split(_z, splits)
    _p = np.split(_p, splits)
    _th = np.split(_th, splits)
    _qv = np.split(_qv, splits)
    _u = np.split(_u, splits)
    _v = np.split(_v, splits)
    _lat = np.split(_lat, splits)
    _lon = np.split(_lon, splits)
    _airport = np.split(_airport, splits)
    time = np.split(time, splits)
    flag = np.split(flag, splits)
    print("[-] Found {0} profiles".format(len(_z)))
    # re-shape data
    outputData = []
    for i in range(len(_z)):
        ts = time[i].compressed()
        if len(ts) == 0:
            # profiles without timestamps invalid?
            continue
        profileDir = flag[i][0]
        if profileDir == 0:
            continue
        z = _z[i].filled()
        p = _p[i].filled()
        th = _th[i].filled()
        qv = _qv[i].filled()
        u = _u[i].filled()
        v = _v[i].filled()
        lat = _lat[i].filled()
        lon = _lon[i].filled()
        airport = getAirportByCode(_airport[i][0])
        # Descending profiles are stored top-down; reverse to bottom-up.
        profileData = {
            "i": i,
            "n": len(z),
            "z": z if profileDir > 0 else z[::-1],
            "p": p if profileDir > 0 else p[::-1],
            "th": th if profileDir > 0 else th[::-1],
            "qv": qv if profileDir > 0 else qv[::-1],
            "u": u if profileDir > 0 else u[::-1],
            "v": v if profileDir > 0 else v[::-1],
            "lat": lat if profileDir > 0 else lat[::-1],
            "lon": lon if profileDir > 0 else lon[::-1],
            "airport": airport,
            "time": datetime.utcfromtimestamp(ts.mean()).strftime("%H%MZ"),
            "flag": profileDir
        }
        outputData.append(profileData)
    return outputData
| {
"repo_name": "cwebster2/pyMeteo",
"path": "pymeteo/data/acars.py",
"copies": "1",
"size": "4859",
"license": "bsd-3-clause",
"hash": 3369926280863791600,
"line_mean": 32.7430555556,
"line_max": 91,
"alpha_frac": 0.5361185429,
"autogenerated": false,
"ratio": 3.5415451895043732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45776637324043734,
"avg_score": null,
"num_lines": null
} |
"""A caseless dictionary implementation."""
try:
from collections import MutableMapping
except ImportError:
from intessa.vendor.abcoll import MutableMapping
class CaselessDictionary(MutableMapping):
    """
    A dictionary-like object which ignores but preserves the case of strings.

    Example::

        >>> cdict = CaselessDictionary()

    Access is case-insensitive::

        >>> cdict['a'] = 1
        >>> cdict['A']
        1

    As is writing::

        >>> cdict['key'] = 123
        >>> cdict['KeY'] = 456
        >>> cdict['key']
        456

    And deletion::

        >>> del cdict['A']
        >>> 'a' in cdict
        False
        >>> 'A' in cdict
        False

    However, the case of keys is preserved (the case of overridden keys will be
    the first one which was set)::

        >>> cdict['aBcDeF'] = 1
        >>> sorted(list(cdict))
        ['aBcDeF', 'key']
    """

    def __init__(self, *args, **kwargs):
        """Build the mapping from the same arguments dict() accepts.

        String keys are wrapped in a CaselessString subclass so hashing
        and equality ignore case while the original spelling survives.
        """
        self._dict = {}
        temp_dict = dict(*args, **kwargs)
        # NOTE: iteritems()/basestring make this Python 2 only.
        for key, value in temp_dict.iteritems():
            if isinstance(key, basestring):
                key = CaselessString.make_caseless(key)
            self._dict[key] = value

    def __repr__(self):
        return '<CaselessDictionary(%r)>' % self._dict

    def __getitem__(self, key):
        # Lookup goes through the caseless wrapper, so any casing matches.
        return self._dict[CaselessString.make_caseless(key)]

    def __setitem__(self, key, value):
        self._dict[CaselessString.make_caseless(key)] = value

    def __delitem__(self, key):
        del self._dict[CaselessString.make_caseless(key)]

    def __contains__(self, key):
        return CaselessString.make_caseless(key) in self._dict

    def __iter__(self):
        # Iterates the stored (case-preserving) keys.
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)
class CaselessString(object):
    """A mixin to make a string subclass case-insensitive in dict lookups."""

    def __hash__(self):
        # Hash on the lowercased form so 'A' and 'a' land in the same bucket.
        return hash(self.lower())

    def __eq__(self, other):
        return self.lower() == other.lower()

    def __cmp__(self, other):
        # Python 2 only; ordering also ignores case.
        return self.lower().__cmp__(other.lower())

    @classmethod
    def make_caseless(cls, string):
        """Wrap *string* in the matching caseless subclass (str vs unicode)."""
        if isinstance(string, unicode):
            return CaselessUnicode(string)
        return CaselessStr(string)


class CaselessStr(CaselessString, str):
    # Caseless byte-string (Python 2 str).
    pass


class CaselessUnicode(CaselessString, unicode):
    # Caseless unicode string (Python 2 only).
    pass
| {
"repo_name": "pombredanne/intessa",
"path": "lib/intessa/utils/caseless_dict.py",
"copies": "1",
"size": "2389",
"license": "unlicense",
"hash": -1534013194152289800,
"line_mean": 22.4215686275,
"line_max": 79,
"alpha_frac": 0.5801590624,
"autogenerated": false,
"ratio": 3.955298013245033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 102
} |
# A catering company has hired you to help with organizing and preparing
# customer's orders. You are given a list of each customer's desired items, and
# must write a program that will count the number of each items needed for the
# chefs to prepare. The items that a customer can order are: salad, hamburger,
# and water.
#
# Write a function called item_order that takes as input a string named order.
# The string contains only words for the items the customer can order separated
# by one space. The function returns a string that counts the number of each
# item and consolidates them in the following order: salad:[# salad]
# hamburger:[# hambruger] water:[# water]
#
# If an order does not contain an item, then the count for that item is 0.
# Notice that each item is formatted as
# [name of the item][a colon symbol][count of the item] and all item groups are
# separated by a space.
#
# For example:
#
# If order = "salad water hamburger salad hamburger" then the function returns
# "salad:2 hamburger:2 water:1"
#
# If order = "hamburger water hamburger" then the function returns
# "salad:0 hamburger:2 water:1"
def item_order(order):
    """Summarize an order string as 'salad:<n> hamburger:<n> water:<n>'.

    *order* is a space-separated sequence of the words 'salad',
    'hamburger' and 'water'; items absent from the order count as 0.
    """
    counts = {item: order.count(item)
              for item in ('salad', 'hamburger', 'water')}
    return 'salad:{salad:d} hamburger:{hamburger:d} water:{water:d}'.format(
        **counts)
"repo_name": "emyarod/OSS",
"path": "1_intro/6.00.1x/Week 2/Problem Set 1/Problem 3 - Counting and Grouping.py",
"copies": "1",
"size": "1417",
"license": "mit",
"hash": 7652619145319307000,
"line_mean": 43.3125,
"line_max": 79,
"alpha_frac": 0.7297106563,
"autogenerated": false,
"ratio": 3.3419811320754715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9565441788375472,
"avg_score": 0.00125,
"num_lines": 32
} |
"""accelerations for LCP algorithms"""
import numpy as np
import math
from itertools import izip
def crossing(x, y):
    """Yield, per component, the parameter t where (1-t)*xi + t*yi hits zero.

    Components with xi == yi (no motion) yield 1.  Uses itertools.izip,
    so this is Python 2 only.
    """
    return ( (-xi / (yi - xi)) if (xi - yi != 0) else 1 for xi, yi in izip(x, y) )
def max_step(x, y):
    """Largest step fraction from x towards y that keeps components positive.

    Returns ``(index, alpha)`` where ``alpha`` in (0, 1] is the largest
    usable fraction of the full step and ``index`` is the component that
    limits it (0 with alpha == 1.0 when nothing constrains the step).
    """
    best_idx, best = 0, 1.0
    for idx, (xi, yi) in enumerate(zip(x, y)):
        delta = yi - xi
        if delta == 0:
            continue
        frac = -xi / delta
        if 0 < frac < best:
            best = frac
            best_idx = idx
    return best_idx, best
def norm(x): return math.sqrt(x.dot(x))
def cr_pjacobi( x, (M, q), **kwargs ):
    """CR-PJacobi: projected Jacobi iteration accelerated with a
    conjugate-residual-style recurrence for the LCP (M, q).

    Generator; mutates x in place and yields once per iteration.
    kwargs: 'diag' (preconditioner diagonal, default diag(M)),
    'omega' (relaxation step, default 2/n).  Python 2 only
    (tuple parameter, print statement).
    """
    n = q.size
    z = np.zeros(n)
    r = np.zeros(n)
    p = np.zeros(n)
    Mx = np.zeros(n)
    Mz = np.zeros(n)
    Mp = np.zeros(n)
    mask = np.zeros(n)
    y = np.zeros(n)
    d = kwargs.get('diag', np.array(np.diag(M)) )
    omega = kwargs.get('omega', 2.0 / n)
    old_x = np.zeros(n)
    old_Mx = np.zeros(n)
    zMz = 1
    beta = 1
    k = 0
    Mx[:] = M.dot(x)
    def restart():
        # drop the accumulated search direction
        return np.zeros(n), np.zeros(n)
    while True:
        # backup
        old_x[:] = x
        old_Mx[:] = Mx
        # jacobi
        x -= omega * (q + Mx) / d
        mask = x > 0
        # projection
        x[:] = mask * x
        yield
        # active-set change during jacobi ?
        if any( mask != (old_x > 0) ):
            p[:], Mp[:] = restart()
            Mx[:] = M.dot(x)
        else:
            # preconditioned gradient
            z[:] = (x - old_x) / omega
            # this is the only matrix-vector product. note: it is
            # *much* more numerically stable to compute Mx from Mz
            # than the contrary
            Mz[:] = M.dot(z)
            Mx[:] += omega * Mz
            old_zMz = zMz
            zMz = z.dot( Mz )
            if zMz <= 0:
                break
            beta = zMz / old_zMz
            p[:] = beta * p + z
            Mp[:] = beta * Mp + Mz
            y[:] = mask * Mp / d
            alpha = zMz / Mp.dot(y)
            y[:] = old_x + alpha * p
            # truncate the accelerated step so x stays feasible
            index, step = max_step(x, y)
            x[:] += step * (y - x)
            Mx[:] += step * (alpha * Mp - omega * Mz)
            if step < 1:
                p[:], Mp[:] = restart()
        k += 1
    print 'breakdown, zMz:', zMz
def cg_pjacobi( x, (M, q), **kwargs ):
    """CG-PJacobi: projected Jacobi iteration accelerated with a
    conjugate-gradient-style recurrence for the LCP (M, q).

    Generator; mutates x in place and yields once per iteration.
    kwargs: 'diag' (preconditioner diagonal, default diag(M)),
    'omega' (relaxation step, default 2/n).  Python 2 only.
    """
    n = q.size
    z = np.zeros(n)
    r = np.zeros(n)
    p = np.zeros(n)
    Mx = np.zeros(n)
    Mz = np.zeros(n)
    Mp = np.zeros(n)
    mask = np.zeros(n)
    y = np.zeros(n)
    d = kwargs.get('diag', np.array(np.diag(M)) )
    if 'diag' in kwargs:
        print 'using mass-splitting prec'
    omega = kwargs.get('omega', 2.0 / n)
    old_x = np.zeros(n)
    old_Mx = np.zeros(n)
    zr = 1
    beta = 1
    k = 0
    Mx[:] = M.dot(x)
    def restart():
        # drop the accumulated search direction
        return np.zeros(n), np.zeros(n)
    while True:
        # backup
        old_x[:] = x
        old_Mx[:] = Mx
        # jacobi
        x -= omega * (q + Mx) / d
        mask = x > 0
        # projection
        x[:] = mask * x
        yield
        # active-set change during jacobi ?
        if any( mask != (old_x > 0) ):
            p[:], Mp[:] = restart()
            Mx[:] = M.dot(x)
        else:
            # preconditioned gradient
            z[:] = (x - old_x) / omega
            # gradient
            r[:] = d * z
            # this is the only matrix-vector product. note: it is
            # *much* more numerically stable to compute Mx from Mz
            # than the contrary
            Mz[:] = M.dot(z)
            Mx[:] += omega * Mz
            old_zr = zr
            zr = z.dot(r)
            if zr <= 0: break
            beta = zr / old_zr
            p[:] = beta * p + z
            Mp[:] = beta * Mp + Mz
            alpha = zr / p.dot(Mp)
            y[:] = old_x + alpha * p
            # truncate the accelerated step so x stays feasible
            index, step = max_step(x, y)
            x[:] += step * (y - x)
            Mx[:] += step * (alpha * Mp - omega * Mz)
            if step < 1:
                p[:], Mp[:] = restart()
                pass
        k += 1
    print 'breakdown, zr:', zr
def doubidou( x, (M, q), **kwargs ):
    """Projected Jacobi with a nonlinear CG-like momentum term for the
    LCP (M, q); restarts the direction whenever beta exceeds 1.

    Generator; mutates x in place, yields once per iteration, and stops
    when the curvature z'Mz becomes non-positive.  Python 2 only.
    """
    n = q.size
    z = np.zeros(n)
    r = np.zeros(n)
    p = np.zeros(n)
    Mx = np.zeros(n)
    Mz = np.zeros(n)
    Mp = np.zeros(n)
    mask = np.zeros(n)
    y = np.zeros(n)
    d = kwargs.get('diag', np.diag(M) )
    omega = kwargs.get('omega', 2.0 / n)
    old_x = np.zeros(n)
    old_Mx = np.zeros(n)
    zMz = 1
    k = 0
    Mx[:] = M.dot(x)
    def restart():
        # drop the accumulated search direction
        return np.zeros(n), np.zeros(n)
    while zMz > 0:
        # backup
        old_x[:] = x
        old_Mx[:] = Mx
        # jacobi
        x -= omega * (q + Mx) / d
        mask = x > 0
        # projection
        x[:] = mask * x
        yield
        # preconditioned gradient
        z[:] = (x - old_x) / omega
        # this is the only matrix-vector product. note: it is
        # *much* more numerically stable to compute Mx from Mz
        # than the contrary
        Mz[:] = M.dot(z)
        Mx[:] += omega * Mz
        old_zMz = zMz
        zMz = z.dot( Mz )
        if zMz <= 0: break
        beta = zMz / old_zMz
        if beta > 1:
            p[:], Mp[:] = restart()
        else:
            p[:] = beta * p + z
            Mp[:] = beta * Mp + Mz
        # NOTE(review): block structure reconstructed from a dump with lost
        # indentation -- these updates are assumed to run every iteration.
        x[:] = old_x + omega * p
        Mx[:] = old_Mx + omega * Mp
        k += 1
def nlnscg_ls( solver, **kwargs ):
    """Wrap *solver* with NLNSCG acceleration plus a one-dimensional
    least-squares line search on the complementarity residual.

    Returns a generator-factory with the same (x, (M, q)) signature as
    *solver*.  Python 2 only (tuple parameters).
    """
    def res(x, (M, q)):
        n = q.size
        z = np.zeros(n)
        p = np.zeros(n)
        old = np.zeros(n)
        diag = kwargs.get('diag', np.diag(M))
        z2 = 1
        sub = solver(x, (M, q), **kwargs)
        old[:] = x
        for error in sub:
            yield error
            # step produced by the underlying solver
            z[:] = (x - old)
            old_z2 = z2
            z2 = z.dot(diag * z)
            if z2 == 0: break
            beta = z2 / old_z2
            if beta > 1:
                # restart: drop the momentum direction
                p[:] = np.zeros(n)
            else:
                p[:] = beta * p + z
                x[:] = old + p
                # active-set residual before/after the accelerated step
                f_old = (old > 0) * (M.dot(old) + q)
                f_new = (x > 0) * (M.dot(x) + q)
                df = (f_new - f_old)
                eps = 0
                # least-squares step length along p
                a = (eps - f_old.dot(df)) / (eps + df.dot(df))
                x[:] = old + a * p
            old[:] = x
    res.__doc__ = "NLNSCG + {} ".format( solver.__doc__ )
    return res
def nlnscg( solver, **kwargs ):
    """Wrap *solver* with plain NLNSCG (nonlinear nonsmooth CG) momentum.

    Returns a generator-factory with the same (x, (M, q)) signature as
    *solver*; the direction is restarted whenever beta exceeds 1.
    Python 2 only (tuple parameters).
    """
    def res(x, (M, q)):
        n = q.size
        z = np.zeros(n)
        p = np.zeros(n)
        old = np.zeros(n)
        d = kwargs.get('diag', np.ones(n))
        z2 = 1
        sub = solver(x, (M, q), **kwargs)
        old[:] = x
        for error in sub:
            # step produced by the underlying solver
            z[:] = (x - old)
            old_z2 = z2
            z2 = z.dot( d * z )
            if z2 == 0: break
            beta = z2 / old_z2
            if beta > 1:
                # restart: drop the momentum direction
                p[:] = np.zeros(n)
            else:
                p[:] = beta * p + z
                x[:] = old + p
            old[:] = x
            yield
    res.__doc__ = "NLNSCG + {} ".format( solver.__doc__ )
    return res
def andy( x, (M, q), **kwargs):
    """Projected Jacobi combined with a depth-1 Anderson-style
    extrapolation on the active-set residual, for the LCP (M, q).

    Generator; mutates x in place and yields once per iteration.
    kwargs: 'diag' (default diag(M)), 'omega' (default 2/n),
    'eps' (regularization of the mixing coefficient, default 0).
    Python 2 only (tuple parameter).
    """
    n = q.size
    m = 2
    old_x = np.zeros(n)
    g = np.zeros(n)
    old_g = np.zeros(n)
    Mg = np.zeros(n)
    old_Mg = np.zeros(n)
    Mx = np.zeros(n)
    old_Mx = np.zeros(n)
    f = np.zeros(n)
    old_f = np.zeros(n)
    df = np.zeros(n)
    d = kwargs.get('diag', np.diag(M))
    omega = kwargs.get('omega', 2.0 / n)
    eps = kwargs.get('eps', 0.0)
    Mx[:] = M.dot(x)
    while True:
        # jacobi
        x[:] -= omega * (q + Mx) / d
        # projection
        x *= (x > 0)
        yield
        # backup jacobi point
        old_g[:] = g
        g[:] = x
        Mx[:] = M.dot(x)
        old_Mg[:] = Mg
        Mg[:] = Mx
        # anderson
        old_f[:] = f
        f[:] = (x > 0) * (q + Mx)
        df[:] = f - old_f
        df2 = df.dot(df)
        # mixing coefficient; fall back to the plain iterate when df == 0
        alpha = 1 if df2 == 0 else (eps - old_f.dot(df)) / (eps + df2)
        x[:] = old_g + alpha * ( g - old_g )
        Mx[:] = old_Mg + alpha * (Mg - old_Mg)
def anderson( solver, **kwargs ):
    """Wrap *solver* with depth-m Anderson acceleration on the active-set
    residual of the LCP (M, q).

    kwargs: 'm' (mixing depth, default 1), 'metric' (diagonal weighting,
    default ones), 'reset' (restart on active-set changes and truncate
    steps to stay feasible, default False), 'omega'.  Python 2 only.
    """
    m = kwargs.get('m', 1.0 )
    def res(x, (M, q)):
        n = q.size
        sub = solver(x, (M, q), **kwargs)
        # circular histories of iterates (g) and residuals (f)
        g = np.zeros( (n, m) )
        f = np.zeros( (n, m) )
        k = np.zeros( (m, m) )
        tmp = np.zeros(m)
        ones = np.ones(m)
        delta = np.zeros(n)
        d = kwargs.get('metric', np.ones(n) )
        reset = kwargs.get('reset', False)
        omega = kwargs.get('omega', 1.0)
        diag = np.diag(M)
        index = 0
        g[:, m - 1] = x
        flag = False
        skip = False
        diag_sqrt = np.sqrt( diag )
        zob2 = 1
        it = 0
        for error in sub:
            yield error
            prev = (index + m - 1) % m
            x_prev = g[:, prev]
            primal = (M.dot(x) + q)
            mask = (x > 0)
            # residual restricted to the active set
            delta[:] = mask * primal
            old_zob2 = zob2
            zob = x - g[:, prev]
            zob = delta
            zob2 = zob.dot( zob / d )
            beta = zob2 / old_zob2
            # restart condition: the active set changed since last iterate
            cond = any( (x_prev > 0) != (x > 0) )
            if reset and cond:
                g = np.zeros( (n, m) )
                f = np.zeros( (n, m) )
                k = np.zeros( (m, m) )
                flag = False
                skip = True
                print it, 'restart'
            # record the current iterate/residual in the circular buffers
            g[:, index] = x
            f[:, index] = delta
            k[:, index] = f.transpose().dot( f[:, index] / d )
            k[index, :] = k[:, index].transpose()
            k[:, :] = f.transpose().dot(f)
            eps = 0 # 1e-8
            k[index, index] += eps
            rhs = np.copy( ones )
            rhs[index] += eps
            # solve the (regularized) normal equations for mixing weights,
            # then normalize them to sum to one
            tmp[:] = np.linalg.lstsq(k, ones)[0]
            tmp /= sum(tmp)
            delta[:] = g.dot(tmp)
            # NOTE(review): structure reconstructed from a dump with lost
            # indentation; skip/flag bookkeeping assumed inside this branch.
            if reset and not skip:
                # truncate the mixed step to remain feasible
                _, a = max_step(x, delta)
                x[:] += a * (delta - x)
                skip = False
                if a < 1: flag = True
            else:
                x[:] = delta
            index = (index + 1) % m
            it += 1
    res.__doc__ = "anderson{}({}) + {}".format('*' if 'metric' in kwargs else '',
                                               m,
                                               solver.__doc__)
    return res
def bokhoven(x, (M, q), **kwargs):
    '''Van Bokhoven's fixed-point iteration for the LCP (M, q) in the
    variable z, with x recovered as z + |z|.

    Generator; mutates x in place and yields once per iteration.
    kwargs: 'prec' (splitting diagonal E, default diag(M)).
    Python 2 only (tuple parameter).
    '''
    diag = np.diag(M)
    prec = kwargs.get('prec', diag)
    # (E + M) is inverted once up front
    EpM = np.diag(prec) + M
    EpMinv = np.linalg.inv(EpM)
    n = q.size
    z = np.zeros(n)
    while True:
        zabs = np.abs(z)
        # fixed-point map z <- -|z| + (E + M)^{-1} (2 E |z| - q)
        z[:] = -zabs + EpMinv.dot(2 * prec * zabs - q)
        x[:] = z + np.abs(z)
        yield
def bokhoven_gs(x, (M, q), **kwargs):
'''bokhoven gs'''
n = q.size
diag = np.diag(M)
prec = kwargs.get('prec', diag)
# prec = np.ones(n)
EpM = np.diag(prec) + M
EpMinv = np.linalg.inv(EpM)
z = np.zeros(n)
zabs = np.abs(z)
while True:
for i in range(n):
# z[i] = -zabs[i] + EpMinv[i, :].dot(2 * prec * zabs - q)
rhs = EpMinv[i, :].dot(2 * prec * zabs - q) - (2 * EpMinv[i, i] * prec[i] * zabs[i])
factor = lambda s: 1.0 + s * (1.0 - 2.0 * EpMinv[i, i] * prec[i])
fp = factor(1.0)
fm = factor(-1.0)
assert fp > 0
assert fm > 0
zp = rhs / fp
zm = rhs / fm
zp_ok = zp > 0
zm_ok = zm < 0
if zp_ok and zm_ok:
print('both')
if not zm_ok and zm_ok:
print('none')
if zp_ok: z[i] = zp
elif zm_ok: z[i] = zm
else: z[i] = 0
zabs[i] = abs(z[i])
x[:] = z + zabs
yield
def bokhoven_chol(x, (M, q), **kwargs):
    '''Van Bokhoven's iteration using a Cholesky factorization
    (E + M) = L L^T, with the two triangular solves performed
    component-wise so updated |z| values are used immediately.

    Generator; mutates x in place and yields once per sweep.
    kwargs: 'prec' (splitting diagonal E, default diag(M)).
    Python 2 only (tuple parameter).
    '''
    n = q.size
    diag = np.diag(M)
    prec = kwargs.get('prec', diag)
    EpM = np.diag(prec) + M
    L = np.linalg.cholesky(EpM)
    z = np.zeros(n)
    zabs = np.abs(z)
    Linv = np.linalg.inv(L)
    u = np.zeros(n)
    while True:
        # forward substitution: L u = 2 E |z| - q
        for i in range(n):
            u[i] = ((2.0 * prec[i] * zabs[i] - q[i]) - L[i, :i].dot(u[:i])) / L[i, i]
        # backward sweep: refresh u with the latest |z|, then solve for z
        for j in range(n):
            i = n - 1 - j
            u[i] += ((2.0 * prec[i] * zabs[i] - q[i]) - L[i, :].dot(u)) / L[i, i]
            z[i] = -zabs[i] + (u[i] - L.T[i, i+1:].dot(z[i+1:] + zabs[i+1:])) / L[i, i]
            zabs[i] = abs(z[i])
        x[:] = z + zabs
        yield
| {
"repo_name": "maxime-tournier/lcpy",
"path": "accel.py",
"copies": "1",
"size": "15125",
"license": "mit",
"hash": -6596934823762542000,
"line_mean": 20.6690544413,
"line_max": 96,
"alpha_frac": 0.368661157,
"autogenerated": false,
"ratio": 3.17885666246322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40475178194632194,
"avg_score": null,
"num_lines": null
} |
# acceleration sine sweep signal generation
import matplotlib.pyplot as plt
from math import sin, cos, pi
# acc_sweep: generate constant magnitude acceleration sine sweep signal
# ---------------------------------------------------------------------
# step - signal time step
# stop - duration of the signal
# lofq - low frequency for the sweep
# hifq - high frequency for the sweep
# amag - acceleration magnitude
# acc_plot - path to acceleration signal plot (time-acc)
# vel_plot - path to velocity signal plot (time-velo)
# dsp_plot - path to displacement signal plot (time-disp)
# dsp_envelope - path to displacement signal envelop (frequency-disp)
# -------------------------------------------------------------------------------
# returned: (vt, vd, vv, va), where
# - vt is a list of time instants
# - vd is a list of displacement values
# - vv is a list of velocity values
# - va is a list of acceleration values, at those time instants
# -------------------------------------------------------------------------------
def acc_sweep (step, stop, lofq, hifq, amag, acc_plot = None, vel_plot = None, dsp_plot = None, dsp_envelope = None):
    """Generate a constant-magnitude acceleration sine sweep and integrate
    it into velocity and displacement histories.

    See the module header for the parameter descriptions; the optional
    *_plot / *_envelope arguments are file paths for diagnostic figures.
    Returns (vt, vd, vv, va): time, displacement, velocity, acceleration.
    """
    t = 0.0
    v = 0.0
    va = []
    vv = []
    vf = []
    extend = 0.0
    # integrate the swept-sine acceleration into velocity; once the
    # velocity first decreases, extend the signal by that time so the
    # later peak-averaging has enough oscillations to work with
    while t < stop+extend:
        x = t + step/2. # mid-step time
        a = amag * sin (2*pi*(lofq+(hifq-lofq)*x/stop)*x) # mid-step acceleration
        v = v + a * step # mid-step integration of dv / dt = a into v
        va.append (a)
        vv.append (v)
        vf.append (lofq + (hifq-lofq)*(t/stop))
        if extend == 0.0 and len(vv) > 2 and vv[-1] < vv[-2]: extend = t
        t += step
    # find stabilized velocity level
    # by averaging the last 5 minima and maxima
    imax = 0.0
    vmax = 0.0
    imin = 0.0
    vmin = 0.0
    i = len(vv)-2
    while i > 0:
        if vv[i-1] < vv[i] and vv[i] > vv[i+1]:
            imax = imax + 1.0
            vmax = vmax + vv[i]
        if vv[i-1] > vv[i] and vv[i] < vv[i+1]:
            imin = imin + 1.0
            vmin = vmin + vv[i]
        if imax == 5.0 and imin == 5.0: break
        i = i - 1
    # mean of the 10 collected extrema
    vlevel = 0.1*(vmax+vmin)
    # find when this level is crossed from the start
    i = 0
    while vv[i] < vlevel: i = i + 1
    # trim histories to this moment
    while i > 0:
        va.pop(0)
        vv.pop(0)
        vf.pop(0)
        i -= 1
    # now produce displacement and time history
    vt = []
    vd = []
    d = 0.0
    t = 0.0
    for v in vv:
        vt.append (t)
        vd.append (d)
        t = t + step
        d = d + v * step # integration of dd / dt = v
    # displacement has positive drift => find tangens of the positive drift angle
    i = len(vd)-1
    while vd[i-1] > vd[i]: i -= 1 # first maximum
    while vd[i-1] < vd[i]: i -= 1 # previous minimum
    j = i
    while vd[j-1] > vd[i]: j += 1 # previous maximum
    # shift velocity down by the tangens of the drift angle
    vshift = (vd[i]+vd[j]) / (vt[i]+vt[j])
    for i in range (0, len(vv)): vv[i] -= vshift
    # after velocity has been shifted down, produce displacement envelope
    vd = []
    d = 0.0
    for v in vv:
        d = d + v * step # integration of dd / dt = v
        vd.append (d)
    # optional diagnostic plots (matplotlib only touched when requested)
    if acc_plot != None:
        plt.clf ()
        plt.plot (vt, va)
        plt.xlim ((vt[0], vt[-1]))
        plt.xlabel ('time $(s)$')
        plt.ylabel ('acceleration $(m/s^2)$')
        plt.savefig (acc_plot)
    if vel_plot != None:
        plt.clf ()
        plt.plot (vt, vv)
        plt.xlim ((vt[0], vt[-1]))
        plt.xlabel ('time $(s)$')
        plt.ylabel ('velocity $(m/s)$')
        plt.savefig (vel_plot)
    if dsp_plot != None:
        plt.clf ()
        plt.plot (vt, vd)
        plt.xlim ((vt[0], vt[-1]))
        plt.xlabel ('time $(s)$')
        plt.ylabel ('displacement $(m)$')
        plt.savefig (dsp_plot)
    if dsp_envelope != None:
        plt.clf ()
        plt.plot (vf, vd)
        plt.xlim ((vf[0], vf[-1]))
        plt.xlabel ('frequency $(Hz)$')
        plt.ylabel ('displacement $(m)$')
        plt.savefig (dsp_envelope)
    return (vt, vd, vv, va)
| {
"repo_name": "tkoziara/parmec",
"path": "python/acc_sweep.py",
"copies": "1",
"size": "3817",
"license": "mit",
"hash": 3254415513083997000,
"line_mean": 28.1374045802,
"line_max": 117,
"alpha_frac": 0.5459785172,
"autogenerated": false,
"ratio": 2.9226646248085757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8778831644117887,
"avg_score": 0.03796229957813763,
"num_lines": 131
} |
"""Accelerator class."""
import numpy as _np
import mathphys as _mp
import trackcpp as _trackcpp
from . import elements as _elements
from .utils import interactive as _interactive
class AcceleratorException(Exception):
    """Raised for invalid operations on an :class:`Accelerator`."""
@_interactive
class Accelerator(object):
    """Python wrapper around a ``trackcpp.Accelerator``.

    Behaves like a mutable sequence of :class:`pyaccel.elements.Element`
    objects while exposing beam/lattice properties (energy, rigidity,
    cavity/radiation/vacuum-chamber flags, ...).  All state lives in the
    underlying ``trackcpp_acc`` object.
    """

    # used by __setattr__ to prevent creation of new attributes after init
    __isfrozen = False

    def __init__(self, **kwargs):
        """Build an accelerator.

        Accepted kwargs: ``accelerator`` (trackcpp or pyaccel Accelerator
        to copy/wrap), ``lattice``, ``energy`` [eV], ``harmonic_number``,
        ``radiation_on``, ``cavity_on``, ``vchamber_on``.  If energy is
        unset (0), the beam is initialized at rest energy (gamma = 1).
        """
        self.trackcpp_acc = self._init_accelerator(kwargs)
        self._init_lattice(kwargs)
        if 'energy' in kwargs:
            self.trackcpp_acc.energy = kwargs['energy']
        if 'harmonic_number' in kwargs:
            self.trackcpp_acc.harmonic_number = kwargs['harmonic_number']
        if 'radiation_on' in kwargs:
            self.trackcpp_acc.radiation_on = kwargs['radiation_on']
        if 'cavity_on' in kwargs:
            self.trackcpp_acc.cavity_on = kwargs['cavity_on']
        if 'vchamber_on' in kwargs:
            self.trackcpp_acc.vchamber_on = kwargs['vchamber_on']
        if self.trackcpp_acc.energy == 0:
            self._brho, self._velocity, self._beta, self._gamma, \
                self.trackcpp_acc.energy = \
                _mp.beam_optics.beam_rigidity(gamma=1.0)
        else:
            # beam_rigidity works in GeV; trackcpp stores energy in eV
            self._brho, self._velocity, self._beta, self._gamma, energy = \
                _mp.beam_optics.beam_rigidity(energy=self.energy/1e9)
            self.trackcpp_acc.energy = energy * 1e9
        self.__isfrozen = True

    @property
    def length(self):
        """Return lattice length [m]."""
        return self.trackcpp_acc.get_length()

    @property
    def energy(self):
        """Return beam energy [eV]."""
        return self.trackcpp_acc.energy

    @energy.setter
    def energy(self, value):
        """Set beam energy [eV], updating all derived relativistic factors."""
        self._brho, self._velocity, self._beta, self._gamma, energy = \
            _mp.beam_optics.beam_rigidity(energy=value/1e9)
        self.trackcpp_acc.energy = energy * 1e9

    @property
    def gamma_factor(self):
        """Return beam relativistic gamma factor."""
        return self._gamma

    @gamma_factor.setter
    def gamma_factor(self, value):
        """Set beam relativistic gamma factor."""
        self._brho, self._velocity, self._beta, self._gamma, energy = \
            _mp.beam_optics.beam_rigidity(gamma=value)
        self.trackcpp_acc.energy = energy * 1e9

    @property
    def beta_factor(self):
        """Return beam relativistic beta factor."""
        return self._beta

    @beta_factor.setter
    def beta_factor(self, value):
        """Set beam relativistic beta factor."""
        self._brho, self._velocity, self._beta, self._gamma, energy = \
            _mp.beam_optics.beam_rigidity(beta=value)
        self.trackcpp_acc.energy = energy * 1e9

    @property
    def velocity(self):
        """Return beam velocity [m/s]."""
        return self._velocity

    @velocity.setter
    def velocity(self, value):
        """Set beam velocity [m/s]."""
        self._brho, self._velocity, self._beta, self._gamma, energy = \
            _mp.beam_optics.beam_rigidity(velocity=value)
        self.trackcpp_acc.energy = energy * 1e9

    @property
    def brho(self):
        """Return beam rigidity [T.m]."""
        return self._brho

    @brho.setter
    def brho(self, value):
        """Set beam rigidity [T.m]."""
        self._brho, self._velocity, self._beta, self._gamma, energy = \
            _mp.beam_optics.beam_rigidity(brho=value)
        self.trackcpp_acc.energy = energy * 1e9

    @property
    def harmonic_number(self):
        """Return accelerator harmonic number."""
        return self.trackcpp_acc.harmonic_number

    @harmonic_number.setter
    def harmonic_number(self, value):
        """Set accelerator harmonic number."""
        if not isinstance(value, int) or value < 1:
            raise AcceleratorException(
                'harmonic number has to be a positive integer')
        self.trackcpp_acc.harmonic_number = value

    @property
    def cavity_on(self):
        """Return cavity on state."""
        return self.trackcpp_acc.cavity_on

    @cavity_on.setter
    def cavity_on(self, value):
        """Set cavity on state (requires a valid harmonic number)."""
        if self.trackcpp_acc.harmonic_number < 1:
            raise AcceleratorException('invalid harmonic number')
        self.trackcpp_acc.cavity_on = value

    @property
    def radiation_on(self):
        """Return radiation on state."""
        return self.trackcpp_acc.radiation_on

    @radiation_on.setter
    def radiation_on(self, value):
        """Set radiation on state."""
        self.trackcpp_acc.radiation_on = value

    @property
    def vchamber_on(self):
        """Return vacuum chamber on state."""
        return self.trackcpp_acc.vchamber_on

    @vchamber_on.setter
    def vchamber_on(self, value):
        """Set vacuum chamber on state."""
        self.trackcpp_acc.vchamber_on = value

    def pop(self, index):
        """Remove and return the element at *index*."""
        elem = self[index]
        del self[index]
        return elem

    def append(self, value):
        """Append an Element to the lattice."""
        if not isinstance(value, _elements.Element):
            raise TypeError('value must be Element')
        self.trackcpp_acc.lattice.append(value.trackcpp_e)

    def extend(self, value):
        """Append all elements of another Accelerator to this lattice."""
        if not isinstance(value, Accelerator):
            raise TypeError('value must be Accelerator')
        if value is self:
            # copy first so iterating is safe while self grows
            value = Accelerator(accelerator=value)
        for el in value:
            self.append(el)

    # NOTE: make the class objects pickalable
    def __getstate__(self):
        """Serialize via trackcpp's flat-file text representation."""
        stri = _trackcpp.String()
        _trackcpp.write_flat_file_wrapper(stri, self.trackcpp_acc, False)
        return stri.data

    def __setstate__(self, stridata):
        """Restore from the flat-file text produced by __getstate__."""
        stri = _trackcpp.String(stridata)
        acc = Accelerator()
        _trackcpp.read_flat_file_wrapper(stri, acc.trackcpp_acc, False)
        self.trackcpp_acc = acc.trackcpp_acc

    def __setattr__(self, key, value):
        """Reject new attributes once the instance is frozen (post-init)."""
        if self.__isfrozen and not hasattr(self, key):
            raise AcceleratorException("%r is a frozen class" % self)
        object.__setattr__(self, key, value)

    def __delitem__(self, index):
        """Delete element(s) by int, slice, or iterable of indices."""
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            index = set(range(start, stop, step))
        if isinstance(index, (int, _np.int_)):
            self.trackcpp_acc.lattice.erase(
                self.trackcpp_acc.lattice.begin() + int(index))
        elif isinstance(index, (set, list, tuple, _np.ndarray)):
            # erase from the back so earlier indices stay valid
            index = sorted(set(index), reverse=True)
            for i in index:
                self.trackcpp_acc.lattice.erase(
                    self.trackcpp_acc.lattice.begin() + int(i))

    def __getitem__(self, index):
        """Return an Element for an int index, or a new Accelerator (with
        the same beam settings) for a slice/iterable of indices."""
        if isinstance(index, (int, _np.int_)):
            ele = _elements.Element()
            ele.trackcpp_e = self.trackcpp_acc.lattice[int(index)]
            return ele
        elif isinstance(index, (list, tuple, _np.ndarray)):
            try:
                index = _np.array(index, dtype=int)
            except TypeError:
                raise TypeError('invalid index')
            lattice = _trackcpp.CppElementVector()
            for i in index:
                lattice.append(self.trackcpp_acc.lattice[int(i)])
        elif isinstance(index, slice):
            lattice = self.trackcpp_acc.lattice[index]
        else:
            raise TypeError('invalid index')
        acc = Accelerator(
            lattice=lattice,
            energy=self.trackcpp_acc.energy,
            harmonic_number=self.trackcpp_acc.harmonic_number,
            cavity_on=self.trackcpp_acc.cavity_on,
            radiation_on=self.trackcpp_acc.radiation_on,
            vchamber_on=self.trackcpp_acc.vchamber_on)
        return acc

    def __setitem__(self, index, value):
        """Assign one Element (broadcast) or a sequence of Elements to the
        given int/slice/iterable index."""
        if isinstance(index, (int, _np.int_)):
            index = [index, ]
        elif isinstance(index, (list, tuple, _np.ndarray)):
            pass
        elif isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            index = range(start, stop, step)
        else:
            raise TypeError('invalid index')
        if isinstance(value, (list, tuple, _np.ndarray, Accelerator)):
            if not all([isinstance(v, _elements.Element) for v in value]):
                raise TypeError('invalid value')
            for i, val in zip(index, value):
                self.trackcpp_acc.lattice[int(i)] = val.trackcpp_e
        elif isinstance(value, _elements.Element):
            for i in index:
                self.trackcpp_acc.lattice[int(i)] = value.trackcpp_e
        else:
            raise TypeError('invalid value')

    def __len__(self):
        """Return the number of elements in the lattice."""
        return self.trackcpp_acc.lattice.size()

    def __str__(self):
        """Return a human-readable summary of the accelerator settings."""
        rst = ''
        rst += 'energy : ' + str(self.trackcpp_acc.energy) + ' eV'
        rst += '\nharmonic_number: ' + str(self.trackcpp_acc.harmonic_number)
        rst += '\ncavity_on : ' + str(self.trackcpp_acc.cavity_on)
        rst += '\nradiation_on : ' + str(self.trackcpp_acc.radiation_on)
        rst += '\nvchamber_on : ' + str(self.trackcpp_acc.vchamber_on)
        rst += '\nlattice size : ' + str(len(self.trackcpp_acc.lattice))
        rst += '\nlattice length : ' + str(self.length) + ' m'
        return rst

    def __add__(self, other):
        """Concatenate with an Element or another Accelerator, returning a
        new Accelerator (operands are not modified)."""
        if isinstance(other, _elements.Element):
            acc = self[:]
            acc.append(other)
            return acc
        elif isinstance(other, Accelerator):
            acc = self[:]
            for elem in other:
                acc.append(elem)
            return acc
        else:
            msg = "unsupported operand type(s) for +: '" + \
                self.__class__.__name__ + "' and '" + \
                other.__class__.__name__ + "'"
            raise TypeError(msg)

    def __rmul__(self, other):
        """Return the lattice repeated *other* times (other >= 0)."""
        if isinstance(other, (int, _np.int_)):
            if other < 0:
                raise ValueError('cannot multiply by negative integer')
            elif other == 0:
                # empty lattice, same beam settings
                return Accelerator(
                    energy=self.energy,
                    harmonic_number=self.harmonic_number,
                    cavity_on=self.cavity_on,
                    radiation_on=self.radiation_on,
                    vchamber_on=self.vchamber_on)
            else:
                acc = self[:]
                other -= 1
                while other > 0:
                    acc += self[:]
                    other -= 1
                return acc
        else:
            msg = "unsupported operand type(s) for +: '" + \
                other.__class__.__name__ + "' and '" + \
                self.__class__.__name__ + "'"
            raise TypeError(msg)

    def __eq__(self, other):
        """Equality delegated to trackcpp's accelerator comparison."""
        if not isinstance(other, Accelerator):
            return NotImplemented
        return self.trackcpp_acc.isequal(other.trackcpp_acc)

    # --- private methods ---

    def _init_accelerator(self, kwargs):
        # Wrap an existing trackcpp.Accelerator, deep-ish copy a pyaccel
        # Accelerator, or create a fresh one with everything off.
        if 'accelerator' in kwargs:
            acc = kwargs['accelerator']
            if isinstance(acc, _trackcpp.Accelerator):
                trackcpp_acc = acc  # points to the same object in memory
            elif isinstance(acc, Accelerator):  # creates another object.
                trackcpp_acc = _trackcpp.Accelerator()
                trackcpp_acc.lattice = acc.trackcpp_acc.lattice[:]
                trackcpp_acc.energy = acc.energy
                trackcpp_acc.cavity_on = acc.cavity_on
                trackcpp_acc.radiation_on = acc.radiation_on
                trackcpp_acc.vchamber_on = acc.vchamber_on
                trackcpp_acc.harmonic_number = acc.harmonic_number
        else:
            trackcpp_acc = _trackcpp.Accelerator()
            trackcpp_acc.cavity_on = False
            trackcpp_acc.radiation_on = False
            trackcpp_acc.vchamber_on = False
            trackcpp_acc.harmonic_number = 0
        return trackcpp_acc

    def _init_lattice(self, kwargs):
        # Accept either a raw CppElementVector or a list of Elements.
        if 'lattice' in kwargs:
            lattice = kwargs['lattice']
            if isinstance(lattice, _trackcpp.CppElementVector):
                self.trackcpp_acc.lattice = lattice
            elif isinstance(lattice, list):
                for elem in lattice:
                    self.trackcpp_acc.lattice.append(elem.trackcpp_e)
            else:
                raise TypeError('values must be list of Element')
| {
"repo_name": "lnls-fac/pyaccel",
"path": "pyaccel/accelerator.py",
"copies": "1",
"size": "12654",
"license": "mit",
"hash": 2412383838153128400,
"line_mean": 34.15,
"line_max": 77,
"alpha_frac": 0.554844318,
"autogenerated": false,
"ratio": 3.864996945632254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4919841263632254,
"avg_score": null,
"num_lines": null
} |
"""Accelerator for npm, the Node.js package manager."""
# Standard library modules.
import codecs
import contextlib
import hashlib
import json
import os
import re
import time
# External dependencies.
from chardet import detect
from executor import ExternalCommandFailed, quote
from humanfriendly import Timer, format_path, parse_path
from humanfriendly.tables import format_pretty_table
from humanfriendly.terminal import ansi_wrap, terminal_supports_colors
from humanfriendly.text import concatenate, pluralize
from property_manager import (
PropertyManager,
cached_property,
clear_property,
mutable_property,
required_property,
set_property,
)
from verboselogs import VerboseLogger
# Modules included in our package.
from npm_accel.exceptions import MissingPackageFileError, MissingNodeInterpreterError
# The set of installer names accepted by NpmAccel.installer_name.
KNOWN_INSTALLERS = ("npm", "yarn", "pnpm", "npm-cache")
"""A tuple of strings with the names of supported Node.js installers."""
# Semi-standard module versioning.
__version__ = "2.0"
# Initialize a logger for this program. VerboseLogger provides the
# logger.verbose() calls used throughout this module.
logger = VerboseLogger(__name__)
class NpmAccel(PropertyManager):
    """
    Python API for npm-accel.
    When you create an :class:`NpmAccel` object you're required to provide a
    :attr:`context` by passing a keyword argument to the constructor. The
    following writable properties can be set in this same way:
    :attr:`cache_directory`, :attr:`cache_limit`, :attr:`context`,
    :attr:`installer_name`, :attr:`production`, :attr:`read_from_cache`,
    :attr:`write_to_cache`. Once you've initialized npm-accel the most useful
    method to call is :func:`install()`.
    """
    @mutable_property(cached=True)
    def cache_directory(self):
        """The absolute pathname of the directory where ``node_modules`` directories are cached (a string)."""
        # Use the system wide cache directory when running as root (and
        # /var/cache is writable), otherwise fall back to a per-user cache.
        return (
            "/var/cache/npm-accel"
            if os.getuid() == 0 and os.access("/var/cache", os.W_OK)
            else parse_path("~/.cache/npm-accel")
        )
    @mutable_property
    def cache_limit(self):
        """
        The maximum number of tar archives to preserve in the cache (an integer, defaults to 20).
        The environment variable ``$NPM_ACCEL_CACHE_LIMIT`` can be used to override the
        default value of this option.
        """
        return int(os.environ.get("NPM_ACCEL_CACHE_LIMIT", "20"))
    @required_property
    def context(self):
        """A command execution context created using :mod:`executor.contexts`."""
    @cached_property
    def default_installer(self):
        """
        The name of the default installer to use (either 'npm' or 'yarn').
        When the yarn program is available in the ``$PATH`` the value of
        :attr:`default_installer` will be 'yarn', otherwise it falls back to
        'npm'.
        """
        if self.context.find_program("yarn"):
            logger.verbose("Selecting 'yarn' as default installer.")
            return "yarn"
        else:
            logger.verbose("Selecting 'npm' as default installer ('yarn' isn't installed).")
            return "npm"
    @property
    def installer_method(self):
        """
        The method corresponding to :attr:`installer_name` (a callable).
        :raises: :exc:`~exceptions.ValueError` if the value of
                 :attr:`installer_name` is not supported.
        """
        if self.installer_name == "npm":
            return self.install_with_npm
        elif self.installer_name == "yarn":
            return self.install_with_yarn
        elif self.installer_name == "pnpm":
            return self.install_with_pnpm
        elif self.installer_name == "npm-cache":
            return self.install_with_npm_cache
        else:
            raise ValueError("The requested installer is not supported! (%r)" % self.installer_name)
    @mutable_property(cached=True)
    def installer_name(self):
        """
        The name of the installer to use (one of the strings in :data:`KNOWN_INSTALLERS`).
        The default value of :attr:`installer_name` is :attr:`default_installer`.
        When you try to set :attr:`installer_name` to a name that is not included
        in :data:`KNOWN_INSTALLERS` a :exc:`~exceptions.ValueError` exception will
        be raised. When you try to set :attr:`installer_name` to the name of an
        installer that is not available a warning message will be logged and
        :attr:`default_installer` is used instead.
        """
        return self.default_installer
    @installer_name.setter
    def installer_name(self, value):
        """Validate the configured installer."""
        if value not in KNOWN_INSTALLERS:
            msg = "Invalid installer name %r! (the supported installers are %s)"
            raise ValueError(msg % (value, concatenate(KNOWN_INSTALLERS)))
        if self.context.find_program(value):
            logger.verbose("Selecting user defined installer %r (confirmed to be installed).", value)
        else:
            logger.warning(
                "User defined installer %r isn't available, falling back to %s.", value, self.default_installer
            )
            value = self.default_installer
        set_property(self, "installer_name", value)
        # The cached version belongs to the previous installer; recompute it
        # the next time it's needed.
        clear_property(self, "installer_version")
    @cached_property
    def installer_version(self):
        """The installer version according to the ``${installer_name} --version`` command (a string)."""
        return self.context.capture(self.installer_name, "--version")
    @cached_property
    def nodejs_interpreter(self):
        """
        The name of the Node.js interpreter (a string).
        The official name of the Node.js interpreter is simply ``node``,
        however on Debian based systems this name conflicts with another
        program provided by a system package (ax25-node_) which predates the
        existence of the Node.js interpreter. For this reason Debian calls the
        Node.js interpreter ``nodejs`` instead.
        This property first checks whether the ``nodejs`` program is available
        (because it is the less ambiguous name of the two) and if that fails it
        will check if the ``node`` program is available.
        :raises: :exc:`.MissingNodeInterpreterError` when neither of the
                 expected programs is available.
        .. _ax25-node: https://packages.debian.org/ax25-node
        """
        logger.debug("Discovering name of Node.js interpreter ..")
        for interpreter in "nodejs", "node":
            logger.debug("Checking availability of program: %s", interpreter)
            matches = self.context.find_program(interpreter)
            if matches:
                logger.debug("Found Node.js interpreter: %s", matches[0])
                return matches[0]
        raise MissingNodeInterpreterError("Missing Node.js interpreter! (expected to find 'nodejs' or 'node')")
    @cached_property
    def nodejs_version(self):
        """
        The output of the ``nodejs --version`` or ``node --version`` command (a string).
        :raises: :exc:`.MissingNodeInterpreterError` when neither of the
                 expected programs is available.
        """
        return self.context.capture(self.nodejs_interpreter, "--version")
    @mutable_property
    def production(self):
        """
        :data:`True` if devDependencies_ should be ignored, :data:`False` to have them installed.
        The value of :attr:`production` defaults to :data:`True` when the
        environment variable ``$NODE_ENV`` is set to ``production``, otherwise
        it defaults to :data:`False`.
        .. _devDependencies: https://docs.npmjs.com/files/package.json#devdependencies
        """
        return os.environ.get("NODE_ENV") == "production"
    @property
    def production_option(self):
        """
        One of the strings ``--production=true`` or ``--production=false`` (depending on :attr:`production`).
        This command line option is given to the ``npm install``, ``yarn``, ``pnpm`` and
        ``npm-cache`` commands to explicitly switch between production and development
        installations.
        """
        return "--production=%s" % ("true" if self.production else "false")
    @mutable_property
    def read_from_cache(self):
        """:data:`True` if npm-accel is allowed to read from its cache, :data:`False` otherwise."""
        return self.installer_name != "npm-cache"
    @mutable_property
    def write_to_cache(self):
        """:data:`True` if npm-accel is allowed to write to its cache, :data:`False` otherwise."""
        return self.installer_name != "npm-cache"
    def add_to_cache(self, modules_directory, file_in_cache):
        """
        Add a ``node_modules`` directory to the cache.
        :param modules_directory: The pathname of the ``node_modules`` directory (a string).
        :param file_in_cache: The pathname of the archive in the cache (a string).
        :raises: Any exceptions raised by the :mod:`executor.contexts` module.
        This method generates the tar archive under a temporary name inside the
        cache directory and then renames it into place atomically, in order to
        avoid race conditions where multiple concurrent npm-accel commands try
        to use partially generated cache entries.
        The temporary names are generated by appending a randomly generated
        integer number to the original filename (with a dash to delimit the
        original filename from the number).
        """
        timer = Timer()
        logger.info("Adding to cache (%s) ..", format_path(file_in_cache))
        self.context.execute("mkdir", "-p", os.path.dirname(file_in_cache))
        with self.context.atomic_write(file_in_cache) as temporary_file:
            self.context.execute("tar", "-cf", temporary_file, "-C", modules_directory, ".")
        self.write_metadata(file_in_cache)
        logger.verbose("Took %s to add directory to cache.", timer)
    def benchmark(self, directory, iterations=2, reset_caches=True, silent=False):
        """
        Benchmark ``npm install``, ``yarn``, ``pnpm``, ``npm-accel`` and ``npm-cache``.
        :param directory: The pathname of a directory with a ``package.json`` file (a string).
        :param iterations: The number of times to run each installation command.
        :param reset_caches: :data:`True` to reset all caches before the first
                             iteration of each installation method,
                             :data:`False` otherwise.
        :param silent: Used to set :attr:`~executor.ExternalCommand.silent`.
        :raises: Any exceptions raised by the :mod:`executor.contexts` module.
        """
        results = []
        baseline = None
        for name, label in (
            ("npm", "npm install"),
            ("yarn", "yarn"),
            ("pnpm", "pnpm install"),
            ("npm-accel", "npm-accel"),
            ("npm-cache", "npm-cache install npm"),
        ):
            # Reset all caches before the first run of each installer?
            if reset_caches:
                self.clear_directory("~/.cache/yarn")
                self.clear_directory("~/.npm")
                self.clear_directory("~/.package_cache")  # npm-cache
                self.clear_directory("~/.pnpm-store")
                self.clear_directory(self.cache_directory)
                self.clear_directory(os.path.join(directory, "node_modules"))
            # Run the test twice, the first time to prime the cache
            # and the second time to actually use the cache.
            for i in range(1, iterations + 1):
                iteration_label = "%i of %i" % (i, iterations)
                logger.info("Testing '%s' (%s) ..", label, iteration_label)
                timer = Timer()
                if name == "npm-accel":
                    self.installer_name = self.default_installer
                    self.read_from_cache = True
                    self.write_to_cache = True
                else:
                    self.installer_name = name
                    self.read_from_cache = False
                    self.write_to_cache = False
                try:
                    self.install(directory, silent=silent)
                except ExternalCommandFailed:
                    label += " (failed)"
                    if terminal_supports_colors():
                        label = ansi_wrap(label, color="red")
                    results.append((label, iteration_label, str(timer), "-"))
                    # We skip the second iteration on failure.
                    break
                else:
                    # The very first successful run establishes the baseline
                    # that later runs are expressed as a percentage of.
                    if baseline is None:
                        baseline = timer.elapsed_time
                        percentage = "100%"
                    else:
                        percentage = "%.2f%%" % (timer.elapsed_time / (baseline / 100.0))
                    results.append((label, iteration_label, str(timer), percentage))
                    logger.info("Took %s for '%s' (%s).", timer, label, iteration_label)
        print(format_pretty_table(results, column_names=["Approach", "Iteration", "Elapsed time", "Percentage"]))
    def clean_cache(self):
        """Remove old and unused archives from the cache directory."""
        timer = Timer()
        entries = []
        for file_in_cache in self.find_archives():
            cache_metadata = self.read_metadata(file_in_cache)
            last_accessed = cache_metadata.get("last-accessed", 0)
            entries.append((last_accessed, file_in_cache))
        # Sort by last access time (oldest first) and remove everything
        # beyond the cache limit. A limit of zero (or less) means "preserve
        # no archives" (the previous slice `[:0]` removed nothing instead).
        entries.sort()
        to_remove = entries[: -self.cache_limit] if self.cache_limit > 0 else entries
        if to_remove:
            for last_used, file_in_cache in to_remove:
                logger.debug("Removing archive from cache: %s", format_path(file_in_cache))
                metadata_file = self.get_metadata_file(file_in_cache)
                self.context.execute("rm", "-f", file_in_cache, metadata_file)
            logger.verbose("Took %s to remove %s from cache.", timer, pluralize(len(to_remove), "archive"))
        else:
            logger.verbose("Wasted %s checking whether cache needs to be cleaned (it doesn't).", timer)
    def clear_directory(self, directory):
        """
        Make sure a directory exists and is empty.
        :param directory: The pathname of the directory (a string).
        :raises: Any exceptions raised by the :mod:`executor.contexts` module.
        .. note:: If the directory already exists it will be removed and
                  recreated in order to remove any existing contents. This may
                  change the ownership and permissions of the directory. If
                  this ever becomes a problem for someone I can improve it to
                  preserve the metadata.
        """
        parsed_directory = parse_path(directory)
        formatted_directory = format_path(parsed_directory)
        if self.context.is_directory(parsed_directory):
            logger.verbose("Clearing directory contents (%s) ..", formatted_directory)
            self.context.execute("rm", "-fr", parsed_directory)
        else:
            logger.verbose("Creating directory (%s) ..", formatted_directory)
        self.context.execute("mkdir", "-p", parsed_directory)
    def extract_dependencies(self, package_file):
        """
        Extract the relevant dependencies from a ``package.json`` file.
        :param package_file: The pathname of the file (a string).
        :returns: A dictionary with the relevant dependencies.
        :raises: :exc:`.MissingPackageFileError` when the given directory
                 doesn't contain a ``package.json`` file.
        If no dependencies are extracted from the ``package.json`` file
        a warning message is logged but it's not considered an error.
        """
        formatted_path = format_path(package_file)
        logger.verbose("Extracting dependencies (%s) ..", formatted_path)
        if not self.context.is_file(package_file):
            msg = "Missing package.json file! (%s)" % package_file
            raise MissingPackageFileError(msg)
        contents = self.context.read_file(package_file)
        metadata = json.loads(auto_decode(contents))
        dependencies = metadata.get("dependencies", {})
        if not self.production:
            # Development installations also take devDependencies into account.
            dependencies.update(metadata.get("devDependencies", {}))
        if dependencies:
            logger.verbose(
                "Extracted %s from package.json file.", pluralize(len(dependencies), "dependency", "dependencies")
            )
        else:
            logger.warning("No dependencies extracted from %s file?!", formatted_path)
        return dependencies
    def find_archives(self):
        """
        Find the absolute pathnames of the archives in the cache directory.
        :returns: A generator of filenames (strings).
        """
        # Cache entries are named after their 40 character SHA1 cache key.
        pattern = re.compile(r"^[0-9A-F]{40}\.tar$", re.IGNORECASE)
        for entry in self.context.list_entries(self.cache_directory):
            if pattern.match(entry):
                yield os.path.join(self.cache_directory, entry)
    def get_cache_file(self, dependencies):
        """
        Compute the filename in the cache for the given dependencies.
        :param dependencies: A dictionary of dependencies like those returned
                             by :func:`extract_dependencies()`.
        :returns: The absolute pathname of the file in the cache (a string).
        """
        filename = "%s.tar" % self.get_cache_key(dependencies)
        return os.path.join(self.cache_directory, filename)
    def get_cache_key(self, dependencies):
        """
        Compute the cache key (fingerprint) for the given dependencies.
        :param dependencies: A dictionary of dependencies like those returned
                             by :func:`extract_dependencies()`.
        :returns: A 40-character hexadecimal SHA1 digest (a string).
        In addition to the dependencies the values of :attr:`nodejs_version` and
        :attr:`installer_version` are used to compute the cache key, this is to
        make sure that upgrades to Node.js and the installer don't cause problems.
        """
        logger.debug(
            "Computing cache key based on dependencies (%s), Node.js version (%s) and %s version (%s) ..",
            dependencies,
            self.nodejs_version,
            self.installer_name,
            self.installer_version,
        )
        state = hashlib.sha1()
        # Sorting makes the fingerprint independent of dictionary order.
        state.update(repr(sorted(dependencies.items())).encode("ascii"))
        state.update(self.nodejs_version.encode("ascii"))
        state.update(self.installer_version.encode("ascii"))
        cache_key = state.hexdigest()
        logger.debug("Computed cache key is %s.", cache_key)
        return cache_key
    def get_metadata_file(self, file_in_cache):
        """
        Get the name of the metadata file for a given file in the cache.
        :param file_in_cache: The pathname of the archive in the cache (a string).
        :returns: The absolute pathname of the metadata file (a string).
        """
        return re.sub(r"\.tar$", ".json", file_in_cache)
    def install(self, directory, silent=False):
        """
        Install Node.js package(s) listed in a ``package.json`` file.
        :param directory: The pathname of a directory with a ``package.json`` file (a string).
        :param silent: Used to set :attr:`~executor.ExternalCommand.silent`.
        :returns: The result of :func:`extract_dependencies()`.
        """
        timer = Timer()
        package_file = os.path.join(directory, "package.json")
        modules_directory = os.path.join(directory, "node_modules")
        dependencies = self.extract_dependencies(package_file)
        logger.info("Installing Node.js packages in %s ..", format_path(directory))
        if dependencies:
            file_in_cache = self.get_cache_file(dependencies)
            if self.read_from_cache:
                logger.verbose("Checking the cache (%s) ..", format_path(file_in_cache))
            if self.read_from_cache and self.context.is_file(file_in_cache):
                self.install_from_cache(file_in_cache, modules_directory)
                logger.info(
                    "Done! Took %s to install %s from cache.",
                    timer,
                    pluralize(len(dependencies), "dependency", "dependencies"),
                )
            else:
                # Cache miss (or reading disabled): run the real installer.
                self.clear_directory(modules_directory)
                with self.preserve_contents(package_file):
                    self.installer_method(directory, silent=silent)
                if self.write_to_cache:
                    self.add_to_cache(modules_directory, file_in_cache)
                logger.info(
                    "Done! Took %s to install %s using %s.",
                    timer,
                    pluralize(len(dependencies), "dependency", "dependencies"),
                    self.installer_name,
                )
            self.clean_cache()
        else:
            logger.info("Nothing to do! (no dependencies to install)")
        return dependencies
    def install_from_cache(self, file_in_cache, modules_directory):
        """
        Populate a ``node_modules`` directory by unpacking an archive from the cache.
        :param file_in_cache: The pathname of the archive in the cache (a string).
        :param modules_directory: The pathname of the ``node_modules`` directory (a string).
        :raises: Any exceptions raised by the :mod:`executor.contexts` module.
        If the directory already exists it will be removed and recreated in
        order to remove any existing contents before the archive is unpacked.
        """
        timer = Timer()
        formatted_path = format_path(file_in_cache)
        logger.info("Installing from cache (%s)..", formatted_path)
        self.clear_directory(modules_directory)
        logger.verbose("Unpacking archive (%s) ..", formatted_path)
        self.context.execute("tar", "-xf", file_in_cache, "-C", modules_directory)
        # Bump the last-accessed timestamp and hit counter of the entry.
        self.write_metadata(file_in_cache)
        logger.verbose("Took %s to install from cache.", timer)
    def install_with_npm(self, directory, silent=False):
        """
        Use `npm install`_ to install dependencies.
        :param directory: The pathname of a directory with a ``package.json`` file (a string).
        :param silent: Used to set :attr:`~executor.ExternalCommand.silent`.
        :raises: Any exceptions raised by the :mod:`executor.contexts` module.
        .. _npm install: https://docs.npmjs.com/cli/install
        """
        timer = Timer()
        install_command = ["npm", "install", self.production_option]
        logger.info("Running command: %s", quote(install_command))
        self.context.execute(*install_command, directory=directory, silent=silent)
        logger.verbose("Took %s to install with npm.", timer)
    def install_with_npm_cache(self, directory, silent=False):
        """
        Use npm-cache_ to install dependencies.
        :param directory: The pathname of a directory with a ``package.json`` file (a string).
        :param silent: Used to set :attr:`~executor.ExternalCommand.silent`.
        :raises: Any exceptions raised by the :mod:`executor.contexts` module.
        .. warning:: When I tried out npm-cache_ for the second time I found
                     out that it unconditionally includes both production
                     dependencies_ and devDependencies_ in the cache keys that
                     it calculates, thereby opening the door for 'cache
                     poisoning'. For more details please refer to `npm-cache
                     issue 74`_. Currently npm-accel does not work around
                     this problem, so consider yourself warned ;-).
        .. _npm-cache: https://www.npmjs.com/package/npm-cache
        .. _dependencies: https://docs.npmjs.com/files/package.json#dependencies
        .. _devDependencies: https://docs.npmjs.com/files/package.json#devdependencies
        .. _npm-cache issue 74: https://github.com/swarajban/npm-cache/issues/74
        """
        timer = Timer()
        install_command = ["npm-cache", "install", "npm", self.production_option]
        logger.info("Running command: %s", quote(install_command))
        self.context.execute(*install_command, directory=directory, silent=silent)
        logger.verbose("Took %s to install with npm-cache.", timer)
    def install_with_pnpm(self, directory, silent=False):
        """
        Use pnpm_ to install dependencies.
        :param directory: The pathname of a directory with a ``package.json`` file (a string).
        :param silent: Used to set :attr:`~executor.ExternalCommand.silent`.
        :raises: Any exceptions raised by the :mod:`executor.contexts` module.
        .. _pnpm: https://www.npmjs.com/package/pnpm
        """
        timer = Timer()
        install_command = ["pnpm", "install", self.production_option]
        logger.info("Running command: %s", quote(install_command))
        self.context.execute(*install_command, directory=directory, silent=silent)
        logger.verbose("Took %s to install with pnpm.", timer)
    def install_with_yarn(self, directory, silent=False):
        """
        Use yarn_ to install dependencies.
        :param directory: The pathname of a directory with a ``package.json`` file (a string).
        :param silent: Used to set :attr:`~executor.ExternalCommand.silent`.
        :raises: Any exceptions raised by the :mod:`executor.contexts` module.
        .. _yarn: https://www.npmjs.com/package/yarn
        """
        timer = Timer()
        install_command = ["yarn", self.production_option]
        logger.info("Running command: %s", quote(install_command))
        self.context.execute(*install_command, directory=directory, silent=silent)
        logger.verbose("Took %s to install with yarn.", timer)
    @contextlib.contextmanager
    def preserve_contents(self, filename):
        """
        Restore the contents of a file after the context ends.
        :param filename: The pathname of the file (a string).
        :returns: A context manager.
        The contents are restored even when the code inside the ``with``
        block raises an exception (e.g. a failed installer run), so that a
        ``package.json`` file modified by an installer can't linger after a
        failure. (Previously the restore was skipped on exceptions.)
        """
        contents = self.context.read_file(filename)
        try:
            yield
        finally:
            self.context.write_file(filename, contents)
    def read_metadata(self, file_in_cache):
        """
        Read the metadata associated with an archive in the cache.
        :param file_in_cache: The pathname of the archive in the cache (a string).
        :returns: A dictionary with cache metadata. If the cache metadata file
                  cannot be read or its contents can't be parsed as JSON then
                  an empty dictionary is returned.
        """
        metadata_file = self.get_metadata_file(file_in_cache)
        if self.context.is_file(metadata_file):
            return json.loads(auto_decode(self.context.read_file(metadata_file)))
        else:
            return {}
    def write_metadata(self, file_in_cache, **overrides):
        """
        Create or update the metadata file associated with an archive in the cache.
        :param file_in_cache: The pathname of the archive in the cache (a string).
        :param overrides: Any key/value pairs to add to the metadata.
        """
        metadata_file = self.get_metadata_file(file_in_cache)
        cache_metadata = self.read_metadata(file_in_cache)
        logger.verbose(
            "%s metadata file (%s) ..", "Updating" if cache_metadata else "Creating", format_path(metadata_file)
        )
        cache_metadata.update(overrides)
        if "date-created" not in cache_metadata:
            cache_metadata["date-created"] = int(time.time())
        cache_metadata["last-accessed"] = int(time.time())
        cache_metadata["cache-hits"] = cache_metadata.get("cache-hits", 0) + 1
        # Write atomically so concurrent npm-accel runs never read a
        # partially written metadata file.
        with self.context.atomic_write(metadata_file) as temporary_file:
            self.context.write_file(temporary_file, json.dumps(cache_metadata).encode("UTF-8"))
def auto_decode(text):
    """
    Decode a byte string by guessing the text encoding.
    :param text: A byte string.
    :returns: A Unicode string.
    A UTF-8 byte order mark takes precedence over detection; otherwise the
    encoding is guessed using :func:`chardet.detect()`. When detection fails
    (``chardet`` reports ``None``, e.g. for an empty byte string) we fall
    back to UTF-8 instead of raising :exc:`TypeError` from
    :func:`codecs.decode()`.
    """
    if text.startswith(codecs.BOM_UTF8):
        encoding = "utf-8-sig"
    else:
        result = detect(text)
        # chardet returns {'encoding': None} when it can't make a guess.
        encoding = result["encoding"] or "utf-8"
    return codecs.decode(text, encoding)
| {
"repo_name": "xolox/python-npm-accel",
"path": "npm_accel/__init__.py",
"copies": "1",
"size": "28516",
"license": "mit",
"hash": 8073024971832775000,
"line_mean": 43.6259780908,
"line_max": 114,
"alpha_frac": 0.6177584514,
"autogenerated": false,
"ratio": 4.272059925093633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5389818376493632,
"avg_score": null,
"num_lines": null
} |
"""
Usage: npm-accel [OPTIONS] [DIRECTORY]
The npm-accel program is a wrapper for npm (the Node.js package manager) that
optimizes one specific use case: Building a `node_modules' directory from a
`package.json' file as quickly as possible.
It works on the assumption that you build `node_modules' directories more
frequently than you change the contents of `package.json' files, because it
computes a fingerprint of the dependencies and uses that fingerprint as a
cache key, to cache the complete `node_modules' directory in a tar archive.
Supported options:
-p, --production
Don't install modules listed in `devDependencies'.
-i, --installer=NAME
Set the installer to use. Supported values for NAME are `npm', `yarn',
`pnpm' and `npm-cache'. When yarn is available it will be selected as the
default installer, otherwise the default is npm.
-u, --update
Don't read from the cache but do write to the cache. If you suspect a cache
entry to be corrupt you can use --update to 'refresh' the cache entry.
-n, --no-cache
Disallow writing to the cache managed by npm-accel (reading is still
allowed though). This option does not disable internal caching
performed by npm, yarn, pnpm and npm-cache.
-c, --cache-directory=DIR
Set the pathname of the directory where the npm-accel cache is stored.
-l, --cache-limit=COUNT
Set the maximum number of tar archives to preserve. When the cache
directory contains more than COUNT archives the least recently used
archives are removed. Defaults to 20.
The environment variable $NPM_ACCEL_CACHE_LIMIT provides a convenient
way to customize this option in CI and build environments.
-b, --benchmark
Benchmark and compare the following installation methods:
1. npm install
2. yarn
3. pnpm
4. npm-accel
5. npm-cache
The first method performs no caching (except for the HTTP caching that's
native to npm) while the other four methods each manage their own cache
(that is to say, the caching logic of npm-accel is only used in step 4).
Warning: Benchmarking wipes the caches managed by npm, yarn, pnpm,
npm-accel and npm-cache in order to provide a fair comparison (you
can override this in the Python API but not on the command line).
-r, --remote-host=SSH_ALIAS
Operate on a remote system instead of the local system. The
SSH_ALIAS argument gives the SSH alias of the remote host.
-v, --verbose
Increase logging verbosity (can be repeated).
-q, --quiet
Decrease logging verbosity (can be repeated).
--version
Report the version of npm-accel.
-h, --help
Show this message and exit.
"""
# Standard library modules.
import getopt
import logging
import os
import sys
# External dependencies.
import coloredlogs
from executor.contexts import create_context
from humanfriendly import parse_path
from humanfriendly.terminal import output, usage, warning
# Modules included in our package.
from npm_accel import __version__, NpmAccel
from npm_accel.exceptions import NpmAccelError
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
def main():
    """
    Command line interface for the ``npm-accel`` program.

    Parses the options documented in the module docstring, constructs an
    :class:`~npm_accel.NpmAccel` instance and invokes either its ``install()``
    or ``benchmark()`` method on the selected directory. Exits with a nonzero
    status on usage errors and on runtime failures.
    """
    # Initialize logging to the terminal and system log.
    coloredlogs.install(syslog=True)
    # Command line option defaults.
    program_opts = {}
    context_opts = {}
    directory = None
    # 'action' names the NpmAccel method to invoke ("install" or "benchmark").
    action = "install"
    # Parse the command line arguments.
    try:
        options, arguments = getopt.getopt(
            sys.argv[1:],
            "pi:unc:l:br:vqh",
            [
                "production",
                "installer=",
                "update",
                "no-cache",
                "cache-directory=",
                "cache-limit=",
                "benchmark",
                "remote-host=",
                "verbose",
                "quiet",
                "version",
                "help",
            ],
        )
        for option, value in options:
            if option in ("-p", "--production"):
                program_opts["production"] = True
            elif option in ("-i", "--installer"):
                program_opts["installer_name"] = value
            # -u refreshes a (possibly corrupt) cache entry: skip reading
            # from the cache but keep writing enabled.
            elif option in ("-u", "--update"):
                program_opts["read_from_cache"] = False
                program_opts["write_to_cache"] = True
            # -n only disables *writing*; reading from the cache stays allowed.
            elif option in ("-n", "--no-cache"):
                program_opts["write_to_cache"] = False
            elif option in ("-c", "--cache-directory"):
                program_opts["cache_directory"] = parse_path(value)
            elif option in ("-l", "--cache-limit"):
                program_opts["cache_limit"] = int(value)
            elif option in ("-b", "--benchmark"):
                action = "benchmark"
            elif option in ("-r", "--remote-host"):
                context_opts["ssh_alias"] = value
            elif option in ("-v", "--verbose"):
                coloredlogs.increase_verbosity()
            elif option in ("-q", "--quiet"):
                coloredlogs.decrease_verbosity()
            elif option == "--version":
                output(__version__)
                return
            elif option in ("-h", "--help"):
                usage(__doc__)
                return
            else:
                # Unreachable as long as the branches above cover getopt's spec.
                assert False, "Unhandled option!"
        # At most one positional argument (the working directory) is accepted.
        if arguments:
            directory = arguments.pop(0)
            if arguments:
                raise Exception("Got more positional arguments than expected!")
        if not directory:
            # A remote context has no meaningful "current directory" here.
            if context_opts.get("ssh_alias"):
                raise Exception("When operating on a remote system the directory needs to be specified explicitly!")
            directory = os.getcwd()
    except Exception as e:
        warning("Error: Failed to parse command line arguments! (%s)" % e)
        sys.exit(1)
    # Perform the requested action(s).
    try:
        context = create_context(**context_opts)
        program_opts["context"] = context
        accelerator = NpmAccel(**program_opts)
        # Dispatch to NpmAccel.install() or NpmAccel.benchmark().
        method = getattr(accelerator, action)
        method(directory)
    except NpmAccelError as e:
        # Expected failure modes get a clean one-line error message.
        warning("Error: %s", e)
        sys.exit(1)
    except Exception:
        logger.exception("Encountered unexpected exception! Aborting ..")
        sys.exit(1)
| {
"repo_name": "xolox/python-npm-accel",
"path": "npm_accel/cli.py",
"copies": "1",
"size": "6539",
"license": "mit",
"hash": 2291959065914274300,
"line_mean": 32.192893401,
"line_max": 116,
"alpha_frac": 0.6193607585,
"autogenerated": false,
"ratio": 4.246103896103896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00004338583018786065,
"num_lines": 197
} |
"""Sphinx documentation configuration for the `npm-accel` project."""
import os
import sys
# Add the npm-accel source distribution's root directory to the module path.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# Sphinx extension module names.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'humanfriendly.sphinx',
]
# Paths that contain templates, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'npm-accel'
copyright = '2020, Peter Odding'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Find the package version and make it the release.
from npm_accel import __version__ as npm_accel_version # NOQA
# The short X.Y version.
version = '.'.join(npm_accel_version.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = npm_accel_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# http://sphinx-doc.org/ext/autodoc.html#confval-autodoc_member_order
autodoc_member_order = 'bysource'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Refer to the Python standard library.
# From: http://twistedmatrix.com/trac/ticket/4582.
intersphinx_mapping = dict(
executor=('https://executor.readthedocs.io/en/latest/', None),
propertymanager=('https://property-manager.readthedocs.io/en/latest/', None),
python=('http://docs.python.org/2/', None),
)
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Output file base name for HTML help builder.
htmlhelp_basename = 'npmacceldoc'
| {
"repo_name": "xolox/python-npm-accel",
"path": "docs/conf.py",
"copies": "1",
"size": "2616",
"license": "mit",
"hash": -5083647663765442000,
"line_mean": 30.1428571429,
"line_max": 81,
"alpha_frac": 0.7025993884,
"autogenerated": false,
"ratio": 3.6383866481223923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4840986036522392,
"avg_score": null,
"num_lines": null
} |
"""Test suite for the `npm-accel` package."""
# Standard library modules.
import json
import logging
import os
import string
# External dependencies.
from executor import execute
from executor.contexts import create_context
from humanfriendly import Timer
from humanfriendly.text import random_string
from humanfriendly.testing import CustomSearchPath, MockedProgram, TemporaryDirectory, TestCase, run_cli
# Modules included in our package.
from npm_accel import NpmAccel
from npm_accel.cli import main
from npm_accel.exceptions import MissingPackageFileError, MissingNodeInterpreterError
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class NpmAccelTestCase(TestCase):

    """Container for the `npm-accel` test suite."""

    def test_missing_package_file_error(self):
        """Make sure an error is raised when the ``package.json`` file is missing."""
        with TemporaryDirectory() as project_directory:
            accelerator = NpmAccel(context=create_context())
            self.assertRaises(MissingPackageFileError, accelerator.install, project_directory)

    def test_node_binary_not_found_error(self):
        """Make sure an error is raised when the Node.js interpreter is missing."""
        with CustomSearchPath(isolated=True):
            accelerator = NpmAccel(context=create_context())
            self.assertRaises(MissingNodeInterpreterError, getattr, accelerator, "nodejs_interpreter")

    def test_multiple_arguments_error(self):
        """Make sure that multiple positional arguments raise an error."""
        returncode, output = run_cli(main, "a", "b")
        assert returncode != 0

    def test_cache_directory(self):
        """Make sure the default cache directory is writable."""
        accelerator = NpmAccel(context=create_context())
        directory = accelerator.cache_directory
        # The actual cache directory might not exist, but in that case one of
        # its parent directories is expected to exist and be writable for the
        # current user. (The previous implementation asserted inside a
        # try/except in a fixed-length loop and could pass vacuously when no
        # ancestor was writable; we now fail explicitly in that case.)
        for _ in range(100):
            if os.access(directory, os.W_OK):
                break
            directory = os.path.dirname(directory)
        else:
            assert False, "Failed to find a writable ancestor of the cache directory!"

    def test_implicit_local_directory(self):
        """Make sure local installation implicitly uses the working directory."""
        saved_cwd = os.getcwd()
        with TemporaryDirectory() as project_directory:
            write_package_metadata(project_directory)
            os.chdir(project_directory)
            try:
                returncode, output = run_cli(main)
                assert returncode == 0
            finally:
                # Always restore the original working directory.
                os.chdir(saved_cwd)

    def test_explicit_remote_directory(self):
        """Make sure remote installation requires an explicit working directory."""
        returncode, output = run_cli(main, "--remote-host=localhost")
        assert returncode != 0

    def test_installer_selection(self):
        """Make sure the installer name is properly validated."""
        # Check that 'yarn' is the default installer when available.
        with MockedProgram(name="yarn"):
            accelerator = NpmAccel(context=create_context())
            assert accelerator.default_installer == "yarn"
        # Check that 'npm' is the default installer when 'yarn' isn't available.
        with CustomSearchPath(isolated=True):
            accelerator = NpmAccel(context=create_context())
            assert accelerator.default_installer == "npm"
        # Check that non-default installers are ignored when unavailable.
        with CustomSearchPath(isolated=True):
            accelerator = NpmAccel(context=create_context())
            # Bug fix: this used to be a no-op `==' comparison, which made
            # the assertion below trivially true; an assignment is intended.
            accelerator.installer_name = "npm-cache"
            assert accelerator.installer_name == accelerator.default_installer
        # All of the following assertions can share the same accelerator instance.
        accelerator = NpmAccel(context=create_context())
        # Make sure the default installer is 'npm' or 'yarn'.
        assert accelerator.installer_name in ("yarn", "npm")
        assert accelerator.installer_method in (accelerator.install_with_npm, accelerator.install_with_yarn)
        # Make sure 'npm' is supported.
        accelerator.installer_name = "npm"
        assert accelerator.installer_method == accelerator.install_with_npm
        # Make sure 'yarn' is supported.
        accelerator.installer_name = "yarn"
        assert accelerator.installer_method == accelerator.install_with_yarn
        # Make sure 'pnpm' is supported.
        accelerator.installer_name = "pnpm"
        assert accelerator.installer_method == accelerator.install_with_pnpm
        # Make sure 'npm-cache' is supported.
        accelerator.installer_name = "npm-cache"
        assert accelerator.installer_method == accelerator.install_with_npm_cache
        # Make sure invalid installer names raise an error.
        self.assertRaises(ValueError, setattr, accelerator, "installer_name", "bogus")

    def test_installers(self):
        """Make sure all of the supported installers actually work!"""
        for installer_name in "npm", "yarn", "pnpm", "npm-cache":
            with TemporaryDirectory() as cache_directory:
                with TemporaryDirectory() as project_directory:
                    write_package_metadata(project_directory, dict(npm="3.10.6"))
                    run_cli(
                        main,
                        "--installer=%s" % installer_name,
                        "--cache-directory=%s" % cache_directory,
                        project_directory,
                    )
                    # Success is verified by checking the installed program.
                    self.check_program(project_directory, "npm", "help")

    def test_development_versus_production(self):
        """
        Make sure development and production installations both work.

        This test is intended to verify that development & production installs
        don't "poison" each other due to naively computed cache keys.
        """
        with TemporaryDirectory() as cache_directory:
            with TemporaryDirectory() as project_directory:
                write_package_metadata(project_directory, dict(path="0.12.7"), dict(npm="3.10.6"))
                # Install the production dependencies (a subset of the development dependencies).
                run_cli(main, "--cache-directory=%s" % cache_directory, "--production", project_directory)
                # We *do* expect the `path' production dependency to have been installed.
                assert os.path.exists(os.path.join(project_directory, "node_modules", "path"))
                # We *don't* expect the `npm' development dependency to have been installed.
                assert not os.path.exists(os.path.join(project_directory, "node_modules", "npm"))
                # Install the development dependencies (a superset of the production dependencies).
                run_cli(main, "--cache-directory=%s" % cache_directory, project_directory)
                # We *do* expect the `path' production dependency to have been installed.
                assert os.path.exists(os.path.join(project_directory, "node_modules", "path"))
                # We *also* expect the `npm' development dependency to have been installed.
                assert os.path.exists(os.path.join(project_directory, "node_modules", "npm"))

    def test_caching(self):
        """Verify that caching of ``node_modules`` brings a speed improvement."""
        with TemporaryDirectory() as cache_directory:
            with TemporaryDirectory() as project_directory:
                original_dependencies = dict(npm="3.10.6")
                write_package_metadata(project_directory, original_dependencies)
                accelerator = NpmAccel(context=create_context(), cache_directory=cache_directory)
                # Sanity check that we're about to prime the cache.
                parsed_dependencies = accelerator.extract_dependencies(os.path.join(project_directory, "package.json"))
                assert parsed_dependencies == original_dependencies
                # XXX In Python 2.x the following two expressions can both be
                #     True (due to implicit Unicode string coercion):
                #
                #     1. parsed_dependencies == original_dependencies
                #     2. get_cache_file(parsed_dependencies) != get_cache_file(original_dependencies)
                #
                #     That is to say: While you can successfully compare two
                #     dictionaries for equality, the repr() of the two dictionaries
                #     will differ, due to string keys versus Unicode keys and the
                #     u'' syntax in the repr() output.
                file_in_cache = accelerator.get_cache_file(parsed_dependencies)
                logger.debug(
                    "Name of file to be added to cache: %s (based on original dependencies: %s)",
                    file_in_cache,
                    original_dependencies,
                )
                assert not os.path.isfile(file_in_cache)
                # The first run is expected to prime the cache.
                first_run = Timer(resumable=True)
                with first_run:
                    parsed_dependencies = accelerator.install(project_directory)
                    assert parsed_dependencies == original_dependencies
                    self.check_program(project_directory, "npm", "help")
                # Sanity check that the cache was primed.
                assert os.path.isfile(file_in_cache)
                # The second run is expected to reuse the cache.
                second_run = Timer(resumable=True)
                with second_run:
                    parsed_dependencies = accelerator.install(project_directory)
                    assert parsed_dependencies == original_dependencies
                    self.check_program(project_directory, "npm", "help")
                # Make sure the 2nd run was significantly faster than the 1st run.
                assert second_run.elapsed_time < (first_run.elapsed_time / 2)

    def test_cache_cleaning(self):
        """Make sure the automatic cache cleaning logic works as expected."""
        with TemporaryDirectory() as cache_directory:
            context = create_context()
            accelerator = NpmAccel(context=context, cache_directory=cache_directory)
            just_above_limit = accelerator.cache_limit + 1
            for i in range(just_above_limit):
                # Create a fake (empty) tar archive.
                fingerprint = random_string(length=40, characters=string.hexdigits)
                filename = os.path.join(cache_directory, "%s.tar" % fingerprint)
                context.write_file(filename, "")
                # Create the cache metadata.
                accelerator.write_metadata(filename)
            # Sanity check the cache entries.
            assert len(list(accelerator.find_archives())) == just_above_limit
            # Run the cleanup.
            accelerator.clean_cache()
            # Make sure the number of cache entries decreased.
            assert len(list(accelerator.find_archives())) == accelerator.cache_limit

    def test_benchmark(self):
        """Make sure the benchmark finishes successfully."""
        with TemporaryDirectory() as cache_directory:
            with TemporaryDirectory() as project_directory:
                write_package_metadata(project_directory, dict(npm="3.10.6"))
                run_cli(main, "--cache-directory=%s" % cache_directory, "--benchmark", project_directory)

    def check_program(self, directory, program_name, *arguments):
        """Verify that a Node.js program was correctly installed."""
        # Verify that the program's executable was installed.
        program_path = os.path.join(directory, "node_modules", ".bin", program_name)
        assert os.path.isfile(program_path)
        assert os.access(program_path, os.X_OK)
        # Verify that the program's executable actually runs.
        execute(program_path, *arguments)
def write_package_metadata(directory, dependencies=None, devDependencies=None):
    """
    Generate a ``package.json`` file for testing.

    :param directory: The pathname of the project directory in which the
                      ``package.json`` file is created (a string).
    :param dependencies: A dictionary with production dependencies
                         (defaults to an empty dictionary).
    :param devDependencies: A dictionary with development dependencies
                            (defaults to an empty dictionary).
    """
    # Mutable default arguments are shared between calls, so the empty
    # dictionaries are created per call instead of in the signature.
    metadata = dict(
        name=random_string(10),
        version="0.0.1",
        dependencies=dependencies if dependencies is not None else {},
        devDependencies=devDependencies if devDependencies is not None else {},
    )
    with open(os.path.join(directory, "package.json"), "w") as handle:
        json.dump(metadata, handle)
| {
"repo_name": "xolox/python-npm-accel",
"path": "npm_accel/tests.py",
"copies": "1",
"size": "12595",
"license": "mit",
"hash": 6648589455252716000,
"line_mean": 51.6987447699,
"line_max": 120,
"alpha_frac": 0.6367606193,
"autogenerated": false,
"ratio": 4.4631467044649185,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019241504099672946,
"num_lines": 239
} |
"""
:py:mod:`pip_accel.exceptions` - Exceptions for structured error handling
=========================================================================
This module defines named exceptions raised by pip-accel when it encounters
error conditions that:
1. Already require structured handling inside pip-accel
2. May require structured handling by callers of pip-accel
Yes, I know, I just made your lovely and elegant Python look a whole lot like
Java! I guess the message to take away here is that (in my opinion) structured
error handling helps to build robust software that acknowledges failures exist
and tries to deal with them (even if only by clearly recognizing a problem and
giving up when there's nothing useful to do!).
Hierarchy of exceptions
-----------------------
If you're interested in implementing structured handling of exceptions reported
by pip-accel the following diagram may help by visualizing the hierarchy:
.. inheritance-diagram:: EnvironmentMismatchError UnknownDistributionFormat InvalidSourceDistribution BuildFailed NoBuildOutput CacheBackendError CacheBackendDisabledError DependencyInstallationRefused DependencyInstallationFailed
:parts: 1
----
"""
from pip_accel.utils import compact
class PipAcceleratorError(Exception):

    """
    Base class for all exception types that are explicitly raised by
    :py:mod:`pip_accel`.
    """

    def __init__(self, text, **kw):
        """Accepts the same arguments as :py:func:`.compact()`."""
        # Normalize the message text before handing it to Exception.
        message = compact(text, **kw)
        super(PipAcceleratorError, self).__init__(message)
class NothingToDoError(PipAcceleratorError):

    """
    Raised when pip reports success but doesn't produce a requirement set.

    Raised by :py:func:`~pip_accel.PipAccelerator.get_pip_requirement_set()`
    when pip doesn't report an error but also doesn't generate a requirement
    set (this happens when the user specifies an empty requirements file).
    """


class EnvironmentMismatchError(PipAcceleratorError):

    """
    Raised when the active Python environment doesn't match expectations.

    Raised by :py:func:`~pip_accel.PipAccelerator.validate_environment()` when
    it detects a mismatch between :py:data:`sys.prefix` and ``$VIRTUAL_ENV``.
    """


class UnknownDistributionFormat(PipAcceleratorError):

    """
    Raised when an unpacked distribution's format can't be determined.

    Raised by :py:attr:`~pip_accel.req.Requirement.is_wheel` when it cannot
    discern whether a given unpacked distribution is a source distribution or a
    wheel distribution.
    """


class BinaryDistributionError(PipAcceleratorError):

    """Base class for exceptions related to the generation of binary distributions."""


class InvalidSourceDistribution(BinaryDistributionError):

    """
    Raised by :py:func:`~pip_accel.bdist.BinaryDistributionManager.build_binary_dist()`
    when the given directory doesn't contain a Python source distribution.
    """


class BuildFailed(BinaryDistributionError):

    """
    Raised by :py:func:`~pip_accel.bdist.BinaryDistributionManager.build_binary_dist()`
    when a binary distribution build fails.
    """


class NoBuildOutput(BinaryDistributionError):

    """
    Raised by :py:func:`~pip_accel.bdist.BinaryDistributionManager.build_binary_dist()`
    when a binary distribution build fails to produce the expected binary
    distribution archive.
    """


class CacheBackendError(PipAcceleratorError):

    """Raised by cache backends when they fail in a controlled manner."""


class CacheBackendDisabledError(CacheBackendError):

    """Raised by cache backends when they require configuration."""


class SystemDependencyError(PipAcceleratorError):

    """Base class for exceptions related to missing system packages."""


class DependencyInstallationRefused(SystemDependencyError):

    """
    Raised by :py:class:`.SystemPackageManager` when one or more known to be
    required system packages are missing and automatic installation of missing
    dependencies is disabled by the operator.
    """


class DependencyInstallationFailed(SystemDependencyError):

    """
    Raised by :py:class:`.SystemPackageManager` when the installation of
    missing system packages fails.
    """
| {
"repo_name": "theyoprst/pip-accel",
"path": "pip_accel/exceptions.py",
"copies": "1",
"size": "4145",
"license": "mit",
"hash": 7120511161194587000,
"line_mean": 36.6818181818,
"line_max": 230,
"alpha_frac": 0.738721351,
"autogenerated": false,
"ratio": 4.414270500532481,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00283057174194513,
"num_lines": 110
} |
"""
:py:mod:`pip_accel.cli` - Command line interface for the ``pip-accel`` program
==============================================================================
"""
# Standard library modules.
import logging
import os
import sys
import textwrap
# Modules included in our package.
from pip_accel import PipAccelerator
from pip_accel.config import Config
from pip_accel.exceptions import NothingToDoError
from pip_accel.utils import match_option
# External dependencies.
import coloredlogs
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
def main():
    """The command line interface for the ``pip-accel`` program."""
    arguments = sys.argv[1:]
    # Without any command line arguments we simply show the usage message.
    if not arguments:
        usage()
        sys.exit(0)
    if 'install' not in arguments:
        # Any command line that doesn't involve the `install' subcommand is
        # handed to pip unchanged; os.execvp() replaces the current process
        # and never returns.
        os.execvp('pip', ['pip'] + arguments)
    # Strip the `install' subcommand; pip-accel takes over from here.
    arguments = [token for token in arguments if token != 'install']
    # Initialize logging output.
    coloredlogs.install()
    # Adjust verbosity based on -v, -q, --verbose, --quiet options.
    for token in list(arguments):
        if match_option(token, '-v', '--verbose'):
            coloredlogs.increase_verbosity()
        elif match_option(token, '-q', '--quiet'):
            coloredlogs.decrease_verbosity()
    # Perform the requested action(s).
    try:
        accelerator = PipAccelerator(Config())
        accelerator.install_from_arguments(arguments)
    except NothingToDoError as e:
        # Don't print a traceback for this (it's not very user friendly) and
        # exit with status zero to stay compatible with pip. For more details
        # please refer to https://github.com/paylogic/pip-accel/issues/47.
        logger.warning("%s", e)
        sys.exit(0)
    except Exception:
        logger.exception("Caught unhandled exception!")
        sys.exit(1)
def usage():
    """Print a usage message to the terminal."""
    message = """
        Usage: pip-accel [PIP_ARGS]

        The pip-accel program is a wrapper for pip, the Python package manager. It
        accelerates the usage of pip to initialize Python virtual environments given
        one or more requirements files. The pip-accel command supports all subcommands
        and options supported by pip, however the only added value is in the "pip
        install" subcommand.

        For more information please refer to the GitHub project page
        at https://github.com/paylogic/pip-accel
    """
    print(textwrap.dedent(message).strip())
| {
"repo_name": "theyoprst/pip-accel",
"path": "pip_accel/cli.py",
"copies": "1",
"size": "2876",
"license": "mit",
"hash": 2904229933859374600,
"line_mean": 35.4050632911,
"line_max": 86,
"alpha_frac": 0.6599443672,
"autogenerated": false,
"ratio": 4.132183908045977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00262985675613456,
"num_lines": 79
} |
"""
Configuration handling for `pip-accel`.
This module defines the :class:`Config` class which is used throughout the
pip accelerator. At runtime an instance of :class:`Config` is created and
passed down like this:
.. digraph:: config_dependency_injection
node [fontsize=10, shape=rect]
PipAccelerator -> BinaryDistributionManager
BinaryDistributionManager -> CacheManager
CacheManager -> LocalCacheBackend
CacheManager -> S3CacheBackend
BinaryDistributionManager -> SystemPackageManager
The :class:`.PipAccelerator` class receives its configuration object from
its caller. Usually this will be :func:`.main()` but when pip-accel is used
as a Python API the person embedding or extending pip-accel is responsible for
providing the configuration object. This is intended as a form of `dependency
injection`_ that enables non-default configurations to be injected into
pip-accel.
Support for runtime configuration
---------------------------------
The properties of the :class:`Config` class can be set at runtime using
regular attribute assignment syntax. This overrides the default values of the
properties (whether based on environment variables, configuration files or hard
coded defaults).
Support for configuration files
-------------------------------
You can use a configuration file to permanently configure certain options of
pip-accel. If ``/etc/pip-accel.conf`` and/or ``~/.pip-accel/pip-accel.conf``
exist they are automatically loaded. You can also set the environment variable
``$PIP_ACCEL_CONFIG`` to load a configuration file in a non-default location.
If all three files exist the system wide file is loaded first, then the user
specific file is loaded and then the file set by the environment variable is
loaded (duplicate settings are overridden by the configuration file that's
loaded last).
Here is an example of the available options:
.. code-block:: ini
[pip-accel]
auto-install = yes
max-retries = 3
data-directory = ~/.pip-accel
s3-bucket = my-shared-pip-accel-binary-cache
s3-prefix = ubuntu-trusty-amd64
s3-readonly = yes
Note that the configuration options shown above are just examples, they are not
meant to represent the configuration defaults.
----
.. _dependency injection: http://en.wikipedia.org/wiki/Dependency_injection
"""
# Standard library modules.
import logging
import os
import os.path
import sys
# Modules included in our package.
from pip_accel.compat import configparser
from pip_accel.utils import is_root, expand_path
# External dependencies.
from coloredlogs import DEFAULT_LOG_FORMAT
from cached_property import cached_property
from humanfriendly import coerce_boolean, parse_path
# Initialize a logger for this module.
logger = logging.getLogger(__name__)

# The locations of the user specific and system wide configuration files.
# Both are loaded when they exist; settings from files loaded later override
# settings from files loaded earlier (see
# Config.available_configuration_files for the exact load order).
LOCAL_CONFIG = '~/.pip-accel/pip-accel.conf'
GLOBAL_CONFIG = '/etc/pip-accel.conf'
class Config(object):

    """Configuration of the pip accelerator."""

    def __init__(self, load_configuration_files=True, load_environment_variables=True):
        """
        Initialize the configuration of the pip accelerator.

        :param load_configuration_files: If this is :data:`True` (the default) then
                                         configuration files in known locations
                                         are automatically loaded.
        :param load_environment_variables: If this is :data:`True` (the default) then
                                           environment variables are used to
                                           initialize the configuration.
        """
        self.overrides = {}
        self.configuration = {}
        self.environment = os.environ if load_environment_variables else {}
        if load_configuration_files:
            for filename in self.available_configuration_files:
                self.load_configuration_file(filename)

    @cached_property
    def available_configuration_files(self):
        """A list of strings with the absolute pathnames of the available configuration files."""
        # The system wide file is listed first so that settings in the user
        # specific file and in the file named by $PIP_ACCEL_CONFIG (loaded
        # later) override it.
        known_files = [GLOBAL_CONFIG, LOCAL_CONFIG, self.environment.get('PIP_ACCEL_CONFIG')]
        absolute_paths = [parse_path(pathname) for pathname in known_files if pathname]
        return [pathname for pathname in absolute_paths if os.path.isfile(pathname)]

    def load_configuration_file(self, configuration_file):
        """
        Load configuration defaults from a configuration file.

        :param configuration_file: The pathname of a configuration file (a
                                   string).
        :raises: :exc:`Exception` when the configuration file cannot be
                 loaded or doesn't contain a ``pip-accel`` section.
        """
        configuration_file = parse_path(configuration_file)
        logger.debug("Loading configuration file: %s", configuration_file)
        parser = configparser.RawConfigParser()
        files_loaded = parser.read(configuration_file)
        if len(files_loaded) != 1:
            msg = "Failed to load configuration file! (%s)"
            raise Exception(msg % configuration_file)
        elif not parser.has_section('pip-accel'):
            msg = "Missing 'pip-accel' section in configuration file! (%s)"
            raise Exception(msg % configuration_file)
        else:
            self.configuration.update(parser.items('pip-accel'))

    def __setattr__(self, name, value):
        """
        Override the value of a property at runtime.

        :param name: The name of the property to override (a string).
        :param value: The overridden value of the property.
        """
        # Look up the attribute on the class so that we get the (cached)
        # property descriptor itself. The previous implementation used
        # getattr() on the instance, which invokes the property and returns
        # its computed value, so the isinstance() check below could never
        # match and `self.overrides' (consulted first by get()) was never
        # populated.
        attribute = getattr(type(self), name, None)
        if isinstance(attribute, (property, cached_property)):
            self.overrides[name] = value
            # Discard any value previously cached by `cached_property' so the
            # next read goes through get() and picks up the override.
            self.__dict__.pop(name, None)
        else:
            self.__dict__[name] = value

    def get(self, property_name=None, environment_variable=None, configuration_option=None, default=None):
        """
        Internal shortcut to get a configuration option's value.

        :param property_name: The name of the property that users can set on
                              the :class:`Config` class (a string).
        :param environment_variable: The name of the environment variable (a
                                     string).
        :param configuration_option: The name of the option in the
                                     configuration file (a string).
        :param default: The default value.
        :returns: The value of the runtime override, environment variable or
                  configuration file option or the default value (in that
                  order of precedence).
        """
        if self.overrides.get(property_name) is not None:
            return self.overrides[property_name]
        elif environment_variable and self.environment.get(environment_variable):
            return self.environment[environment_variable]
        elif self.configuration.get(configuration_option) is not None:
            return self.configuration[configuration_option]
        else:
            return default

    @staticmethod
    def _coerce_limit(value, default):
        """
        Coerce a configuration value to a nonnegative integer.

        :param value: The raw configuration value (possibly :data:`None` or a
                      string that doesn't contain a valid integer).
        :param default: The nonnegative integer to fall back to.
        :returns: A nonnegative integer.

        The previous inline implementations used a bare ``except:`` clause and
        silently returned :data:`None` for negative numbers (the ``if n >= 0``
        branch simply fell through); both problems are fixed here.
        """
        try:
            number = int(value)
        except (TypeError, ValueError):
            return default
        return number if number >= 0 else default

    @cached_property
    def cache_format_revision(self):
        """
        The revision of the binary distribution cache format in use (an integer).

        This number is encoded in the directory name of the binary cache so
        that multiple revisions can peacefully coexist. When pip-accel breaks
        backwards compatibility this number is bumped so that pip-accel starts
        using a new directory.
        """
        return 7

    @cached_property
    def source_index(self):
        """
        The absolute pathname of pip-accel's source index directory (a string).

        This is the ``sources`` subdirectory of :data:`data_directory`.
        """
        return self.get(property_name='source_index',
                        default=os.path.join(self.data_directory, 'sources'))

    @cached_property
    def binary_cache(self):
        """
        The absolute pathname of pip-accel's binary cache directory (a string).

        This is the ``binaries`` subdirectory of :data:`data_directory`.
        """
        return self.get(property_name='binary_cache',
                        default=os.path.join(self.data_directory, 'binaries'))

    @cached_property
    def eggs_cache(self):
        """
        The absolute pathname of pip-accel's eggs cache directory (a string).

        This is the ``eggs`` subdirectory of :data:`data_directory`. It is used
        to cache setup requirements which avoids continuous rebuilding of setup
        requirements.
        """
        return self.get(property_name='eggs_cache',
                        default=os.path.join(self.data_directory, 'eggs'))

    @cached_property
    def data_directory(self):
        """
        The absolute pathname of the directory where pip-accel's data files are stored (a string).

        - Environment variable: ``$PIP_ACCEL_CACHE``
        - Configuration option: ``data-directory``
        - Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise
        """
        return expand_path(self.get(property_name='data_directory',
                                    environment_variable='PIP_ACCEL_CACHE',
                                    configuration_option='data-directory',
                                    default='/var/cache/pip-accel' if is_root() else '~/.pip-accel'))

    @cached_property
    def on_debian(self):
        """:data:`True` if running on a Debian derived system, :data:`False` otherwise."""
        return self.get(property_name='on_debian',
                        default=os.path.exists('/etc/debian_version'))

    @cached_property
    def install_prefix(self):
        """
        The absolute pathname of the installation prefix to use (a string).

        This property is based on :data:`sys.prefix` except that when
        :data:`sys.prefix` is ``/usr`` and we're running on a Debian derived
        system ``/usr/local`` is used instead.

        The reason for this is that on Debian derived systems only apt (dpkg)
        should be allowed to touch files in ``/usr/lib/pythonX.Y/dist-packages``
        and ``python setup.py install`` knows this (see the ``posix_local``
        installation scheme in ``/usr/lib/pythonX.Y/sysconfig.py`` on Debian
        derived systems). Because pip-accel replaces ``python setup.py
        install`` it has to replicate this logic. Inferring all of this from
        the :mod:`sysconfig` module would be nice but that module wasn't
        available in Python 2.6.
        """
        return self.get(property_name='install_prefix',
                        default='/usr/local' if sys.prefix == '/usr' and self.on_debian else sys.prefix)

    @cached_property
    def python_executable(self):
        """The absolute pathname of the Python executable (a string)."""
        return self.get(property_name='python_executable',
                        default=sys.executable or os.path.join(self.install_prefix, 'bin', 'python'))

    @cached_property
    def auto_install(self):
        """
        Whether automatic installation of missing system packages is enabled.

        :data:`True` if automatic installation of missing system packages is
        enabled, :data:`False` if it is disabled, :data:`None` otherwise (in this case
        the user will be prompted at the appropriate time).

        - Environment variable: ``$PIP_ACCEL_AUTO_INSTALL`` (refer to
          :func:`~humanfriendly.coerce_boolean()` for details on how the
          value of the environment variable is interpreted)
        - Configuration option: ``auto-install`` (also parsed using
          :func:`~humanfriendly.coerce_boolean()`)
        - Default: :data:`None`
        """
        value = self.get(property_name='auto_install',
                         environment_variable='PIP_ACCEL_AUTO_INSTALL',
                         configuration_option='auto-install')
        if value is not None:
            return coerce_boolean(value)

    @cached_property
    def log_format(self):
        """
        The format of log messages written to the terminal.

        - Environment variable: ``$PIP_ACCEL_LOG_FORMAT``
        - Configuration option: ``log-format``
        - Default: :data:`~coloredlogs.DEFAULT_LOG_FORMAT`
        """
        return self.get(property_name='log_format',
                        environment_variable='PIP_ACCEL_LOG_FORMAT',
                        configuration_option='log-format',
                        default=DEFAULT_LOG_FORMAT)

    @cached_property
    def log_verbosity(self):
        """
        The verbosity of log messages written to the terminal.

        - Environment variable: ``$PIP_ACCEL_LOG_VERBOSITY``
        - Configuration option: ``log-verbosity``
        - Default: 'INFO' (a string).
        """
        return self.get(property_name='log_verbosity',
                        environment_variable='PIP_ACCEL_LOG_VERBOSITY',
                        configuration_option='log-verbosity',
                        default='INFO')

    @cached_property
    def max_retries(self):
        """
        The number of times to retry ``pip install --download`` if it fails.

        - Environment variable: ``$PIP_ACCEL_MAX_RETRIES``
        - Configuration option: ``max-retries``
        - Default: ``3``
        """
        return self._coerce_limit(
            self.get(property_name='max_retries',
                     environment_variable='PIP_ACCEL_MAX_RETRIES',
                     configuration_option='max-retries'),
            default=3)

    @cached_property
    def trust_mod_times(self):
        """
        Whether to trust file modification times for cache invalidation.

        - Environment variable: ``$PIP_ACCEL_TRUST_MOD_TIMES``
        - Configuration option: ``trust-mod-times``
        - Default: :data:`True` unless the AppVeyor_ continuous integration
          environment is detected (see `issue 62`_).

        .. _AppVeyor: http://www.appveyor.com
        .. _issue 62: https://github.com/paylogic/pip-accel/issues/62
        """
        on_appveyor = coerce_boolean(os.environ.get('APPVEYOR', 'False'))
        return coerce_boolean(self.get(property_name='trust_mod_times',
                                       environment_variable='PIP_ACCEL_TRUST_MOD_TIMES',
                                       configuration_option='trust-mod-times',
                                       default=(not on_appveyor)))

    @cached_property
    def s3_cache_url(self):
        """
        The URL of the Amazon S3 API endpoint to use.

        By default this points to the official Amazon S3 API endpoint. You can
        change this option if you're running a local Amazon S3 compatible
        storage service that you want pip-accel to use.

        - Environment variable: ``$PIP_ACCEL_S3_URL``
        - Configuration option: ``s3-url``
        - Default: ``https://s3.amazonaws.com``

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return self.get(property_name='s3_cache_url',
                        environment_variable='PIP_ACCEL_S3_URL',
                        configuration_option='s3-url',
                        default='https://s3.amazonaws.com')

    @cached_property
    def s3_cache_bucket(self):
        """
        Name of Amazon S3 bucket where binary distributions are cached (a string or :data:`None`).

        - Environment variable: ``$PIP_ACCEL_S3_BUCKET``
        - Configuration option: ``s3-bucket``
        - Default: :data:`None`

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return self.get(property_name='s3_cache_bucket',
                        environment_variable='PIP_ACCEL_S3_BUCKET',
                        configuration_option='s3-bucket')

    @cached_property
    def s3_cache_create_bucket(self):
        """
        Whether to automatically create the Amazon S3 bucket when it's missing.

        - Environment variable: ``$PIP_ACCEL_S3_CREATE_BUCKET``
        - Configuration option: ``s3-create-bucket``
        - Default: :data:`False`

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return coerce_boolean(self.get(property_name='s3_cache_create_bucket',
                                       environment_variable='PIP_ACCEL_S3_CREATE_BUCKET',
                                       configuration_option='s3-create-bucket',
                                       default=False))

    @cached_property
    def s3_cache_prefix(self):
        """
        Cache prefix for binary distribution archives in Amazon S3 bucket (a string or :data:`None`).

        - Environment variable: ``$PIP_ACCEL_S3_PREFIX``
        - Configuration option: ``s3-prefix``
        - Default: :data:`None`

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return self.get(property_name='s3_cache_prefix',
                        environment_variable='PIP_ACCEL_S3_PREFIX',
                        configuration_option='s3-prefix')

    @cached_property
    def s3_cache_readonly(self):
        """
        Whether the Amazon S3 bucket is considered read only.

        If this is :data:`True` then the Amazon S3 bucket will only be used for
        :class:`~pip_accel.caches.s3.S3CacheBackend.get()` operations (all
        :class:`~pip_accel.caches.s3.S3CacheBackend.put()` operations will
        be disabled).

        - Environment variable: ``$PIP_ACCEL_S3_READONLY`` (refer to
          :func:`~humanfriendly.coerce_boolean()` for details on how the
          value of the environment variable is interpreted)
        - Configuration option: ``s3-readonly`` (also parsed using
          :func:`~humanfriendly.coerce_boolean()`)
        - Default: :data:`False`

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return coerce_boolean(self.get(property_name='s3_cache_readonly',
                                       environment_variable='PIP_ACCEL_S3_READONLY',
                                       configuration_option='s3-readonly',
                                       default=False))

    @cached_property
    def s3_cache_timeout(self):
        """
        The socket timeout in seconds for connections to Amazon S3 (an integer).

        This value is injected into Boto's configuration to override the
        default socket timeout used for connections to Amazon S3.

        - Environment variable: ``$PIP_ACCEL_S3_TIMEOUT``
        - Configuration option: ``s3-timeout``
        - Default: ``60`` (`Boto's default`_)

        .. _Boto's default: http://boto.readthedocs.org/en/latest/boto_config_tut.html
        """
        return self._coerce_limit(
            self.get(property_name='s3_cache_timeout',
                     environment_variable='PIP_ACCEL_S3_TIMEOUT',
                     configuration_option='s3-timeout'),
            default=60)

    @cached_property
    def s3_cache_retries(self):
        """
        The number of times to retry failed requests to Amazon S3 (an integer).

        This value is injected into Boto's configuration to override the
        default number of times to retry failed requests to Amazon S3.

        - Environment variable: ``$PIP_ACCEL_S3_RETRIES``
        - Configuration option: ``s3-retries``
        - Default: ``5`` (`Boto's default`_)

        .. _Boto's default: http://boto.readthedocs.org/en/latest/boto_config_tut.html
        """
        return self._coerce_limit(
            self.get(property_name='s3_cache_retries',
                     environment_variable='PIP_ACCEL_S3_RETRIES',
                     configuration_option='s3-retries'),
            default=5)
| {
"repo_name": "paylogic/pip-accel",
"path": "pip_accel/config.py",
"copies": "2",
"size": "20109",
"license": "mit",
"hash": 4932323489949895000,
"line_mean": 39.7064777328,
"line_max": 106,
"alpha_frac": 0.6129593714,
"autogenerated": false,
"ratio": 4.421503957783641,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6034463329183641,
"avg_score": null,
"num_lines": null
} |
"""Operating system detection and Python version compatibility."""
# Standard library modules.
import sys
# Inform static code analysis tools about our intention to expose the
# following variables. This avoids 'imported but unused' warnings.
__all__ = (
'WINDOWS',
'StringIO',
'configparser',
'pathname2url',
'urljoin',
'urlparse',
)
WINDOWS = sys.platform.startswith('win')
""":data:`True` if running on Windows, :data:`False` otherwise."""
# Compatibility between Python 2 and 3.
try:
# Python 2.
basestring = basestring
import ConfigParser as configparser
from StringIO import StringIO
from urllib import pathname2url
from urlparse import urljoin, urlparse
PY3 = False
except (ImportError, NameError):
# Python 3.
basestring = str
import configparser
from io import StringIO
from urllib.parse import urljoin, urlparse
from urllib.request import pathname2url
PY3 = True
| {
"repo_name": "paylogic/pip-accel",
"path": "pip_accel/compat.py",
"copies": "2",
"size": "1136",
"license": "mit",
"hash": 183284327751627330,
"line_mean": 26.0476190476,
"line_max": 69,
"alpha_frac": 0.7112676056,
"autogenerated": false,
"ratio": 4.115942028985507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 42
} |
"""Sphinx documentation configuration for the `pip-accel` project."""
import os
import sys
# Add the pip_accel source distribution's root directory to the module path.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# Sphinx extension module names.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'humanfriendly.sphinx',
]
# Paths that contain templates, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pip-accel'
copyright = u'2016, Peter Odding and Paylogic International'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Find the package version and make it the release.
from pip_accel import __version__ as pip_accel_version # NOQA
# The short X.Y version.
version = '.'.join(pip_accel_version.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = pip_accel_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# http://sphinx-doc.org/ext/autodoc.html#confval-autodoc_member_order
autodoc_member_order = 'bysource'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Refer to the Python standard library.
# From: http://twistedmatrix.com/trac/ticket/4582.
intersphinx_mapping = dict(
boto=('http://boto.readthedocs.org/en/latest/', None),
coloredlogs=('http://coloredlogs.readthedocs.org/en/latest/', None),
humanfriendly=('http://humanfriendly.readthedocs.org/en/latest/', None),
python=('http://docs.python.org', None),
)
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pip-acceldoc'
| {
"repo_name": "paylogic/pip-accel",
"path": "docs/conf.py",
"copies": "2",
"size": "2702",
"license": "mit",
"hash": 6101289734364999000,
"line_mean": 30.7882352941,
"line_max": 80,
"alpha_frac": 0.7042931162,
"autogenerated": false,
"ratio": 3.7115384615384617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5415831577738461,
"avg_score": null,
"num_lines": null
} |
"""
:py:mod:`pip_accel.caches.local` - Local cache backend
======================================================
This module implements the local cache backend which stores distribution
archives on the local file system. This is a very simple cache backend, all it
does is create directories and write local files. The only trick here is that
new binary distribution archives are written to temporary files which are
then moved into place atomically using :py:func:`os.rename()` to avoid partial
reads caused by running multiple invocations of pip-accel at the same time
(which happened in `issue 25`_).
.. _issue 25: https://github.com/paylogic/pip-accel/issues/25
"""
# Standard library modules.
import logging
import os
import shutil
# Modules included in our package.
from pip_accel.caches import AbstractCacheBackend
from pip_accel.utils import makedirs
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class LocalCacheBackend(AbstractCacheBackend):

    """The local cache backend stores Python distribution archives on the local file system."""

    PRIORITY = 10

    def get(self, filename):
        """
        Check if a distribution archive exists in the local cache.

        :param filename: The filename of the distribution archive (a string).
        :returns: The pathname of a distribution archive on the local file
                  system or ``None``.
        """
        pathname = os.path.join(self.config.binary_cache, filename)
        if not os.path.isfile(pathname):
            logger.debug("Distribution archive doesn't exist in local cache (%s).", pathname)
            return None
        logger.debug("Distribution archive exists in local cache (%s).", pathname)
        return pathname

    def put(self, filename, handle):
        """
        Store a distribution archive in the local cache.

        :param filename: The filename of the distribution archive (a string).
        :param handle: A file-like object that provides access to the
                       distribution archive.
        """
        destination = os.path.join(self.config.binary_cache, filename)
        logger.debug("Storing distribution archive in local cache: %s", destination)
        makedirs(os.path.dirname(destination))
        # Write to a process-specific temporary file first so that
        # concurrent pip-accel runs never observe a partially written
        # archive in the cache.
        scratch_file = '%s.tmp-%i' % (destination, os.getpid())
        logger.debug("Using temporary file to avoid partial reads: %s", scratch_file)
        with open(scratch_file, 'wb') as scratch_handle:
            shutil.copyfileobj(handle, scratch_handle)
        # os.rename() replaces the destination atomically (on POSIX), so
        # readers see either the old state or the complete new file.
        logger.debug("Moving temporary file into place ..")
        os.rename(scratch_file, destination)
        logger.debug("Finished caching distribution archive in local cache.")
| {
"repo_name": "theyoprst/pip-accel",
"path": "pip_accel/caches/local.py",
"copies": "1",
"size": "3310",
"license": "mit",
"hash": 3173175662906059000,
"line_mean": 41.4358974359,
"line_max": 95,
"alpha_frac": 0.6821752266,
"autogenerated": false,
"ratio": 4.401595744680851,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009831243896724455,
"num_lines": 78
} |
"""Command line interface for the ``pip-accel`` program."""
# Standard library modules.
import logging
import os
import sys
import textwrap
# Modules included in our package.
from pip_accel import PipAccelerator
from pip_accel.config import Config
from pip_accel.exceptions import NothingToDoError
from pip_accel.utils import match_option
# External dependencies.
import coloredlogs
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
def main():
    """The command line interface for the ``pip-accel`` program."""
    arguments = sys.argv[1:]
    # Without any arguments we simply show the help text and are done.
    if not arguments:
        usage()
        sys.exit(0)
    if 'install' not in arguments:
        # Anything that isn't an `install' invocation is handed to pip
        # unchanged; os.execvp() replaces the current process and only
        # returns by raising an exception (e.g. when pip isn't found).
        os.execvp('pip', ['pip'] + arguments)
    # Strip the `install' subcommand; pip-accel takes over from here.
    arguments = [arg for arg in arguments if arg != 'install']
    # Initialize logging output.
    coloredlogs.install()
    # Adjust verbosity based on -v, -q, --verbose, --quiet options.
    for option in list(arguments):
        if match_option(option, '-v', '--verbose'):
            coloredlogs.increase_verbosity()
        elif match_option(option, '-q', '--quiet'):
            coloredlogs.decrease_verbosity()
    # Perform the requested action(s).
    try:
        accelerator = PipAccelerator(Config())
        accelerator.install_from_arguments(arguments)
    except NothingToDoError as error:
        # Don't print a traceback for this (it's not very user friendly) and
        # exit with status zero to stay compatible with pip. For more details
        # please refer to https://github.com/paylogic/pip-accel/issues/47.
        logger.warning("%s", error)
        sys.exit(0)
    except Exception:
        logger.exception("Caught unhandled exception!")
        sys.exit(1)
def usage():
    """Print a usage message to the terminal."""
    message = textwrap.dedent("""
        Usage: pip-accel [PIP_ARGS]
        The pip-accel program is a wrapper for pip, the Python package manager. It
        accelerates the usage of pip to initialize Python virtual environments given
        one or more requirements files. The pip-accel command supports all subcommands
        and options supported by pip, however the only added value is in the "pip
        install" subcommand.
        For more information please refer to the GitHub project page
        at https://github.com/paylogic/pip-accel
    """)
    print(message.strip())
| {
"repo_name": "matysek/pip-accel",
"path": "pip_accel/cli.py",
"copies": "1",
"size": "2775",
"license": "mit",
"hash": 2146154739715034600,
"line_mean": 34.5769230769,
"line_max": 86,
"alpha_frac": 0.6792792793,
"autogenerated": false,
"ratio": 4.092920353982301,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006090035061310257,
"num_lines": 78
} |
"""
Configuration handling for `pip-accel`.
This module defines the :class:`Config` class which is used throughout the
pip accelerator. At runtime an instance of :class:`Config` is created and
passed down like this:
.. digraph:: config_dependency_injection
node [fontsize=10, shape=rect]
PipAccelerator -> BinaryDistributionManager
BinaryDistributionManager -> CacheManager
CacheManager -> LocalCacheBackend
CacheManager -> S3CacheBackend
BinaryDistributionManager -> SystemPackageManager
The :class:`.PipAccelerator` class receives its configuration object from
its caller. Usually this will be :func:`.main()` but when pip-accel is used
as a Python API the person embedding or extending pip-accel is responsible for
providing the configuration object. This is intended as a form of `dependency
injection`_ that enables non-default configurations to be injected into
pip-accel.
Support for runtime configuration
---------------------------------
The properties of the :class:`Config` class can be set at runtime using
regular attribute assignment syntax. This overrides the default values of the
properties (whether based on environment variables, configuration files or hard
coded defaults).
Support for configuration files
-------------------------------
You can use a configuration file to permanently configure certain options of
pip-accel. If ``/etc/pip-accel.conf`` and/or ``~/.pip-accel/pip-accel.conf``
exist they are automatically loaded. You can also set the environment variable
``$PIP_ACCEL_CONFIG`` to load a configuration file in a non-default location.
If all three files exist the system wide file is loaded first, then the user
specific file is loaded and then the file set by the environment variable is
loaded (duplicate settings are overridden by the configuration file that's
loaded last).
Here is an example of the available options:
.. code-block:: ini
[pip-accel]
auto-install = yes
max-retries = 3
data-directory = ~/.pip-accel
s3-bucket = my-shared-pip-accel-binary-cache
s3-prefix = ubuntu-trusty-amd64
s3-readonly = yes
Note that the configuration options shown above are just examples, they are not
meant to represent the configuration defaults.
----
.. _dependency injection: http://en.wikipedia.org/wiki/Dependency_injection
"""
# Standard library modules.
import logging
import os
import os.path
import sys
# Modules included in our package.
from pip_accel.compat import configparser
from pip_accel.utils import is_root, expand_path
# External dependencies.
from cached_property import cached_property
from humanfriendly import coerce_boolean, parse_path
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
# The locations of the user specific and system wide configuration files.
# Both are expanded with parse_path() before use (see
# Config.available_configuration_files), so `~' works in LOCAL_CONFIG.
LOCAL_CONFIG = '~/.pip-accel/pip-accel.conf'
GLOBAL_CONFIG = '/etc/pip-accel.conf'
class Config(object):

    """Configuration of the pip accelerator."""

    def __init__(self, load_configuration_files=True, load_environment_variables=True):
        """
        Initialize the configuration of the pip accelerator.

        :param load_configuration_files: If this is :data:`True` (the default) then
                                         configuration files in known locations
                                         are automatically loaded.
        :param load_environment_variables: If this is :data:`True` (the default) then
                                           environment variables are used to
                                           initialize the configuration.
        """
        # Values set through attribute assignment (highest precedence).
        self.overrides = {}
        # Values loaded from configuration files (lowest precedence).
        self.configuration = {}
        self.environment = os.environ if load_environment_variables else {}
        if load_configuration_files:
            for filename in self.available_configuration_files:
                self.load_configuration_file(filename)

    @cached_property
    def available_configuration_files(self):
        """A list of strings with the absolute pathnames of the available configuration files."""
        # Ordered from lowest to highest precedence: settings in files
        # loaded later override settings loaded earlier.
        known_files = [GLOBAL_CONFIG, LOCAL_CONFIG, self.environment.get('PIP_ACCEL_CONFIG')]
        absolute_paths = [parse_path(pathname) for pathname in known_files if pathname]
        return [pathname for pathname in absolute_paths if os.path.isfile(pathname)]

    def load_configuration_file(self, configuration_file):
        """
        Load configuration defaults from a configuration file.

        :param configuration_file: The pathname of a configuration file (a
                                   string).
        :raises: :exc:`Exception` when the configuration file cannot be
                 loaded.
        """
        configuration_file = parse_path(configuration_file)
        logger.debug("Loading configuration file: %s", configuration_file)
        parser = configparser.RawConfigParser()
        # RawConfigParser.read() returns the list of files it managed to
        # parse, which is how we detect unreadable/invalid files.
        files_loaded = parser.read(configuration_file)
        if len(files_loaded) != 1:
            msg = "Failed to load configuration file! (%s)"
            raise Exception(msg % configuration_file)
        elif not parser.has_section('pip-accel'):
            msg = "Missing 'pip-accel' section in configuration file! (%s)"
            raise Exception(msg % configuration_file)
        else:
            self.configuration.update(parser.items('pip-accel'))

    def __setattr__(self, name, value):
        """
        Override the value of a property at runtime.

        :param name: The name of the property to override (a string).
        :param value: The overridden value of the property.
        """
        # NOTE(review): getattr() on the *instance* returns a (cached)
        # property's computed value rather than the descriptor object, so
        # the isinstance() check below rarely matches and most overrides
        # land in the instance dictionary. Because cached_property is a
        # non-data descriptor the instance dictionary shadows it, so the
        # observable effect is the same -- confirm intent before changing.
        attribute = getattr(self, name, None)
        if isinstance(attribute, (property, cached_property)):
            self.overrides[name] = value
        else:
            self.__dict__[name] = value

    def get(self, property_name=None, environment_variable=None, configuration_option=None, default=None):
        """
        Internal shortcut to get a configuration option's value.

        :param property_name: The name of the property that users can set on
                              the :class:`Config` class (a string).
        :param environment_variable: The name of the environment variable (a
                                     string).
        :param configuration_option: The name of the option in the
                                     configuration file (a string).
        :param default: The default value.
        :returns: The value of the environment variable or configuration file
                  option or the default value.
        """
        # Precedence: runtime override > environment > config file > default.
        if self.overrides.get(property_name) is not None:
            return self.overrides[property_name]
        elif environment_variable and self.environment.get(environment_variable):
            return self.environment[environment_variable]
        elif self.configuration.get(configuration_option) is not None:
            return self.configuration[configuration_option]
        else:
            return default

    @staticmethod
    def _coerce_nonnegative_int(value, default):
        """
        Coerce a raw configuration value to a nonnegative integer.

        :param value: The raw configuration value (a string, a number or
                      :data:`None` when the option isn't configured).
        :param default: The integer to fall back to when `value` is
                        missing, malformed or negative.
        :returns: A nonnegative integer.
        """
        try:
            # int() raises TypeError for None and ValueError for strings
            # that aren't valid integer literals.
            number = int(value)
        except (TypeError, ValueError):
            return default
        # Negative values are nonsensical for timeouts/retry counts; fall
        # back to the default instead of silently returning None (the
        # previous implementation's bug: nothing followed the `try').
        return number if number >= 0 else default

    @cached_property
    def cache_format_revision(self):
        """
        The revision of the binary distribution cache format in use (an integer).

        This number is encoded in the directory name of the binary cache so
        that multiple revisions can peacefully coexist. When pip-accel breaks
        backwards compatibility this number is bumped so that pip-accel starts
        using a new directory.
        """
        return 7

    @cached_property
    def source_index(self):
        """
        The absolute pathname of pip-accel's source index directory (a string).

        This is the ``sources`` subdirectory of :data:`data_directory`.
        """
        return self.get(property_name='source_index',
                        default=os.path.join(self.data_directory, 'sources'))

    @cached_property
    def binary_cache(self):
        """
        The absolute pathname of pip-accel's binary cache directory (a string).

        This is the ``binaries`` subdirectory of :data:`data_directory`.
        """
        return self.get(property_name='binary_cache',
                        default=os.path.join(self.data_directory, 'binaries'))

    @cached_property
    def data_directory(self):
        """
        The absolute pathname of the directory where pip-accel's data files are stored (a string).

        - Environment variable: ``$PIP_ACCEL_CACHE``
        - Configuration option: ``data-directory``
        - Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise
        """
        return expand_path(self.get(property_name='data_directory',
                                    environment_variable='PIP_ACCEL_CACHE',
                                    configuration_option='data-directory',
                                    default='/var/cache/pip-accel' if is_root() else '~/.pip-accel'))

    @cached_property
    def on_debian(self):
        """:data:`True` if running on a Debian derived system, :data:`False` otherwise."""
        return self.get(property_name='on_debian',
                        default=os.path.exists('/etc/debian_version'))

    @cached_property
    def install_prefix(self):
        """
        The absolute pathname of the installation prefix to use (a string).

        This property is based on :data:`sys.prefix` except that when
        :data:`sys.prefix` is ``/usr`` and we're running on a Debian derived
        system ``/usr/local`` is used instead.

        The reason for this is that on Debian derived systems only apt (dpkg)
        should be allowed to touch files in ``/usr/lib/pythonX.Y/dist-packages``
        and ``python setup.py install`` knows this (see the ``posix_local``
        installation scheme in ``/usr/lib/pythonX.Y/sysconfig.py`` on Debian
        derived systems). Because pip-accel replaces ``python setup.py
        install`` it has to replicate this logic. Inferring all of this from
        the :mod:`sysconfig` module would be nice but that module wasn't
        available in Python 2.6.
        """
        return self.get(property_name='install_prefix',
                        default='/usr/local' if sys.prefix == '/usr' and self.on_debian else sys.prefix)

    @cached_property
    def python_executable(self):
        """The absolute pathname of the Python executable (a string)."""
        # sys.executable can be empty/None in embedded interpreters, hence
        # the fallback based on the installation prefix.
        return self.get(property_name='python_executable',
                        default=sys.executable or os.path.join(self.install_prefix, 'bin', 'python'))

    @cached_property
    def auto_install(self):
        """
        Whether automatic installation of missing system packages is enabled.

        :data:`True` if automatic installation of missing system packages is
        enabled, :data:`False` if it is disabled, :data:`None` otherwise (in this case
        the user will be prompted at the appropriate time).

        - Environment variable: ``$PIP_ACCEL_AUTO_INSTALL`` (refer to
          :func:`~humanfriendly.coerce_boolean()` for details on how the
          value of the environment variable is interpreted)
        - Configuration option: ``auto-install`` (also parsed using
          :func:`~humanfriendly.coerce_boolean()`)
        - Default: :data:`None`
        """
        value = self.get(property_name='auto_install',
                         environment_variable='PIP_ACCEL_AUTO_INSTALL',
                         configuration_option='auto-install')
        # The implicit None return (option not configured) is part of the
        # contract: it means "prompt the user".
        if value is not None:
            return coerce_boolean(value)

    @cached_property
    def max_retries(self):
        """
        The number of times to retry ``pip install --download`` if it fails.

        - Environment variable: ``$PIP_ACCEL_MAX_RETRIES``
        - Configuration option: ``max-retries``
        - Default: ``3``
        """
        value = self.get(property_name='max_retries',
                         environment_variable='PIP_ACCEL_MAX_RETRIES',
                         configuration_option='max-retries')
        return self._coerce_nonnegative_int(value, 3)

    @cached_property
    def s3_cache_url(self):
        """
        The URL of the Amazon S3 API endpoint to use.

        By default this points to the official Amazon S3 API endpoint. You can
        change this option if you're running a local Amazon S3 compatible
        storage service that you want pip-accel to use.

        - Environment variable: ``$PIP_ACCEL_S3_URL``
        - Configuration option: ``s3-url``
        - Default: ``https://s3.amazonaws.com``

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return self.get(property_name='s3_cache_url',
                        environment_variable='PIP_ACCEL_S3_URL',
                        configuration_option='s3-url',
                        default='https://s3.amazonaws.com')

    @cached_property
    def s3_cache_bucket(self):
        """
        Name of Amazon S3 bucket where binary distributions are cached (a string or :data:`None`).

        - Environment variable: ``$PIP_ACCEL_S3_BUCKET``
        - Configuration option: ``s3-bucket``
        - Default: :data:`None`

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return self.get(property_name='s3_cache_bucket',
                        environment_variable='PIP_ACCEL_S3_BUCKET',
                        configuration_option='s3-bucket')

    @cached_property
    def s3_cache_create_bucket(self):
        """
        Whether to automatically create the Amazon S3 bucket when it's missing.

        - Environment variable: ``$PIP_ACCEL_S3_CREATE_BUCKET``
        - Configuration option: ``s3-create-bucket``
        - Default: :data:`False`

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return coerce_boolean(self.get(property_name='s3_cache_create_bucket',
                                       environment_variable='PIP_ACCEL_S3_CREATE_BUCKET',
                                       configuration_option='s3-create-bucket',
                                       default=False))

    @cached_property
    def s3_cache_prefix(self):
        """
        Cache prefix for binary distribution archives in Amazon S3 bucket (a string or :data:`None`).

        - Environment variable: ``$PIP_ACCEL_S3_PREFIX``
        - Configuration option: ``s3-prefix``
        - Default: :data:`None`

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return self.get(property_name='s3_cache_prefix',
                        environment_variable='PIP_ACCEL_S3_PREFIX',
                        configuration_option='s3-prefix')

    @cached_property
    def s3_cache_readonly(self):
        """
        Whether the Amazon S3 bucket is considered read only.

        If this is :data:`True` then the Amazon S3 bucket will only be used for
        :class:`~pip_accel.caches.s3.S3CacheBackend.get()` operations (all
        :class:`~pip_accel.caches.s3.S3CacheBackend.put()` operations will
        be disabled).

        - Environment variable: ``$PIP_ACCEL_S3_READONLY`` (refer to
          :func:`~humanfriendly.coerce_boolean()` for details on how the
          value of the environment variable is interpreted)
        - Configuration option: ``s3-readonly`` (also parsed using
          :func:`~humanfriendly.coerce_boolean()`)
        - Default: :data:`False`

        For details please refer to the :mod:`pip_accel.caches.s3` module.
        """
        return coerce_boolean(self.get(property_name='s3_cache_readonly',
                                       environment_variable='PIP_ACCEL_S3_READONLY',
                                       configuration_option='s3-readonly',
                                       default=False))

    @cached_property
    def s3_cache_timeout(self):
        """
        The socket timeout in seconds for connections to Amazon S3 (an integer).

        This value is injected into Boto's configuration to override the
        default socket timeout used for connections to Amazon S3.

        - Environment variable: ``$PIP_ACCEL_S3_TIMEOUT``
        - Configuration option: ``s3-timeout``
        - Default: ``60`` (`Boto's default`_)

        .. _Boto's default: http://boto.readthedocs.org/en/latest/boto_config_tut.html
        """
        value = self.get(property_name='s3_cache_timeout',
                         environment_variable='PIP_ACCEL_S3_TIMEOUT',
                         configuration_option='s3-timeout')
        return self._coerce_nonnegative_int(value, 60)

    @cached_property
    def s3_cache_retries(self):
        """
        The number of times to retry failed requests to Amazon S3 (an integer).

        This value is injected into Boto's configuration to override the
        default number of times to retry failed requests to Amazon S3.

        - Environment variable: ``$PIP_ACCEL_S3_RETRIES``
        - Configuration option: ``s3-retries``
        - Default: ``5`` (`Boto's default`_)
        """
        value = self.get(property_name='s3_cache_retries',
                         environment_variable='PIP_ACCEL_S3_RETRIES',
                         configuration_option='s3-retries')
        return self._coerce_nonnegative_int(value, 5)
| {
"repo_name": "matysek/pip-accel",
"path": "pip_accel/config.py",
"copies": "1",
"size": "17658",
"license": "mit",
"hash": -7091750949034735000,
"line_mean": 39.6866359447,
"line_max": 106,
"alpha_frac": 0.6184165817,
"autogenerated": false,
"ratio": 4.430005017561465,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001039888634254761,
"num_lines": 434
} |
"""
Simple wrapper for pip and pkg_resources `Requirement` objects.
After downloading the specified requirement(s) pip reports a "requirement set"
to pip-accel. In the past pip-accel would summarize this requirement set into a
list of tuples, where each tuple would contain a requirement's project name,
version and source directory (basically only the information required by
pip-accel remained).
Recently I've started using pip-accel as a library in another project I'm
working on (not yet public) and in that project I am very interested in whether
a given requirement is a direct or transitive requirement. Unfortunately
pip-accel did not preserve this information.
That's when I decided that next to pip's :class:`pip.req.InstallRequirement`
and setuptools' :class:`pkg_resources.Requirement` I would introduce yet
another type of requirement object... It's basically just a summary of the
other two types of requirement objects and it also provides access to the
original requirement objects (for those who are interested; the interfaces are
basically undocumented AFAIK).
"""
# Standard library modules.
import glob
import os
import re
import time
# Modules included in our package.
from pip_accel.exceptions import UnknownDistributionFormat
# External dependencies.
from cached_property import cached_property
from pip._vendor.distlib.util import ARCHIVE_EXTENSIONS
from pip._vendor.pkg_resources import find_distributions
from pip.req import InstallRequirement
class Requirement(object):
"""Simple wrapper for the requirement objects defined by pip and setuptools."""
def __init__(self, config, requirement):
"""
Initialize a requirement object.
:param config: A :class:`~pip_accel.config.Config` object.
:param requirement: A :class:`pip.req.InstallRequirement` object.
"""
self.config = config
self.pip_requirement = requirement
self.setuptools_requirement = requirement.req
def __repr__(self):
"""Generate a human friendly representation of a requirement object."""
return "Requirement(name=%r, version=%r)" % (self.name, self.version)
@cached_property
def name(self):
"""
The name of the Python package (a string).
This is the name used to register a package on PyPI and the name
reported by commands like ``pip freeze``. Based on
:attr:`pkg_resources.Requirement.project_name`.
"""
return self.setuptools_requirement.project_name
@cached_property
def version(self):
"""The version of the package that ``pip`` wants to install (a string)."""
if self.is_wheel:
return self.wheel_metadata.version
else:
return self.sdist_metadata['Version']
@cached_property
def related_archives(self):
"""
Try to find the source distribution archive(s) for this requirement.
Returns a list of pathnames (strings).
This property is very new in pip-accel and its logic may need some time
to mature. For now any misbehavior by this property shouldn't be too
much of a problem because the pathnames reported by this property are
only used for cache invalidation (see :attr:`last_modified`).
"""
# Escape the requirement's name for use in a regular expression.
name_pattern = escape_name(self.name)
# Escape the requirement's version for in a regular expression.
version_pattern = re.escape(self.version)
# Create a regular expression that matches any of the known source
# distribution archive extensions.
extension_pattern = '|'.join(re.escape(ext) for ext in ARCHIVE_EXTENSIONS if ext != '.whl')
# Compose the regular expression pattern to match filenames of source
# distribution archives in the local source index directory.
pattern = '^%s-%s(%s)$' % (name_pattern, version_pattern, extension_pattern)
# Compile the regular expression for case insensitive matching.
compiled_pattern = re.compile(pattern, re.IGNORECASE)
# Find the matching source distribution archives.
return [os.path.join(self.config.source_index, fn)
for fn in os.listdir(self.config.source_index)
if compiled_pattern.match(fn)]
@cached_property
def last_modified(self):
"""
Try to find the last modified time of the requirement's source distribution archive(s).
Returns a number.
Based on :attr:`related_archives`. If no related archives are found
the current time is reported. In the balance between not invalidating
cached binary distributions enough and invalidating them too
frequently, this property causes the latter to happen.
"""
mtimes = list(map(os.path.getmtime, self.related_archives))
return max(mtimes) if mtimes else time.time()
@cached_property
def source_directory(self):
"""
The pathname of the directory containing the unpacked source distribution (a string).
This is the directory that contains a ``setup.py`` script. Based on
:attr:`pip.req.InstallRequirement.source_dir`.
"""
return self.pip_requirement.source_dir
@cached_property
def is_wheel(self):
"""
:data:`True` when the requirement is a wheel, :data:`False` otherwise.
.. note:: To my surprise it seems to be non-trivial to determine
whether a given :class:`pip.req.InstallRequirement` object
produced by pip's internal Python API concerns a source
distribution or a wheel distribution.
There's a :class:`pip.req.InstallRequirement.is_wheel`
property but I'm currently looking at a wheel distribution
whose ``is_wheel`` property returns :data:`None`, apparently
because the requirement's ``url`` property is also :data:`None`.
Whether this is an obscure implementation detail of pip or
caused by the way pip-accel invokes pip, I really can't tell
(yet).
"""
probably_sdist = os.path.isfile(os.path.join(self.source_directory, 'setup.py'))
probably_wheel = len(glob.glob(os.path.join(self.source_directory, '*.dist-info', 'WHEEL'))) > 0
if probably_wheel and not probably_sdist:
return True
elif probably_sdist and not probably_wheel:
return False
elif probably_sdist and probably_wheel:
variables = dict(requirement=self.setuptools_requirement,
directory=self.source_directory)
raise UnknownDistributionFormat("""
The unpacked distribution of {requirement} in {directory} looks
like a source distribution and a wheel distribution, I'm
confused!
""", **variables)
else:
variables = dict(requirement=self.setuptools_requirement,
directory=self.source_directory)
raise UnknownDistributionFormat("""
The unpacked distribution of {requirement} in {directory}
doesn't look like a source distribution and also doesn't look
like a wheel distribution, I'm confused!
""", **variables)
@cached_property
def is_transitive(self):
    """
    Whether the dependency is transitive (indirect).

    :data:`True` when the requirement is a transitive dependency (a
    dependency of a dependency) or :data:`False` when the requirement is a
    direct dependency (specified on pip's command line or in a
    ``requirements.txt`` file). Based on
    :attr:`pip.req.InstallRequirement.comes_from`.
    """
    # When pip pulls in a requirement as a dependency of another requirement
    # it sets `comes_from' to the parent InstallRequirement; for direct
    # requirements `comes_from' is not an InstallRequirement (presumably a
    # string or None -- confirm against pip's internals).
    return isinstance(self.pip_requirement.comes_from, InstallRequirement)
@cached_property
def is_direct(self):
    """The opposite of :attr:`Requirement.is_transitive`."""
    # A requirement is direct exactly when it isn't transitive.
    if self.is_transitive:
        return False
    return True
@cached_property
def is_editable(self):
    """
    Whether the requirement should be installed in editable mode.

    :data:`True` when the requirement is to be installed in editable mode
    (i.e. setuptools "develop mode"). Based on
    :attr:`pip.req.InstallRequirement.editable`.
    """
    return self.pip_requirement.editable
@cached_property
def sdist_metadata(self):
    """Get the distribution metadata of an unpacked source distribution."""
    # Only source distributions carry PKG-INFO style metadata; wheels are
    # handled by `wheel_metadata' instead.
    if not self.is_wheel:
        return self.pip_requirement.pkg_info()
    raise TypeError("Requirement is not a source distribution!")
@cached_property
def wheel_metadata(self):
    """Get the distribution metadata of an unpacked wheel distribution."""
    if not self.is_wheel:
        raise TypeError("Requirement is not a wheel distribution!")
    # find_distributions() yields pkg_resources distribution objects found
    # in the unpacked wheel directory; the first one is the one we want.
    for distribution in find_distributions(self.source_directory):
        return distribution
    # Reaching this point means the directory contained no metadata at all.
    msg = "pkg_resources didn't find a wheel distribution in %s!"
    raise Exception(msg % self.source_directory)
def __str__(self):
    """Render a human friendly string describing the requirement."""
    # Renders e.g. "coloredlogs (1.0.1)".
    return "{} ({})".format(self.name, self.version)
def escape_name(requirement_name):
    """
    Escape a requirement's name for use in a regular expression.

    This backslash-escapes all non-alphanumeric characters and replaces dashes
    and underscores with a character class that matches a dash or underscore
    (effectively treating dashes and underscores equivalently).

    :param requirement_name: The name of the requirement (a string).
    :returns: The requirement's name as a regular expression (a string).
    """
    # Each character outside [A-Za-z0-9] is rewritten by the callback below.
    return re.sub('[^A-Za-z0-9]', escape_name_callback, requirement_name)
def escape_name_callback(match):
    """
    Used by :func:`escape_name()` to treat dashes and underscores as equivalent.

    :param match: A regular expression match object that captured a single
                  character.
    :returns: A regular expression string that matches the captured character.
    """
    ch = match.group(0)
    if ch in ('-', '_'):
        # Dashes and underscores are interchangeable in requirement names.
        return '[-_]'
    # Backslash-escape every other character.
    return '\\' + ch
| {
"repo_name": "matysek/pip-accel",
"path": "pip_accel/req.py",
"copies": "1",
"size": "10512",
"license": "mit",
"hash": -5130399135741146000,
"line_mean": 40.7142857143,
"line_max": 104,
"alpha_frac": 0.6678082192,
"autogenerated": false,
"ratio": 4.618629173989455,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5786437393189455,
"avg_score": null,
"num_lines": null
} |
"""
Support for multiple cache backends.
This module defines an abstract base class (:class:`AbstractCacheBackend`)
to be inherited by custom cache backends in order to easily integrate them in
pip-accel. The cache backends included in pip-accel are built on top of the
same mechanism.
Additionally this module defines :class:`CacheManager` which makes it
possible to merge the available cache backends into a single logical cache
which automatically disables backends that report errors.
"""
# Standard library modules.
import logging
# Modules included in our package.
from pip_accel.compat import WINDOWS
from pip_accel.exceptions import CacheBackendDisabledError
from pip_accel.utils import get_python_version
# External dependencies.
from humanfriendly import concatenate, pluralize
from pkg_resources import iter_entry_points
# Initialize a logger for this module.
logger = logging.getLogger(__name__)

# Registry of cache backend classes, filled automatically by the
# CacheBackendMeta metaclass whenever a backend subclass is defined.
registered_backends = set()

# On Windows it is not allowed to have colons in filenames so we use a dollar sign instead.
FILENAME_PATTERN = 'v%i\\%s$%s$%s.tar.gz' if WINDOWS else 'v%i/%s:%s:%s.tar.gz'
class CacheBackendMeta(type):
    """Metaclass to intercept cache backend definitions."""

    def __init__(cls, name, bases, namespace):
        """
        Intercept cache backend definitions.

        Adds every class created with this metaclass to the module level
        :data:`registered_backends` registry so that backends register
        themselves simply by being defined.

        :param name: The name of the new class (a string).
        :param bases: The base classes of the new class (a tuple).
        :param namespace: The class namespace (a dictionary).
        """
        # The third parameter was renamed from `dict' to `namespace' because
        # shadowing the builtin is error prone; type.__init__() is invoked
        # positionally by the class machinery so callers are unaffected.
        type.__init__(cls, name, bases, namespace)
        registered_backends.add(cls)
class AbstractCacheBackend(object):
    """
    Base class that pip-accel cache backend implementations derive from.

    pip-accel uses subclasses of this class to persist Python distribution
    archives, which accelerates repeated installations and reduces the
    dependence on external systems like PyPI and distribution sites.

    .. note:: Subclasses register themselves automatically at definition
              time through a metaclass based registration mechanism, which
              was originally based on the article `Using Metaclasses to
              Create Self-Registering Plugins
              <http://effbot.org/zone/metaclass-plugins.htm>`_.

              Some additional trickery is involved to keep this working on
              both Python 2.x and Python 3.x, because the metaclass syntax
              is incompatible between the two major versions and writing
              two separate implementations was not an attractive option :-).
    """

    # Relative priority of this backend (backends with lower values are
    # consulted before backends with higher values).
    PRIORITY = 0

    def __init__(self, config):
        """
        Initialize a cache backend.

        :param config: The pip-accel configuration (a :class:`.Config`
                       object).
        """
        self.config = config

    def get(self, filename):
        """
        Look up a previously cached distribution archive.

        Called by `pip-accel` before fetching or building a distribution
        archive, so that a cached copy can be re-used when available.

        :param filename: The expected filename of the distribution archive (a
                         string).
        :returns: The absolute pathname of a local file or :data:`None` when the
                  distribution archive hasn't been cached.
        """
        raise NotImplementedError()

    def put(self, filename, handle):
        """
        Add a newly built distribution archive to the cache.

        Called by `pip-accel` after fetching or building a distribution
        archive.

        :param filename: The filename of the distribution archive (a string).
        :param handle: A file-like object that provides access to the
                       distribution archive.
        """
        raise NotImplementedError()

    def __repr__(self):
        """Generate a textual representation of the cache backend."""
        return type(self).__name__
# Obscure syntax gymnastics to define a class with a metaclass whose
# definition is compatible with Python 2.x as well as Python 3.x: instead of
# using either version's metaclass syntax we invoke the metaclass manually to
# rebuild the class, which works the same on both major versions.
# See also: https://wiki.python.org/moin/PortingToPy3k/BilingualQuickRef#metaclasses
AbstractCacheBackend = CacheBackendMeta('AbstractCacheBackend',
                                        AbstractCacheBackend.__bases__,
                                        dict(AbstractCacheBackend.__dict__))
class CacheManager(object):
    """
    Interface to treat multiple cache backends as a single one.

    The cache manager automatically disables cache backends that raise
    exceptions on ``get()`` and ``put()`` operations.
    """

    def __init__(self, config):
        """
        Initialize a cache manager.

        Automatically initializes instances of all registered cache backends
        based on setuptools' support for entry points which makes it possible
        for external Python packages to register additional cache backends
        without any modifications to pip-accel.

        :param config: The pip-accel configuration (a :class:`.Config`
                       object).
        """
        self.config = config
        # Importing a module that defines AbstractCacheBackend subclasses is
        # enough to register those subclasses (see CacheBackendMeta).
        for entry_point in iter_entry_points('pip_accel.cache_backends'):
            logger.debug("Importing cache backend: %s", entry_point.module_name)
            __import__(entry_point.module_name)
        # Initialize instances of all registered cache backends (sorted by
        # priority so that e.g. the local file system is checked before S3).
        self.backends = sorted((b(self.config) for b in registered_backends if b != AbstractCacheBackend),
                               key=lambda b: b.PRIORITY)
        logger.debug("Initialized %s: %s",
                     pluralize(len(self.backends), "cache backend"),
                     concatenate(map(repr, self.backends)))

    def get(self, requirement):
        """
        Get a distribution archive from any of the available caches.

        :param requirement: A :class:`.Requirement` object.
        :returns: The absolute pathname of a local file or :data:`None` when the
                  distribution archive is missing from all available caches.
        """
        filename = self.generate_filename(requirement)
        # Iterate over a copy of the backend list because failing backends
        # are removed from the original list during iteration.
        for backend in list(self.backends):
            try:
                pathname = backend.get(filename)
                if pathname is not None:
                    return pathname
            except CacheBackendDisabledError as e:
                logger.debug("Disabling %s because it requires configuration: %s", backend, e)
                self.backends.remove(backend)
            except Exception as e:
                logger.exception("Disabling %s because it failed: %s", backend, e)
                self.backends.remove(backend)

    def put(self, requirement, handle):
        """
        Store a distribution archive in all of the available caches.

        :param requirement: A :class:`.Requirement` object.
        :param handle: A file-like object that provides access to the
                       distribution archive.
        """
        filename = self.generate_filename(requirement)
        for backend in list(self.backends):
            # Rewind the handle so that every backend reads the full archive.
            handle.seek(0)
            try:
                backend.put(filename, handle)
            except CacheBackendDisabledError as e:
                logger.debug("Disabling %s because it requires configuration: %s", backend, e)
                self.backends.remove(backend)
            except Exception as e:
                logger.exception("Disabling %s because it failed: %s", backend, e)
                self.backends.remove(backend)

    def generate_filename(self, requirement):
        """
        Generate a distribution archive filename for a package.

        :param requirement: A :class:`.Requirement` object.
        :returns: The filename of the distribution archive (a string)
                  including a single leading directory component to indicate
                  the cache format revision.
        """
        return FILENAME_PATTERN % (self.config.cache_format_revision,
                                   requirement.name, requirement.version,
                                   get_python_version())
| {
"repo_name": "paylogic/pip-accel",
"path": "pip_accel/caches/__init__.py",
"copies": "2",
"size": "8411",
"license": "mit",
"hash": -1321300969949617400,
"line_mean": 38.8625592417,
"line_max": 106,
"alpha_frac": 0.6465343003,
"autogenerated": false,
"ratio": 4.867476851851852,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005410716571109985,
"num_lines": 211
} |
"""Sphinx documentation configuration for the `pip-accel` project."""
import os
import sys
import types
# Add the pip_accel source distribution's root directory to the module path
# so that autodoc can import the package without it being installed.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration -----------------------------------------------------

# Sphinx extension module names.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.graphviz',
    'sphinx.ext.inheritance_diagram',
    'sphinx.ext.intersphinx',
]

# Paths that contain templates, relative to this directory.
templates_path = ['templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'pip-accel'
copyright = u'2015, Peter Odding and Paylogic International'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.

# Find the package version and make it the release.
from pip_accel import __version__ as pip_accel_version  # NOQA

# The short X.Y version.
version = '.'.join(pip_accel_version.split('.')[:2])

# The full version, including alpha/beta/rc tags.
release = pip_accel_version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# Order autodoc members by their order in the source file, see:
# http://sphinx-doc.org/ext/autodoc.html#confval-autodoc_member_order
autodoc_member_order = 'bysource'

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# Refer to the Python standard library.
# From: http://twistedmatrix.com/trac/ticket/4582.
intersphinx_mapping = {
    'python': ('http://docs.python.org', None),
    'humanfriendly': ('http://humanfriendly.readthedocs.org/en/latest/', None),
    'boto': ('http://boto.readthedocs.org/en/latest/', None),
}

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Output file base name for HTML help builder.
htmlhelp_basename = 'pip-acceldoc'
def setup(app):
    """
    Sphinx customizations applied through the extension API.

    :param app: The Sphinx application object.
    """
    # Hook into autodoc's member filtering so that documented "special"
    # methods are included in the generated documentation.
    app.connect('autodoc-skip-member', custom_skip_member)
def custom_skip_member(app, what, name, obj, skip, options):
    """
    Reconsider members that autodoc would skip despite having documentation.

    Inspired by http://stackoverflow.com/a/5599712/788200. Enables
    documentation of e.g. __init__(), __str__(), __unicode__(), __enter__(),
    __exit__(), etc. The isinstance() check makes sure things like __doc__,
    __module__ and __weakref__ stay excluded from the documentation.
    """
    if not (skip and obj.__doc__):
        # Nothing to reconsider: keep Sphinx's verdict.
        return skip
    # Documented functions and methods should be included after all.
    return not isinstance(obj, (types.FunctionType, types.MethodType))
| {
"repo_name": "matysek/pip-accel",
"path": "docs/conf.py",
"copies": "1",
"size": "3432",
"license": "mit",
"hash": 5277408366384524000,
"line_mean": 32.3203883495,
"line_max": 80,
"alpha_frac": 0.6861888112,
"autogenerated": false,
"ratio": 3.7880794701986753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4974268281398675,
"avg_score": null,
"num_lines": null
} |
"""
Top level functionality of `pip-accel`.
The Python module :mod:`pip_accel` defines the classes that implement the
top level functionality of the pip accelerator. Instead of using the
``pip-accel`` command you can also use the pip accelerator as a Python module,
in this case you'll probably want to start by taking a look at
the :class:`PipAccelerator` class.
Wheel support
-------------
During the upgrade to pip 6 support for installation of wheels_ was added to
pip-accel. The ``pip-accel`` command line program now downloads and installs
wheels when available for a given requirement, but part of pip-accel's Python
API defaults to the more conservative choice of allowing callers to opt-in to
wheel support.
This is because previous versions of pip-accel would only download source
distributions and pip-accel provides the functionality to convert those source
distributions to "dumb binary distributions". This functionality is exposed to
callers who may depend on this mode of operation. So for now users of the
Python API get to decide whether they're interested in wheels or not.
Setuptools upgrade
~~~~~~~~~~~~~~~~~~
If the requirement set includes wheels and ``setuptools >= 0.8`` is not yet
installed, it will be added to the requirement set and installed together with
the other requirement(s) in order to enable the usage of distributions
installed from wheels (their metadata is different).
.. _wheels: https://pypi.python.org/pypi/wheel
"""
# Standard library modules.
import logging
import os
import os.path
import shutil
import sys
import tempfile
# Modules included in our package.
from pip_accel.bdist import BinaryDistributionManager
from pip_accel.exceptions import EnvironmentMismatchError, NothingToDoError
from pip_accel.req import Requirement
from pip_accel.utils import (
is_installed,
makedirs,
match_option,
match_option_with_value,
same_directories,
uninstall,
)
# External dependencies.
from humanfriendly import concatenate, Timer, pluralize
from pip import index as pip_index_module
from pip import wheel as pip_wheel_module
from pip._vendor import pkg_resources
from pip.commands import install as pip_install_module
from pip.commands.install import InstallCommand
from pip.exceptions import DistributionNotFound
# Semi-standard module versioning (the single source of the version number).
__version__ = '0.34'

# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class PipAccelerator(object):
"""
Accelerator for pip, the Python package manager.
The :class:`PipAccelerator` class brings together the top level logic of
pip-accel. This top level logic was previously just a collection of
functions but that became more unwieldy as the amount of internal state
increased. The :class:`PipAccelerator` class is intended to make it
(relatively) easy to build something on top of pip and pip-accel.
"""
def __init__(self, config, validate=True):
    """
    Initialize the pip accelerator.

    :param config: The pip-accel configuration (a :class:`.Config`
                   object).
    :param validate: :data:`True` to run :func:`validate_environment()`,
                     :data:`False` otherwise.
    """
    self.config = config
    self.bdists = BinaryDistributionManager(self.config)
    if validate:
        self.validate_environment()
    self.initialize_directories()
    self.clean_source_index()
    # Keep a list of build directories created by pip-accel.
    self.build_directories = []
    # We hold on to returned Requirement objects so we can remove their
    # temporary sources after pip-accel has finished.
    self.reported_requirements = []
def validate_environment(self):
    """
    Make sure :data:`sys.prefix` matches ``$VIRTUAL_ENV`` (if defined).

    This may seem like a strange requirement to dictate but it avoids hairy
    issues like `documented here <https://github.com/paylogic/pip-accel/issues/5>`_.

    The most sneaky thing is that ``pip`` doesn't have this problem
    (de-facto) because ``virtualenv`` copies ``pip`` wherever it goes...
    (``pip-accel`` on the other hand has to be installed by the user).

    :raises: :exc:`.EnvironmentMismatchError` when the interpreter prefix
             and the active virtual environment disagree.
    """
    environment = os.environ.get('VIRTUAL_ENV')
    if environment:
        # Compare the two locations as directories (not as raw strings)
        # because symbolic links and trailing slashes would otherwise
        # cause false positives.
        if not same_directories(sys.prefix, environment):
            raise EnvironmentMismatchError("""
                You are trying to install packages in environment #1 which
                is different from environment #2 where pip-accel is
                installed! Please install pip-accel under environment #1 to
                install packages there.

                Environment #1: {environment} (defined by $VIRTUAL_ENV)

                Environment #2: {prefix} (Python's installation prefix)
            """, environment=environment, prefix=sys.prefix)
def initialize_directories(self):
    """Automatically create the local source distribution index directory."""
    # makedirs() is a pip_accel.utils helper; presumably a no-op when the
    # directory already exists -- confirm against pip_accel/utils.py.
    makedirs(self.config.source_index)
def clean_source_index(self):
    """
    Cleanup broken symbolic links in the local source distribution index.

    The purpose of this method requires some context to understand. Let me
    preface this by stating that I realize I'm probably overcomplicating
    things, but I like to preserve forward / backward compatibility when
    possible and I don't feel like dropping everyone's locally cached
    source distribution archives without a good reason to do so. With that
    out of the way:

    - Versions of pip-accel based on pip 1.4.x maintained a local source
      distribution index based on a directory containing symbolic links
      pointing directly into pip's download cache. When files were removed
      from pip's download cache, broken symbolic links remained in
      pip-accel's local source distribution index directory. This resulted
      in very confusing error messages. To avoid this
      :func:`clean_source_index()` cleaned up broken symbolic links
      whenever pip-accel was about to invoke pip.

    - More recent versions of pip (6.x) no longer support the same style of
      download cache that contains source distribution archives that can be
      re-used directly by pip-accel. To cope with the changes in pip 6.x
      new versions of pip-accel tell pip to download source distribution
      archives directly into the local source distribution index directory
      maintained by pip-accel.

    - It is very reasonable for users of pip-accel to have multiple
      versions of pip-accel installed on their system (imagine a dozen
      Python virtual environments that won't all be updated at the same
      time; this is the situation I always find myself in :-). These
      versions of pip-accel will be sharing the same local source
      distribution index directory.

    - All of this leads up to the local source distribution index directory
      containing a mixture of symbolic links and regular files with no
      obvious way to atomically and gracefully upgrade the local source
      distribution index directory while avoiding fights between old and
      new versions of pip-accel :-).

    - I could of course switch to storing the new local source distribution
      index in a differently named directory (avoiding potential conflicts
      between multiple versions of pip-accel) but then I would have to
      introduce a new configuration option, otherwise everyone who has
      configured pip-accel to store its source index in a non-default
      location could still be bitten by compatibility issues.

    For now I've decided to keep using the same directory for the local
    source distribution index and to keep cleaning up broken symbolic
    links. This enables cooperating between old and new versions of
    pip-accel and avoids trashing user's local source distribution indexes.
    The main disadvantage is that pip-accel is still required to clean up
    broken symbolic links...
    """
    cleanup_timer = Timer()
    cleanup_counter = 0
    for entry in os.listdir(self.config.source_index):
        pathname = os.path.join(self.config.source_index, entry)
        # islink() is true for the link itself while exists() follows the
        # link, so this combination detects dangling symbolic links.
        if os.path.islink(pathname) and not os.path.exists(pathname):
            # Use warning() instead of the deprecated warn() alias.
            logger.warning("Cleaning up broken symbolic link: %s", pathname)
            os.unlink(pathname)
            cleanup_counter += 1
    logger.debug("Cleaned up %i broken symbolic links from source index in %s.", cleanup_counter, cleanup_timer)
def install_from_arguments(self, arguments, **kw):
    """
    Download, unpack, build and install the specified requirements.

    This function is a simple wrapper for :func:`get_requirements()`,
    :func:`install_requirements()` and :func:`cleanup_temporary_directories()`
    that implements the default behavior of the pip accelerator. If you're
    extending or embedding pip-accel you may want to call the underlying
    methods instead.

    If the requirement set includes wheels and ``setuptools >= 0.8`` is not
    yet installed, it will be added to the requirement set and installed
    together with the other requirement(s) in order to enable the usage of
    distributions installed from wheels (their metadata is different).

    :param arguments: The command line arguments to ``pip install ..`` (a
                      list of strings).
    :param kw: Any keyword arguments are passed on to
               :func:`install_requirements()`.
    :returns: The result of :func:`install_requirements()`, or 0 when
              there's nothing to do.
    """
    try:
        requirements = self.get_requirements(arguments, use_wheels=self.arguments_allow_wheels(arguments))
        have_wheels = any(req.is_wheel for req in requirements)
        # Installing from wheels requires setuptools >= 0.8, so upgrade
        # setuptools as part of this run when needed.
        if have_wheels and not self.setuptools_supports_wheels():
            logger.info("Preparing to upgrade to setuptools >= 0.8 to enable wheel support ..")
            requirements.extend(self.get_requirements(['setuptools >= 0.8']))
        if requirements:
            return self.install_requirements(requirements, **kw)
        else:
            logger.info("Nothing to do! (requirements already installed)")
            return 0
    finally:
        # Always remove the temporary build directories, even when pip or
        # the installation raised an exception.
        self.cleanup_temporary_directories()
def setuptools_supports_wheels(self):
    """
    Check whether the installed setuptools version supports wheels.

    :returns: :data:`True` when ``setuptools >= 0.8`` is already installed
              (wheel support is available), :data:`False` when setuptools
              needs to be upgraded first.

    .. note:: The original docstring stated the inverse of what the code
              returns; the caller (``install_from_arguments()``) confirms
              that :data:`True` means "no upgrade required".
    """
    # Don't use pkg_resources.Requirement.parse, to avoid the override
    # in distribute, that converts `setuptools' to `distribute'.
    setuptools_requirement = next(pkg_resources.parse_requirements('setuptools >= 0.8'))
    try:
        installed_setuptools = pkg_resources.get_distribution('setuptools')
        if installed_setuptools in setuptools_requirement:
            # setuptools >= 0.8 is already installed; nothing to do.
            return True
    except pkg_resources.DistributionNotFound:
        pass
    # We need to install setuptools >= 0.8.
    return False
def get_requirements(self, arguments, max_retries=None, use_wheels=False):
    """
    Use pip to download and unpack the requested source distribution archives.

    :param arguments: The command line arguments to ``pip install ...`` (a
                      list of strings).
    :param max_retries: The maximum number of times that pip will be asked
                        to download distribution archives (this helps to
                        deal with intermittent failures). If this is
                        :data:`None` then :attr:`~.Config.max_retries` is
                        used.
    :param use_wheels: Whether pip and pip-accel are allowed to use wheels_
                      (:data:`False` by default for backwards compatibility
                      with callers that use pip-accel as a Python API).

    .. warning:: Requirements which are already installed are not included
                 in the result. If this breaks your use case consider using
                 pip's ``--ignore-installed`` option.
    """
    # Use a new build directory for each run of get_requirements().
    self.create_build_directory()
    # Check whether -U or --upgrade was given.
    if any(match_option(a, '-U', '--upgrade') for a in arguments):
        logger.info("Checking index(es) for new version (-U or --upgrade was given) ..")
    else:
        # If -U or --upgrade wasn't given and all requirements can be
        # satisfied using the archives in pip-accel's local source index we
        # don't need pip to connect to PyPI looking for new versions (that
        # will just slow us down).
        try:
            return self.unpack_source_dists(arguments, use_wheels=use_wheels)
        except DistributionNotFound:
            logger.info("We don't have all distribution archives yet!")
    # Get the maximum number of retries from the configuration if the
    # caller didn't specify a preference.
    if max_retries is None:
        max_retries = self.config.max_retries
    # If not all requirements are available locally we use pip to download
    # the missing source distribution archives from PyPI (we retry a couple
    # of times in case pip reports recoverable errors).
    for i in range(max_retries):
        try:
            return self.download_source_dists(arguments, use_wheels=use_wheels)
        except Exception as e:
            if i + 1 < max_retries:
                # On all but the last iteration we swallow exceptions
                # during downloading.
                logger.warning("pip raised exception while downloading distributions: %s", e)
            else:
                # On the last iteration we don't swallow exceptions
                # during downloading because the error reported by pip
                # is the most sensible error for us to report.
                raise
        # Only reached when the download attempt failed and was swallowed.
        logger.info("Retrying after pip failed (%i/%i) ..", i + 1, max_retries)
def unpack_source_dists(self, arguments, use_wheels=False):
    """
    Find and unpack local source distributions and discover their metadata.

    :param arguments: The command line arguments to ``pip install ...`` (a
                      list of strings).
    :param use_wheels: Whether pip and pip-accel are allowed to use wheels_
                      (:data:`False` by default for backwards compatibility
                      with callers that use pip-accel as a Python API).
    :returns: A list of :class:`pip_accel.req.Requirement` objects.
    :raises: Any exceptions raised by pip, for example
             :exc:`pip.exceptions.DistributionNotFound` when not all
             requirements can be satisfied.

    This function checks whether there are local source distributions
    available for all requirements, unpacks the source distribution
    archives and finds the names and versions of the requirements. By using
    the ``pip install --download`` command we avoid reimplementing the
    following pip features:

    - Parsing of ``requirements.txt`` (including recursive parsing).
    - Resolution of possibly conflicting pinned requirements.
    - Unpacking source distributions in multiple formats.
    - Finding the name & version of a given source distribution.
    """
    unpack_timer = Timer()
    logger.info("Unpacking distribution(s) ..")
    # Temporarily swap in CustomPackageFinder (defined elsewhere in this
    # module) for pip's PackageFinder -- presumably to keep pip off the
    # network during this phase; confirm against its definition.
    with PatchedAttribute(pip_install_module, 'PackageFinder', CustomPackageFinder):
        requirements = self.get_pip_requirement_set(arguments, use_remote_index=False, use_wheels=use_wheels)
    logger.info("Finished unpacking %s in %s.", pluralize(len(requirements), "distribution"), unpack_timer)
    return requirements
def download_source_dists(self, arguments, use_wheels=False):
    """
    Download missing source distributions.

    :param arguments: The command line arguments to ``pip install ...`` (a
                      list of strings).
    :param use_wheels: Whether pip and pip-accel are allowed to use wheels_
                      (:data:`False` by default for backwards compatibility
                      with callers that use pip-accel as a Python API).
    :returns: A list of :class:`pip_accel.req.Requirement` objects.
    :raises: Any exceptions raised by pip.
    """
    timer = Timer()
    logger.info("Downloading missing distribution(s) ..")
    # Unlike unpack_source_dists() this allows pip to contact the remote
    # package index to fetch whatever isn't cached locally.
    requirements = self.get_pip_requirement_set(arguments,
                                                use_remote_index=True,
                                                use_wheels=use_wheels)
    logger.info("Finished downloading distribution(s) in %s.", timer)
    return requirements
def get_pip_requirement_set(self, arguments, use_remote_index, use_wheels=False):
    """
    Get the unpacked requirement(s) specified by the caller by running pip.

    :param arguments: The command line arguments to ``pip install ..`` (a
                      list of strings).
    :param use_remote_index: A boolean indicating whether pip is allowed to
                             connect to the main package index
                             (http://pypi.python.org by default).
    :param use_wheels: Whether pip and pip-accel are allowed to use wheels_
                      (:data:`False` by default for backwards compatibility
                      with callers that use pip-accel as a Python API).
    :returns: A :class:`pip.req.RequirementSet` object created by pip.
    :raises: :exc:`.NothingToDoError` when pip doesn't report a requirement
             set, and any exceptions raised by pip.
    """
    # Compose the pip command line arguments. This is where a lot of the
    # core logic of pip-accel is hidden and it uses some esoteric features
    # of pip so this method is heavily commented.
    command_line = []
    # Use `--download' to instruct pip to download requirement(s) into
    # pip-accel's local source distribution index directory. This has the
    # following documented side effects (see `pip install --help'):
    #  1. It disables the installation of requirements (without using the
    #     `--no-install' option which is deprecated and slated for removal
    #     in pip 7.x).
    #  2. It ignores requirements that are already installed (because
    #     pip-accel doesn't actually need to re-install requirements that
    #     are already installed we will have work around this later, but
    #     that seems fairly simple to do).
    command_line.append('--download=%s' % self.config.source_index)
    # Use `--find-links' to point pip at pip-accel's local source
    # distribution index directory. This ensures that source distribution
    # archives are never downloaded more than once (regardless of the HTTP
    # cache that was introduced in pip 6.x).
    command_line.append('--find-links=file://%s' % self.config.source_index)
    # Use `--no-binary=:all:' to ignore wheel distributions by default in
    # order to preserve backwards compatibility with callers that expect a
    # requirement set consisting only of source distributions that can be
    # converted to `dumb binary distributions'.
    if not use_wheels and self.arguments_allow_wheels(arguments):
        command_line.append('--no-binary=:all:')
    # Use `--no-index' to force pip to only consider source distribution
    # archives contained in pip-accel's local source distribution index
    # directory. This enables pip-accel to ask pip "Can the local source
    # distribution index satisfy all requirements in the given requirement
    # set?" which enables pip-accel to keep pip off the internet unless
    # absolutely necessary :-).
    if not use_remote_index:
        command_line.append('--no-index')
    # Use `--no-clean' to instruct pip to unpack the source distribution
    # archives and *not* clean up the unpacked source distributions
    # afterwards. This enables pip-accel to replace pip's installation
    # logic with cached binary distribution archives.
    command_line.append('--no-clean')
    # Use `--build-directory' to instruct pip to unpack the source
    # distribution archives to a temporary directory managed by pip-accel.
    # We will clean up the build directory when we're done using the
    # unpacked source distributions.
    command_line.append('--build-directory=%s' % self.build_directory)
    # Append the user's `pip install ...' arguments to the command line
    # that we just assembled.
    command_line.extend(arguments)
    logger.info("Executing command: pip install %s", ' '.join(command_line))
    # Clear the build directory to prevent PreviousBuildDirError exceptions.
    self.clear_build_directory()
    # During the pip 6.x upgrade pip-accel switched to using `pip install
    # --download' which can produce an interactive prompt as described in
    # issue 51 [1]. The documented way [2] to get rid of this interactive
    # prompt is pip's --exists-action option, but due to what is most
    # likely a bug in pip this doesn't actually work. The environment
    # variable $PIP_EXISTS_ACTION does work however, so if the user didn't
    # set it we will set a reasonable default for them.
    # [1] https://github.com/paylogic/pip-accel/issues/51
    # [2] https://pip.pypa.io/en/latest/reference/pip.html#exists-action-option
    os.environ.setdefault('PIP_EXISTS_ACTION', 'w')
    # Initialize and run the `pip install' command.
    command = InstallCommand()
    opts, args = command.parse_args(command_line)
    if not opts.ignore_installed:
        # If the user didn't supply the -I, --ignore-installed option we
        # will forcefully disable the option. Refer to the documentation of
        # the AttributeOverrides class for further details.
        opts = AttributeOverrides(opts, ignore_installed=False)
    requirement_set = command.run(opts, args)
    # Make sure the output of pip and pip-accel are not intermingled.
    sys.stdout.flush()
    if requirement_set is None:
        raise NothingToDoError("""
            pip didn't generate a requirement set, most likely you
            specified an empty requirements file?
        """)
    else:
        return self.transform_pip_requirement_set(requirement_set)
def transform_pip_requirement_set(self, requirement_set):
"""
Transform pip's requirement set into one that `pip-accel` can work with.
:param requirement_set: The :class:`pip.req.RequirementSet` object
reported by pip.
:returns: A list of :class:`pip_accel.req.Requirement` objects.
This function converts the :class:`pip.req.RequirementSet` object
reported by pip into a list of :class:`pip_accel.req.Requirement`
objects.
"""
filtered_requirements = []
for requirement in requirement_set.requirements.values():
# The `satisfied_by' property is set by pip when a requirement is
# already satisfied (i.e. a version of the package that satisfies
# the requirement is already installed) and -I, --ignore-installed
# is not used. We filter out these requirements because pip never
# unpacks distributions for these requirements, so pip-accel can't
# do anything useful with such requirements.
if not requirement.satisfied_by:
filtered_requirements.append(requirement)
self.reported_requirements.append(requirement)
return sorted([Requirement(self.config, r) for r in filtered_requirements],
key=lambda r: r.name.lower())
    def install_requirements(self, requirements, **kw):
        """
        Manually install a requirement set from binary and/or wheel distributions.

        :param requirements: A list of :class:`pip_accel.req.Requirement` objects.
        :param kw: Any keyword arguments are passed on to
                   :func:`~pip_accel.bdist.BinaryDistributionManager.install_binary_dist()`.
        :returns: The number of packages that were just installed (an integer).
        """
        install_timer = Timer()
        # Figure out which distribution formats are involved, purely so the
        # log message below reflects what is about to happen.
        install_types = []
        if any(not req.is_wheel for req in requirements):
            install_types.append('binary')
        if any(req.is_wheel for req in requirements):
            install_types.append('wheel')
        logger.info("Installing from %s distributions ..", concatenate(install_types))
        # Track installed files by default (unless the caller specifically opted out).
        kw.setdefault('track_installed_files', True)
        num_installed = 0
        for requirement in requirements:
            # If we're upgrading over an older version, first remove the
            # old version to make sure we don't leave files from old
            # versions around.
            if is_installed(requirement.name):
                uninstall(requirement.name)
            # When installing setuptools we need to uninstall distribute,
            # otherwise distribute will shadow setuptools and all sorts of
            # strange issues can occur (e.g. upgrading to the latest
            # setuptools to gain wheel support and then having everything
            # blow up because distribute doesn't know about wheels).
            if requirement.name == 'setuptools' and is_installed('distribute'):
                uninstall('distribute')
            if requirement.is_editable:
                # Editable requirements are handed back to pip itself
                # (equivalent to `pip install --no-deps --editable ...').
                logger.debug("Installing %s in editable form using pip.", requirement)
                command = InstallCommand()
                opts, args = command.parse_args(['--no-deps', '--editable', requirement.source_directory])
                command.run(opts, args)
            elif requirement.is_wheel:
                # Wheels are installed through pip's wheel module, after
                # checking that the wheel format version is supported.
                logger.info("Installing %s wheel distribution using pip ..", requirement)
                wheel_version = pip_wheel_module.wheel_version(requirement.source_directory)
                pip_wheel_module.check_compatibility(wheel_version, requirement.name)
                requirement.pip_requirement.move_wheel_files(requirement.source_directory)
            else:
                # Everything else is installed from a binary distribution
                # archive managed by pip-accel's binary distribution manager.
                binary_distribution = self.bdists.get_binary_dist(requirement)
                self.bdists.install_binary_dist(binary_distribution, **kw)
            num_installed += 1
        logger.info("Finished installing %s in %s.",
                    pluralize(num_installed, "requirement"),
                    install_timer)
        return num_installed
def arguments_allow_wheels(self, arguments):
"""
Check whether the given command line arguments allow the use of wheels.
:param arguments: A list of strings with command line arguments.
:returns: :data:`True` if the arguments allow wheels, :data:`False` if
they disallow wheels.
Contrary to what the name of this method implies its implementation
actually checks if the user hasn't *disallowed* the use of wheels using
the ``--no-use-wheel`` option (deprecated in pip 7.x) or the
``--no-binary=:all:`` option (introduced in pip 7.x). This is because
wheels are "opt out" in recent versions of pip. I just didn't like the
method name ``arguments_dont_disallow_wheels`` ;-).
"""
return not ('--no-use-wheel' in arguments or match_option_with_value(arguments, '--no-binary', ':all:'))
def create_build_directory(self):
"""Create a new build directory for pip to unpack its archives."""
self.build_directories.append(tempfile.mkdtemp(prefix='pip-accel-build-dir-'))
def clear_build_directory(self):
"""Clear the build directory where pip unpacks the source distribution archives."""
stat = os.stat(self.build_directory)
shutil.rmtree(self.build_directory)
os.makedirs(self.build_directory, stat.st_mode)
def cleanup_temporary_directories(self):
"""Delete the build directories and any temporary directories created by pip."""
while self.build_directories:
shutil.rmtree(self.build_directories.pop())
for requirement in self.reported_requirements:
requirement.remove_temporary_source()
@property
def build_directory(self):
"""Get the pathname of the current build directory (a string)."""
if not self.build_directories:
self.create_build_directory()
return self.build_directories[-1]
class CustomPackageFinder(pip_index_module.PackageFinder):
    """
    Custom :class:`pip.index.PackageFinder` that keeps pip off the internet.

    This subclass neutralizes the index URLs and dependency links that
    :class:`pip.index.PackageFinder` would otherwise consult, extending the
    effect of the ``--no-index`` option to package indexes registered with the
    ``--index=`` option in requirements files. Judging by pip's documentation
    the fact that this has to be monkey patched seems like a bug / oversight
    in pip (IMHO).
    """

    @property
    def index_urls(self):
        """Dummy list of index URLs that is always empty."""
        return []

    @index_urls.setter
    def index_urls(self, value):
        """Dummy setter for index URLs that ignores the value set."""

    @property
    def dependency_links(self):
        """Dummy list of dependency links that is always empty."""
        return []

    @dependency_links.setter
    def dependency_links(self, value):
        """Dummy setter for dependency links that ignores the value set."""
class PatchedAttribute(object):
    """
    Context manager to temporarily patch an object attribute.

    While the context is active the attribute holds the replacement value;
    when the context is exited the attribute is restored to whatever it held
    before the context was entered.
    """

    def __init__(self, object, attribute, value):
        """
        Initialize a :class:`PatchedAttribute` object.

        :param object: The object whose attribute should be patched.
        :param attribute: The name of the attribute to be patched (a string).
        :param value: The temporary value for the attribute.
        """
        # NOTE: The parameter name `object' shadows the builtin but is kept
        # for backwards compatibility with keyword callers.
        self.object = object
        self.attribute = attribute
        self.patched_value = value
        self.original_value = None

    def __enter__(self):
        """Remember the original value and apply the temporary one."""
        self.original_value = getattr(self.object, self.attribute)
        setattr(self.object, self.attribute, self.patched_value)

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        """Put the original value back, regardless of how the context ended."""
        setattr(self.object, self.attribute, self.original_value)
class AttributeOverrides(object):
    """
    :class:`AttributeOverrides` enables overriding of object attributes.

    During the pip 6.x upgrade pip-accel switched to using ``pip install
    --download`` which unintentionally broke backwards compatibility with
    previous versions of pip-accel as documented in `issue 52`_.

    The reason for this is that when pip is given the ``--download`` option it
    internally enables ``--ignore-installed`` (which can be problematic for
    certain use cases as described in `issue 52`_). There is no documented way
    to avoid this behavior, so instead pip-accel resorts to monkey patching to
    restore backwards compatibility.

    :class:`AttributeOverrides` is used to replace pip's parsed command line
    options object with an object that defers all attribute access (gets and
    sets) to the original options object but always reports
    ``ignore_installed`` as :data:`False`, even after it was set to
    :data:`True` by pip (as described above).

    .. _issue 52: https://github.com/paylogic/pip-accel/issues/52
    """

    def __init__(self, opts, **overrides):
        """
        Construct an :class:`AttributeOverrides` instance.

        :param opts: The object to which attribute access is deferred.
        :param overrides: The attributes whose value should be overridden.
        """
        # object.__setattr__() is used here so that our own __setattr__()
        # implementation (which defers to `opts') isn't triggered.
        object.__setattr__(self, 'opts', opts)
        object.__setattr__(self, 'overrides', overrides)

    def __getattr__(self, name):
        """
        Get an attribute's value from overrides or by deferring attribute access.

        :param name: The name of the attribute (a string).
        :returns: The attribute's value.
        """
        try:
            value = self.overrides[name]
        except KeyError:
            logger.debug("AttributeOverrides() getting %s by deferring attribute access ..", name)
            return getattr(self.opts, name)
        else:
            logger.debug("AttributeOverrides() getting %s from overrides ..", name)
            return value

    def __setattr__(self, name, value):
        """
        Set an attribute's value (unless it has an override).

        :param name: The name of the attribute (a string).
        :param value: The new value for the attribute.
        """
        if name not in self.overrides:
            logger.debug("AttributeOverrides() setting %s=%r by deferring attribute access ..", name, value)
            setattr(self.opts, name, value)
        else:
            logger.debug("AttributeOverrides() refusing to set %s=%r (attribute has override) ..", name, value)
| {
"repo_name": "matysek/pip-accel",
"path": "pip_accel/__init__.py",
"copies": "1",
"size": "34535",
"license": "mit",
"hash": 2595270987934753000,
"line_mean": 48.0553977273,
"line_max": 116,
"alpha_frac": 0.6527870277,
"autogenerated": false,
"ratio": 4.688433342383926,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5841220370083927,
"avg_score": null,
"num_lines": null
} |
"""
:py:mod:`pip_accel.caches.s3` - Amazon S3 cache backend
=======================================================
This module implements a cache backend that stores distribution archives in a
user defined `Amazon S3 <http://aws.amazon.com/s3/>`_ bucket. To enable this
backend you need to define the configuration option
:py:attr:`~.Config.s3_cache_bucket` and configure your Amazon S3 API
credentials (see the readme for details).
Using S3 compatible storage services
------------------------------------
The Amazon S3 API has been implemented in several open source projects and
dozens of online services. To use pip-accel with an S3 compatible storage
service you can override the :py:attr:`~.Config.s3_cache_url` option. The
pip-accel test suite actually uses this option to test the S3 cache backend by
running FakeS3_ in the background and pointing pip-accel at the FakeS3 server.
Below are some usage notes that may be relevant for people evaluating this
option.
**Secure connections**
Boto_ has to be told whether to make a "secure" connection to the S3 API and
pip-accel assumes the ``https://`` URL scheme implies a secure connection
while the ``http://`` URL scheme implies a non-secure connection.
**Calling formats**
Boto_ has the concept of "calling formats" for the S3 API and to connect to
the official Amazon S3 API pip-accel needs to specify the "sub-domain calling
format" or the API calls will fail. When you specify a nonstandard S3 API URL
pip-accel tells Boto to use the "ordinary calling format" instead. This
differentiation will undoubtedly not be correct in all cases. If this is
bothering you then feel free to open an issue on GitHub to make pip-accel more
flexible in this regard.
**Credentials**
If you don't specify S3 API credentials and the connection attempt to S3 fails
with "NoAuthHandlerFound: No handler was ready to authenticate" pip-accel will
fall back to an anonymous connection attempt. If that fails as well the S3
cache backend is disabled. It may be useful to note here that the pip-accel
test suite uses FakeS3_ and the anonymous connection fall back works fine.
A note about robustness
-----------------------
The Amazon S3 cache backend implemented in :py:mod:`pip_accel.caches.s3` is
specifically written to gracefully disable itself when it encounters known
errors such as:
- The configuration option :py:attr:`~.Config.s3_cache_bucket` is not set (i.e.
the user hasn't configured the backend yet).
- The :py:mod:`boto` package is not installed (i.e. the user ran ``pip install
pip-accel`` instead of ``pip install 'pip-accel[s3]'``).
- The connection to the S3 API can't be established (e.g. because API
credentials haven't been correctly configured).
- The connection to the configured S3 bucket can't be established (e.g. because
the bucket doesn't exist or the configured credentials don't provide access to
the bucket).
Additionally :py:class:`~pip_accel.caches.CacheManager` automatically disables
cache backends that raise exceptions on
:py:class:`~pip_accel.caches.AbstractCacheBackend.get()` and
:py:class:`~pip_accel.caches.AbstractCacheBackend.put()` operations. The end
result is that when the S3 backend fails you will just revert to using the
cache on the local file system.
Optionally if you are using read only credentials you can disable
:py:class:`~S3CacheBackend.put()` operations by setting the configuration
option :py:attr:`~.Config.s3_cache_readonly`.
----
.. _FakeS3: https://github.com/jubos/fake-s3
.. _Boto: https://github.com/boto/boto
"""
# Standard library modules.
import logging
import os
# External dependencies.
from humanfriendly import coerce_boolean, Timer
# Modules included in our package.
from pip_accel.caches import AbstractCacheBackend
from pip_accel.compat import urlparse
from pip_accel.exceptions import CacheBackendDisabledError, CacheBackendError
from pip_accel.utils import makedirs
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
# The name of the boto.config configuration section that controls general
# settings like the number of retries and the HTTP socket timeout.
BOTO_CONFIG_SECTION = 'Boto'
# The name of the boto.config option that controls the number of retries.
BOTO_CONFIG_NUM_RETRIES_OPTION = 'num_retries'
# The name of the boto.config option that controls the HTTP socket timeout.
BOTO_CONFIG_SOCKET_TIMEOUT_OPTION = 'http_socket_timeout'
# The `coloredlogs' package installs a logging handler on the root logger which
# means all loggers automatically write their log messages to the standard
# error stream. In the case of Boto this is a bit confusing because Boto logs
# messages with the ERROR severity even when nothing is wrong, because it
# tries to connect to the Amazon EC2 metadata service which is (obviously) not
# available outside of Amazon EC2:
#
#   boto[6851] DEBUG Retrieving credentials from metadata server.
#   boto[6851] ERROR Caught exception reading instance data
#
# To avoid confusing users of pip-accel (i.e. this is not an error because it's
# properly handled) we silence the Boto logger. To avoid annoying people who
# actually want to debug Boto we'll also provide an escape hatch in the form of
# an environment variable.
if coerce_boolean(os.environ.get('PIP_ACCEL_SILENCE_BOTO', 'true')):
    # FATAL is above ERROR, so the spurious ERROR records described above are
    # suppressed while truly fatal messages still get through.
    logging.getLogger('boto').setLevel(logging.FATAL)
class S3CacheBackend(AbstractCacheBackend):
    """The S3 cache backend stores distribution archives in a user defined Amazon S3 bucket."""

    # Relative priority of this backend among cache backends (the ordering
    # semantics live in pip_accel.caches, not here).
    PRIORITY = 20

    def get(self, filename):
        """
        Download a cached distribution archive from the configured Amazon S3
        bucket to the local cache.

        :param filename: The filename of the distribution archive (a string).
        :returns: The pathname of a distribution archive on the local file
                  system or ``None`` (when the archive isn't cached in S3).
        :raises: :py:exc:`.CacheBackendError` when any underlying method fails.
        """
        timer = Timer()
        self.check_prerequisites()
        # Check if the distribution archive is available.
        raw_key = self.get_cache_key(filename)
        logger.info("Checking if distribution archive is available in S3 bucket: %s", raw_key)
        key = self.s3_bucket.get_key(raw_key)
        if key is None:
            # Falling through here implicitly returns None (a cache miss).
            logger.debug("Distribution archive is not available in S3 bucket.")
        else:
            # Download the distribution archive to the local binary index.
            # TODO Shouldn't this use LocalCacheBackend.put() instead of
            # implementing the same steps manually?!
            logger.info("Downloading distribution archive from S3 bucket ..")
            local_file = os.path.join(self.config.binary_cache, filename)
            makedirs(os.path.dirname(local_file))
            # NOTE(review): this writes straight to the final pathname, so a
            # download that fails halfway can leave a truncated file behind.
            key.get_contents_to_filename(local_file)
            logger.debug("Finished downloading distribution archive from S3 bucket in %s.", timer)
            return local_file

    def put(self, filename, handle):
        """
        Upload a distribution archive to the configured Amazon S3 bucket.

        If the :py:attr:`~.Config.s3_cache_readonly` configuration option is
        enabled this method does nothing.

        :param filename: The filename of the distribution archive (a string).
        :param handle: A file-like object that provides access to the
                       distribution archive.
        :raises: :py:exc:`.CacheBackendError` when any underlying method fails.
        """
        if self.config.s3_cache_readonly:
            logger.info('Skipping upload to S3 bucket (using S3 in read only mode).')
        else:
            timer = Timer()
            self.check_prerequisites()
            # Imported locally so that this module can be loaded without boto
            # being installed (boto availability is checked separately in
            # check_prerequisites()).
            from boto.s3.key import Key
            raw_key = self.get_cache_key(filename)
            logger.info("Uploading distribution archive to S3 bucket: %s", raw_key)
            key = Key(self.s3_bucket)
            key.key = raw_key
            try:
                key.set_contents_from_file(handle)
            except Exception as e:
                # Intentionally broad: any failure to upload switches the
                # backend into read only mode instead of propagating the error.
                logger.info("Encountered error writing to S3 bucket, falling back to read only mode (exception: %s)", e)
                self.config.s3_cache_readonly = True
            else:
                logger.info("Finished uploading distribution archive to S3 bucket in %s.", timer)

    @property
    def s3_bucket(self):
        """
        Connect to the user defined Amazon S3 bucket.

        Called on demand by :py:func:`get()` and :py:func:`put()`. Caches its
        return value so that only a single connection is created.

        :returns: A :py:class:`boto.s3.bucket.Bucket` object.
        :raises: :py:exc:`.CacheBackendDisabledError` when the user hasn't
                 defined :py:attr:`.Config.s3_cache_bucket`.
        :raises: :py:exc:`.CacheBackendError` when the connection to the Amazon
                 S3 bucket fails.
        """
        # Memoized on the instance: `cached_bucket' only exists after the
        # first successful connection.
        if not hasattr(self, 'cached_bucket'):
            from boto.exception import BotoClientError, BotoServerError, S3ResponseError
            # The following try/except block translates unexpected exceptions
            # raised by Boto into a CacheBackendError exception.
            try:
                # The following try/except block handles the expected exception
                # raised by Boto when an Amazon S3 bucket does not exist.
                try:
                    logger.debug("Connecting to Amazon S3 bucket: %s", self.config.s3_cache_bucket)
                    self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
                except S3ResponseError as e:
                    if e.status == 404 and self.config.s3_cache_create_bucket:
                        # Creating missing buckets is opt-in via the
                        # s3_cache_create_bucket configuration option.
                        logger.info("Amazon S3 bucket doesn't exist yet, creating it now: %s", self.config.s3_cache_bucket)
                        self.s3_connection.create_bucket(self.config.s3_cache_bucket)
                        self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
                    else:
                        # Don't swallow exceptions we can't handle.
                        raise
            except (BotoClientError, BotoServerError):
                raise CacheBackendError("""
                    Failed to connect to the configured Amazon S3 bucket
                    {bucket}! Are you sure the bucket exists and is accessible
                    using the provided credentials? The Amazon S3 cache backend
                    will be disabled for now.
                """, bucket=repr(self.config.s3_cache_bucket))
        return self.cached_bucket

    @property
    def s3_connection(self):
        """
        Connect to the Amazon S3 API.

        If the connection attempt fails because Boto can't find credentials the
        attempt is retried once with an anonymous connection.

        Called on demand by :py:attr:`s3_bucket`.

        :returns: A :py:class:`boto.s3.connection.S3Connection` object.
        :raises: :py:exc:`.CacheBackendError` when the connection to the Amazon
                 S3 API fails.
        """
        # Memoized on the instance, same pattern as `s3_bucket' above.
        if not hasattr(self, 'cached_connection'):
            import boto
            from boto.exception import BotoClientError, BotoServerError, NoAuthHandlerFound
            from boto.s3.connection import S3Connection, SubdomainCallingFormat, OrdinaryCallingFormat
            try:
                # Configure the number of retries and the socket timeout used
                # by Boto. Based on the snippet given in the following email:
                # https://groups.google.com/d/msg/boto-users/0osmP0cUl5Y/X4NdlMGWKiEJ
                if not boto.config.has_section(BOTO_CONFIG_SECTION):
                    boto.config.add_section(BOTO_CONFIG_SECTION)
                boto.config.set(BOTO_CONFIG_SECTION,
                                BOTO_CONFIG_NUM_RETRIES_OPTION,
                                str(self.config.s3_cache_retries))
                boto.config.set(BOTO_CONFIG_SECTION,
                                BOTO_CONFIG_SOCKET_TIMEOUT_OPTION,
                                str(self.config.s3_cache_timeout))
                logger.debug("Connecting to Amazon S3 API ..")
                endpoint = urlparse(self.config.s3_cache_url)
                host, _, port = endpoint.netloc.partition(':')
                is_secure = (endpoint.scheme == 'https')
                # The official Amazon endpoint requires the sub-domain calling
                # format; S3 compatible services generally need the ordinary
                # calling format (see the module docstring).
                calling_format = SubdomainCallingFormat() if host == S3Connection.DefaultHost else OrdinaryCallingFormat()
                try:
                    self.cached_connection = S3Connection(host=host,
                                                          port=int(port) if port else None,
                                                          is_secure=is_secure,
                                                          calling_format=calling_format)
                except NoAuthHandlerFound:
                    # Retry once without credentials (works e.g. with FakeS3).
                    logger.debug("Amazon S3 API credentials missing, retrying with anonymous connection ..")
                    self.cached_connection = S3Connection(host=host,
                                                          port=int(port) if port else None,
                                                          is_secure=is_secure,
                                                          calling_format=calling_format,
                                                          anon=True)
            except (BotoClientError, BotoServerError):
                raise CacheBackendError("""
                    Failed to connect to the Amazon S3 API! Most likely your
                    credentials are not correctly configured. The Amazon S3
                    cache backend will be disabled for now.
                """)
        return self.cached_connection

    def get_cache_key(self, filename):
        """
        Compose an S3 cache key based on :py:attr:`.Config.s3_cache_prefix` and
        the given filename.

        :param filename: The filename of the distribution archive (a string).
        :returns: The cache key for the given filename (a string).
        """
        # filter(None, ...) drops an empty/unset prefix so the key doesn't
        # start with a spurious slash.
        return '/'.join(filter(None, [self.config.s3_cache_prefix, filename]))

    def check_prerequisites(self):
        """
        Validate the prerequisites required to use the Amazon S3 cache backend.

        Makes sure the Amazon S3 cache backend is configured
        (:py:attr:`.Config.s3_cache_bucket` is defined by the user) and
        :py:mod:`boto` is available for use.

        :raises: :py:exc:`.CacheBackendDisabledError` when a prerequisite fails.
        """
        if not self.config.s3_cache_bucket:
            raise CacheBackendDisabledError("""
                To use Amazon S3 as a cache you have to set the environment
                variable $PIP_ACCEL_S3_BUCKET and configure your Amazon S3 API
                credentials (see the documentation for details).
            """)
        try:
            # Probe for boto without binding the module; we only care whether
            # the import succeeds.
            __import__('boto')
        except ImportError:
            raise CacheBackendDisabledError("""
                Boto is required to use Amazon S3 as a cache but it looks like
                Boto is not installed! You can resolve this issue by installing
                pip-accel using the command `pip install pip-accel[s3]'. The
                Amazon S3 cache backend will be disabled for now.
            """)
| {
"repo_name": "theyoprst/pip-accel",
"path": "pip_accel/caches/s3.py",
"copies": "1",
"size": "15890",
"license": "mit",
"hash": -2081011666149970400,
"line_mean": 47.0060422961,
"line_max": 123,
"alpha_frac": 0.6451856514,
"autogenerated": false,
"ratio": 4.373795761078998,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5518981412478998,
"avg_score": null,
"num_lines": null
} |
"""
Amazon S3 cache backend.
This module implements a cache backend that stores distribution archives in a
user defined `Amazon S3 <http://aws.amazon.com/s3/>`_ bucket. To enable this
backend you need to define the configuration option
:attr:`~.Config.s3_cache_bucket` and configure your Amazon S3 API
credentials (see the readme for details).
Using S3 compatible storage services
------------------------------------
The Amazon S3 API has been implemented in several open source projects and
dozens of online services. To use pip-accel with an S3 compatible storage
service you can override the :attr:`~.Config.s3_cache_url` option. The
pip-accel test suite actually uses this option to test the S3 cache backend by
running FakeS3_ in the background and pointing pip-accel at the FakeS3 server.
Below are some usage notes that may be relevant for people evaluating this
option.
**Secure connections**
Boto_ has to be told whether to make a "secure" connection to the S3 API and
pip-accel assumes the ``https://`` URL scheme implies a secure connection
while the ``http://`` URL scheme implies a non-secure connection.
**Calling formats**
Boto_ has the concept of "calling formats" for the S3 API and to connect to
the official Amazon S3 API pip-accel needs to specify the "sub-domain calling
format" or the API calls will fail. When you specify a nonstandard S3 API URL
pip-accel tells Boto to use the "ordinary calling format" instead. This
differentiation will undoubtedly not be correct in all cases. If this is
bothering you then feel free to open an issue on GitHub to make pip-accel more
flexible in this regard.
**Credentials**
If you don't specify S3 API credentials and the connection attempt to S3 fails
with "NoAuthHandlerFound: No handler was ready to authenticate" pip-accel will
fall back to an anonymous connection attempt. If that fails as well the S3
cache backend is disabled. It may be useful to note here that the pip-accel
test suite uses FakeS3_ and the anonymous connection fall back works fine.
A note about robustness
-----------------------
The Amazon S3 cache backend implemented in :mod:`pip_accel.caches.s3` is
specifically written to gracefully disable itself when it encounters known
errors such as:
- The configuration option :attr:`~.Config.s3_cache_bucket` is not set (i.e.
the user hasn't configured the backend yet).
- The :mod:`boto` package is not installed (i.e. the user ran ``pip install
pip-accel`` instead of ``pip install 'pip-accel[s3]'``).
- The connection to the S3 API can't be established (e.g. because API
credentials haven't been correctly configured).
- The connection to the configured S3 bucket can't be established (e.g. because
the bucket doesn't exist or the configured credentials don't provide access to
the bucket).
Additionally :class:`~pip_accel.caches.CacheManager` automatically disables
cache backends that raise exceptions on
:class:`~pip_accel.caches.AbstractCacheBackend.get()` and
:class:`~pip_accel.caches.AbstractCacheBackend.put()` operations. The end
result is that when the S3 backend fails you will just revert to using the
cache on the local file system.
Optionally if you are using read only credentials you can disable
:class:`~S3CacheBackend.put()` operations by setting the configuration
option :attr:`~.Config.s3_cache_readonly`.
----
.. _FakeS3: https://github.com/jubos/fake-s3
.. _Boto: https://github.com/boto/boto
"""
# Standard library modules.
import logging
import os
# External dependencies.
from humanfriendly import coerce_boolean, Timer
# Modules included in our package.
from pip_accel.caches import AbstractCacheBackend
from pip_accel.compat import urlparse
from pip_accel.exceptions import CacheBackendDisabledError, CacheBackendError
from pip_accel.utils import AtomicReplace, makedirs
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
# The name of the boto.config configuration section that controls general
# settings like the number of retries and the HTTP socket timeout.
BOTO_CONFIG_SECTION = 'Boto'
# The name of the boto.config option that controls the number of retries.
BOTO_CONFIG_NUM_RETRIES_OPTION = 'num_retries'
# The name of the boto.config option that controls the HTTP socket timeout.
BOTO_CONFIG_SOCKET_TIMEOUT_OPTION = 'http_socket_timeout'
# The `coloredlogs' package installs a logging handler on the root logger which
# means all loggers automatically write their log messages to the standard
# error stream. In the case of Boto this is a bit confusing because Boto logs
# messages with the ERROR severity even when nothing is wrong, because it
# tries to connect to the Amazon EC2 metadata service which is (obviously) not
# available outside of Amazon EC2:
#
#   boto[6851] DEBUG Retrieving credentials from metadata server.
#   boto[6851] ERROR Caught exception reading instance data
#
# To avoid confusing users of pip-accel (i.e. this is not an error because it's
# properly handled) we silence the Boto logger. To avoid annoying people who
# actually want to debug Boto we'll also provide an escape hatch in the form of
# an environment variable.
if coerce_boolean(os.environ.get('PIP_ACCEL_SILENCE_BOTO', 'true')):
    # FATAL is above ERROR, so the spurious ERROR records described above are
    # suppressed while truly fatal messages still get through.
    logging.getLogger('boto').setLevel(logging.FATAL)
class S3CacheBackend(AbstractCacheBackend):
"""The S3 cache backend stores distribution archives in a user defined Amazon S3 bucket."""
PRIORITY = 20
    def get(self, filename):
        """
        Download a distribution archive from the configured Amazon S3 bucket.

        :param filename: The filename of the distribution archive (a string).
        :returns: The pathname of a distribution archive on the local file
                  system or :data:`None` (when the archive isn't cached in S3).
        :raises: :exc:`.CacheBackendError` when any underlying method fails.
        """
        timer = Timer()
        self.check_prerequisites()
        # Check if the distribution archive is available.
        raw_key = self.get_cache_key(filename)
        logger.info("Checking if distribution archive is available in S3 bucket: %s", raw_key)
        key = self.s3_bucket.get_key(raw_key)
        if key is None:
            # Falling through here implicitly returns None (a cache miss).
            logger.debug("Distribution archive is not available in S3 bucket.")
        else:
            # Download the distribution archive to the local binary index.
            # TODO Shouldn't this use LocalCacheBackend.put() instead of
            # implementing the same steps manually?!
            logger.info("Downloading distribution archive from S3 bucket ..")
            file_in_cache = os.path.join(self.config.binary_cache, filename)
            makedirs(os.path.dirname(file_in_cache))
            # AtomicReplace hands us a temporary filename and presumably moves
            # it over `file_in_cache' on success so readers never observe a
            # partially downloaded archive -- TODO confirm in pip_accel.utils.
            with AtomicReplace(file_in_cache) as temporary_file:
                key.get_contents_to_filename(temporary_file)
            logger.debug("Finished downloading distribution archive from S3 bucket in %s.", timer)
            return file_in_cache
    def put(self, filename, handle):
        """
        Upload a distribution archive to the configured Amazon S3 bucket.

        If the :attr:`~.Config.s3_cache_readonly` configuration option is
        enabled this method does nothing.

        :param filename: The filename of the distribution archive (a string).
        :param handle: A file-like object that provides access to the
                       distribution archive.
        :raises: :exc:`.CacheBackendError` when any underlying method fails.
        """
        if self.config.s3_cache_readonly:
            logger.info('Skipping upload to S3 bucket (using S3 in read only mode).')
        else:
            timer = Timer()
            self.check_prerequisites()
            # Imported locally so that this module can be loaded without boto
            # being installed (boto is an optional `pip-accel[s3]' extra).
            from boto.s3.key import Key
            raw_key = self.get_cache_key(filename)
            logger.info("Uploading distribution archive to S3 bucket: %s", raw_key)
            key = Key(self.s3_bucket)
            key.key = raw_key
            try:
                key.set_contents_from_file(handle)
            except Exception as e:
                # Intentionally broad: any failure to upload switches the
                # backend into read only mode instead of propagating the error.
                logger.info("Encountered error writing to S3 bucket, falling back to read only mode (exception: %s)", e)
                self.config.s3_cache_readonly = True
            else:
                logger.info("Finished uploading distribution archive to S3 bucket in %s.", timer)
    @property
    def s3_bucket(self):
        """
        Connect to the user defined Amazon S3 bucket.

        Called on demand by :func:`get()` and :func:`put()`. Caches its
        return value so that only a single connection is created.

        :returns: A :class:`boto.s3.bucket.Bucket` object.
        :raises: :exc:`.CacheBackendDisabledError` when the user hasn't
                 defined :attr:`.Config.s3_cache_bucket`.
        :raises: :exc:`.CacheBackendError` when the connection to the Amazon
                 S3 bucket fails.
        """
        # Memoized on the instance: `cached_bucket' only exists after the
        # first successful connection.
        if not hasattr(self, 'cached_bucket'):
            from boto.exception import BotoClientError, BotoServerError, S3ResponseError
            # The following try/except block translates unexpected exceptions
            # raised by Boto into a CacheBackendError exception.
            try:
                # The following try/except block handles the expected exception
                # raised by Boto when an Amazon S3 bucket does not exist.
                try:
                    logger.debug("Connecting to Amazon S3 bucket: %s", self.config.s3_cache_bucket)
                    self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
                except S3ResponseError as e:
                    if e.status == 404 and self.config.s3_cache_create_bucket:
                        # Creating missing buckets is opt-in via the
                        # s3_cache_create_bucket configuration option.
                        logger.info("Amazon S3 bucket doesn't exist yet, creating it now: %s",
                                    self.config.s3_cache_bucket)
                        self.s3_connection.create_bucket(self.config.s3_cache_bucket)
                        self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
                    else:
                        # Don't swallow exceptions we can't handle.
                        raise
            except (BotoClientError, BotoServerError):
                raise CacheBackendError("""
                    Failed to connect to the configured Amazon S3 bucket
                    {bucket}! Are you sure the bucket exists and is accessible
                    using the provided credentials? The Amazon S3 cache backend
                    will be disabled for now.
                """, bucket=repr(self.config.s3_cache_bucket))
        return self.cached_bucket
    @property
    def s3_connection(self):
        """
        Connect to the Amazon S3 API.
        If the connection attempt fails because Boto can't find credentials the
        attempt is retried once with an anonymous connection.
        Called on demand by :attr:`s3_bucket`.
        :returns: A :class:`boto.s3.connection.S3Connection` object.
        :raises: :exc:`.CacheBackendError` when the connection to the Amazon
                 S3 API fails.
        """
        # Lazily create and memoize the connection on first access.
        if not hasattr(self, 'cached_connection'):
            import boto
            from boto.exception import BotoClientError, BotoServerError, NoAuthHandlerFound
            from boto.s3.connection import S3Connection, SubdomainCallingFormat, OrdinaryCallingFormat
            try:
                # Configure the number of retries and the socket timeout used
                # by Boto. Based on the snippet given in the following email:
                # https://groups.google.com/d/msg/boto-users/0osmP0cUl5Y/X4NdlMGWKiEJ
                if not boto.config.has_section(BOTO_CONFIG_SECTION):
                    boto.config.add_section(BOTO_CONFIG_SECTION)
                boto.config.set(BOTO_CONFIG_SECTION,
                                BOTO_CONFIG_NUM_RETRIES_OPTION,
                                str(self.config.s3_cache_retries))
                boto.config.set(BOTO_CONFIG_SECTION,
                                BOTO_CONFIG_SOCKET_TIMEOUT_OPTION,
                                str(self.config.s3_cache_timeout))
                logger.debug("Connecting to Amazon S3 API ..")
                # Derive host/port/scheme from the configured endpoint URL.
                endpoint = urlparse(self.config.s3_cache_url)
                host, _, port = endpoint.netloc.partition(':')
                is_secure = (endpoint.scheme == 'https')
                # Subdomain (virtual-hosted) calling format is used only for
                # the default AWS host; custom endpoints get path style.
                calling_format = (SubdomainCallingFormat() if host == S3Connection.DefaultHost
                                  else OrdinaryCallingFormat())
                try:
                    self.cached_connection = S3Connection(host=host,
                                                          port=int(port) if port else None,
                                                          is_secure=is_secure,
                                                          calling_format=calling_format)
                except NoAuthHandlerFound:
                    # No credentials available: retry anonymously (public buckets).
                    logger.debug("Amazon S3 API credentials missing, retrying with anonymous connection ..")
                    self.cached_connection = S3Connection(host=host,
                                                          port=int(port) if port else None,
                                                          is_secure=is_secure,
                                                          calling_format=calling_format,
                                                          anon=True)
            except (BotoClientError, BotoServerError):
                raise CacheBackendError("""
                    Failed to connect to the Amazon S3 API! Most likely your
                    credentials are not correctly configured. The Amazon S3
                    cache backend will be disabled for now.
                """)
        return self.cached_connection
def get_cache_key(self, filename):
"""
Compose an S3 cache key based on :attr:`.Config.s3_cache_prefix` and the given filename.
:param filename: The filename of the distribution archive (a string).
:returns: The cache key for the given filename (a string).
"""
return '/'.join(filter(None, [self.config.s3_cache_prefix, filename]))
def check_prerequisites(self):
"""
Validate the prerequisites required to use the Amazon S3 cache backend.
Makes sure the Amazon S3 cache backend is configured
(:attr:`.Config.s3_cache_bucket` is defined by the user) and
:mod:`boto` is available for use.
:raises: :exc:`.CacheBackendDisabledError` when a prerequisite fails.
"""
if not self.config.s3_cache_bucket:
raise CacheBackendDisabledError("""
To use Amazon S3 as a cache you have to set the environment
variable $PIP_ACCEL_S3_BUCKET and configure your Amazon S3 API
credentials (see the documentation for details).
""")
try:
__import__('boto')
except ImportError:
raise CacheBackendDisabledError("""
Boto is required to use Amazon S3 as a cache but it looks like
Boto is not installed! You can resolve this issue by installing
pip-accel using the command `pip install pip-accel[s3]'. The
Amazon S3 cache backend will be disabled for now.
""")
| {
"repo_name": "matysek/pip-accel",
"path": "pip_accel/caches/s3.py",
"copies": "1",
"size": "15862",
"license": "mit",
"hash": -4707493154543464000,
"line_mean": 46.7771084337,
"line_max": 120,
"alpha_frac": 0.6451267179,
"autogenerated": false,
"ratio": 4.418384401114206,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5563511119014206,
"avg_score": null,
"num_lines": null
} |
# Accelerometer handler for the ADXL345 over I2C (smbus).
import smbus
import time
bus = smbus.SMBus(1)
address = 0x1D  # I2C address of the sensor
xyz_adr = [0x32, 0x34, 0x36]  # low-byte data registers for the X, Y, Z axes
# NOTE(review): register meanings below are taken from the original comments;
# 0x2D <- 0x08 is presumably POWER_CTL measurement mode -- confirm against the
# ADXL345 datasheet.
bus.write_byte_data(address, 0x2D, 0x08)
bus.write_byte_data(address, 0x2E, 0x70) #Enable interrupt
bus.write_byte_data(address, 0x2A, 0x07) #Enable all axes
bus.write_byte_data(address, 0x1D, 0x30) #tap threshold, 62.5 mg/LSB
bus.write_byte_data(address, 0x21, 0x10) #tap duration, 625us/LSB
bus.write_byte_data(address, 0x22, 0x10) #tap latency, 1.25ms/LSB
bus.write_byte_data(address, 0x23, 0x50) #tap window, 1.25ms/LSB
def measure():
    """Read all three acceleration axes and rescale each reading to [0.0, 1.0]
    (0.5 is the midpoint / zero g)."""
    readings = []
    for reg in xyz_adr:
        # Combine the low and high data-register bytes of this axis.
        lo = bus.read_byte_data(address, reg)
        hi = bus.read_byte_data(address, reg + 1)
        raw = (hi << 8) + lo
        # NOTE(review): the sign conversion below assumes a 16-bit
        # two's-complement reading but triggers at > 0x1FF (a 10-bit sign
        # bit); values 0x200..0x7FFF collapse onto the -1.0 clamp. Confirm
        # against the ADXL345 data format before changing.
        if raw > 0x1FF:
            raw = (65536 - raw) * -1
        # Rescale to g-like units, clamp to [-1, 1], then shift into [0, 1].
        value = raw * 3.9/1000
        if value > 1.0:
            value = 1.0
        elif value < (-1.0):
            value = (-1.0)
        readings.append((value + 1.0) * 0.5)
    return readings
def tapDetection():
    """Poll the interrupt-source register (0x30) and report whether a
    single tap was detected."""
    status = bus.read_byte_data(address, 0x30)
    # Strip the always-set DATA_READY(128), Watermark(2) and Overrun(1) bits.
    status -= 131
    return status == 64  # remaining single-tap bit
| {
"repo_name": "DiamondOhana/jphacks",
"path": "python_main/sonilab/accelerator.py",
"copies": "2",
"size": "1555",
"license": "mit",
"hash": -3534044089015495000,
"line_mean": 23.6825396825,
"line_max": 75,
"alpha_frac": 0.6154340836,
"autogenerated": false,
"ratio": 2.520259319286872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41356934028868714,
"avg_score": null,
"num_lines": null
} |
"""Accelerator module."""
import numpy as _np
import pyaccel as _pyaccel
from . import lattice as _lattice
# Default switch states applied to newly created accelerator models.
default_cavity_on = False
default_radiation_on = False
default_vchamber_on = False
def create_accelerator(optics_mode=_lattice.default_optics_mode,
                       simplified=False):
    """Build and return the accelerator model for this lattice.

    :param optics_mode: optics configuration forwarded to the lattice builder.
    :param simplified: whether to build the simplified lattice variant.
    """
    return _pyaccel.accelerator.Accelerator(
        lattice=_lattice.create_lattice(optics_mode=optics_mode,
                                        simplified=simplified),
        energy=_lattice.energy,
        harmonic_number=_lattice.harmonic_number,
        cavity_on=default_cavity_on,
        radiation_on=default_radiation_on,
        vchamber_on=default_vchamber_on,
    )
# Static metadata describing this accelerator model.
accelerator_data = dict()
accelerator_data['lattice_version'] = 'SI_V24_04'
accelerator_data['global_coupling'] = 0.01  # expected corrected value
accelerator_data['pressure_profile'] = \
    _np.array([[0, 518.396], [1.333e-9]*2])  # [s [m], p [mbar]]
| {
"repo_name": "lnls-fac/sirius",
"path": "pymodels/SI_V24_04/accelerator.py",
"copies": "1",
"size": "1026",
"license": "mit",
"hash": -3239328597780136400,
"line_mean": 29.1764705882,
"line_max": 70,
"alpha_frac": 0.656920078,
"autogenerated": false,
"ratio": 3.363934426229508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9520854504229508,
"avg_score": 0,
"num_lines": 34
} |
# Accelerometer Grapher and Fall Dector - Hugh O'Brien March 2009
#
#This is a script for PyS60 that opens a bluetooth serial connection
#to a pre-programmed SHIMMER sensor, The SHIMMER provides accelerometer
#data in the form "1111 1111 1111" where '1111' will be in the range
#of 0 -> 4400. The three values represent the data gathered
#from monitoring the three axis of the accelerometer.
#
#The script reduces the accuracy of these values in order to be able
#to graph them on a screen that is only 320x240px in size
#
#The script also monitors the difference between two subsequent
#readings in order to determine if a large movement has occured.
#This can be interpreted as a fall. A call is then placed to a
#pre-defined telephone number and the details of the victim are
#read out to the receiver.
import e32, appuifw, audio, telephone
#btsocket is the 'old' BT system, new version introduced in
#PyS60 1.9.1 is harder to work with.
import btsocket as socket
#A predefined BT MAC address can be set here to skip the discovery process.
target = ''
#Emergency-contact and victim details read out by fall().
contact_name = "John Watson"
contact_number = "5550137"
victim_name = "Mr. Sherlock Holmes"
victim_address = "221 B. Baker Street. London"
#Minimum per-axis jump between two consecutive rescaled samples that is
#treated as a fall (units follow graph_data()'s /60 scaling).
sensitivity = 28
def fall():
    """Dial the emergency contact, read out the victim's details three
    times, then reset the sample buffers and release the main loop."""
    global app_lock, contact_name, contact_number, victim_name,\
    victim_address, data, prev
    audio.say("Dialling %s now" % contact_name)
    telephone.dial(contact_number)
    e32.ao_sleep(7) #7 sec delay for someone to answer
    #Count down 2, 1, 0 so the spoken "repeat N more times" is accurate.
    for i in range(2, -1, -1):
        audio.say("This is an automated message. A fall has been detected.\
        Please assist %s at address %s. \
        This message will repeat %d more times" \
        % (victim_name, victim_address, i) )
    telephone.hang_up()
    data = ( 40, 40, 40 ) #reset values so as not to trigger again
    prev = data
    app_lock.signal() #unlock the main loop
def connect():
    """Open the Bluetooth serial socket to the SHIMMER sensor, discovering
    a target device first if none is preconfigured."""
    global btsocket, target
    try:
        #socket params passed to the OS
        btsocket = socket.socket(socket.AF_BT, socket.SOCK_STREAM)
        if not target:
            #no target defined: begin the OS discovery routine
            address, services = socket.bt_discover()
            target = (address, services.values()[0])
        btsocket.connect(target)
        appuifw.note(u"Connected to " + str(address), "info")
    except:
        #fail cleanly
        appuifw.note(u"Error connecting to device")
        btsocket.close()
def getData():
    """Receive single characters from the BT socket, skipping leading line
    terminators, and return the buffered characters of one line."""
    global btsocket
    chars = []
    ch = btsocket.recv(1)
    #spin here until we get a 'real' char
    while ch in ('\n', '\r'):
        ch = btsocket.recv(1)
    #buffer 'real' chars until the next terminator
    while ch not in ('\n', '\r'):
        chars.append(ch)
        ch = btsocket.recv(1)
    return ''.join(chars)
def graph_data(input):
    #this function produces the graphs on the screen. the screen is
    #landscape oriented with a resolution of 240x320. The constants seen
    #here are used to define where on the screen the graphs should be drawn
    #NOTE(review): the parameter name `input` shadows the builtin of the
    #same name; harmless here but worth renaming eventually.
    global count, canvas, prev, data
    #take the input string formated like "1111 1111 1111" and parse it
    #to acquire 3 sets of chars and then interpret them as digits saving
    #them to a list in this format: ( '1111', '1111', '1111' )
    #the values are then divided by 60 as they will be in the range
    #0 -> x -> 4400 as the screen is only 240px high. furthermore as there
    #are three graphs being drawn each is confined to (240 / 3 )px of
    #height. The divisor of 60 accommodates this at the cost of accuracy.
    #NOTE(review): the fixed slices assume each field is exactly 4 chars
    #wide; anything else raises ValueError and hits the fallback below.
    try:
        data = (\
        int(input[0:4]) / 60, \
        int(input[5:9]) / 60, \
        int(input[10:14]) / 60\
        )
    #sane defaults if we receive a malformed reading
    except ValueError:
        data = ( 36, 36, 36 )
    #redraw the screen if there are more than 280 samples displayed.
    if count > 280:
        reset()
    #draw a line, with the X1 starting 10 points from the left and
    #expanding right, Y1 being the previous value of Y2 (initially zero)
    #plus a vertical offset so the graphs don't overlap each other, X2
    #being one point right of X1 and Y2 one of the 3 XYZ readings plus
    #the vertical offset. other options are purely aesthetic.
    canvas.line(\
    (count + 10, prev[0], count + 11, data[0] ), \
    outline = 0xFF0000, width = 1)
    canvas.line(\
    (count + 10, prev[1] + 80, count + 11, data[1] + 80), \
    outline = 0x00DD00, width = 1)
    canvas.line(\
    (count + 10, prev[2] + 160, count + 11, data[2] + 160), \
    outline = 0x4444FF, width = 1)
    #increment counter - data should also be pushed into prev here
    #but this happens in the main loop for monitoring reasons
    count = count + 1
def reset():
    """Clear the plot area with a black rectangle and restart the sample
    counter so drawing begins again at the left edge."""
    global count, canvas
    count = 0
    canvas.rectangle((0, 0, 320, 240), fill=0x000000)
#Main
data = ( 0, 0, 0 )
prev = (40, 40, 40) #midpoint values for the 'previous sample' buffer
canvas = appuifw.Canvas() #create a new Canvas object
appuifw.app.body = canvas
appuifw.app.screen = "full" #go 'fullscreen'
appuifw.app.orientation = "landscape" # draw in landscape orientation
appuifw.app.title = u"Activity Monitor" #name the program
app_lock = e32.Ao_lock() #locking system
connect() #open the BT socket
e32.ao_sleep(1) # sleep for 1 second in case of graphical slowness
reset() # initially reset the screen to draw the canvas
while 1: #loop the following code infinitely
    e32.reset_inactivity() #keep the screensaver away
    graph_data( getData() ) # poll the BT data passing it to the grapher.
    #test the movement level between the last two samples
    #(graph_data updates the global `data` tuple before this check)
    if ( (abs(data[0] - prev[0]) > sensitivity ) \
        or (abs(data[1] - prev[1]) > sensitivity ) \
        or (abs(data[2] - prev[2]) > sensitivity ) ):
        fall() #if too much, take action
        app_lock.wait() #pause this loop until fall() finishes
        e32.ao_sleep(1)
        reset()
prev = data #move current data into previous data buffer | {
"repo_name": "hughobrien/shimmer-nokia-fall-detection",
"path": "MovementGrapher.py",
"copies": "1",
"size": "6767",
"license": "mit",
"hash": -3011709792132006400,
"line_mean": 36.2429378531,
"line_max": 75,
"alpha_frac": 0.6485887395,
"autogenerated": false,
"ratio": 3.5747490755414684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47233378150414684,
"avg_score": null,
"num_lines": null
} |
"""Accept a name as a parameter, calculate its word score, and report whether the score is prime."""
import sys
def main():
"""Where the magic happens."""
score = 0
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009]
try:
sys.argv[2] # Check if the user is supplying the name correctly
except IndexError:
for i in range(0, len(sys.argv[1])):
for j in range(0, len(alphabet)):
if sys.argv[1][i] == alphabet[j]:
score += j + 1
break
for i in range(0, len(primes)):
if primes[i] == score:
print str(score) + " and prime"
return
print str(score) + " and NOT prime"
else:
print "If your name contains spaces, please wrap it in quotes."
return
main()  # Run the scorer when the module is executed.
| {
"repo_name": "jessemillar/pythonista",
"path": "Desktop/prime_name.py",
"copies": "1",
"size": "1777",
"license": "mit",
"hash": 3010768539352706000,
"line_mean": 56.3225806452,
"line_max": 830,
"alpha_frac": 0.5312324142,
"autogenerated": false,
"ratio": 2.391655450874832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.841378863716137,
"avg_score": 0.0018198455826921897,
"num_lines": 31
} |
def conv_endian(bytes, offset, numbytes):
    """Interpret `numbytes` elements of `bytes` starting at `offset` as a
    little-endian unsigned integer.

    Returns -1 when the requested range runs past the end of the sequence.

    :param bytes: sequence of byte values (0-255); the name shadows the
        builtin but is kept for backward compatibility with keyword callers.
    :param offset: index of the least-significant byte.
    :param numbytes: how many bytes to combine.
    """
    # Fix: the original accumulated into a local named `sum`, shadowing
    # the builtin of the same name.
    total = 0
    for n in range(numbytes):
        if offset + n < len(bytes):
            total += bytes[offset + n] * (256 ** n)
        else:
            # Out of range; -1 sticks because indexes only grow from here.
            total = -1
    return total
def conv_twos(number, bits):
    """Reinterpret an unsigned `bits`-wide value as a signed two's-complement
    integer and return it."""
    half = 2 ** (bits - 1)
    return number - 2 ** bits if number >= half else number
#needs to recognize when there's a secondary table and when there's not
#NOTE(review): Python 2 only -- `ord(s) for s in string` on the 'rb' read
#fails under Python 3, where iterating bytes already yields ints.
#The mutable default `datas=[]` is only read, never mutated, so the usual
#shared-default pitfall does not apply here.
def readfile(filename, datas = []):
    blocks = []
    fp = open(filename, 'rb')
    string = fp.read()
    fp.close()
    bytes = [ord(s) for s in string]
    #read file length
    filelen = conv_endian(bytes, 0, 4)
    #ADD CHECK FOR FILE LENGTH COMPARED TO WHAT SYSTEM REPORTS
    #read first table, make list of secondary tables
    endtable1 = conv_endian(bytes, 4, 3)
    table1 = []
    for offset in range(4, endtable1, 4):
        table1.append(conv_endian(bytes, offset, 3))
    #for each entry in the primary table (pointer to a block, either data or a secondary table)
    for j in range (0, len(table1)):
        offset = table1[j]
        endtable = conv_endian(bytes, offset, 4)
        #read where the table ends (assuming it's a table)
        if j+1 < len(table1):
            endblock= table1[j+1]
        else:
            endblock = filelen
        #collect secondary-table entries (offsets are relative to `offset`)
        table2 = []
        flag = 0
        for i in range(offset, min((offset+endtable, filelen)), 4):
            table2.append(offset + conv_endian(bytes, i, 4))
        flag = 0
        #truncate the secondary table at the first entry past the block end
        for i in range(0, len(table2)):
            offset2 = table2[i]
            if offset2 > endblock:
                flag += 1
                table2 = table2[:i]
                break
        #if flag > 0 or j in datas:
        if j in datas:
            #caller asked for this block as raw data: keep it whole
            blocks.append(bytes[offset:endblock])
            #print endblock-offset
        else:
            #otherwise split the block into its secondary-table entries
            blocks.append([])
            for i in range(0, len(table2)):
                offset2 = table2[i]
                if i+1 < len(table2):
                    endentry = table2[i+1]
                else:
                    endentry = endblock
                #endentry = table2[-1]+500
                blocks[-1].append(bytes[offset2:endentry])
                # print endentry-offset2
                #outfile = open(filename + '_' + `j` + '_' + `i` + '.txt', 'w')
                #str = "".join([chr(x) for x in bytes[offset2:endentry]])
                #outfile.write(str)
                #outfile.close()
    return blocks
def writefile(filename, blocks):
    #Serialize `blocks` (the structure produced by readfile) back into the
    #length-prefixed table format and write it to `filename`.
    #NOTE(review): Python 2 only -- `remainder /= 256` relies on integer
    #division; under Python 3 it yields floats and corrupts the output.
    #NOTE(review): `debug.txt` is opened and closed without ever being
    #written to; apparently leftover debug scaffolding.
    debug = open('debug.txt', 'wb')
    filesize = 4 + len(blocks) * 4
    pri_table = []
    sec_tables = []
    off_count_1 = len(blocks) * 4 + 4
    flags = []
    #first pass: compute primary-table offsets, secondary tables and total size
    for b1 in blocks:
        if len(b1) > 0 and type(b1[0]) is list:
            #block of sub-entries: needs its own secondary table
            filesize += len(b1) * 4
            sec_table = []
            off_count_2 = len(b1) * 4
            for b2 in b1:
                sec_table.append(off_count_2)
                off_count_2 += len(b2)
                filesize += len(b2)
            pri_table.append(off_count_1)
            off_count_1 += off_count_2
            sec_tables.append(sec_table)
            flags.append(0)
        else:
            #raw data block: stored directly, no secondary table
            filesize += len(b1)
            pri_table.append(off_count_1)
            off_count_1 += len(b1)
            sec_tables.append(b1)
            flags.append(1)
    bytes = []
    #emit the total file size as (at least) 4 little-endian bytes
    remainder = filesize
    while remainder > 0 or len(bytes) < 4:
        bytes.append(remainder % 256)
        remainder /= 256
    #emit the primary table entries, 4 bytes each
    for e in pri_table:
        remainder = e
        count = 0
        while remainder > 0 or count < 4:
            bytes.append(remainder % 256)
            remainder /= 256
            count += 1
    #sec_tables doesn't contain data blocks, need to be inserted
    for i in range(0, len(sec_tables)):
        t = sec_tables[i]
        if flags[i] == 0:
            #secondary table entries first, 4 bytes each ...
            for j in range(0, len(t)):
                e = t[j]
                remainder = e
                count = 0
                while remainder > 0 or count < 4:
                    bytes.append(remainder % 256)
                    remainder /= 256
                    count += 1
            #... then the entry payloads themselves
            for j in range(0, len(t)):
                if len(blocks[i]) > j:
                    if type(blocks[i][j]) is list:
                        bytes.extend(blocks[i][j])
                    else:
                        bytes.append(blocks[i][j])
        else:
            bytes.extend(blocks[i])
    str = "".join([chr(x) for x in bytes])
    debug.close()
    fp = open(filename, 'wb')
    fp.write(str)
    fp.close()
#Top-level driver: dump block 0 of camp.002 as 16-bit word rows, then chunk
#block 1. Python 2 only (print statements and backtick repr).
#NOTE(review): `str = ""` shadows the builtin `str` for the rest of the script.
blocks= readfile("camp.002", [0,1,2])
str = ""
print len(blocks[0])
#interpret block 0 as rows of four 16-bit little-endian values
for i in range(0, len(blocks[0]), 8):
    vals = []
    for j in range(0, 8, 2):
        vals.append(blocks[0][i+j] + 256 * blocks[0][i+j+1])
    if vals[0] == 65535:
        #print "BLANK"
        pass
    else:
        #str += `vals`
        pass
    str += `i/32+1` + "-" + `i%32/8+1` + ": " + `vals` + "\n"
    if i % 32 == 24:
        str += "\n"
f = open("output2.txt", "w")
f.write(str)
f.close()
print str
#split block 1 into fixed 90-byte chunks
chunks = []
for i in range(0, len(blocks[1]), 90):
    chunks.append(blocks[1][i:i+90])
def add(x,y): return x+y
for i in range(0, len(chunks)):
    c = chunks[i]
    for j in range(10, len(c), 20):
        #print `i+1` + '-' + `j/20+1` + ": " + `reduce(add, c[j+4:j+20])`
        pass
# print vals
writefile("camp.000.tst", blocks) | {
"repo_name": "delMar43/wcmodtoolsources",
"path": "WC1_clone/camp/camp_block_expander.py",
"copies": "1",
"size": "4627",
"license": "mit",
"hash": 2561581256046139400,
"line_mean": 20.8301886792,
"line_max": 128,
"alpha_frac": 0.6029824941,
"autogenerated": false,
"ratio": 2.6546184738955825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7994206676365383,
"avg_score": 0.15267885832603997,
"num_lines": 212
} |
"""Acceptance test utils"""
import functools
import logging
import time
import traceback
import pika.compat
def retry_assertion(timeout_sec, retry_interval_sec=0.1):
    """Creates a decorator that retries the decorated function or
    method only upon `AssertionError` exception at the given retry interval
    not to exceed the overall given timeout.
    :param float timeout_sec: overall timeout in seconds
    :param float retry_interval_sec: amount of time to sleep
        between retries in seconds.
    :returns: decorator that implements the following behavior
    1. This decorator guarantees to call the decorated function or method at
    least once.
    2. It passes through all exceptions besides `AssertionError`, preserving the
    original exception and its traceback.
    3. If no exception, it returns the return value from the decorated function/method.
    4. It sleeps `time.sleep(retry_interval_sec)` between retries.
    5. It checks for expiry of the overall timeout before sleeping.
    6. If the overall timeout is exceeded, it re-raises the latest `AssertionError`,
    preserving its original traceback
    """
    def retry_assertion_decorator(func):
        """Decorator"""
        @functools.wraps(func)
        def retry_assertion_wrap(*args, **kwargs):
            """The wrapper"""
            num_attempts = 0
            start_time = pika.compat.time_now()
            while True:
                num_attempts += 1
                try:
                    result = func(*args, **kwargs)
                except AssertionError:
                    now = pika.compat.time_now()
                    # Compensate for time adjustment
                    # (the wall clock may move backwards between samples)
                    if now < start_time:
                        start_time = now
                    if (now - start_time) > timeout_sec:
                        logging.exception(
                            'Exceeded retry timeout of %s sec in %s attempts '
                            'with func %r. Caller\'s stack:\n%s',
                            timeout_sec, num_attempts, func,
                            ''.join(traceback.format_stack()))
                        # bare raise preserves the original traceback
                        raise
                    logging.debug('Attempt %s failed; retrying %r in %s sec.',
                                  num_attempts, func, retry_interval_sec)
                    time.sleep(retry_interval_sec)
                else:
                    logging.debug('%r succeeded at attempt %s',
                                  func, num_attempts)
                    return result
        return retry_assertion_wrap
    return retry_assertion_decorator
| {
"repo_name": "pika/pika",
"path": "tests/misc/test_utils.py",
"copies": "1",
"size": "2615",
"license": "bsd-3-clause",
"hash": -8222856616435301000,
"line_mean": 34.3378378378,
"line_max": 87,
"alpha_frac": 0.5736137667,
"autogenerated": false,
"ratio": 5.009578544061303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013992892179166688,
"num_lines": 74
} |
"""Acceptance test utils"""
import functools
import logging
import time
import traceback
def retry_assertion(timeout_sec, retry_interval_sec=0.1):
    """Build a decorator that re-runs the wrapped callable whenever it raises
    `AssertionError`, sleeping `retry_interval_sec` between attempts, until
    the call succeeds or `timeout_sec` has elapsed (then the most recent
    `AssertionError` is re-raised with its original traceback).

    The wrapped callable is always invoked at least once; any exception
    other than `AssertionError` propagates immediately.

    :param float timeout_sec: overall retry budget in seconds
    :param float retry_interval_sec: sleep between attempts in seconds
    :returns: the decorator
    """
    def decorator(func):
        """Wrap `func` with the retry-on-AssertionError loop."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            """Retry loop around a single call of `func`."""
            attempt = 0
            window_start = time.time()
            while True:
                attempt += 1
                try:
                    outcome = func(*args, **kwargs)
                except AssertionError:
                    now = time.time()
                    # The wall clock may have been adjusted backwards;
                    # restart the timing window in that case.
                    if now < window_start:
                        window_start = now
                    if (now - window_start) > timeout_sec:
                        logging.exception(
                            'Exceeded retry timeout of %s sec in %s attempts '
                            'with func %r. Caller\'s stack:\n%s',
                            timeout_sec, attempt, func,
                            ''.join(traceback.format_stack()))
                        raise
                    logging.debug('Attempt %s failed; retrying %r in %s sec.',
                                  attempt, func, retry_interval_sec)
                    time.sleep(retry_interval_sec)
                else:
                    logging.debug('%r succeeded at attempt %s',
                                  func, attempt)
                    return outcome
        return wrapper
    return decorator
| {
"repo_name": "zixiliuyue/pika",
"path": "tests/acceptance/test_utils.py",
"copies": "4",
"size": "2574",
"license": "bsd-3-clause",
"hash": 435286367165771900,
"line_mean": 34.2602739726,
"line_max": 87,
"alpha_frac": 0.5695415695,
"autogenerated": false,
"ratio": 5.086956521739131,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01418457563367582,
"num_lines": 73
} |
"""Accept a text file and do the fizzbuzz upon it.
Your program should accept a file as its first argument.
The file contains multiple separated lines; each line contains
3 numbers that are space delimited. The first number is the first
divider (X), the second number is the second divider (Y), and the
third number is how far you should count (N). You may assume that
the input file is formatted correctly and the numbers are valid
positive integers.
"""
from __future__ import print_function
import sys
# Path of the input file, taken from the first command-line argument.
INPUT_FILE = sys.argv[1]
def parse_input(input_file):
    """Read the input file and return one list of ints per line
    (each line holds the space-delimited values X, Y and N)."""
    with open(input_file, mode='r') as source:
        return [[int(token) for token in line.split()] for line in source]
def fizzbuzz(X, Y, N):
    """Print the fizzbuzz line for 1..N on a single row: 'F' for multiples
    of X, 'B' for multiples of Y, 'FB' for both, the number otherwise."""
    tokens = []
    for val in range(1, N + 1):
        mark = ('F' if val % X == 0 else '') + ('B' if val % Y == 0 else '')
        tokens.append(mark or str(val))
    if tokens:
        # single trailing newline, values separated by single spaces
        print(' '.join(tokens))
def process():
    """Run fizzbuzz for every (X, Y, N) triple parsed from INPUT_FILE."""
    for triple in parse_input(INPUT_FILE):
        fizzbuzz(triple[0], triple[1], triple[2])
process()  # Run immediately when the module is executed.
| {
"repo_name": "joelstanner/codeeval",
"path": "python_solutions/fizz_buzz/codeeval_fizzbuzz.py",
"copies": "1",
"size": "1333",
"license": "mit",
"hash": -7208140637637672000,
"line_mean": 22.8035714286,
"line_max": 65,
"alpha_frac": 0.5723930983,
"autogenerated": false,
"ratio": 3.652054794520548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4724447892820548,
"avg_score": null,
"num_lines": null
} |
"""Accept incoming log message."""
import logging.handlers
import select
from multilog import IS_PYTHON2
if IS_PYTHON2:
import SocketServer as socketserver
else:
import socketserver
from .handlers import LogHandler
# Defaults for LogReceiver: bind locally on the stdlib's standard TCP logging port.
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = logging.handlers.DEFAULT_TCP_LOGGING_PORT
class LogReceiver(socketserver.ThreadingTCPServer):
    """Simple TCP socket-based logging receiver."""

    # Allow quick restarts on the same address without bind errors.
    allow_reuse_address = True

    def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, handler=LogHandler):
        """Initialize the log receiver.

        :param host: The hostname to bind to
        :param port: The port to listen on
        :param handler: The handler to send received messages to
        """
        socketserver.ThreadingTCPServer.__init__(self, (host, port), handler)
        self.abort = 0    # set non-zero to stop serve_until_stopped()
        self.timeout = 1  # select() timeout in seconds
        self.logname = None

    def serve_until_stopped(self):
        """Poll the listening socket and serve requests until aborted."""
        while True:
            ready, _, _ = select.select([self.socket.fileno()], [], [],
                                        self.timeout)
            if ready:
                self.handle_request()
            if self.abort:
                break
| {
"repo_name": "humangeo/multilog",
"path": "multilog/receivers.py",
"copies": "1",
"size": "1194",
"license": "mit",
"hash": -8163029092508410000,
"line_mean": 26.7674418605,
"line_max": 90,
"alpha_frac": 0.6440536013,
"autogenerated": false,
"ratio": 4.160278745644599,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009863930737441797,
"num_lines": 43
} |
# Accept in gym/101190H
# Wrong Answer in uvalive 7777
def idx(x):
    """Offset x by 32768 so it becomes non-negative for 16-bit signed input.

    NOTE(review): defined but never called anywhere in this solution.
    """
    return 32768 + x
def main():
    # Read all input lines; statements are split into groups, where every
    # line except a group's last ends with the '||' continuation marker.
    text = []
    while True:
        try: text.append(input().strip())
        except EOFError: break
    # Indices of each group's last line, and the matching first-line indices.
    endflag = [i for i in range(len(text)) if text[i][-2:] != '||']
    beginfg = [0] + [_ + 1 for _ in endflag[:-1]]
    for tt in range(len(endflag)):
        # Parse every comparison in this group into a closed interval
        # over the 16-bit signed range [-32768, 32767].
        segs = []
        for i in range(beginfg[tt], endflag[tt] + 1):
            word = text[i].split()
            if i != endflag[tt]: word = word[:-1]  # drop trailing '||'
            if (len(word) == 3):
                # single comparison: "x >/>=/</<= bound"
                bound = int(word[2])
                segs.append([bound, 32767] if word[1][0] == '>' else [-32768, bound])
            else:
                # range form: lower and upper bound on one line
                x, y = int(word[2]), int(word[6])
                if x > y: continue  # empty interval, contributes nothing
                segs.append([x, y])
        segs.sort(key=lambda s: (s[0], s[1]))
        if len(segs) == 0:
            # no satisfiable interval at all
            print('false')
            continue
        # Merge intervals sharing the same start (keep the widest end).
        hoge = [segs[0]]
        for i in range(1, len(segs)):
            if segs[i][0] == segs[i - 1][0]:
                hoge[-1][1] = max(segs[i][1], segs[i - 1][1])
            else: hoge.append(segs[i])
        # Merge overlapping or adjacent intervals into disjoint ones.
        piyo = [hoge[0]]
        for i in range(1, len(hoge)):
            if hoge[i][0] <= piyo[-1][1] + 1:
                piyo[-1][1] = max(piyo[-1][1], hoge[i][1])
            else: piyo.append(hoge[i])
        # Whole range covered: the condition is always true.
        if len(piyo) == 1 and piyo[0][0] == -32768 and piyo[0][1] == 32767:
            print('true')
            continue
        # Render each disjoint interval back into comparison syntax.
        ans = []
        for seg in piyo:
            [x, y] = seg
            if (x == -32768):
                ans.append('x <= ' + str(y))
            elif (y == 32767):
                ans.append('x >= ' + str(x))
            else: ans.append('x >= ' + str(x) + ' && x <= ' + str(y))
        print(' ||\n'.join(ans))
if __name__ == '__main__':
    main()
"repo_name": "polossk/CodeArchive",
"path": "UVALive/uvalive7777.py",
"copies": "1",
"size": "1830",
"license": "mit",
"hash": 3295977871882723000,
"line_mean": 31.6964285714,
"line_max": 85,
"alpha_frac": 0.4202185792,
"autogenerated": false,
"ratio": 3.080808080808081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40010266600080807,
"avg_score": null,
"num_lines": null
} |
"""Acceptor class for Paxos Calendar."""
import pickle
import socket
import time
from Calendar import Calendar
class Acceptor(object):
"""
Acceptor class.
maxPrepare: The maximum proposal number this Acceptor has encountered;
initialized at -1 so a node can have id = 0.
accNum: Number of highest-numbered proposal this Acceptor object
has accepted thus far; initialized as None.
accVal: Value of highest-numbered proposal this Acceptor object
has accepted thus far; initialized as None.
Acceptor has to keep track of its maxPrepare, accNum and accVal in case of
a crash and must be able to write them to stable storage.
"""
def __init__(self, ip_table):
"""Construct Acceptor object."""
self._maxPrepare = -1
from collections import defaultdict
self._accNums = {}
self._accVals = {}
self._command_queue = []
self._commits_queue = []
self._ip_table = ip_table
self._terminate = False
self._is_Acceptor = True
def _send_UDP_message(self, data, IP, UDP_PORT):
"""Send pickled data through UDP socket bound to (IP, UDP_PORT)."""
import pickle
import socket
transmission = pickle.dumps(data)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(transmission, (IP, UDP_PORT))
s.close()
def _recv_prepare(self, message):
"""
Handle reception of prepare message as described in Synod Algorithm.
Prepare messages of form ("prepare", m, log_slot, sender_ID)
"""
m, log_slot, sender_ID = message[1:]
if log_slot not in self._accNums.keys():
self._accNums[log_slot] = None
if log_slot not in self._accVals.keys():
self._accVals[log_slot] = None
if m > self._maxPrepare:
self._maxPrepare = m
IP, UDP_PORT = self._ip_table[sender_ID][0], self._ip_table[sender_ID][2]
self._send_promise(IP, UDP_PORT, self._accNums[log_slot], self._accVals[log_slot], log_slot)
def _send_promise(self, IP, PORT, accNum, accVal, log_slot):
"""Send promise message with given accNum, accVal to given IP, PORT."""
transmission = ("promise", accNum, Calendar.serialize(accVal), log_slot)
self._send_UDP_message(transmission, IP, PORT)
def _recv_accept(self, message):
"""
Handle reception of accept message as described in Synod Algorithm.
"""
m, v, log_slot, sender_ID = message[1:]
if log_slot not in self._accNums.keys():
self._accNums[log_slot] = None
if log_slot not in self._accVals.keys():
self._accVals[log_slot] = None
if m >= self._maxPrepare:
self._accNums[log_slot] = m
self._accVals[log_slot] = v
IP, UDP_PORT = self._ip_table[sender_ID][0], self._ip_table[sender_ID][2]
self._send_ack(IP, UDP_PORT, self._accNums[log_slot], self._accVals[log_slot], log_slot)
def _send_ack(self, IP, PORT, accNum, accVal, log_slot):
    """Send ack with given accNum, accVal to given IP, PORT."""
    # Same wire format as promises, with an "ack" tag.
    payload = ("ack", accNum, Calendar.serialize(accVal), log_slot)
    self._send_UDP_message(payload, IP, PORT)
def _recv_commit(self, message):
    """
    Handle reception of commit message as described in Synod Algorithm.
    Queues the (log_slot, value) pair for later application.
    """
    committed_value = message[1]
    slot = message[2]
    self._commits_queue.append((slot, committed_value))
def start(self):
    """Start the Acceptor; serve messages in its queue until terminated.

    Polls self._command_queue and dispatches each message to the matching
    Synod handler; exits when self._terminate is set.
    """
    # (Removed the unused "stap" list and the unused debug_str variable
    # that only fed commented-out debug prints.)
    while True:
        if self._command_queue:
            # NOTE(review): pop() serves newest-first (LIFO); confirm the
            # proposers tolerate out-of-order service of queued messages.
            message = self._command_queue.pop()
            message_command_type = message[0]
            if message_command_type == "prepare":
                self._recv_prepare(message)
            elif message_command_type == "accept":
                self._recv_accept(message)
            elif message_command_type == "commit":
                self._recv_commit(message)
        if self._terminate:
            break
        # Yield the CPU so other threads can enqueue work.
        time.sleep(.001)
def __str__(self):
    """Implement str(Acceptor)."""
    return ("Acceptor"
            "\n\tMaxPrepare: " + str(self._maxPrepare) +
            "\n\tAccNum: " + str(self._accNums) +
            "\n\tAccVal: " + str(self._accVals))
def __repr__(self):
    """Implement repr(Acceptor)."""
    # Delegate to __str__ via the builtin.
    return str(self)
def main():
"""Quick tests."""
a = Acceptor()
print a
if __name__ == "__main__":
main()
| {
"repo_name": "nickmarton/Paxos-Distributed-Calendar",
"path": "Classes/Acceptor.py",
"copies": "1",
"size": "5073",
"license": "mit",
"hash": 8515194687834986000,
"line_mean": 35.2357142857,
"line_max": 114,
"alpha_frac": 0.5649517051,
"autogenerated": false,
"ratio": 3.805701425356339,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48706531304563394,
"avg_score": null,
"num_lines": null
} |
# Accepts a filename containing numeric values in each line of which the statistics need to be calculated.
# Alternately as second argument, the bucket size can also be passed.
import sys
import numpy as np


def main():
    """Print mean, mean-of-medians, median-of-means and std for a file of numbers.

    Usage: mean_of_medians.py <filename> [buckets]
    """
    # Validate the command line instead of crashing with IndexError.
    if len(sys.argv) < 2:
        sys.exit("usage: {} <filename> [buckets]".format(sys.argv[0]))
    fileName = sys.argv[1]
    # find median of bucket splits of data
    buckets = 5
    if len(sys.argv) == 3:
        # Assuming the 2nd argument is the bucket size if it exists
        buckets = int(sys.argv[2])
    data = np.fromfile(fileName, sep="\n")
    nmap = len(data)  # Number of hashes used.
    print("(mean of {} hashes) = {}".format(nmap, np.mean(data)))
    # Finding mean of medians and vice-versa now: shuffle indices so the
    # buckets are random partitions of the data.
    idxes = np.arange(nmap)
    np.random.shuffle(idxes)
    # numpy.array_split allows unequal splits of data
    groups = np.array_split(idxes, buckets)
    mean_of_medians = np.mean([np.median(data[group_idxs]) for group_idxs in groups])
    median_of_means = np.median([np.mean(data[group_idxs]) for group_idxs in groups])
    print("mean of medians (from {} buckets of data) : {}".format(buckets, mean_of_medians))
    print("median of means (from {} buckets of data) : {}".format(buckets, median_of_means))
    print("Standard Deviation : {}".format(np.std(data)))


# Guarding the entry point avoids running the analysis on import
# (the original executed everything at module level).
if __name__ == "__main__":
    main()
| {
"repo_name": "kunalghosh/T-61.5060-Algorithmic-Methods-of-Data-Mining",
"path": "source/approximate/mean_of_medians.py",
"copies": "1",
"size": "1131",
"license": "mit",
"hash": 1139873434777670100,
"line_mean": 38,
"line_max": 106,
"alpha_frac": 0.7170645447,
"autogenerated": false,
"ratio": 3.2593659942363113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4476430538936311,
"avg_score": null,
"num_lines": null
} |
''' Accepts a polygon shapefile in WGS84 and finds the UTM zone '''
# modified code from https://pcjericks.github.io/py-gdalogr-cookbook/
# vector_layers.html
from osgeo import ogr
import math
import argparse
# Command-line interface: a single positional argument, the shapefile path.
PARSER = argparse.ArgumentParser()
PARSER.add_argument("filename", help="the path to the input shapefile")
ARGS = PARSER.parse_args()
# /Users/jbranigan/Documents/phila-city_limits_shp
def check_latlng(check_bbox):
    '''Verify every bbox coordinate looks like a lat/lng degree value.'''
    # Any value outside [-180, 180] means the file is already projected;
    # failure() aborts the whole script with that message.
    out_of_range = [value for value in check_bbox if value < -180 or value > 180]
    if out_of_range:
        failure('This file is already projected.')
    return True
def check_width(check_bbox):
    '''Check that the bounding box is narrow enough to fit in one UTM zone.'''
    degrees_wide = check_bbox[1] - check_bbox[0]
    # UTM zones are 6 degrees wide; anything over 3 degrees is rejected.
    if degrees_wide > 3:
        failure('This file is too many degrees wide for UTM')
    return True
def get_zone(coord):
    ''' Finds the UTM zone of the coordinate '''
    # There are 60 longitudinal projection zones numbered 1 to 60 starting at 180W
    # So that's -180 = 1, -174 = 2, -168 = 3
    # Floor division puts each boundary longitude in the zone it starts,
    # matching the numbering above; the previous ceil()-based formula
    # returned 0 for exactly -180 and 61 for exactly 180.
    zone = int((coord + 180) // 6) + 1
    # 180E belongs to zone 60, not a nonexistent zone 61.
    return min(zone, 60)
def get_bbox(shapefile):
    ''' Gets the bounding box of a shapefile input '''
    driver = ogr.GetDriverByName('ESRI Shapefile')
    data_source = driver.Open(shapefile, 0) # 0 means read, 1 means write
    # Check to see if shapefile is found.
    if data_source is None:
        # NOTE(review): on failure this falls through and returns None,
        # which the top-level code then indexes — confirm intended.
        print 'Could not open %s' % (shapefile)
    else:
        print 'Opened %s' % (shapefile)
        layer = data_source.GetLayer()
        # GetExtent() returns (min_x, max_x, min_y, max_y) in layer units.
        shape_bbox = layer.GetExtent()
        return shape_bbox
def failure(why):
    ''' Quits the script with an exit message '''
    # Print the reason, then abort the whole script; callers never return.
    print why
    raise SystemExit
# Script entry: validate the bbox, then report the UTM zone of its center.
BBOX = get_bbox(ARGS.filename)
LATLNG = check_latlng(BBOX)
LNG_EXTENT = check_width(BBOX)
# GetExtent() order is (min_x, max_x, min_y, max_y); take the longitude midpoint.
BBOX_CENTER = ((BBOX[1] - BBOX[0]) / 2) + BBOX[0]
UTMZONE = get_zone(BBOX_CENTER)
print 'The UTM zone is: %d' % UTMZONE
| {
"repo_name": "jbranigan/geo-scripts-python",
"path": "latlng2utm/detect-utm-zone.py",
"copies": "1",
"size": "1945",
"license": "mit",
"hash": 7280076488567632000,
"line_mean": 30.8852459016,
"line_max": 82,
"alpha_frac": 0.6575835476,
"autogenerated": false,
"ratio": 3.274410774410774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9409960677262467,
"avg_score": 0.004406728949661525,
"num_lines": 61
} |
"""Accepts or declines project invitation"""
from baseCmd import *
from baseResponse import *
class updateProjectInvitationCmd(baseCmd):
    """Accepts or declines a project invitation (async CloudStack API command)."""

    typeInfo = {}

    def __init__(self):
        self.isAsync = "true"
        # Bind a fresh per-instance dict: the original assigned into the
        # class-level typeInfo, mutating state shared by every instance.
        self.typeInfo = {}
        # id of the project to join (Required)
        self.projectid = None
        self.typeInfo['projectid'] = 'uuid'
        # if true, accept the invitation, decline if false. True by default
        self.accept = None
        self.typeInfo['accept'] = 'boolean'
        # account that is joining the project
        self.account = None
        self.typeInfo['account'] = 'string'
        # list invitations for specified account; this parameter has to be
        # specified with domainId
        self.token = None
        self.typeInfo['token'] = 'string'
        self.required = ["projectid", ]
class updateProjectInvitationResponse(baseResponse):
    """Result payload of the updateProjectInvitation API call."""

    typeInfo = {}

    def __init__(self):
        # Bind a fresh per-instance dict: the original assigned into the
        # class-level typeInfo, mutating state shared by every instance.
        self.typeInfo = {}
        # any text associated with the success or failure
        self.displaytext = None
        self.typeInfo['displaytext'] = 'string'
        # true if operation is executed successfully
        self.success = None
        self.typeInfo['success'] = 'boolean'
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/updateProjectInvitation.py",
"copies": "1",
"size": "1206",
"license": "apache-2.0",
"hash": 1945406101304615000,
"line_mean": 31.5945945946,
"line_max": 102,
"alpha_frac": 0.6144278607,
"autogenerated": false,
"ratio": 4.246478873239437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.027289425347677775,
"num_lines": 37
} |
# Accepts X and y and returns a list of training x and a test x and a training y and a validate y
def folds(x_one, x_two, size):
    """Split two labelled sample sets into *size* cross-validation folds.

    Samples from x_one are labelled -1 and samples from x_two +1.  For
    fold i, the i-th contiguous slice of each class is held out for
    prediction and the remainder forms the training set.

    Returns (train_x, train_y, predict_x, validate_y), each a list with
    one entry per fold.
    """
    train_x = []
    train_y = []
    predict_x = []
    validate_y = []
    # Per-class fold width; integer division matches the original
    # int(len(x) / float(size)).
    width_one = len(x_one) // size
    width_two = len(x_two) // size
    for fold in range(size):
        start_one, end_one = fold * width_one, (fold + 1) * width_one
        start_two, end_two = fold * width_two, (fold + 1) * width_two
        # Held-out samples: class one first (label -1), then class two (+1).
        # (The original built these with side-effect list comprehensions
        # that also shadowed the outer loop variable "i".)
        predict_cut = list(x_one[start_one:end_one]) + list(x_two[start_two:end_two])
        validate_cut = [-1] * (end_one - start_one) + [1] * (end_two - start_two)
        # Training samples: everything outside the held-out slices.
        train_cut_x = list(x_one[:start_one]) + list(x_one[end_one:])
        train_cut_y = [-1] * (len(x_one) - width_one)
        train_cut_x += list(x_two[:start_two]) + list(x_two[end_two:])
        train_cut_y += [1] * (len(x_two) - width_two)
        train_x.append(train_cut_x)
        train_y.append(train_cut_y)
        predict_x.append(predict_cut)
        validate_y.append(validate_cut)
    return train_x, train_y, predict_x, validate_y
| {
"repo_name": "dssg/wikienergy",
"path": "proto/SVM/fold.py",
"copies": "1",
"size": "1529",
"license": "mit",
"hash": -2258415620281786000,
"line_mean": 45.3333333333,
"line_max": 96,
"alpha_frac": 0.6265533028,
"autogenerated": false,
"ratio": 2.7109929078014185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8655290283684576,
"avg_score": 0.03645118538336832,
"num_lines": 33
} |
#Accept walls and understand unpassable terrain
#Easily recalcuate map
from bad_numbers import *
import pathfinding
import bad_numbers
import numpy
import maps
import cProfile
import generate_dijkstra_map as fast_gen
def create_dijkstra_map(center, source_map, targets):
    """Create the dijkstra-map data structure around *center*.

    center     -- (x, y, z) position the map is built around.
    source_map -- world map; cells truthy at level z+1 are treated as walls.
    targets    -- list of {'position': ..., 'score': ...} dicts.

    Returns a dict holding the score grid, the covered x/y ranges, the
    unpassable cells ('ignore') and the original targets.
    """
    # TODO: the search radius is hard-coded; the commented expression is
    # the intended distance to the first target.  (A leftover debug print
    # of this value has been removed.)
    _farthest_distance = 10  # bad_numbers.distance(center, targets[0]['position'])
    _min_x = clip(center[0] - _farthest_distance, 0, MAP_SIZE[0])
    _max_x = clip(center[0] + _farthest_distance, 0, MAP_SIZE[0])
    _min_y = clip(center[1] - _farthest_distance, 0, MAP_SIZE[1])
    _max_y = clip(center[1] + _farthest_distance, 0, MAP_SIZE[1])
    _map = numpy.ones((_max_y, _max_x))
    _ignore = []
    # TODO: This number controls how much detail we get in the map
    _map *= 30
    # Cells with something at the level above (z+1) are unpassable walls.
    for x in range(_min_x, _max_x):
        for y in range(_min_y, _max_y):
            if source_map[x][y][center[2] + 1]:
                _ignore.append((x, y))
    # Create structure
    _dijkstra = {'map': _map,
                 'x_range': (_min_x, _max_x),
                 'y_range': (_min_y, _max_y),
                 'ignore': _ignore,
                 'targets': targets}
    return _dijkstra
def generate_dijkstra_map(dijkstra):
    """Relax the dijkstra map in place until a full sweep changes nothing.

    Every cell's score is repeatedly pulled towards its cheapest passable
    neighbour; target cells are pinned to 0 and 'ignore' (wall) cells are
    never rewritten.  Inverted maps (negative scores) relax the other way.
    """
    targets = dijkstra['targets']
    _min_x, _max_x = dijkstra['x_range']
    _min_y, _max_y = dijkstra['y_range']
    if not targets:
        raise Exception('No targets passed to create_dijkstra_map()')
    _target_positions = [tuple(target['position']) for target in targets]
    _map = dijkstra['map']
    _orig_map = None
    # Targets are the zero-score sinks all other scores flow towards.
    for target in targets:
        _map[target['position'][1]-_min_y, target['position'][0]-_min_x] = 0
    # Sentinel for "no better neighbour found"; sign depends on inversion.
    if 'inverted' in dijkstra:
        _starting_lowest = -9000
    else:
        _starting_lowest = 9000
    while 1:
        # Compare each sweep against a snapshot so updates within a sweep
        # do not feed into themselves.
        _orig_map = _map.copy()
        for _x in range(_min_x, _max_x):
            for _y in range(_min_y, _max_y):
                if (_x, _y) in _target_positions or (_x, _y) in dijkstra['ignore']:
                    continue
                _real_x = _x-_min_x
                _real_y = _y-_min_y
                _lowest_score = _starting_lowest
                # Examine the 8-neighbourhood of the cell.
                for x_mod in range(-1, 2):
                    _map_x_pos = (_real_x)+x_mod
                    _xx = _x+x_mod
                    if 0 > _map_x_pos or _map_x_pos >= _map.shape[1]-1:
                        continue
                    for y_mod in range(-1, 2):
                        _yy = _y+y_mod
                        if (x_mod, y_mod) == (0, 0) or (_xx, _yy) in dijkstra['ignore']:
                            continue
                        # NOTE(review): this diagonal filter repeats (-1, 1)
                        # and omits (1, 1) — looks like a typo; left
                        # untouched to preserve behaviour.
                        if (x_mod, y_mod) in [(-1, -1), (1, -1), (-1, 1), (-1, 1)]:
                            continue
                        _dist = 1  # bad_numbers.distance((_x,_y),(_xx,_yy))
                        _map_y_pos = (_real_y)+y_mod
                        if 0 > _map_y_pos or _map_y_pos >= _map.shape[0]-2:
                            continue
                        if _starting_lowest == 9000:
                            # Normal map: adopt a neighbour only when it is
                            # at least 2 cheaper than this cell.
                            if _orig_map[_real_y, _real_x]-_orig_map[_map_y_pos, _map_x_pos] >= 2:
                                _lowest_score = _orig_map[_map_y_pos, _map_x_pos]+(1*_dist)
                        else:
                            # Inverted map: scores are negative, so test the
                            # sum instead of the difference.
                            if _orig_map[_real_y, _real_x]+_orig_map[_map_y_pos, _map_x_pos] < -2:
                                _lowest_score = _orig_map[_map_y_pos, _map_x_pos]+(1)  # *_dist)
                if _starting_lowest == 9000:
                    if _lowest_score < 9000:
                        _map[_real_y, _real_x] = _lowest_score
                else:
                    if _lowest_score > -9000:
                        _map[_real_y, _real_x] = -_lowest_score
        # Stop once a whole sweep leaves the map unchanged.
        if numpy.array_equal(_map, _orig_map):
            break
def invert_dijkstra_map(dijkstra):
    """Flip the map in place so targets repel instead of attract.

    Scales every score by -1.41, marks the map as inverted and relaxes
    it again with generate_dijkstra_map().
    """
    _min_x, _max_x = dijkstra['x_range']
    _min_y, _max_y = dijkstra['y_range']
    # NOTE(review): the -1.41 multiplier is undocumented (presumably a
    # sqrt(2)-ish overshoot so re-relaxation keeps a gradient) — confirm.
    dijkstra['map'] *= -1.41
    dijkstra['inverted'] = True
    generate_dijkstra_map(dijkstra)
def draw_dijkstra(dijkstra, path=None):
    """Print an ASCII rendering of the map, or of *path* over the map.

    Without a path: '#' marks unpassable cells, otherwise the cell score
    (clipped to 0-41) is printed.  With a path: 'o' marks path cells,
    '#' walls and ' ' everything else.
    """
    for _y in range(dijkstra['y_range'][0], dijkstra['y_range'][1]):
        y = _y-dijkstra['y_range'][0]
        for _x in range(dijkstra['x_range'][0], dijkstra['x_range'][1]):
            x = _x-dijkstra['x_range'][0]
            if not path:
                if (_x, _y) in dijkstra['ignore']:
                    print '# ',
                    continue
                _n = str(bad_numbers.clip(abs(int(dijkstra['map'][y, x])), 0, 41))
                # Pad single digits so the columns stay aligned.
                if len(_n) == 1:
                    print '%s ' % _n,
                else:
                    print _n,
            else:
                # Path entries are (x, y, z) tuples; only level 0 is drawn.
                if (_x, _y, 0) in path:
                    print 'o',
                elif (_x, _y) in dijkstra['ignore']:
                    print '#',
                else:
                    print ' ',
        print
def _main():
    """Manual smoke test: build, relax, invert and draw a map with a path."""
    _targets = [{'position': (40, 30), 'score': 50}]
    MAP = maps.load_map('map1.dat')
    _a = create_dijkstra_map((44, 26, 2), MAP, _targets)
    # NOTE(review): 'time' is not among this module's visible imports —
    # confirm it is imported elsewhere in the file.
    _stime = time.time()
    generate_dijkstra_map(_a)
    invert_dijkstra_map(_a)
    # Report how long the two relaxation passes took.
    print time.time()-_stime
    draw_dijkstra(_a)
    _path = pathfinding.path_from_dijkstra((43, 30, 2), _a, downhill=False)
    draw_dijkstra(_a, path=_path)
# Runs the smoke test on import; swap in the cProfile line to profile it.
_main()
#cProfile.run('_main()','profile.dat')
| {
"repo_name": "flags/Reactor-3",
"path": "dijkstra.py",
"copies": "1",
"size": "4711",
"license": "mit",
"hash": 2226683458924064500,
"line_mean": 25.615819209,
"line_max": 82,
"alpha_frac": 0.5881978349,
"autogenerated": false,
"ratio": 2.4321115126484254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35203093475484254,
"avg_score": null,
"num_lines": null
} |
""" Access ADCs vias SysFS interface """
import glob
import os
class ADC(object):
    """One analog-to-digital converter channel exposed through sysfs."""

    def __init__(self, num, repeat=8, source='iio'):
        """Set up channel *num* for reading through the given sysfs driver.

        source 'iio' uses raw counts (scale 4096); 'ocp' uses the helper
        entries in millivolts (scale 1800).
        """
        if num < 0 or num > 6:
            raise ValueError('ADC num must be 0-6')
        if source not in ('iio', 'ocp'):
            raise ValueError('Bad sysfs source')
        self.num = num
        self.repeat = repeat
        self.source = source
        if source == 'iio':
            self.scale = 4096
            self.sysfs = "/sys/bus/iio/devices/iio:device0/in_voltage%d_raw" % num
        else:
            # Device numbering varies between boots, so resolve via glob.
            self.scale = 1800
            self.sysfs = glob.glob("/sys/devices/ocp.*/helper.*/AIN%d" % num)[0]

    def __str__(self):
        return "ADC#%d (%s)" % (self.num, self.source)

    @property
    def mV(self):
        """Current reading converted to millivolts."""
        # Derive from the raw count to keep fractional precision.
        return self.raw() * (1800.0 / self.scale)

    @property
    def volts(self):
        """Current reading converted to volts."""
        # Derive from the raw count to keep fractional precision.
        return self.raw() * (1.8 / self.scale)

    def raw(self, repeat=None):
        """Return the raw value read from the sysfs entry.

        The entry is re-read *repeat* times (default self.repeat) to work
        around an ADC driver bug that can return stale values; only the
        last read counts.  Roughly 100 Hz at the default 8x reads
        (about 1.25 ms per read).
        """
        reads = repeat or self.repeat
        val = None
        for _ in range(reads):
            val = None
            fd = os.open(self.sysfs, os.O_RDONLY)
            while not val:
                # os.read is ~10% faster than File.read(); the resource
                # can be temporarily unavailable, so retry on error.
                try:
                    val = os.read(fd, 4)
                except (IOError, OSError):
                    pass
            os.close(fd)
        return int(val)
| {
"repo_name": "jschornick/pybbb",
"path": "bbb/adc.py",
"copies": "1",
"size": "1938",
"license": "mit",
"hash": -6349788903319857000,
"line_mean": 30.2580645161,
"line_max": 90,
"alpha_frac": 0.536119711,
"autogenerated": false,
"ratio": 3.915151515151515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49512712261515146,
"avg_score": null,
"num_lines": null
} |
"""Access ADCs vias SysFS interface."""
import glob
import os
class ADC(object):
    """Access one ADC channel via its SysFS interface."""

    def __init__(self, num, repeat=8, source='iio'):
        """Initalize the ADC for reading via a sysfs source.

        num    -- channel number, 0-6
        repeat -- how many times raw() re-reads to defeat stale values
        source -- 'iio' (raw counts, scale 4096) or 'ocp' (mV, scale 1800)
        """
        if not (0 <= num <= 6):
            raise ValueError('ADC num must be 0-6')
        self.num = num
        self.repeat = repeat
        self.source = source
        if source == 'iio':
            self.sysfs = "/sys/bus/iio/devices/iio:device0/in_voltage" + str(num) + "_raw"
            self.scale = 4096
        elif source == 'ocp':
            # need to read a glob here, since device numbering is not consistent
            self.sysfs = glob.glob("/sys/devices/ocp.*/helper.*/AIN" + str(num))[0]
            self.scale = 1800
        else:
            raise ValueError('Bad sysfs source')

    def __str__(self):
        out = "ADC#%d (%s)" % (self.num, self.source)
        return out

    @property
    def mV(self):
        """Current reading converted to millivolts."""
        # calculate from raw value for a little extra precision
        return self.raw() * (1800.0 / self.scale)

    @property
    def volts(self):
        """Current reading converted to volts."""
        # calculate from raw value for a little extra precision
        return self.raw() * (1.8 / self.scale)

    def raw(self, repeat=None):
        """Raw ADC value read via sysfs entry

        Approximately 100Hz when using 8x reads (1.25ms/ea)
        """
        if not repeat:
            repeat = self.repeat
        # repeat read multiple times to handle ADC driver bug that returns
        # stale values
        val = None
        for _ in range(repeat):
            val = None
            fd = os.open(self.sysfs, os.O_RDONLY)
            try:
                while not val:
                    try:
                        # ~10% faster than using File.read()
                        val = os.read(fd, 4)
                    except (IOError, OSError):
                        # resource can be temporarily unavailable
                        pass
            finally:
                # Always release the descriptor, even if an unexpected
                # exception escapes the retry loop (original leaked fds).
                os.close(fd)
        return int(val)
| {
"repo_name": "IEEERobotics/pybbb",
"path": "bbb/adc.py",
"copies": "1",
"size": "1947",
"license": "mit",
"hash": 2193016655191696000,
"line_mean": 30.4032258065,
"line_max": 90,
"alpha_frac": 0.5336414997,
"autogenerated": false,
"ratio": 3.9175050301810863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49511465298810864,
"avg_score": null,
"num_lines": null
} |
"""Access an ARP cache."""
class Error(Exception):
    """Root of the arp module's exception hierarchy."""
class MACNotFound(Error):
    """Raised when no MAC address is associated with the given address."""
# Path to the ARP cache file under the /proc filesystem.
_ARP_FILENAME = '/proc/net/arp'
class ArpCache(object):
    """In-memory lookup table mapping IPv4 addresses to MAC addresses."""

    def __init__(self, arp_lines=None):
        """Parses an ARP cache.

        Example:
          $ cat /proc/net/arp
          IP address       HW type     Flags       HW address            Mask     Device
          23.228.128.130   0x1         0x2         78:19:f7:86:44:81     *        eth0

        Args:
          arp_lines: None or list of str, lines from an ARP cache. If None,
              read lines from the system ARP cache.
        """
        if arp_lines is None:
            with open(_ARP_FILENAME) as arp_file:
                arp_lines = arp_file.readlines()
        # First column is the IP, fourth the MAC; line 0 is the header.
        rows = (line.split() for line in arp_lines[1:])
        self._ip_to_mac = {fields[0]: fields[3] for fields in rows}

    def ip_to_mac(self, ip_address):
        """Returns the MAC address of the given IPv4 address.

        Args:
          ip_address: str, IPv4 address to lookup in the ARP cache.

        Raises:
          MACNotFound: the ip address is not found in the ARP cache.
        """
        mac = self._ip_to_mac.get(ip_address)
        if mac is None:
            raise MACNotFound('IP not found in ARP cache: %s' % ip_address)
        return mac
| {
"repo_name": "m-lab/collectd-mlab",
"path": "site-packages/mlab/disco/arp.py",
"copies": "2",
"size": "1498",
"license": "apache-2.0",
"hash": -6205099673732982000,
"line_mean": 27.2641509434,
"line_max": 79,
"alpha_frac": 0.5527369826,
"autogenerated": false,
"ratio": 3.754385964912281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005896226415094339,
"num_lines": 53
} |
""" Access and control log capturing. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import re
from contextlib import contextmanager
import py
import six
import pytest
from _pytest.compat import dummy_context_manager
from _pytest.config import create_terminal_writer
from _pytest.pathlib import Path
DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
class ColoredLevelFormatter(logging.Formatter):
    """
    Colorize the %(levelname)..s part of the log format passed to __init__.
    """

    # Markup options applied to each level's name.
    LOGLEVEL_COLOROPTS = {
        logging.CRITICAL: {"red"},
        logging.ERROR: {"red", "bold"},
        logging.WARNING: {"yellow"},
        logging.WARN: {"yellow"},
        logging.INFO: {"green"},
        logging.DEBUG: {"purple"},
        logging.NOTSET: set(),
    }
    # Matches e.g. "%(levelname)-8s", keeping the width/alignment spec.
    LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)")

    def __init__(self, terminalwriter, *args, **kwargs):
        """Pre-compute one colorized format string per known log level."""
        super(ColoredLevelFormatter, self).__init__(*args, **kwargs)
        # logging.Formatter stores the format string in different places
        # on Python 2 (_fmt) and Python 3 (_style._fmt).
        if six.PY2:
            self._original_fmt = self._fmt
        else:
            self._original_fmt = self._style._fmt
        self._level_to_fmt_mapping = {}

        levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt)
        if not levelname_fmt_match:
            # No %(levelname)s in the format string: nothing to colorize.
            return
        levelname_fmt = levelname_fmt_match.group()

        for level, color_opts in self.LOGLEVEL_COLOROPTS.items():
            formatted_levelname = levelname_fmt % {
                "levelname": logging.getLevelName(level)
            }

            # add ANSI escape sequences around the formatted levelname
            color_kwargs = {name: True for name in color_opts}
            colorized_formatted_levelname = terminalwriter.markup(
                formatted_levelname, **color_kwargs
            )
            self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(
                colorized_formatted_levelname, self._fmt
            )

    def format(self, record):
        # Temporarily install the pre-colorized format for this record's
        # level, then delegate to the stock Formatter implementation.
        fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt)
        if six.PY2:
            self._fmt = fmt
        else:
            self._style._fmt = fmt
        return super(ColoredLevelFormatter, self).format(record)
def get_option_ini(config, *names):
    """Return the first truthy value among the given settings.

    Each name is resolved from the command line first, falling back to the
    ini file; returns None when every candidate is falsy.
    """
    for name in names:
        value = config.getoption(name)  # 'default' arg won't work as expected
        if value is None:
            value = config.getini(name)
        if value:
            return value
def pytest_addoption(parser):
    """Add options to control log capturing."""
    group = parser.getgroup("logging")

    def add_option_ini(option, dest, default=None, type=None, **kwargs):
        # Register every option both as an ini key (the default) and as a
        # command-line flag; get_option_ini() gives the flag priority.
        parser.addini(
            dest, default=default, type=type, help="default value for " + option
        )
        group.addoption(option, dest=dest, **kwargs)

    add_option_ini(
        "--no-print-logs",
        dest="log_print",
        action="store_const",
        const=False,
        default=True,
        type="bool",
        help="disable printing caught logs on failed tests.",
    )
    add_option_ini(
        "--log-level",
        dest="log_level",
        default=None,
        help="logging level used by the logging module",
    )
    # Format of the captured-log report sections.
    add_option_ini(
        "--log-format",
        dest="log_format",
        default=DEFAULT_LOG_FORMAT,
        help="log format as used by the logging module.",
    )
    add_option_ini(
        "--log-date-format",
        dest="log_date_format",
        default=DEFAULT_LOG_DATE_FORMAT,
        help="log date format as used by the logging module.",
    )
    # log_cli is ini-only; its command-line counterpart is --log-cli-level.
    parser.addini(
        "log_cli",
        default=False,
        type="bool",
        help='enable log display during test run (also known as "live logging").',
    )
    add_option_ini(
        "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
    )
    add_option_ini(
        "--log-cli-format",
        dest="log_cli_format",
        default=None,
        help="log format as used by the logging module.",
    )
    add_option_ini(
        "--log-cli-date-format",
        dest="log_cli_date_format",
        default=None,
        help="log date format as used by the logging module.",
    )
    # Optional duplicate of all captured logs into a file.
    add_option_ini(
        "--log-file",
        dest="log_file",
        default=None,
        help="path to a file when logging will be written to.",
    )
    add_option_ini(
        "--log-file-level",
        dest="log_file_level",
        default=None,
        help="log file logging level.",
    )
    add_option_ini(
        "--log-file-format",
        dest="log_file_format",
        default=DEFAULT_LOG_FORMAT,
        help="log format as used by the logging module.",
    )
    add_option_ini(
        "--log-file-date-format",
        dest="log_file_date_format",
        default=DEFAULT_LOG_DATE_FORMAT,
        help="log date format as used by the logging module.",
    )
@contextmanager
def catching_logs(handler, formatter=None, level=None):
    """Context manager that installs *handler* on the root logger.

    Optionally applies *formatter* and *level* to the handler, lowers the
    root logger level to at most *level* for the duration, and undoes
    everything on exit.
    """
    root_logger = logging.getLogger()
    if formatter is not None:
        handler.setFormatter(formatter)
    if level is not None:
        handler.setLevel(level)

    # Installing the same handler twice would confuse the logging system,
    # so only add (and later remove) it when it is not already present.
    needs_install = handler not in root_logger.handlers
    if needs_install:
        root_logger.addHandler(handler)
    if level is not None:
        previous_level = root_logger.level
        root_logger.setLevel(min(previous_level, level))
    try:
        yield handler
    finally:
        if level is not None:
            root_logger.setLevel(previous_level)
        if needs_install:
            root_logger.removeHandler(handler)
class LogCaptureHandler(logging.StreamHandler):
    """A logging handler that stores log records and the log text."""

    def __init__(self):
        """Create the handler with a fresh in-memory text stream."""
        super(LogCaptureHandler, self).__init__(py.io.TextIO())
        self.records = []

    def emit(self, record):
        """Remember *record*, then write it to the in-memory stream."""
        self.records.append(record)
        super(LogCaptureHandler, self).emit(record)

    def reset(self):
        """Drop all captured records and all captured text."""
        self.records = []
        self.stream = py.io.TextIO()
class LogCaptureFixture(object):
    """Provides access and control of log capturing."""

    def __init__(self, item):
        """Creates a new funcarg."""
        self._item = item
        # dict of log name -> log level, populated by set_level() so the
        # levels can be restored during _finalize()
        self._initial_log_levels = {}  # Dict[str, int]

    def _finalize(self):
        """Finalizes the fixture.

        This restores the log levels changed by :meth:`set_level`.
        """
        # restore log levels
        for logger_name, level in self._initial_log_levels.items():
            logger = logging.getLogger(logger_name)
            logger.setLevel(level)

    @property
    def handler(self):
        """The capture handler the logging plugin attached to the item.

        :rtype: LogCaptureHandler
        """
        return self._item.catch_log_handler

    def get_records(self, when):
        """
        Get the logging records for one of the possible test phases.

        :param str when:
            Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".

        :rtype: List[logging.LogRecord]
        :return: the list of captured records at the given stage

        .. versionadded:: 3.4
        """
        handler = self._item.catch_log_handlers.get(when)
        if handler:
            return handler.records
        else:
            # Phase has not run (or captured nothing) yet.
            return []

    @property
    def text(self):
        """Returns the log text."""
        return self.handler.stream.getvalue()

    @property
    def records(self):
        """Returns the list of log records."""
        return self.handler.records

    @property
    def record_tuples(self):
        """Returns a list of a stripped down version of log records intended
        for use in assertion comparison.

        The format of the tuple is:

            (logger_name, log_level, message)
        """
        return [(r.name, r.levelno, r.getMessage()) for r in self.records]

    @property
    def messages(self):
        """Returns a list of format-interpolated log messages.

        Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list
        are all interpolated.
        Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with
        levels, timestamps, etc, making exact comparisons more reliable.

        Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments
        to the logging functions) is not included, as this is added by the formatter in the handler.

        .. versionadded:: 3.7
        """
        return [r.getMessage() for r in self.records]

    def clear(self):
        """Reset the list of log records and the captured log text."""
        self.handler.reset()

    def set_level(self, level, logger=None):
        """Sets the level for capturing of logs. The level will be restored to its previous value at the end of
        the test.

        :param int level: the logger to level.
        :param str logger: the logger to update the level. If not given, the root logger level is updated.

        .. versionchanged:: 3.4
            The levels of the loggers changed by this function will be restored to their initial values at the
            end of the test.
        """
        logger_name = logger
        logger = logging.getLogger(logger_name)
        # save the original log-level to restore it during teardown
        # (setdefault keeps the oldest value if set_level is called twice)
        self._initial_log_levels.setdefault(logger_name, logger.level)
        logger.setLevel(level)

    @contextmanager
    def at_level(self, level, logger=None):
        """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the
        level is restored to its original value.

        :param int level: the logger to level.
        :param str logger: the logger to update the level. If not given, the root logger level is updated.
        """
        logger = logging.getLogger(logger)
        orig_level = logger.level
        logger.setLevel(level)
        try:
            yield
        finally:
            logger.setLevel(orig_level)
@pytest.fixture
def caplog(request):
    """Access and control log capturing.

    Captured logs are available through the following properties/methods::

    * caplog.text            -> string containing formatted log output
    * caplog.records         -> list of logging.LogRecord instances
    * caplog.record_tuples   -> list of (logger_name, level, message) tuples
    * caplog.clear()         -> clear captured records and formatted log output string
    """
    capture_fixture = LogCaptureFixture(request.node)
    yield capture_fixture
    # Restore any logger levels the test changed via set_level().
    capture_fixture._finalize()
def get_actual_log_level(config, *setting_names):
    """Return the numeric logging level for the first configured setting.

    Each name is looked up as a command-line option first, then as an ini
    value.  Returns None when nothing is configured; raises UsageError for
    unrecognised level names.
    """
    chosen = None
    chosen_name = None
    for candidate in setting_names:
        value = config.getoption(candidate)
        if value is None:
            value = config.getini(candidate)
        if value:
            chosen, chosen_name = value, candidate
            break
    if chosen is None:
        return

    if isinstance(chosen, six.string_types):
        chosen = chosen.upper()
    try:
        return int(getattr(logging, chosen, chosen))
    except ValueError:
        # Python logging does not recognise this as a logging level
        raise pytest.UsageError(
            "'{}' is not recognized as a logging level name for "
            "'{}'. Please consider passing the "
            "logging level num instead.".format(chosen, chosen_name)
        )
# run after terminalreporter/capturemanager are configured
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
    """Instantiate and register the logging plugin for this session."""
    plugin = LoggingPlugin(config)
    config.pluginmanager.register(plugin, "logging-plugin")
class LoggingPlugin(object):
"""Attaches to the logging module and captures log messages for each test.
"""
def __init__(self, config):
"""Creates a new plugin to capture log messages.
The formatter can be safely shared across all handlers so
create a single one for the entire test session here.
"""
self._config = config
# enable verbose output automatically if live logging is enabled
if self._log_cli_enabled() and config.getoption("verbose") < 1:
config.option.verbose = 1
self.print_logs = get_option_ini(config, "log_print")
self.formatter = logging.Formatter(
get_option_ini(config, "log_format"),
get_option_ini(config, "log_date_format"),
)
self.log_level = get_actual_log_level(config, "log_level")
self.log_file_level = get_actual_log_level(config, "log_file_level")
self.log_file_format = get_option_ini(config, "log_file_format", "log_format")
self.log_file_date_format = get_option_ini(
config, "log_file_date_format", "log_date_format"
)
self.log_file_formatter = logging.Formatter(
self.log_file_format, datefmt=self.log_file_date_format
)
log_file = get_option_ini(config, "log_file")
if log_file:
self.log_file_handler = logging.FileHandler(
log_file, mode="w", encoding="UTF-8"
)
self.log_file_handler.setFormatter(self.log_file_formatter)
else:
self.log_file_handler = None
self.log_cli_handler = None
self.live_logs_context = lambda: dummy_context_manager()
# Note that the lambda for the live_logs_context is needed because
# live_logs_context can otherwise not be entered multiple times due
# to limitations of contextlib.contextmanager.
if self._log_cli_enabled():
self._setup_cli_logging()
def _setup_cli_logging(self):
config = self._config
terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
if terminal_reporter is None:
# terminal reporter is disabled e.g. by pytest-xdist.
return
capture_manager = config.pluginmanager.get_plugin("capturemanager")
# if capturemanager plugin is disabled, live logging still works.
log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
log_cli_format = get_option_ini(config, "log_cli_format", "log_format")
log_cli_date_format = get_option_ini(
config, "log_cli_date_format", "log_date_format"
)
if (
config.option.color != "no"
and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format)
):
log_cli_formatter = ColoredLevelFormatter(
create_terminal_writer(config),
log_cli_format,
datefmt=log_cli_date_format,
)
else:
log_cli_formatter = logging.Formatter(
log_cli_format, datefmt=log_cli_date_format
)
log_cli_level = get_actual_log_level(config, "log_cli_level", "log_level")
self.log_cli_handler = log_cli_handler
self.live_logs_context = lambda: catching_logs(
log_cli_handler, formatter=log_cli_formatter, level=log_cli_level
)
def set_log_path(self, fname):
"""Public method, which can set filename parameter for
Logging.FileHandler(). Also creates parent directory if
it does not exist.
.. warning::
Please considered as an experimental API.
"""
fname = Path(fname)
if not fname.is_absolute():
fname = Path(self._config.rootdir, fname)
if not fname.parent.exists():
fname.parent.mkdir(exist_ok=True, parents=True)
self.log_file_handler = logging.FileHandler(
str(fname), mode="w", encoding="UTF-8"
)
self.log_file_handler.setFormatter(self.log_file_formatter)
def _log_cli_enabled(self):
"""Return True if log_cli should be considered enabled, either explicitly
or because --log-cli-level was given in the command-line.
"""
return self._config.getoption(
"--log-cli-level"
) is not None or self._config.getini("log_cli")
    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_collection(self):
        """Hook wrapper: run collection inside the live-logging context and,
        if configured, the log-file capture context."""
        with self.live_logs_context():
            if self.log_cli_handler:
                self.log_cli_handler.set_when("collection")
            if self.log_file_handler is not None:
                with catching_logs(self.log_file_handler, level=self.log_file_level):
                    yield
            else:
                yield
    @contextmanager
    def _runtest_for(self, item, when):
        """Wrap one test phase: delegate to _runtest_for_main, additionally
        routing records to the log file handler when one is configured."""
        with self._runtest_for_main(item, when):
            if self.log_file_handler is not None:
                with catching_logs(self.log_file_handler, level=self.log_file_level):
                    yield
            else:
                yield
    @contextmanager
    def _runtest_for_main(self, item, when):
        """Implements the internals of pytest_runtest_xxx() hook.

        Installs a fresh LogCaptureHandler for the phase, exposes it on the
        item (consumed by the caplog fixture), and after the phase appends
        the captured text as a "log" report section when print_logs is on.
        """
        with catching_logs(
            LogCaptureHandler(), formatter=self.formatter, level=self.log_level
        ) as log_handler:
            if self.log_cli_handler:
                self.log_cli_handler.set_when(when)
            if item is None:
                # Session-level phases (start/finish/logreport) have no item.
                yield  # run the test
                return
            if not hasattr(item, "catch_log_handlers"):
                item.catch_log_handlers = {}
            item.catch_log_handlers[when] = log_handler
            item.catch_log_handler = log_handler
            try:
                yield  # run test
            finally:
                # teardown is the last phase, so the per-item attributes are
                # cleaned up here.
                if when == "teardown":
                    del item.catch_log_handler
                    del item.catch_log_handlers
        if self.print_logs:
            # Add a captured log section to the report.
            log = log_handler.stream.getvalue().strip()
            item.add_report_section(when, "log", log)
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_setup(self, item):
        """Hook wrapper: capture logs emitted during the setup phase."""
        with self._runtest_for(item, "setup"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_call(self, item):
        """Hook wrapper: capture logs emitted during the call phase."""
        with self._runtest_for(item, "call"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_teardown(self, item):
        """Hook wrapper: capture logs emitted during the teardown phase."""
        with self._runtest_for(item, "teardown"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_logstart(self):
        """Hook wrapper: reset the live handler's per-test state, then
        capture logs emitted at test start (no item is available here)."""
        if self.log_cli_handler:
            self.log_cli_handler.reset()
        with self._runtest_for(None, "start"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_logfinish(self):
        """Hook wrapper: capture logs emitted at test finish (no item)."""
        with self._runtest_for(None, "finish"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_logreport(self):
        """Hook wrapper: capture logs emitted while reporting (no item)."""
        with self._runtest_for(None, "logreport"):
            yield
    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_sessionfinish(self):
        """Hook wrapper: capture logs for session finish and make sure the
        log file handler is closed at the very end."""
        with self.live_logs_context():
            if self.log_cli_handler:
                self.log_cli_handler.set_when("sessionfinish")
            if self.log_file_handler is not None:
                try:
                    with catching_logs(
                        self.log_file_handler, level=self.log_file_level
                    ):
                        yield
                finally:
                    # Close the FileHandler explicitly.
                    # (logging.shutdown might have lost the weakref?!)
                    self.log_file_handler.close()
            else:
                yield
    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_sessionstart(self):
        """Hook wrapper: capture logs emitted during session start."""
        with self.live_logs_context():
            if self.log_cli_handler:
                self.log_cli_handler.set_when("sessionstart")
            if self.log_file_handler is not None:
                with catching_logs(self.log_file_handler, level=self.log_file_level):
                    yield
            else:
                yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtestloop(self, session):
        """Runs all collected test items."""
        # Keep the live-logging context open for the whole run so live log
        # output interleaves with the terminal reporter's output.
        with self.live_logs_context():
            if self.log_file_handler is not None:
                with catching_logs(self.log_file_handler, level=self.log_file_level):
                    yield  # run all the tests
            else:
                yield  # run all the tests
class _LiveLoggingStreamHandler(logging.StreamHandler):
    """
    Custom StreamHandler used by the live logging feature: it will write a newline before the first log message
    in each test.
    During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured
    and won't appear in the terminal.
    """
    def __init__(self, terminal_reporter, capture_manager):
        """
        :param _pytest.terminal.TerminalReporter terminal_reporter:
        :param _pytest.capture.CaptureManager capture_manager:
        """
        # The terminal reporter itself is used as the output stream; emit()
        # also calls its section() method to print phase headers.
        logging.StreamHandler.__init__(self, stream=terminal_reporter)
        self.capture_manager = capture_manager
        self.reset()
        self.set_when(None)
        self._test_outcome_written = False
    def reset(self):
        """Reset the handler; should be called before the start of each test"""
        self._first_record_emitted = False
    def set_when(self, when):
        """Prepares for the given test phase (setup/call/teardown)"""
        self._when = when
        self._section_name_shown = False
        if when == "start":
            self._test_outcome_written = False
    def emit(self, record):
        """Write one record, suspending output capture (if any) and printing
        a separating newline / section header the first time around."""
        ctx_manager = (
            self.capture_manager.global_and_fixture_disabled()
            if self.capture_manager
            else dummy_context_manager()
        )
        with ctx_manager:
            if not self._first_record_emitted:
                self.stream.write("\n")
                self._first_record_emitted = True
            elif self._when in ("teardown", "finish"):
                if not self._test_outcome_written:
                    self._test_outcome_written = True
                    self.stream.write("\n")
            if not self._section_name_shown and self._when:
                # Show e.g. "---- live log setup ----" once per phase.
                self.stream.section("live log " + self._when, sep="-", bold=True)
                self._section_name_shown = True
            logging.StreamHandler.emit(self, record)
| {
"repo_name": "lmregus/Portfolio",
"path": "python/design_patterns/env/lib/python3.7/site-packages/_pytest/logging.py",
"copies": "1",
"size": "22795",
"license": "mit",
"hash": 494967049537021900,
"line_mean": 33.4335347432,
"line_max": 118,
"alpha_frac": 0.5983329678,
"autogenerated": false,
"ratio": 4.1558796718322695,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00043228399725101516,
"num_lines": 662
} |
""" Access and control log capturing. """
import logging
import re
from contextlib import contextmanager
from io import StringIO
from typing import AbstractSet
from typing import Dict
from typing import Generator
from typing import List
from typing import Mapping
from typing import Optional
import pytest
from _pytest import nodes
from _pytest.compat import nullcontext
from _pytest.config import _strtobool
from _pytest.config import Config
from _pytest.config import create_terminal_writer
from _pytest.pathlib import Path
DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m")
def _remove_ansi_escape_sequences(text):
return _ANSI_ESCAPE_SEQ.sub("", text)
class ColoredLevelFormatter(logging.Formatter):
    """
    Colorize the %(levelname)..s part of the log format passed to __init__.
    """

    # Markup options applied to the levelname field, per level.
    LOGLEVEL_COLOROPTS = {
        logging.CRITICAL: {"red"},
        logging.ERROR: {"red", "bold"},
        logging.WARNING: {"yellow"},
        logging.WARN: {"yellow"},
        logging.INFO: {"green"},
        logging.DEBUG: {"purple"},
        logging.NOTSET: set(),
    }  # type: Mapping[int, AbstractSet[str]]
    LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*s)")

    def __init__(self, terminalwriter, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self._original_fmt = self._style._fmt
        self._level_to_fmt_mapping = {}  # type: Dict[int, str]
        assert self._fmt is not None
        match = self.LEVELNAME_FMT_REGEX.search(self._fmt)
        if match is None:
            # No levelname field in the format string: nothing to colorize.
            return
        levelname_fmt = match.group()
        for level, color_opts in self.LOGLEVEL_COLOROPTS.items():
            # Render the levelname with its original padding, then wrap it
            # in the terminal writer's ANSI markup for this level.
            plain_levelname = levelname_fmt % {
                "levelname": logging.getLevelName(level)
            }
            colorized_levelname = terminalwriter.markup(
                plain_levelname, **{name: True for name in color_opts}
            )
            self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(
                colorized_levelname, self._fmt
            )

    def format(self, record):
        """Format *record* with the precomputed colorized format string for
        its level (falling back to the original format)."""
        self._style._fmt = self._level_to_fmt_mapping.get(
            record.levelno, self._original_fmt
        )
        return super().format(record)
class PercentStyleMultiline(logging.PercentStyle):
    """A logging style with special support for multiline messages.
    If the message of a record consists of multiple lines, this style
    formats the message as if each line were logged separately.
    """
    def __init__(self, fmt, auto_indent):
        super().__init__(fmt)
        # Normalized indentation setting: -1 auto, 0 off, >0 fixed column.
        self._auto_indent = self._get_auto_indent(auto_indent)
    @staticmethod
    def _update_message(record_dict, message):
        # Return a copy of the record's __dict__ with "message" replaced,
        # so the original record is not mutated.
        tmp = record_dict.copy()
        tmp["message"] = message
        return tmp
    @staticmethod
    def _get_auto_indent(auto_indent_option) -> int:
        """Determines the current auto indentation setting
        Specify auto indent behavior (on/off/fixed) by passing in
        extra={"auto_indent": [value]} to the call to logging.log() or
        using a --log-auto-indent [value] command line or the
        log_auto_indent [value] config option.
        Default behavior is auto-indent off.
        Using the string "True" or "on" or the boolean True as the value
        turns auto indent on, using the string "False" or "off" or the
        boolean False or the int 0 turns it off, and specifying a
        positive integer fixes the indentation position to the value
        specified.
        Any other values for the option are invalid, and will silently be
        converted to the default.
        :param any auto_indent_option: User specified option for indentation
            from command line, config or extra kwarg. Accepts int, bool or str.
            str option accepts the same range of values as boolean config options,
            as well as positive integers represented in str form.
        :returns: indentation value, which can be
            -1 (automatically determine indentation) or
            0 (auto-indent turned off) or
            >0 (explicitly set indentation position).
        """
        if type(auto_indent_option) is int:
            return int(auto_indent_option)
        elif type(auto_indent_option) is str:
            # First try a numeric string, then a boolean-ish string.
            try:
                return int(auto_indent_option)
            except ValueError:
                pass
            try:
                if _strtobool(auto_indent_option):
                    return -1
            except ValueError:
                # Unrecognized string: treated as "off".
                return 0
        elif type(auto_indent_option) is bool:
            if auto_indent_option:
                return -1
        # None, False, falsy bool-strings and anything else fall through
        # to the default: auto-indent off.
        return 0
    def format(self, record):
        if "\n" in record.message:
            if hasattr(record, "auto_indent"):
                # passed in from the "extra={}" kwarg on the call to logging.log()
                auto_indent = self._get_auto_indent(record.auto_indent)
            else:
                auto_indent = self._auto_indent
            if auto_indent:
                lines = record.message.splitlines()
                # Format only the first line normally; continuation lines are
                # indented to align with it.
                formatted = self._fmt % self._update_message(record.__dict__, lines[0])
                if auto_indent < 0:
                    # Auto mode: align with where the message text starts,
                    # ignoring any ANSI escapes in the formatted prefix.
                    indentation = _remove_ansi_escape_sequences(formatted).find(
                        lines[0]
                    )
                else:
                    # optimizes logging by allowing a fixed indentation
                    indentation = auto_indent
                lines[0] = formatted
                return ("\n" + " " * indentation).join(lines)
        return self._fmt % record.__dict__
def get_option_ini(config, *names):
    """Return the first truthy value found for *names*, checking each name
    first as a command line option and then as an ini setting.

    Returns None implicitly when no name yields a truthy value.
    """
    for name in names:
        value = config.getoption(name)  # 'default' arg won't work as expected
        if value is None:
            value = config.getini(name)
        if value:
            return value
def pytest_addoption(parser):
    """Add options to control log capturing."""
    group = parser.getgroup("logging")
    def add_option_ini(option, dest, default=None, type=None, **kwargs):
        # Register each setting both as an ini key and as a command line
        # flag so either spelling works (option lookup prefers the flag).
        parser.addini(
            dest, default=default, type=type, help="default value for " + option
        )
        group.addoption(option, dest=dest, **kwargs)
    add_option_ini(
        "--no-print-logs",
        dest="log_print",
        action="store_const",
        const=False,
        default=True,
        type="bool",
        help="disable printing caught logs on failed tests.",
    )
    add_option_ini(
        "--log-level",
        dest="log_level",
        default=None,
        metavar="LEVEL",
        help=(
            "level of messages to catch/display.\n"
            "Not set by default, so it depends on the root/parent log handler's"
            ' effective level, where it is "WARNING" by default.'
        ),
    )
    add_option_ini(
        "--log-format",
        dest="log_format",
        default=DEFAULT_LOG_FORMAT,
        help="log format as used by the logging module.",
    )
    add_option_ini(
        "--log-date-format",
        dest="log_date_format",
        default=DEFAULT_LOG_DATE_FORMAT,
        help="log date format as used by the logging module.",
    )
    # log_cli is ini-only: live logging is enabled per-project, not per-run.
    parser.addini(
        "log_cli",
        default=False,
        type="bool",
        help='enable log display during test run (also known as "live logging").',
    )
    add_option_ini(
        "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
    )
    add_option_ini(
        "--log-cli-format",
        dest="log_cli_format",
        default=None,
        help="log format as used by the logging module.",
    )
    add_option_ini(
        "--log-cli-date-format",
        dest="log_cli_date_format",
        default=None,
        help="log date format as used by the logging module.",
    )
    add_option_ini(
        "--log-file",
        dest="log_file",
        default=None,
        help="path to a file when logging will be written to.",
    )
    add_option_ini(
        "--log-file-level",
        dest="log_file_level",
        default=None,
        help="log file logging level.",
    )
    add_option_ini(
        "--log-file-format",
        dest="log_file_format",
        default=DEFAULT_LOG_FORMAT,
        help="log format as used by the logging module.",
    )
    add_option_ini(
        "--log-file-date-format",
        dest="log_file_date_format",
        default=DEFAULT_LOG_DATE_FORMAT,
        help="log date format as used by the logging module.",
    )
    add_option_ini(
        "--log-auto-indent",
        dest="log_auto_indent",
        default=None,
        help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.",
    )
@contextmanager
def catching_logs(handler, formatter=None, level=None):
    """Temporarily install *handler* on the root logger.

    Optionally applies *formatter* and *level* to the handler and lowers the
    root logger's level to *level* for the duration of the block.  Handler
    membership and the root level are restored on exit.  Yields the handler.
    """
    root = logging.getLogger()
    if formatter is not None:
        handler.setFormatter(formatter)
    if level is not None:
        handler.setLevel(level)
    # Installing the same handler twice would confuse the logging system,
    # so only add it when it is not already registered.
    needs_install = handler not in root.handlers
    if needs_install:
        root.addHandler(handler)
    if level is not None:
        saved_level = root.level
        root.setLevel(min(saved_level, level))
    try:
        yield handler
    finally:
        if level is not None:
            root.setLevel(saved_level)
        if needs_install:
            root.removeHandler(handler)
class LogCaptureHandler(logging.StreamHandler):
    """A logging handler that stores log records and the log text."""

    def __init__(self) -> None:
        """Create a handler rendering into an in-memory text stream."""
        super().__init__(StringIO())
        self.records = []  # type: List[logging.LogRecord]

    def emit(self, record: logging.LogRecord) -> None:
        """Keep the log records in a list in addition to the log text."""
        self.records.append(record)
        super().emit(record)

    def reset(self) -> None:
        """Drop all captured records and start a fresh text stream."""
        self.records = []
        self.stream = StringIO()
class LogCaptureFixture:
    """Provides access and control of log capturing."""
    def __init__(self, item) -> None:
        """Creates a new funcarg."""
        self._item = item
        # dict of log name -> log level
        self._initial_log_levels = {}  # type: Dict[str, int]
    def _finalize(self) -> None:
        """Finalizes the fixture.
        This restores the log levels changed by :meth:`set_level`.
        """
        # restore log levels
        for logger_name, level in self._initial_log_levels.items():
            logger = logging.getLogger(logger_name)
            logger.setLevel(level)
    @property
    def handler(self) -> LogCaptureHandler:
        """The LogCaptureHandler attached to the item by the logging plugin
        for the current test phase.
        :rtype: LogCaptureHandler
        """
        return self._item.catch_log_handler  # type: ignore[no-any-return] # noqa: F723
    def get_records(self, when: str) -> List[logging.LogRecord]:
        """
        Get the logging records for one of the possible test phases.
        :param str when:
            Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
        :rtype: List[logging.LogRecord]
        :return: the list of captured records at the given stage
        .. versionadded:: 3.4
        """
        handler = self._item.catch_log_handlers.get(when)
        if handler:
            return handler.records  # type: ignore[no-any-return] # noqa: F723
        else:
            # Phase not run (yet) -> no records.
            return []
    @property
    def text(self):
        """Returns the formatted log text."""
        # ANSI escapes are stripped so assertions work with colored output.
        return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
    @property
    def records(self):
        """Returns the list of log records."""
        return self.handler.records
    @property
    def record_tuples(self):
        """Returns a list of a stripped down version of log records intended
        for use in assertion comparison.
        The format of the tuple is:
        (logger_name, log_level, message)
        """
        return [(r.name, r.levelno, r.getMessage()) for r in self.records]
    @property
    def messages(self):
        """Returns a list of format-interpolated log messages.
        Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list
        are all interpolated.
        Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with
        levels, timestamps, etc, making exact comparisons more reliable.
        Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments
        to the logging functions) is not included, as this is added by the formatter in the handler.
        .. versionadded:: 3.7
        """
        return [r.getMessage() for r in self.records]
    def clear(self):
        """Reset the list of log records and the captured log text."""
        self.handler.reset()
    def set_level(self, level, logger=None):
        """Sets the level for capturing of logs. The level will be restored to its previous value at the end of
        the test.
        :param int level: the logger to level.
        :param str logger: the logger to update the level. If not given, the root logger level is updated.
        .. versionchanged:: 3.4
            The levels of the loggers changed by this function will be restored to their initial values at the
            end of the test.
        """
        logger_name = logger
        logger = logging.getLogger(logger_name)
        # save the original log-level to restore it during teardown
        # (setdefault: only the first call per logger records the original)
        self._initial_log_levels.setdefault(logger_name, logger.level)
        logger.setLevel(level)
    @contextmanager
    def at_level(self, level, logger=None):
        """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the
        level is restored to its original value.
        :param int level: the logger to level.
        :param str logger: the logger to update the level. If not given, the root logger level is updated.
        """
        logger = logging.getLogger(logger)
        orig_level = logger.level
        logger.setLevel(level)
        try:
            yield
        finally:
            logger.setLevel(orig_level)
@pytest.fixture
def caplog(request):
    """Access and control log capturing.
    Captured logs are available through the following properties/methods::
    * caplog.messages        -> list of format-interpolated log messages
    * caplog.text            -> string containing formatted log output
    * caplog.records         -> list of logging.LogRecord instances
    * caplog.record_tuples   -> list of (logger_name, level, message) tuples
    * caplog.clear()         -> clear captured records and formatted log output string
    """
    result = LogCaptureFixture(request.node)
    yield result
    # Teardown: restore any log levels changed via set_level().
    result._finalize()
def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[int]:
    """Return the numeric log level configured under the first matching name.

    Each name is looked up first as a command line option, then as an ini
    value; the first truthy value wins.  String values are resolved through
    the ``logging`` module (e.g. ``"DEBUG"`` -> 10).

    :return: the numeric level, or None when nothing is configured.
    :raises pytest.UsageError: if the value is not a recognized level name.
    """
    log_level = None
    setting_name = None
    for candidate in setting_names:
        value = config.getoption(candidate)
        if value is None:
            value = config.getini(candidate)
        if value:
            log_level = value
            setting_name = candidate
            break
    if log_level is None:
        return None
    if isinstance(log_level, str):
        log_level = log_level.upper()
    try:
        return int(getattr(logging, log_level, log_level))
    except ValueError:
        # Python logging does not recognise this as a logging level
        raise pytest.UsageError(
            "'{}' is not recognized as a logging level name for "
            "'{}'. Please consider passing the "
            "logging level num instead.".format(log_level, setting_name)
        )
# run after terminalreporter/capturemanager are configured
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
    """Instantiate the logging plugin and register it with the plugin manager."""
    plugin = LoggingPlugin(config)
    config.pluginmanager.register(plugin, "logging-plugin")
class LoggingPlugin:
    """Attaches to the logging module and captures log messages for each test.
    """
    def __init__(self, config: Config) -> None:
        """Creates a new plugin to capture log messages.
        The formatter can be safely shared across all handlers so
        create a single one for the entire test session here.
        """
        self._config = config
        self.print_logs = get_option_ini(config, "log_print")
        if not self.print_logs:
            # --no-print-logs is deprecated; warn once at configure time.
            from _pytest.warnings import _issue_warning_captured
            from _pytest.deprecated import NO_PRINT_LOGS
            _issue_warning_captured(NO_PRINT_LOGS, self._config.hook, stacklevel=2)
        self.formatter = self._create_formatter(
            get_option_ini(config, "log_format"),
            get_option_ini(config, "log_date_format"),
            get_option_ini(config, "log_auto_indent"),
        )
        self.log_level = get_log_level_for_setting(config, "log_level")
        self.log_file_level = get_log_level_for_setting(config, "log_file_level")
        # log_file_* settings fall back to the generic log_* settings.
        self.log_file_format = get_option_ini(config, "log_file_format", "log_format")
        self.log_file_date_format = get_option_ini(
            config, "log_file_date_format", "log_date_format"
        )
        self.log_file_formatter = logging.Formatter(
            self.log_file_format, datefmt=self.log_file_date_format
        )
        log_file = get_option_ini(config, "log_file")
        if log_file:
            self.log_file_handler = logging.FileHandler(
                log_file, mode="w", encoding="UTF-8"
            )  # type: Optional[logging.FileHandler]
            self.log_file_handler.setFormatter(self.log_file_formatter)
        else:
            self.log_file_handler = None
        self.log_cli_handler = None
        self.live_logs_context = lambda: nullcontext()
        # Note that the lambda for the live_logs_context is needed because
        # live_logs_context can otherwise not be entered multiple times due
        # to limitations of contextlib.contextmanager.
        if self._log_cli_enabled():
            self._setup_cli_logging()
    def _create_formatter(self, log_format, log_date_format, auto_indent):
        """Build a formatter for the given format/date settings, colorized
        when color output is enabled and wrapped in multiline-aware style."""
        # color option doesn't exist if terminal plugin is disabled
        color = getattr(self._config.option, "color", "no")
        if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
            log_format
        ):
            formatter = ColoredLevelFormatter(
                create_terminal_writer(self._config), log_format, log_date_format
            )  # type: logging.Formatter
        else:
            formatter = logging.Formatter(log_format, log_date_format)
        # Replace the style so multiline messages are indented consistently.
        formatter._style = PercentStyleMultiline(
            formatter._style._fmt, auto_indent=auto_indent
        )
        return formatter
    def _setup_cli_logging(self):
        """Wire up live ("cli") logging: build the live stream handler and
        the live_logs_context factory used by the session/runtest hooks."""
        config = self._config
        terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
        if terminal_reporter is None:
            # terminal reporter is disabled e.g. by pytest-xdist.
            return
        capture_manager = config.pluginmanager.get_plugin("capturemanager")
        # if capturemanager plugin is disabled, live logging still works.
        log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
        log_cli_formatter = self._create_formatter(
            get_option_ini(config, "log_cli_format", "log_format"),
            get_option_ini(config, "log_cli_date_format", "log_date_format"),
            get_option_ini(config, "log_auto_indent"),
        )
        log_cli_level = get_log_level_for_setting(config, "log_cli_level", "log_level")
        self.log_cli_handler = log_cli_handler
        self.live_logs_context = lambda: catching_logs(
            log_cli_handler, formatter=log_cli_formatter, level=log_cli_level
        )
    def set_log_path(self, fname):
        """Public method, which can set filename parameter for
        Logging.FileHandler(). Also creates parent directory if
        it does not exist.
        .. warning::
            Please considered as an experimental API.
        """
        fname = Path(fname)
        if not fname.is_absolute():
            fname = Path(self._config.rootdir, fname)
        if not fname.parent.exists():
            fname.parent.mkdir(exist_ok=True, parents=True)
        self.log_file_handler = logging.FileHandler(
            str(fname), mode="w", encoding="UTF-8"
        )
        self.log_file_handler.setFormatter(self.log_file_formatter)
    def _log_cli_enabled(self):
        """Return True if log_cli should be considered enabled, either explicitly
        or because --log-cli-level was given in the command-line.
        """
        return self._config.getoption(
            "--log-cli-level"
        ) is not None or self._config.getini("log_cli")
    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_collection(self) -> Generator[None, None, None]:
        """Hook wrapper: run collection inside the live-logging context and,
        if configured, the log-file capture context."""
        with self.live_logs_context():
            if self.log_cli_handler:
                self.log_cli_handler.set_when("collection")
            if self.log_file_handler is not None:
                with catching_logs(self.log_file_handler, level=self.log_file_level):
                    yield
            else:
                yield
    @contextmanager
    def _runtest_for(self, item, when):
        """Wrap one test phase: delegate to _runtest_for_main, additionally
        routing records to the log file handler when one is configured."""
        with self._runtest_for_main(item, when):
            if self.log_file_handler is not None:
                with catching_logs(self.log_file_handler, level=self.log_file_level):
                    yield
            else:
                yield
    @contextmanager
    def _runtest_for_main(
        self, item: nodes.Item, when: str
    ) -> Generator[None, None, None]:
        """Implements the internals of pytest_runtest_xxx() hook."""
        with catching_logs(
            LogCaptureHandler(), formatter=self.formatter, level=self.log_level
        ) as log_handler:
            if self.log_cli_handler:
                self.log_cli_handler.set_when(when)
            if item is None:
                # Session-level phases (start/finish/logreport) have no item.
                yield  # run the test
                return
            if not hasattr(item, "catch_log_handlers"):
                item.catch_log_handlers = {}  # type: ignore[attr-defined] # noqa: F821
            item.catch_log_handlers[when] = log_handler  # type: ignore[attr-defined] # noqa: F821
            item.catch_log_handler = log_handler  # type: ignore[attr-defined] # noqa: F821
            try:
                yield  # run test
            finally:
                if when == "teardown":
                    del item.catch_log_handler  # type: ignore[attr-defined] # noqa: F821
                    del item.catch_log_handlers  # type: ignore[attr-defined] # noqa: F821
        if self.print_logs:
            # Add a captured log section to the report.
            log = log_handler.stream.getvalue().strip()
            item.add_report_section(when, "log", log)
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_setup(self, item):
        """Hook wrapper: capture logs emitted during the setup phase."""
        with self._runtest_for(item, "setup"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_call(self, item):
        """Hook wrapper: capture logs emitted during the call phase."""
        with self._runtest_for(item, "call"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_teardown(self, item):
        """Hook wrapper: capture logs emitted during the teardown phase."""
        with self._runtest_for(item, "teardown"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_logstart(self):
        """Hook wrapper: reset the live handler's per-test state, then
        capture logs emitted at test start (no item is available here)."""
        if self.log_cli_handler:
            self.log_cli_handler.reset()
        with self._runtest_for(None, "start"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_logfinish(self):
        """Hook wrapper: capture logs emitted at test finish (no item)."""
        with self._runtest_for(None, "finish"):
            yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_logreport(self):
        """Hook wrapper: capture logs emitted while reporting (no item)."""
        with self._runtest_for(None, "logreport"):
            yield
    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_sessionfinish(self):
        """Hook wrapper: capture logs for session finish and make sure the
        log file handler is closed at the very end."""
        with self.live_logs_context():
            if self.log_cli_handler:
                self.log_cli_handler.set_when("sessionfinish")
            if self.log_file_handler is not None:
                try:
                    with catching_logs(
                        self.log_file_handler, level=self.log_file_level
                    ):
                        yield
                finally:
                    # Close the FileHandler explicitly.
                    # (logging.shutdown might have lost the weakref?!)
                    self.log_file_handler.close()
            else:
                yield
    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_sessionstart(self):
        """Hook wrapper: capture logs emitted during session start."""
        with self.live_logs_context():
            if self.log_cli_handler:
                self.log_cli_handler.set_when("sessionstart")
            if self.log_file_handler is not None:
                with catching_logs(self.log_file_handler, level=self.log_file_level):
                    yield
            else:
                yield
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtestloop(self, session):
        """Runs all collected test items."""
        if session.config.option.collectonly:
            # Nothing will run: skip the live-logging machinery entirely.
            yield
            return
        if self._log_cli_enabled() and self._config.getoption("verbose") < 1:
            # setting verbose flag is needed to avoid messy test progress output
            self._config.option.verbose = 1
        with self.live_logs_context():
            if self.log_file_handler is not None:
                with catching_logs(self.log_file_handler, level=self.log_file_level):
                    yield  # run all the tests
            else:
                yield  # run all the tests
class _LiveLoggingStreamHandler(logging.StreamHandler):
    """
    Custom StreamHandler used by the live logging feature: it will write a newline before the first log message
    in each test.
    During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured
    and won't appear in the terminal.
    """
    def __init__(self, terminal_reporter, capture_manager):
        """
        :param _pytest.terminal.TerminalReporter terminal_reporter:
        :param _pytest.capture.CaptureManager capture_manager:
        """
        # The terminal reporter itself is used as the output stream; emit()
        # also calls its section() method to print phase headers.
        logging.StreamHandler.__init__(self, stream=terminal_reporter)
        self.capture_manager = capture_manager
        self.reset()
        self.set_when(None)
        self._test_outcome_written = False
    def reset(self):
        """Reset the handler; should be called before the start of each test"""
        self._first_record_emitted = False
    def set_when(self, when):
        """Prepares for the given test phase (setup/call/teardown)"""
        self._when = when
        self._section_name_shown = False
        if when == "start":
            self._test_outcome_written = False
    def emit(self, record):
        """Write one record, suspending output capture (if any) and printing
        a separating newline / section header the first time around."""
        ctx_manager = (
            self.capture_manager.global_and_fixture_disabled()
            if self.capture_manager
            else nullcontext()
        )
        with ctx_manager:
            if not self._first_record_emitted:
                self.stream.write("\n")
                self._first_record_emitted = True
            elif self._when in ("teardown", "finish"):
                if not self._test_outcome_written:
                    self._test_outcome_written = True
                    self.stream.write("\n")
            if not self._section_name_shown and self._when:
                # Show e.g. "---- live log setup ----" once per phase.
                self.stream.section("live log " + self._when, sep="-", bold=True)
                self._section_name_shown = True
            logging.StreamHandler.emit(self, record)
| {
"repo_name": "markshao/pytest",
"path": "src/_pytest/logging.py",
"copies": "2",
"size": "28189",
"license": "mit",
"hash": -8362641723993197000,
"line_mean": 34.8182973316,
"line_max": 118,
"alpha_frac": 0.6015467026,
"autogenerated": false,
"ratio": 4.1687370600414075,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005260567530394842,
"num_lines": 787
} |
"""Access and manage a Google Tag Manager account."""
import argparse
import sys
import httplib2
from apiclient.discovery import build
from oauth2client import client
from oauth2client import file
from oauth2client import tools
def printThis(statement):
    """Print *statement* to stdout.

    Uses the parenthesized form of print, which behaves identically for a
    single argument under both Python 2 and Python 3 (the bare print
    statement is a syntax error on Python 3).
    """
    print(statement)
def GetService(api_name, api_version, scope, client_secrets_path):
  """Get a service that communicates to a Google API.
  Args:
    api_name: string The name of the api to connect to.
    api_version: string The api version to connect to.
    scope: A list of strings representing the auth scopes to authorize for the
      connection.
    client_secrets_path: string A path to a valid client secrets file.
  Returns:
    A service that is connected to the specified API.
  """
  # Parser command-line arguments.
  parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      parents=[tools.argparser])
  # Parse an empty argv so defaults are used regardless of sys.argv.
  flags = parser.parse_args([])
  # Set up a Flow object to be used if we need to authenticate.
  flow = client.flow_from_clientsecrets(
      client_secrets_path, scope=scope,
      message=tools.message_if_missing(client_secrets_path))
  # Prepare credentials, and authorize HTTP object with them.
  # If the credentials don't exist or are invalid run through the native client
  # flow. The Storage object will ensure that if successful the good
  # credentials will get written back to a file.
  # NOTE(review): the token cache file is named "<api_name>.dat" in the
  # current working directory.
  storage = file.Storage(api_name + '.dat')
  credentials = storage.get()
  if credentials is None or credentials.invalid:
    credentials = tools.run_flow(flow, storage, flags)
  http = credentials.authorize(http=httplib2.Http())
  # Build the service object.
  service = build(api_name, api_version, http=http)
  return service
def FindGreetingsContainerId(service, account_id):
  """Find the greetings container ID.

  Args:
    service: the Tag Manager service object.
    account_id: the ID of the Tag Manager account from which to retrieve the
      Greetings container.

  Returns:
    The container ID string of the container named 'CONTAINER NAME' if it
    exists, or None if it does not.
  """
  # Query the Tag Manager API to list all containers for the given account.
  container_wrapper = service.accounts().containers().list(
      accountId=account_id).execute()
  # Find and return the matching container's ID if it exists.  Use .get() so
  # an account with no containers (response lacks the 'containers' key)
  # yields None instead of raising KeyError.
  for container in container_wrapper.get('containers', []):
    if container['name'] == 'CONTAINER NAME':
      return container['containerId']
  return None
def DefineFieldToSetWithUserID(tag, user_id_value):
  """Append a brand new 'fieldsToSet' parameter carrying '&uid' to *tag*.

  Args:
    tag: the tag dictionary; its 'parameter' list is mutated in place.
    user_id_value: the value to assign to the '&uid' field.

  Returns:
    The same *tag* dictionary, for convenience.
  """
  field_name_entry = {
      'type': 'template',
      'key': 'fieldName',
      'value': '&uid'
  }
  field_value_entry = {
      'type': 'template',
      'key': 'value',
      'value': user_id_value
  }
  fields_to_set = {
      'type': 'list',
      'key': 'fieldsToSet',
      'list': [
          {
              'type': 'map',
              'map': [field_name_entry, field_value_entry]
          }
      ]
  }
  tag['parameter'].append(fields_to_set)
  return tag
def AddFieldToSetWithUserID(tag, user_id_value):
  """Append an '&uid' map entry to every existing 'fieldsToSet' parameter.

  Args:
    tag: the tag dictionary; matching 'fieldsToSet' parameters are mutated
      in place.
    user_id_value: the value to assign to the '&uid' field.

  Returns:
    The same *tag* dictionary, for convenience.
  """
  user_id_object = {
      'type': 'map',
      'map': [
          {
              'type': 'template',
              'key': 'fieldName',
              'value': '&uid'
          },
          {
              'type': 'template',
              'key': 'value',
              'value': user_id_value
          }
      ]
  }
  # In practice a tag carries at most one 'fieldsToSet' parameter, but keep
  # scanning the whole list as before.  (Dead `pass` statements removed.)
  for parameter in tag['parameter']:
    if parameter['key'] == 'fieldsToSet':
      parameter['list'].append(user_id_object)
  return tag
def UpdateTagWithUserID(service, account_id, container_id, tag):
# """Update a Tag with a Rule.
# Args:
# service: the Tag Manager service object.
# account_id: the ID of the account holding the container.
# container_id: the ID of the container to create the rule in.
# tag_id: the ID of the tag to associate with the rule.
# rule_id: the ID of the rule to associate with the tag.
# """
# # Get the tag to update.
# tag = service.accounts().containers().tags().get(
# accountId=account_id,
# containerId=container_id,
# tagId=tag_id).execute()
# Update the Firing Rule for the Tag.
# tag = DefineFieldToSetWithUserID(tag, 'user id')
user_id_present = False
field_to_set_present = False
for parameter in tag['parameter']:
if parameter['key'] == 'fieldsToSet':
field_to_set_present = True
for maps in parameter['list']:
for field in maps['map']:
if field['value'] == '&uid':
user_id_present = True
break
pass
pass
pass
pass
pass
if user_id_present == False and field_to_set_present == False:
print 'Will Add Field to Set AND User ID'
tag = DefineFieldToSetWithUserID(tag, '{{user id}}')
tag_id = tag['tagId']
# Update the Tag.
response = service.accounts().containers().tags().update(
accountId=account_id,
containerId=container_id,
tagId=tag_id,
body=tag).execute()
pass
if user_id_present == False and field_to_set_present == True:
print 'Will Add User ID to Field to Set'
tag = AddFieldToSetWithUserID(tag,'{{user id}}')
tag_id = tag['tagId']
# Update the Tag.
response = service.accounts().containers().tags().update(
accountId=account_id,
containerId=container_id,
tagId=tag_id,
body=tag).execute()
pass
if user_id_present == True:
print 'Tag already has User ID'
pass
def ReturnAllUniversalAnalyticsTags(service, account_id, container_id):
    """Return a list of all Universal Analytics ('ua') tags in the container."""
    response = service.accounts().containers().tags().list(
        accountId=account_id,
        containerId=container_id).execute()
    # Keep only Universal Analytics tags.
    return [tag for tag in response['tags'] if tag['type'] == 'ua']
def ReturnAllTags(service, account_id, container_id):
    """Return the raw tags.list() API response for the container."""
    request = service.accounts().containers().tags().list(
        accountId=account_id,
        containerId=container_id)
    return request.execute()
def ReturnAllTriggers(service, account_id, container_id):
    """Return the raw triggers.list() API response for the container."""
    request = service.accounts().containers().triggers().list(
        accountId=account_id,
        containerId=container_id)
    return request.execute()
def DeleteTagWithTagId(service, account_id, container_id, tag_id):
    # Delete a single tag by ID.  Best-effort: request-construction errors
    # are printed, not re-raised, so batch deletion loops keep going.
    try:
        service.accounts().containers().tags().delete(
            accountId=account_id,
            containerId=container_id,
            tagId=tag_id).execute()
        print 'Deleted tagId: {}'.format(tag_id)
    except TypeError, error:  # Python 2 except syntax; bad query arguments
        print 'There was an error in building the query: %s' %error
    # NOTE(review): HttpError handling is commented out, so API-level
    # failures propagate to the caller — confirm that is intended.
    # except HttpError, error:
    #   print ('There was an API error: %s :%s' % (error.resp.status, error.resp.reason))
def DeleteTriggerWithTriggerId(service, account_id, container_id, trigger_id):
    # Delete a single trigger by ID.  Best-effort: request-construction
    # errors are printed, not re-raised, so batch loops keep going.
    try:
        service.accounts().containers().triggers().delete(
            accountId=account_id,
            containerId=container_id,
            triggerId=trigger_id).execute()
        print 'Deleted triggerId: {}'.format(trigger_id)
    except TypeError, error:  # Python 2 except syntax; bad query arguments
        print 'There was an error in building the query: %s' %error
    # NOTE(review): HttpError handling is commented out, so API-level
    # failures propagate to the caller — confirm that is intended.
    # except HttpError, error:
    #   print ('There was an API error: %s :%s' % (error.resp.status, error.resp.reason))
def DeleteAllTriggersThatHaveNoTag(service, account_id, container_id):
    """Delete every trigger in the container that no tag references.

    A trigger counts as referenced if any tag lists it under any of the
    firing/blocking trigger- or rule-ID keys.
    """
    all_trigger_ids = [
        trigger['triggerId']
        for trigger in ReturnAllTriggers(service, account_id,
                                         container_id)['triggers']
    ]
    # Collect every trigger ID referenced by any tag, across both the
    # trigger-style and the legacy rule-style link keys.
    referenced = set()
    link_keys = ('firingTriggerId', 'blockingTriggerId',
                 'firingRuleId', 'blockingRuleId')
    for tag in ReturnAllTags(service, account_id, container_id)['tags']:
        for key in link_keys:
            if key in tag:
                referenced.update(tag[key])
    # Delete only the triggers no tag points at.
    for trigger_id in all_trigger_ids:
        if trigger_id not in referenced:
            DeleteTriggerWithTriggerId(service, account_id, container_id,
                                       trigger_id)
def DeleteAllTagsThatHaveNoTriggers(service, account_id, container_id):
    # Delete every tag that has no firing trigger, except teardown tags
    # and the hard-coded tag '741'.
    # NOTE(review): '741' is a magic tag ID with no explanation here —
    # presumably a tag that must survive cleanup; confirm before reuse.
    tags = ReturnAllTags(service, account_id, container_id)
    tags = tags['tags']
    for tag in tags:
        if 'firingTriggerId' in tag:
            pass  # still wired to at least one trigger — keep
        elif 'teardownTag' in tag:
            pass  # teardown tags have no firing trigger by design — keep
        elif tag['tagId'] == '741':
            pass  # explicitly exempted tag — keep
        else:
            print tag['tagId']
            DeleteTagWithTagId(service, account_id, container_id, tag['tagId'])
    pass
def DeleteVariableWithVariableID(service, account_id, container_id, variable_id):
    # Delete a single variable by ID.  Best-effort: errors are printed,
    # not re-raised, so batch deletion loops keep going.
    try:
        service.accounts().containers().variables().delete(
            accountId=account_id,
            containerId=container_id,
            variableId=variable_id).execute()
        print 'Deleted variableId: {}'.format(variable_id)
    except TypeError, error:  # Python 2 except syntax; bad query arguments
        print 'There was an error in building the query: %s' %error
    # NOTE(review): HttpError is referenced here but no import of it is
    # visible in this chunk — verify it is imported at the top of the file,
    # otherwise this handler itself raises NameError.
    except HttpError, error:
        print ('There was an API error: %s :%s' % (error.resp.status, error.resp.reason))
def main(argv):
    # Find and delete unused GTM variables in the greetings container.
    # Usage detection is a substring scan of '{{name}}' over the str()
    # of every tag / JS variable / lookup variable / trigger resource.
    # Get tag manager account ID from command line.
    # NOTE(review): the "and '<usage>'" makes the message part of the
    # asserted expression (always truthy), so it is never shown on
    # failure; "assert cond, msg" was probably intended.
    assert len(argv) == 2 and 'usage: test-gtm.py <account_id>'
    account_id = str(argv[1])
    # Define the auth scopes to request.
    scope = ['https://www.googleapis.com/auth/tagmanager.edit.containers']
    # Authenticate and construct service.
    service = GetService('tagmanager', 'v1', scope, 'client_secrets.json')
    # Find the greetings container.
    container_id = FindGreetingsContainerId(service, account_id)
    triggers = ReturnAllTriggers(service, account_id, container_id)
    triggers = triggers['triggers']
    variables = service.accounts().containers().variables().list(
        accountId=account_id,
        containerId=container_id).execute()
    # Total List of Variable IDs in the GTM Container
    variableObject = []
    variables = variables['variables']
    for variable in variables:
        variableId = variable['variableId']
        variableName = variable['name']
        variableObject.append(
            {
                'name': variableName,
                'variableId': variableId
            })
    # All Custom JavaScript Variables
    customJavaScriptVariables = []
    for variable in variables:
        if variable['type'] == 'jsm':
            customJavaScriptVariables.append(variable)
    # All LookUp Variables
    lookupVariable = []
    for variable in variables:
        if variable['type'] == 'smm':
            lookupVariable.append(variable)
    # Total List of Tags in the GTM Container
    tags = ReturnAllTags(service, account_id, container_id)
    tags = tags['tags']
    usedVariables = []
    for variable in variableObject:
        name = variable['name']
        seq = ('{{', name, '}}')
        gtmVariableName = ''.join(seq)  # GTM reference syntax: {{name}}
        # Stringify Tags and scan for the variable reference.
        for tag in tags:
            tagString = str(tag)
            isValuePresent = tagString.find(gtmVariableName)
            if isValuePresent != -1:
                usedVariables.append(variable['variableId'])
                print 'Variable ID: {} is present'.format(variable['variableId'])
        # Variables may also be referenced from other JS variables...
        for jsmVariable in customJavaScriptVariables:
            jsmVariableString = str(jsmVariable)
            isValuePresent = jsmVariableString.find(gtmVariableName)
            if isValuePresent != -1:
                usedVariables.append(variable['variableId'])
                print 'Variable ID: {} is present'.format(variable['variableId'])
        # ...from lookup-table variables...
        for smmVariable in lookupVariable:
            smmVariableString = str(smmVariable)
            isValuePresent = smmVariableString.find(gtmVariableName)
            if isValuePresent != -1:
                usedVariables.append(variable['variableId'])
                print 'Variable ID: {} is present'.format(variable['variableId'])
        # ...and from triggers.
        for trigger in triggers:
            triggerString = str(trigger)
            isValuePresent = triggerString.find(gtmVariableName)
            if isValuePresent != -1:
                usedVariables.append(variable['variableId'])
                print 'Variable ID: {} is present'.format(variable['variableId'])
    usedVariables = set(usedVariables)
    unusedVariableIds = []
    for variable in variableObject:
        variableId = variable['variableId']
        if variableId in usedVariables:
            print 'VariableId: {} is Used'.format(variableId)
            pass
        else:
            print 'VariableId: {} is Unused'.format(variableId)
            unusedVariableIds.append(variableId)
    # NOTE(review): hard-coded variable IDs exempted from deletion;
    # list.remove() raises ValueError if any of these is not actually
    # in the unused list — confirm these IDs against the live container.
    unusedVariableIds.remove('817')
    unusedVariableIds.remove('823')
    unusedVariableIds.remove('824')
    unusedVariableIds.remove('855')
    unusedVariableIds.remove('857')
    print 'Unused:'
    print unusedVariableIds
    print 'Used:'
    print usedVariables
    # Destructive step: delete every remaining unused variable.
    for variableId in unusedVariableIds:
        print variableId
        DeleteVariableWithVariableID(service, account_id, container_id, variableId)
if __name__ == "__main__":
main(sys.argv) | {
"repo_name": "vlee90/python-gtm-scripts",
"path": "triggers.py",
"copies": "1",
"size": "13135",
"license": "apache-2.0",
"hash": 3606298544060551000,
"line_mean": 29.691588785,
"line_max": 87,
"alpha_frac": 0.6738484964,
"autogenerated": false,
"ratio": 3.839520608009354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5013369104409353,
"avg_score": null,
"num_lines": null
} |
"""Access and/or modify INI files
* Compatible with ConfigParser
* Preserves order of sections & options
* Preserves comments/blank lines/etc
* More convenient access to data
Example:
>>> from StringIO import StringIO
>>> sio = StringIO('''# configure foo-application
... [foo]
... bar1 = qualia
... bar2 = 1977
... [foo-ext]
... special = 1''')
>>> cfg = INIConfig(sio)
>>> print cfg.foo.bar1
qualia
>>> print cfg['foo-ext'].special
1
>>> cfg.foo.newopt = 'hi!'
>>> print cfg
# configure foo-application
[foo]
bar1 = qualia
bar2 = 1977
newopt = hi!
[foo-ext]
special = 1
"""
# An ini parser that supports ordered sections/options
# Also supports updates, while preserving structure
# Backward-compatible with ConfigParser
try:
set
except NameError:
from sets import Set as set
import re
from ConfigParser import DEFAULTSECT, ParsingError, MissingSectionHeaderError
import config
class LineType(object):
    """Abstract base for one parsed line of an INI file.

    Keeps the verbatim input text so unmodified lines round-trip
    byte-for-byte; once any attribute changes, the cached text is
    dropped and the line is re-rendered by to_string().
    """
    line = None  # verbatim original text, or None once the object is modified

    def __init__(self, line=None):
        if line is not None:
            self.line = line.strip('\n')

    # Return the original line for unmodified objects
    # Otherwise construct using the current attribute values
    def __str__(self):
        if self.line is not None:
            return self.line
        else:
            return self.to_string()

    # If an attribute is modified after initialization
    # set line to None since it is no longer accurate.
    def __setattr__(self, name, value):
        if hasattr(self, name):
            # re-assignment of an existing attribute invalidates the cache
            self.__dict__['line'] = None
        self.__dict__[name] = value

    def to_string(self):
        # Subclasses must render themselves from their attributes.
        raise Exception('This method must be overridden in derived classes')
class SectionLine(LineType):
    """A '[name]' section-header line, with optional trailing ;/# comment."""
    regex = re.compile(r'^\['
                       r'(?P<name>[^]]+)'
                       r'\]\s*'
                       r'((?P<csep>;|#)(?P<comment>.*))?$')

    def __init__(self, name, comment=None, comment_separator=None,
                 comment_offset=-1, line=None):
        super(SectionLine, self).__init__(line)
        self.name = name
        self.comment = comment
        self.comment_separator = comment_separator
        self.comment_offset = comment_offset  # column where the comment started

    def to_string(self):
        out = '[' + self.name + ']'
        if self.comment is not None:
            # try to preserve indentation of comments
            out = (out+' ').ljust(self.comment_offset)
            out = out + self.comment_separator + self.comment
        return out

    def parse(cls, line):
        # Return a SectionLine for a matching line, else None.
        m = cls.regex.match(line.rstrip())
        if m is None:
            return None
        return cls(m.group('name'), m.group('comment'),
                   m.group('csep'), m.start('csep'),
                   line)
    parse = classmethod(parse)
class OptionLine(LineType):
    """A 'name = value' or 'name: value' option line, with optional
    trailing ';' comment."""

    def __init__(self, name, value, separator=' = ', comment=None,
                 comment_separator=None, comment_offset=-1, line=None):
        super(OptionLine, self).__init__(line)
        self.name = name
        self.value = value
        self.separator = separator  # exact separator text, e.g. ' = ' or ': '
        self.comment = comment
        self.comment_separator = comment_separator
        self.comment_offset = comment_offset  # column where the comment started

    def to_string(self):
        out = '%s%s%s' % (self.name, self.separator, self.value)
        if self.comment is not None:
            # try to preserve indentation of comments
            out = (out+' ').ljust(self.comment_offset)
            out = out + self.comment_separator + self.comment
        return out

    regex = re.compile(r'^(?P<name>[^:=\s[][^:=]*)'
                       r'(?P<sep>[:=]\s*)'
                       r'(?P<value>.*)$')

    def parse(cls, line):
        # Return an OptionLine for a matching line, else None.
        m = cls.regex.match(line.rstrip())
        if m is None:
            return None
        name = m.group('name').rstrip()
        value = m.group('value')
        # Preserve whitespace between the name and the :/= in the separator.
        sep = m.group('name')[len(name):] + m.group('sep')
        # comments are not detected in the regex because
        # ensuring total compatibility with ConfigParser
        # requires that:
        # option = value ;comment // value=='value'
        # option = value;1 ;comment // value=='value;1 ;comment'
        #
        # Doing this in a regex would be complicated. I
        # think this is a bug. The whole issue of how to
        # include ';' in the value needs to be addressed.
        # Also, '#' doesn't mark comments in options...
        coff = value.find(';')
        # A ';' only starts a comment when preceded by whitespace.
        # NOTE(review): when coff == 0, value[coff-1] inspects the LAST
        # character of value — a latent quirk for values starting with ';'.
        if coff != -1 and value[coff-1].isspace():
            comment = value[coff+1:]
            csep = value[coff]
            value = value[:coff].rstrip()
            coff = m.start('value') + coff
        else:
            comment = None
            csep = None
            coff = -1
        return cls(name, value, sep, comment, csep, coff, line)
    parse = classmethod(parse)
class CommentLine(LineType):
    """A full-line comment: ';', '#', or (case-insensitive) 'rem' prefix."""
    regex = re.compile(r'^(?P<csep>[;#]|[rR][eE][mM])'
                       r'(?P<comment>.*)$')

    def __init__(self, comment='', separator='#', line=None):
        super(CommentLine, self).__init__(line)
        self.comment = comment
        self.separator = separator

    def to_string(self):
        return self.separator + self.comment

    def parse(cls, line):
        # Return a CommentLine for a matching line, else None.
        m = cls.regex.match(line.rstrip())
        if m is None:
            return None
        return cls(m.group('comment'), m.group('csep'), line)
    parse = classmethod(parse)
class EmptyLine(LineType):
    """A blank (whitespace-only) line."""
    # could make this a singleton
    def to_string(self):
        return ''

    # Empty lines contribute an empty string to multi-line values.
    value = property(lambda _: '')

    def parse(cls, line):
        # Only whitespace-only lines qualify; anything else returns None.
        if line.strip(): return None
        return cls(line)
    parse = classmethod(parse)
class ContinuationLine(LineType):
    """An indented line continuing the previous option's value."""
    regex = re.compile(r'^\s+(?P<value>.*)$')

    def __init__(self, value, value_offset=None, line=None):
        super(ContinuationLine, self).__init__(line)
        self.value = value
        if value_offset is None:
            value_offset = 8  # default indent when rendering new continuations
        self.value_offset = value_offset

    def to_string(self):
        return ' '*self.value_offset + self.value

    def parse(cls, line):
        # Return a ContinuationLine for an indented line, else None.
        m = cls.regex.match(line.rstrip())
        if m is None:
            return None
        return cls(m.group('value'), m.start('value'), line)
    parse = classmethod(parse)
class LineContainer(object):
    """Groups LineType objects that belong together: an option plus its
    continuation lines, or a section header plus its contents."""

    def __init__(self, d=None):
        self.contents = []
        self.orgvalue = None  # value as assigned by the user, pre-splitting
        if d:
            if isinstance(d, list): self.extend(d)
            else: self.add(d)

    def add(self, x):
        self.contents.append(x)

    def extend(self, x):
        for i in x: self.add(i)

    def get_name(self):
        # name of the leading line (SectionLine or OptionLine)
        return self.contents[0].name

    def set_name(self, data):
        self.contents[0].name = data

    def get_value(self):
        if self.orgvalue is not None:
            return self.orgvalue
        elif len(self.contents) == 1:
            return self.contents[0].value
        else:
            # multi-line value: join option + continuation lines,
            # skipping any interleaved comment lines
            return '\n'.join([('%s' % x.value) for x in self.contents
                              if not isinstance(x, CommentLine)])

    def set_value(self, data):
        self.orgvalue = data
        lines = ('%s' % data).split('\n')
        # If there is an existing ContinuationLine, use its offset
        value_offset = None
        for v in self.contents:
            if isinstance(v, ContinuationLine):
                value_offset = v.value_offset
                break
        # Rebuild contents list, preserving initial OptionLine
        self.contents = self.contents[0:1]
        self.contents[0].value = lines[0]
        del lines[0]
        for line in lines:
            if line.strip():
                self.add(ContinuationLine(line, value_offset))
            else:
                self.add(EmptyLine())

    name = property(get_name, set_name)
    value = property(get_value, set_value)

    def __str__(self):
        s = [x.__str__() for x in self.contents]
        return '\n'.join(s)

    def finditer(self, key):
        # yield matching named children, most recently added first
        for x in self.contents[::-1]:
            if hasattr(x, 'name') and x.name==key:
                yield x

    def find(self, key):
        for x in self.finditer(key):
            return x
        raise KeyError(key)
def _make_xform_property(myattrname, srcattrname=None):
private_attrname = myattrname + 'value'
private_srcname = myattrname + 'source'
if srcattrname is None:
srcattrname = myattrname
def getfn(self):
srcobj = getattr(self, private_srcname)
if srcobj is not None:
return getattr(srcobj, srcattrname)
else:
return getattr(self, private_attrname)
def setfn(self, value):
srcobj = getattr(self, private_srcname)
if srcobj is not None:
setattr(srcobj, srcattrname, value)
else:
setattr(self, private_attrname, value)
return property(getfn, setfn)
class INISection(config.ConfigNamespace):
    """A single INI section; exposes its options as attributes/items,
    falling back to the [DEFAULT] section for missing keys."""
    _lines = None      # list of LineContainer(s) backing this section
    _options = None    # transformed option name -> LineContainer
    _defaults = None   # INISection used for DEFAULT fallbacks
    _optionxformvalue = None
    _optionxformsource = None
    # NOTE(review): class-level mutable set — shared by every instance
    # that never reassigns it; confirm that sharing is intended.
    _compat_skip_empty_lines = set()

    def __init__(self, lineobj, defaults = None,
                 optionxformvalue=None, optionxformsource=None):
        self._lines = [lineobj]
        self._defaults = defaults
        self._optionxformvalue = optionxformvalue
        self._optionxformsource = optionxformsource
        self._options = {}

    _optionxform = _make_xform_property('_optionxform')

    def _compat_get(self, key):
        # identical to __getitem__ except that _compat_XXX
        # is checked for backward-compatible handling
        if key == '__name__':
            return self._lines[-1].name
        if self._optionxform: key = self._optionxform(key)
        try:
            value = self._options[key].value
            del_empty = key in self._compat_skip_empty_lines
        except KeyError:
            if self._defaults and key in self._defaults._options:
                value = self._defaults._options[key].value
                del_empty = key in self._defaults._compat_skip_empty_lines
            else:
                raise
        if del_empty:
            # collapse runs of blank lines, as ConfigParser would
            value = re.sub('\n+', '\n', value)
        return value

    def __getitem__(self, key):
        if key == '__name__':
            return self._lines[-1].name
        if self._optionxform: key = self._optionxform(key)
        try:
            return self._options[key].value
        except KeyError:
            # fall back to the [DEFAULT] section
            if self._defaults and key in self._defaults._options:
                return self._defaults._options[key].value
            else:
                raise

    def __setitem__(self, key, value):
        if self._optionxform: xkey = self._optionxform(key)
        else: xkey = key
        if xkey in self._compat_skip_empty_lines:
            self._compat_skip_empty_lines.remove(xkey)
        if xkey not in self._options:
            # create a dummy object - value may have multiple lines
            obj = LineContainer(OptionLine(key, ''))
            self._lines[-1].add(obj)
            self._options[xkey] = obj
        # the set_value() function in LineContainer
        # automatically handles multi-line values
        self._options[xkey].value = value

    def __delitem__(self, key):
        if self._optionxform: key = self._optionxform(key)
        if key in self._compat_skip_empty_lines:
            self._compat_skip_empty_lines.remove(key)
        # remove every occurrence of the option from every backing container
        for l in self._lines:
            remaining = []
            for o in l.contents:
                if isinstance(o, LineContainer):
                    n = o.name
                    if self._optionxform: n = self._optionxform(n)
                    if key != n: remaining.append(o)
                else:
                    remaining.append(o)
            l.contents = remaining
        del self._options[key]

    def __iter__(self):
        # yield each (transformed) option name once, in file order,
        # then any names only present in the DEFAULT section
        d = set()
        for l in self._lines:
            for x in l.contents:
                if isinstance(x, LineContainer):
                    if self._optionxform:
                        ans = self._optionxform(x.name)
                    else:
                        ans = x.name
                    if ans not in d:
                        yield ans
                        d.add(ans)
        if self._defaults:
            for x in self._defaults:
                if x not in d:
                    yield x
                    d.add(x)

    def _new_namespace(self, name):
        # INI files are flat: sections cannot nest
        raise Exception('No sub-sections allowed', name)
def make_comment(line):
    """Wrap *line* (sans trailing newline) in a CommentLine."""
    stripped = line.rstrip('\n')
    return CommentLine(stripped)
def readline_iterator(f):
    """iterate over a file by only using the file object's readline method

    Yields each physical line; if the final line ended with a newline,
    a trailing empty string is yielded so callers can tell the two
    end-of-file shapes apart."""
    saw_newline = False
    line = f.readline()
    while line:
        saw_newline = line.endswith('\n')
        yield line
        line = f.readline()
    if saw_newline:
        yield ""
def lower(x):
    """Default optionxform: case-fold the option name to lower case."""
    folded = x.lower()
    return folded
class INIConfig(config.ConfigNamespace):
    """An INI file as a namespace of INISection objects.

    Preserves comments, blank lines and ordering on round-trip, and
    mirrors the ConfigParser data model (including the DEFAULT section).
    """
    _data = None               # LineContainer holding every line of the file
    _sections = None           # transformed section name -> INISection
    _defaults = None           # INISection backing [DEFAULT]
    _optionxformvalue = None
    _optionxformsource = None
    _sectionxformvalue = None
    _sectionxformsource = None
    _parse_exc = None          # raise on parse errors vs. keep lines as comments
    _bom = False               # input started with a Unicode BOM

    def __init__(self, fp=None, defaults=None, parse_exc=True,
                 optionxformvalue=lower, optionxformsource=None,
                 sectionxformvalue=None, sectionxformsource=None):
        self._data = LineContainer()
        self._parse_exc = parse_exc
        self._optionxformvalue = optionxformvalue
        self._optionxformsource = optionxformsource
        self._sectionxformvalue = sectionxformvalue
        self._sectionxformsource = sectionxformsource
        self._sections = {}
        if defaults is None: defaults = {}
        self._defaults = INISection(LineContainer(), optionxformsource=self)
        for name, value in defaults.iteritems():
            self._defaults[name] = value
        if fp is not None:
            self._readfp(fp)

    _optionxform = _make_xform_property('_optionxform', 'optionxform')
    _sectionxform = _make_xform_property('_sectionxform', 'optionxform')

    def __getitem__(self, key):
        if key == DEFAULTSECT:
            return self._defaults
        if self._sectionxform: key = self._sectionxform(key)
        return self._sections[key]

    def __setitem__(self, key, value):
        # options can only live inside a section
        raise Exception('Values must be inside sections', key, value)

    def __delitem__(self, key):
        if self._sectionxform: key = self._sectionxform(key)
        # drop every physical block belonging to the section
        for line in self._sections[key]._lines:
            self._data.contents.remove(line)
        del self._sections[key]

    def __iter__(self):
        d = set()
        # seed with DEFAULTSECT so the DEFAULT section is never yielded
        d.add(DEFAULTSECT)
        for x in self._data.contents:
            if isinstance(x, LineContainer):
                if x.name not in d:
                    yield x.name
                    d.add(x.name)

    def _new_namespace(self, name):
        # Called when attribute/item assignment creates a new section.
        if self._data.contents:
            self._data.add(EmptyLine())
        obj = LineContainer(SectionLine(name))
        self._data.add(obj)
        if self._sectionxform: name = self._sectionxform(name)
        if name in self._sections:
            # duplicate section header: extend the existing namespace
            ns = self._sections[name]
            ns._lines.append(obj)
        else:
            ns = INISection(obj, defaults=self._defaults,
                            optionxformsource=self)
            self._sections[name] = ns
        return ns

    def __str__(self):
        if self._bom:
            # re-emit the BOM that was stripped on input
            fmt = u'\ufeff%s'
        else:
            fmt = '%s'
        return fmt % self._data.__str__()

    __unicode__ = __str__

    # line classes tried in order by _parse()
    _line_types = [EmptyLine, CommentLine,
                   SectionLine, OptionLine,
                   ContinuationLine]

    def _parse(self, line):
        for linetype in self._line_types:
            lineobj = linetype.parse(line)
            if lineobj:
                return lineobj
        else:
            # can't parse line
            return None

    def _readfp(self, fp):
        # Parse *fp* line by line, building the LineContainer tree and the
        # section/option indexes.  Comments and blank lines are buffered in
        # pending_lines so they attach to the following construct.
        cur_section = None
        cur_option = None
        cur_section_name = None
        cur_option_name = None
        pending_lines = []
        pending_empty_lines = False
        try:
            fname = fp.name
        except AttributeError:
            fname = '<???>'
        linecount = 0
        exc = None
        line = None
        for line in readline_iterator(fp):
            # Check for BOM on first line
            if linecount == 0 and isinstance(line, unicode):
                if line[0] == u'\ufeff':
                    line = line[1:]
                    self._bom = True
            lineobj = self._parse(line)
            linecount += 1
            # Content before any section header is an error (or, when not
            # raising, demoted to a comment).
            if not cur_section and not isinstance(lineobj,
                                (CommentLine, EmptyLine, SectionLine)):
                if self._parse_exc:
                    raise MissingSectionHeaderError(fname, linecount, line)
                else:
                    lineobj = make_comment(line)
            if lineobj is None:
                # unparseable line: record the error, keep it as a comment
                if self._parse_exc:
                    if exc is None: exc = ParsingError(fname)
                    exc.append(linecount, line)
                lineobj = make_comment(line)
            if isinstance(lineobj, ContinuationLine):
                if cur_option:
                    # flush buffered comments/blanks into the option first
                    if pending_lines:
                        cur_option.extend(pending_lines)
                        pending_lines = []
                    if pending_empty_lines:
                        optobj._compat_skip_empty_lines.add(cur_option_name)
                        pending_empty_lines = False
                    cur_option.add(lineobj)
                else:
                    # illegal continuation line - convert to comment
                    if self._parse_exc:
                        if exc is None: exc = ParsingError(fname)
                        exc.append(linecount, line)
                    lineobj = make_comment(line)
            if isinstance(lineobj, OptionLine):
                if pending_lines:
                    cur_section.extend(pending_lines)
                    pending_lines = []
                pending_empty_lines = False
                cur_option = LineContainer(lineobj)
                cur_section.add(cur_option)
                if self._optionxform:
                    cur_option_name = self._optionxform(cur_option.name)
                else:
                    cur_option_name = cur_option.name
                if cur_section_name == DEFAULTSECT:
                    optobj = self._defaults
                else:
                    optobj = self._sections[cur_section_name]
                optobj._options[cur_option_name] = cur_option
            if isinstance(lineobj, SectionLine):
                self._data.extend(pending_lines)
                pending_lines = []
                pending_empty_lines = False
                cur_section = LineContainer(lineobj)
                self._data.add(cur_section)
                cur_option = None
                cur_option_name = None
                if cur_section.name == DEFAULTSECT:
                    self._defaults._lines.append(cur_section)
                    cur_section_name = DEFAULTSECT
                else:
                    if self._sectionxform:
                        cur_section_name = self._sectionxform(cur_section.name)
                    else:
                        cur_section_name = cur_section.name
                    if cur_section_name not in self._sections:
                        self._sections[cur_section_name] = \
                                INISection(cur_section, defaults=self._defaults,
                                           optionxformsource=self)
                    else:
                        # duplicate header: extend the existing section
                        self._sections[cur_section_name]._lines.append(cur_section)
            if isinstance(lineobj, (CommentLine, EmptyLine)):
                pending_lines.append(lineobj)
                if isinstance(lineobj, EmptyLine):
                    pending_empty_lines = True
        self._data.extend(pending_lines)
        # readline_iterator yields a final '' when the file ended with a
        # newline; preserve that trailing blank line on output.
        if line and line[-1]=='\n':
            self._data.add(EmptyLine())
        if exc:
            raise exc
| {
"repo_name": "m00dawg/holland",
"path": "plugins/holland.backup.mysqldump/holland/backup/mysqldump/util/ini.py",
"copies": "1",
"size": "20347",
"license": "bsd-3-clause",
"hash": 2260486351616587500,
"line_mean": 31.0930599369,
"line_max": 83,
"alpha_frac": 0.5350665946,
"autogenerated": false,
"ratio": 4.219618415595189,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5254685010195188,
"avg_score": null,
"num_lines": null
} |
# Access and save reviews from the Goodreads API
from goodreads import client
import pandas as pd
from dateutil.parser import parse
import time
import pickle
from my_settings import api_key, api_secret, username # loading private information

# Authenticate against the Goodreads API and fetch the account's user object.
gc = client.GoodreadsClient(api_key, api_secret)
gc.authenticate()
user = gc.user(username=username)

data = list()
page = 1
# Loop over the pages grabbing all the reviews
# (the API raises KeyError when a page past the end is requested; that
# exception is the loop's termination condition).
try:
    while True:
        reviews = user.reviews(page)
        for review in reviews:
            # Eliminate non-reviews
            if review.body is None:
                continue
            # Process how long it took me to read
            # timespan = pd.to_datetime(review.read_at) - pd.to_datetime(review.started_at) # messes up timezone
            try:
                timespan = parse(review.read_at) - parse(review.started_at)
                timespan = timespan.days
            except:
                # NOTE(review): bare except — also hides unexpected errors,
                # not just missing/unparseable dates; consider narrowing.
                timespan = None
            # print(review.book['title'])
            # print('{}/5'.format(review.rating))
            # print(review.body)
            # print('{} - {}'.format(review.started_at, review.read_at))
            # print('{} days'.format(timespan.days))
            data.append([review.book['title'],
                         review.book['authors']['author']['name'],
                         review.rating,
                         review.body,
                         timespan,
                         parse(review.read_at).year,
                         review.book['publication_year'],
                         review.book['publication_month'],
                         review.book['ratings_count'],
                         review.book['average_rating'],
                         review.book['num_pages']])
        page += 1
        time.sleep(1)  # throttle: one page request per second
except KeyError: # done with reviews
    print('Could not grab additional reviews')
    pass

# Assemble all collected rows into a DataFrame.
columns = ['title', 'author', 'rating', 'text', 'timespan', 'year_read', 'publication_year',
           'publication_month', 'number_ratings', 'average_rating', 'number_pages']
df = pd.DataFrame(data, columns=columns)
# Save to pickle to avoid re-accessing API.
# 'wb' (binary mode) is required: pickle writes bytes, which fails with a
# text-mode handle on Python 3 and corrupts data on Windows under Python 2.
with open('data/reviews.pkl', 'wb') as f:
    pickle.dump(df, f)
# Extract additional author information from Goodreads
author_data = list()
for name in set(df['author']):  # one lookup per distinct author
    author = gc.find_author(name)
    print(name)
    try:
        print(author._author_dict)
        works = author.works_count
        fans = author.fans_count()['#text']
        town = author.hometown
        gender = author.gender
    except:
        # NOTE(review): bare except — any missing field blanks out ALL
        # four attributes; consider narrowing to the expected errors.
        works, fans, town, gender = '', '', '', ''
    author_data.append([name, works, fans, town, gender])
    time.sleep(1)  # throttle: one author request per second

# Assemble author rows and coerce numeric columns where possible.
columns = ['author', 'works', 'fans', 'hometown', 'gender']
df_author = pd.DataFrame(author_data, columns=columns)
for col in ['works', 'fans']:
    df_author[col] = pd.to_numeric(df_author[col], errors='ignore')
# 'wb' (binary mode) is required: pickle writes bytes, which fails with a
# text-mode handle on Python 3 and corrupts data on Windows under Python 2.
with open('data/author_info.pkl', 'wb') as f:
    pickle.dump(df_author, f)
| {
"repo_name": "dr-rodriguez/Exploring-Goodreads",
"path": "api_load.py",
"copies": "1",
"size": "3006",
"license": "mit",
"hash": 5200624348503774000,
"line_mean": 31.3225806452,
"line_max": 113,
"alpha_frac": 0.5755156354,
"autogenerated": false,
"ratio": 3.8737113402061856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9938283188179085,
"avg_score": 0.0021887574854202113,
"num_lines": 93
} |
""" Access backend for storing permissions in using SQLAlchemy """
from sqlalchemy import (engine_from_config, Column, String, Text, Boolean, Table,
ForeignKey)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, backref
from sqlalchemy import orm
# pylint: disable=F0401,E0611,W0403
from zope.sqlalchemy import ZopeTransactionExtension
# pylint: enable=F0401,E0611,W0403
from .base import IMutableAccessBackend
# pylint: disable=C0103,W0231
Base = declarative_base()
# Many-to-many join table linking users to groups; membership rows are
# removed automatically (ON DELETE CASCADE) when either side is deleted.
association_table = Table(
    'pypicloud_user_groups', Base.metadata,
    Column('username', String(length=255),
           ForeignKey('pypicloud_users.username', ondelete='CASCADE'),
           primary_key=True),
    Column('group', String(length=255),
           ForeignKey('pypicloud_groups.name', ondelete='CASCADE'),
           primary_key=True)
)
# pylint: enable=C0103
class KeyVal(Base):
    """ Simple model for storing key-value pairs """
    __tablename__ = 'pypicloud_keyvals'
    key = Column(String(length=255), primary_key=True)
    value = Column(Text())

    def __init__(self, key, value):
        self.key = key
        self.value = value
class User(Base):
    """ User record """
    __tablename__ = 'pypicloud_users'
    username = Column(String(length=255), primary_key=True)
    password = Column('password', Text(), nullable=False)  # stored hash
    admin = Column(Boolean(), nullable=False)
    pending = Column(Boolean(), nullable=False)  # awaiting admin approval
    # Group membership via the user<->group join table; deleting either
    # side cascades through the association rows.
    groups = orm.relationship('Group', secondary=association_table,
                              cascade='all', collection_class=set,
                              backref=backref('users', collection_class=set))

    def __init__(self, username, password, pending=True):
        # *password* is expected to already be hashed by the caller
        self.username = username
        self.password = password
        self.groups = set()
        self.permissions = []
        self.admin = False
        self.pending = pending
class Group(Base):
    """ Group record """
    __tablename__ = 'pypicloud_groups'
    name = Column(String(length=255), primary_key=True)

    def __init__(self, name):
        self.name = name
        self.users = set()       # populated via the User.groups backref
        self.permissions = []    # populated via the GroupPermission backref
class Permission(Base):
    """ Base class for user and group permissions """
    __abstract__ = True  # no table of its own; subclasses define one
    package = Column(String(length=255), primary_key=True)
    read = Column(Boolean())
    write = Column(Boolean())

    def __init__(self, package, read, write):
        self.package = package
        self.read = read
        self.write = write

    @property
    def permissions(self):
        """ Construct permissions list """
        # Renders the two boolean columns as the ['read', 'write'] list
        # format the access-backend API uses.
        perms = []
        if self.read:
            perms.append('read')
        if self.write:
            perms.append('write')
        return perms
# pylint: disable=E1002
class UserPermission(Permission):
    """ Permissions for a user on a package """
    __tablename__ = 'pypicloud_user_permissions'
    username = Column(String(length=255), ForeignKey(User.username), primary_key=True)
    user = orm.relationship("User",
                            backref=backref('permissions',
                                            cascade='all, delete-orphan'))

    def __init__(self, package, username, read=False, write=False):
        super(UserPermission, self).__init__(package, read, write)
        self.username = username
class GroupPermission(Permission):
    """ Permissions for a group on a package """
    __tablename__ = 'pypicloud_group_permissions'
    groupname = Column(String(length=255), ForeignKey(Group.name), primary_key=True)
    group = orm.relationship("Group",
                             backref=backref('permissions',
                                             cascade='all, delete-orphan'))

    def __init__(self, package, groupname, read=False, write=False):
        super(GroupPermission, self).__init__(package, read, write)
        self.groupname = groupname
class SQLAccessBackend(IMutableAccessBackend):
"""
This backend allows you to store all user and package permissions in a SQL
database
"""
    def __init__(self, request=None, dbmaker=None, **kwargs):
        # *dbmaker* is the sessionmaker produced by configure(); a session
        # is opened per request and closed when the request finishes.
        # NOTE(review): both defaults are None, yet dbmaker() and
        # request.add_finished_callback() are called unconditionally —
        # passing the defaults raises TypeError/AttributeError; confirm
        # callers always supply both.
        super(SQLAccessBackend, self).__init__(request, **kwargs)
        self.db = dbmaker()

        def cleanup(_):
            """ Close the session after the request """
            self.db.close()
        request.add_finished_callback(cleanup)
    @classmethod
    def configure(cls, settings):
        # Build the engine from 'auth.db.'-prefixed settings and hand the
        # constructor a transaction-aware sessionmaker.
        kwargs = super(SQLAccessBackend, cls).configure(settings)
        engine = engine_from_config(settings, prefix='auth.db.')
        kwargs['dbmaker'] = sessionmaker(
            bind=engine, extension=ZopeTransactionExtension())
        # Create SQL schema if not exists
        Base.metadata.create_all(bind=engine)
        return kwargs
def allow_register(self):
ret = self.db.query(KeyVal).filter_by(key='allow_register').first()
return ret is None
def set_allow_register(self, allow):
if allow:
self.db.query(KeyVal).filter_by(key='allow_register').delete()
else:
k = KeyVal('allow_register', 'false')
self.db.add(k)
def _get_password_hash(self, username):
user = self.db.query(User).filter_by(username=username).first()
if user:
return user.password
def groups(self, username=None):
if username is None:
query = self.db.query(Group)
return [g.name for g in query]
else:
user = self.db.query(User).filter_by(username=username).first()
if user is None:
return []
return [g.name for g in user.groups]
def group_members(self, group):
g = self.db.query(Group).filter_by(name=group).first()
if not g:
return []
return [u.username for u in g.users]
def is_admin(self, username):
user = self.db.query(User).filter_by(username=username).first()
return user and user.admin
def group_permissions(self, package, group=None):
if group is None:
query = self.db.query(GroupPermission).filter_by(package=package)
perms = {}
for perm in query:
perms[perm.groupname] = perm.permissions
return perms
else:
perm = self.db.query(GroupPermission)\
.filter_by(package=package, groupname=group).first()
if perm:
return perm.permissions
return []
def user_permissions(self, package, username=None):
if username is None:
query = self.db.query(UserPermission).filter_by(package=package)
perms = {}
for perm in query:
perms[perm.username] = perm.permissions
return perms
else:
perm = self.db.query(UserPermission)\
.filter_by(package=package, username=username).first()
if perm:
return perm.permissions
return []
def user_package_permissions(self, username):
query = self.db.query(UserPermission).filter_by(username=username)
packages = []
for perm in query:
packages.append({
'package': perm.package,
'permissions': perm.permissions,
})
return packages
def group_package_permissions(self, group):
query = self.db.query(GroupPermission).filter_by(groupname=group)
packages = []
for perm in query:
packages.append({
'package': perm.package,
'permissions': perm.permissions,
})
return packages
def user_data(self, username=None):
if username is None:
query = self.db.query(User).filter_by(pending=False)
users = []
for user in query:
users.append({
'username': user.username,
'admin': user.admin,
})
return users
else:
user = self.db.query(User).filter_by(username=username,
pending=False).first()
if user is not None:
return {
'username': user.username,
'admin': user.admin,
'groups': [g.name for g in user.groups],
}
def need_admin(self):
return self.db.query(User).filter_by(admin=True).first() is None
def _register(self, username, password):
user = User(username, password)
self.db.add(user)
def pending_users(self):
query = self.db.query(User).filter_by(pending=True)
return [u.username for u in query]
def approve_user(self, username):
user = self.db.query(User).filter_by(username=username).first()
if user is not None:
user.pending = False
def _set_password_hash(self, username, password_hash):
user = self.db.query(User).filter_by(username=username).first()
if user is not None:
user.password = password_hash
def delete_user(self, username):
self.db.query(User).filter_by(username=username).delete()
clause = association_table.c.username == username
self.db.execute(association_table.delete(clause))
def set_user_admin(self, username, admin):
user = self.db.query(User).filter_by(username=username).first()
if user is not None:
user.admin = admin
def edit_user_group(self, username, groupname, add):
user = self.db.query(User).filter_by(username=username).first()
group = self.db.query(Group).filter_by(name=groupname).first()
if user is not None and group is not None:
if add:
user.groups.add(group)
else:
user.groups.remove(group)
def create_group(self, group):
self.db.add(Group(group))
def delete_group(self, group):
self.db.query(Group).filter_by(name=group).delete()
clause = association_table.c.group == group
self.db.execute(association_table.delete(clause))
def edit_user_permission(self, package, username, perm, add):
record = self.db.query(UserPermission)\
.filter_by(package=package, username=username).first()
if record is None:
if not add:
return
record = UserPermission(package, username)
self.db.add(record)
if perm == 'read':
record.read = add
elif perm == 'write':
record.write = add
else:
raise ValueError("Unrecognized permission '%s'" % perm)
if not record.read and not record.write:
self.db.delete(record)
def edit_group_permission(self, package, group, perm, add):
record = self.db.query(GroupPermission)\
.filter_by(package=package, groupname=group).first()
if record is None:
if not add:
return
record = GroupPermission(package, group)
self.db.add(record)
if perm == 'read':
record.read = add
elif perm == 'write':
record.write = add
else:
raise ValueError("Unrecognized permission '%s'" % perm)
if not record.read and not record.write:
self.db.delete(record)
| {
"repo_name": "rubikloud/pypicloud",
"path": "pypicloud/access/sql.py",
"copies": "1",
"size": "11461",
"license": "mit",
"hash": 8204297972364384000,
"line_mean": 32.808259587,
"line_max": 86,
"alpha_frac": 0.5838059506,
"autogenerated": false,
"ratio": 4.226032448377581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5309838398977581,
"avg_score": null,
"num_lines": null
} |
""" Access backend for storing permissions in using SQLAlchemy """
from sqlalchemy import (engine_from_config, Column, Text, Boolean, Table,
ForeignKey)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, backref
from sqlalchemy import orm
# pylint: disable=F0401,E0611
from zope.sqlalchemy import ZopeTransactionExtension
# pylint: enable=F0401,E0611
from .base import IMutableAccessBackend
# pylint: disable=C0103,W0231
# Declarative base shared by all access-control tables in this module.
Base = declarative_base()
# Many-to-many link between users and groups.
# NOTE(review): Text() primary-key columns carry no length; some backends
# (e.g. MySQL) require a sized String for indexed/key columns -- confirm
# the target database before deploying.
association_table = Table(
    'pypicloud_user_groups', Base.metadata,
    Column('username', Text(), ForeignKey('pypicloud_users.username'), primary_key=True),
    Column('group', Text(), ForeignKey('pypicloud_groups.name'), primary_key=True)
)
# pylint: enable=C0103
class KeyVal(Base):
    """ Simple model for storing key-value pairs """
    __tablename__ = 'pypicloud_keyvals'
    key = Column(Text(), primary_key=True)
    value = Column(Text())
    def __init__(self, key, value):
        """Store ``value`` under ``key``."""
        self.key = key
        self.value = value
class User(Base):
    """ User record """
    __tablename__ = 'pypicloud_users'
    username = Column(Text(), primary_key=True)
    # Stores a password hash (see _get_password_hash/_set_password_hash)
    password = Column('password', Text(), nullable=False)
    admin = Column(Boolean(), nullable=False)
    # True until approved via SQLAccessBackend.approve_user()
    pending = Column(Boolean(), nullable=False)
    groups = orm.relationship('Group', secondary=association_table,
                              cascade='all', collection_class=set,
                              backref=backref('users', collection_class=set))
    def __init__(self, username, password, pending=True):
        """Create a user; new users are non-admin and pending by default."""
        self.username = username
        self.password = password
        self.groups = set()
        self.permissions = []
        self.admin = False
        self.pending = pending
class Group(Base):
    """ Group record """
    __tablename__ = 'pypicloud_groups'
    name = Column(Text(), primary_key=True)
    def __init__(self, name):
        """Create an empty group named ``name``."""
        self.name = name
        self.users = set()
        self.permissions = []
class Permission(Base):
    """ Base class for user and group permissions """
    # Abstract: subclasses supply the second primary-key column
    __abstract__ = True
    package = Column(Text(), primary_key=True)
    read = Column(Boolean())
    write = Column(Boolean())
    def __init__(self, package, read, write):
        """Record read/write flags for ``package``."""
        self.package = package
        self.read = read
        self.write = write
    @property
    def permissions(self):
        """ Construct permissions list """
        perms = []
        if self.read:
            perms.append('read')
        if self.write:
            perms.append('write')
        return perms
# pylint: disable=E1002
class UserPermission(Permission):
    """ Permissions for a user on a package """
    __tablename__ = 'pypicloud_user_permissions'
    # Composite primary key together with the inherited 'package'
    username = Column(Text(), ForeignKey(User.username), primary_key=True)
    user = orm.relationship("User",
                            backref=backref('permissions',
                                            cascade='all, delete-orphan'))
    def __init__(self, package, username, read=False, write=False):
        """Create a permission row for ``username`` on ``package``."""
        super(UserPermission, self).__init__(package, read, write)
        self.username = username
class GroupPermission(Permission):
    """ Permissions for a group on a package """
    __tablename__ = 'pypicloud_group_permissions'
    # Composite primary key together with the inherited 'package'
    groupname = Column(Text(), ForeignKey(Group.name), primary_key=True)
    group = orm.relationship("Group",
                             backref=backref('permissions',
                                             cascade='all, delete-orphan'))
    def __init__(self, package, groupname, read=False, write=False):
        """Create a permission row for ``groupname`` on ``package``."""
        super(GroupPermission, self).__init__(package, read, write)
        self.groupname = groupname
class SQLAccessBackend(IMutableAccessBackend):
    """
    This backend allows you to store all user and package permissions in a SQL
    database
    """
    def __init__(self, request=None, dbmaker=None, **kwargs):
        """Open a session from ``dbmaker``; close it when the request ends.

        FIX: guard against ``request`` being None (its documented default),
        which previously raised AttributeError on add_finished_callback.
        """
        super(SQLAccessBackend, self).__init__(request, **kwargs)
        self.db = dbmaker()
        def cleanup(_):
            """ Close the session after the request """
            self.db.close()
        if request is not None:
            request.add_finished_callback(cleanup)
    @classmethod
    def configure(cls, settings):
        """Build constructor kwargs from settings and create the schema."""
        kwargs = super(SQLAccessBackend, cls).configure(settings)
        engine = engine_from_config(settings, prefix='auth.db.')
        kwargs['dbmaker'] = sessionmaker(
            bind=engine, extension=ZopeTransactionExtension())
        # Create SQL schema if not exists
        Base.metadata.create_all(bind=engine)
        return kwargs
    def allow_register(self):
        """Registration is allowed while no 'allow_register' key is stored."""
        ret = self.db.query(KeyVal).filter_by(key='allow_register').first()
        return ret is None
    def set_allow_register(self, allow):
        """Toggle registration by deleting/upserting the marker key."""
        if allow:
            self.db.query(KeyVal).filter_by(key='allow_register').delete()
        else:
            # FIX: merge() is idempotent; a plain add() violated the
            # primary key when registration was disabled twice in a row.
            k = KeyVal('allow_register', 'false')
            self.db.merge(k)
    def _get_password_hash(self, username):
        """Return the stored password hash, or None for an unknown user."""
        user = self.db.query(User).filter_by(username=username).first()
        if user:
            return user.password
    def groups(self, username=None):
        """List all group names, or the groups ``username`` belongs to."""
        if username is None:
            query = self.db.query(Group)
            return [g.name for g in query]
        else:
            user = self.db.query(User).filter_by(username=username).first()
            if user is None:
                return []
            return [g.name for g in user.groups]
    def group_members(self, group):
        """List usernames in ``group`` (empty list if group is unknown)."""
        g = self.db.query(Group).filter_by(name=group).first()
        if not g:
            return []
        return [u.username for u in g.users]
    def is_admin(self, username):
        """True if the user exists and has the admin flag set."""
        user = self.db.query(User).filter_by(username=username).first()
        return user and user.admin
    def group_permissions(self, package, group=None):
        """Map group names to permission lists for ``package``, or one group's list."""
        if group is None:
            query = self.db.query(GroupPermission).filter_by(package=package)
            perms = {}
            for perm in query:
                perms[perm.groupname] = perm.permissions
            return perms
        else:
            perm = self.db.query(GroupPermission)\
                .filter_by(package=package, groupname=group).first()
            if perm:
                return perm.permissions
            return []
    def user_permissions(self, package, username=None):
        """Map usernames to permission lists for ``package``, or one user's list."""
        if username is None:
            query = self.db.query(UserPermission).filter_by(package=package)
            perms = {}
            for perm in query:
                perms[perm.username] = perm.permissions
            return perms
        else:
            perm = self.db.query(UserPermission)\
                .filter_by(package=package, username=username).first()
            if perm:
                return perm.permissions
            return []
    def user_package_permissions(self, username):
        """List {package, permissions} dicts for every package the user can access."""
        query = self.db.query(UserPermission).filter_by(username=username)
        packages = []
        for perm in query:
            packages.append({
                'package': perm.package,
                'permissions': perm.permissions,
            })
        return packages
    def group_package_permissions(self, group):
        """List {package, permissions} dicts for every package the group can access."""
        query = self.db.query(GroupPermission).filter_by(groupname=group)
        packages = []
        for perm in query:
            packages.append({
                'package': perm.package,
                'permissions': perm.permissions,
            })
        return packages
    def user_data(self, username=None):
        """Summarize all non-pending users, or one user (with group names)."""
        if username is None:
            query = self.db.query(User).filter_by(pending=False)
            users = []
            for user in query:
                users.append({
                    'username': user.username,
                    'admin': user.admin,
                })
            return users
        else:
            user = self.db.query(User).filter_by(username=username,
                                                 pending=False).first()
            if user is not None:
                return {
                    'username': user.username,
                    'admin': user.admin,
                    'groups': [g.name for g in user.groups],
                }
    def need_admin(self):
        """True when no admin user exists yet."""
        return self.db.query(User).filter_by(admin=True).first() is None
    def _register(self, username, password):
        """Insert a new (pending) user with an already-hashed password."""
        user = User(username, password)
        self.db.add(user)
    def pending_users(self):
        """List usernames awaiting approval."""
        query = self.db.query(User).filter_by(pending=True)
        return [u.username for u in query]
    def approve_user(self, username):
        """Clear the pending flag on a user, if present."""
        user = self.db.query(User).filter_by(username=username).first()
        if user is not None:
            user.pending = False
    def _set_password_hash(self, username, password_hash):
        """Replace the stored password hash for ``username``."""
        user = self.db.query(User).filter_by(username=username).first()
        if user is not None:
            user.password = password_hash
    def delete_user(self, username):
        """Remove a user and their group-membership rows."""
        self.db.query(User).filter_by(username=username).delete()
        clause = association_table.c.username == username
        self.db.execute(association_table.delete(clause))
    def set_user_admin(self, username, admin):
        """Set or clear the admin flag on a user, if present."""
        user = self.db.query(User).filter_by(username=username).first()
        if user is not None:
            user.admin = admin
    def edit_user_group(self, username, groupname, add):
        """Add (or remove, when ``add`` is falsy) a user to/from a group."""
        user = self.db.query(User).filter_by(username=username).first()
        group = self.db.query(Group).filter_by(name=groupname).first()
        if user is not None and group is not None:
            if add:
                user.groups.add(group)
            else:
                user.groups.remove(group)
    def create_group(self, group):
        """Insert a new, empty group."""
        self.db.add(Group(group))
    def delete_group(self, group):
        """Remove a group and its membership rows."""
        self.db.query(Group).filter_by(name=group).delete()
        clause = association_table.c.group == group
        self.db.execute(association_table.delete(clause))
    def edit_user_permission(self, package, username, perm, add):
        """Grant/revoke 'read' or 'write' on ``package`` for a user.

        Rows with neither flag set are deleted rather than kept empty.
        """
        record = self.db.query(UserPermission)\
            .filter_by(package=package, username=username).first()
        if record is None:
            if not add:
                return
            record = UserPermission(package, username)
            self.db.add(record)
        if perm == 'read':
            record.read = add
        elif perm == 'write':
            record.write = add
        else:
            raise ValueError("Unrecognized permission '%s'" % perm)
        if not record.read and not record.write:
            self.db.delete(record)
    def edit_group_permission(self, package, group, perm, add):
        """Grant/revoke 'read' or 'write' on ``package`` for a group.

        Rows with neither flag set are deleted rather than kept empty.
        """
        record = self.db.query(GroupPermission)\
            .filter_by(package=package, groupname=group).first()
        if record is None:
            if not add:
                return
            record = GroupPermission(package, group)
            self.db.add(record)
        if perm == 'read':
            record.read = add
        elif perm == 'write':
            record.write = add
        else:
            raise ValueError("Unrecognized permission '%s'" % perm)
        if not record.read and not record.write:
            self.db.delete(record)
| {
"repo_name": "johnswanson/pypicloud",
"path": "pypicloud/access/sql.py",
"copies": "2",
"size": "11261",
"license": "mit",
"hash": 1601103574331413200,
"line_mean": 32.6149253731,
"line_max": 89,
"alpha_frac": 0.5822751088,
"autogenerated": false,
"ratio": 4.236644093303235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5818919202103235,
"avg_score": null,
"num_lines": null
} |
""" Access backend for storing permissions in using SQLAlchemy """
import zope.sqlalchemy
from sqlalchemy import (
Boolean,
Column,
ForeignKey,
String,
Table,
Text,
engine_from_config,
orm,
)
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, sessionmaker
from .base import IMutableAccessBackend
# pylint: disable=C0103,W0231
# Declarative base shared by all access-control tables in this module.
Base = declarative_base()
# Many-to-many link between users and groups; ON DELETE CASCADE keeps the
# link table consistent when a user or group row is removed.
association_table = Table(
    "pypicloud_user_groups",
    Base.metadata,
    Column(
        "username",
        String(length=255),
        ForeignKey("pypicloud_users.username", ondelete="CASCADE"),
        primary_key=True,
    ),
    Column(
        "group",
        String(length=255),
        ForeignKey("pypicloud_groups.name", ondelete="CASCADE"),
        primary_key=True,
    ),
)
# pylint: enable=C0103
class KeyVal(Base):
    """Simple model for storing key-value pairs"""
    __tablename__ = "pypicloud_keyvals"
    key = Column(String(length=255), primary_key=True)
    value = Column(Text())
    def __init__(self, key, value):
        """Store ``value`` under ``key``."""
        self.key = key
        self.value = value
class User(Base):
    """User record"""
    __tablename__ = "pypicloud_users"
    username = Column(String(length=255), primary_key=True)
    # Stores a password hash (see _get_password_hash/_set_password_hash)
    password = Column("password", Text(), nullable=False)
    admin = Column(Boolean(), nullable=False)
    # True until approved via SQLAccessBackend.approve_user()
    pending = Column(Boolean(), nullable=False)
    groups = orm.relationship(
        "Group",
        secondary=association_table,
        cascade="all",
        collection_class=set,
        backref=backref("users", collection_class=set),
    )
    def __init__(self, username, password, pending=True):
        """Create a user; new users are non-admin and pending by default."""
        self.username = username
        self.password = password
        self.groups = set()
        self.permissions = []
        self.admin = False
        self.pending = pending
class Group(Base):
    """Group record"""
    __tablename__ = "pypicloud_groups"
    name = Column(String(length=255), primary_key=True)
    def __init__(self, name):
        """Create an empty group named ``name``."""
        self.name = name
        self.users = set()
        self.permissions = []
class Permission(Base):
    """Base class for user and group permissions"""
    # Abstract: subclasses supply the second primary-key column
    __abstract__ = True
    package = Column(String(length=255), primary_key=True)
    read = Column(Boolean())
    write = Column(Boolean())
    def __init__(self, package, read, write):
        """Record read/write flags for ``package``."""
        self.package = package
        self.read = read
        self.write = write
    @property
    def permissions(self):
        """Construct permissions list"""
        perms = []
        if self.read:
            perms.append("read")
        if self.write:
            perms.append("write")
        return perms
class UserPermission(Permission):
    """Permissions for a user on a package"""
    __tablename__ = "pypicloud_user_permissions"
    # Composite primary key together with the inherited 'package'
    username = Column(
        String(length=255),
        ForeignKey(User.username, ondelete="CASCADE"),
        primary_key=True,
    )
    user = orm.relationship(
        "User", backref=backref("permissions", cascade="all, delete-orphan")
    )
    def __init__(self, package, username, read=False, write=False):
        """Create a permission row for ``username`` on ``package``."""
        super(UserPermission, self).__init__(package, read, write)
        self.username = username
class GroupPermission(Permission):
    """Permissions for a group on a package"""
    __tablename__ = "pypicloud_group_permissions"
    # Composite primary key together with the inherited 'package'
    groupname = Column(
        String(length=255), ForeignKey(Group.name, ondelete="CASCADE"), primary_key=True
    )
    group = orm.relationship(
        "Group", backref=backref("permissions", cascade="all, delete-orphan")
    )
    def __init__(self, package, groupname, read=False, write=False):
        """Create a permission row for ``groupname`` on ``package``."""
        super(GroupPermission, self).__init__(package, read, write)
        self.groupname = groupname
class SQLAccessBackend(IMutableAccessBackend):
    """
    This backend allows you to store all user and package permissions in a SQL
    database
    """
    def __init__(self, request=None, dbmaker=None, **kwargs):
        """Store the session factory; the session itself is created lazily."""
        super(SQLAccessBackend, self).__init__(request, **kwargs)
        self._db = None
        self._dbmaker = dbmaker
    @property
    def db(self):
        """Lazy-create the DB session"""
        if self._db is None:
            self._db = self._dbmaker()
            if self.request is not None:
                # Tie the session to the request's transaction manager
                zope.sqlalchemy.register(self._db, transaction_manager=self.request.tm)
        return self._db
    @classmethod
    def configure(cls, settings):
        """Build constructor kwargs from settings and create the schema."""
        kwargs = super(SQLAccessBackend, cls).configure(settings)
        engine = engine_from_config(settings, prefix="auth.db.")
        kwargs["dbmaker"] = sessionmaker(bind=engine)
        # Create SQL schema if not exists
        Base.metadata.create_all(bind=engine)
        return kwargs
    @classmethod
    def postfork(cls, **kwargs):
        """Reset pooled connections after a process fork."""
        # Have to dispose of connections after uWSGI forks,
        # otherwise they'll get corrupted.
        kwargs["dbmaker"].kw["bind"].dispose()
    def allow_register(self):
        """Registration is allowed only if the stored value is "true"."""
        ret = self.db.query(KeyVal).filter_by(key="allow_register").first()
        return ret is not None and ret.value == "true"
    def set_allow_register(self, allow):
        """Toggle registration; merge() makes repeated enables idempotent."""
        if allow:
            k = KeyVal("allow_register", "true")
            self.db.merge(k)
        else:
            self.db.query(KeyVal).filter_by(key="allow_register").delete()
    def _get_password_hash(self, username):
        """Return the stored password hash, or None for an unknown user."""
        user = self.db.query(User).filter_by(username=username).first()
        if user:
            return user.password
    def groups(self, username=None):
        """List all group names, or the groups ``username`` belongs to."""
        if username is None:
            query = self.db.query(Group)
            return [g.name for g in query]
        else:
            user = self.db.query(User).filter_by(username=username).first()
            if user is None:
                return []
            return [g.name for g in user.groups]
    def group_members(self, group):
        """List usernames in ``group`` (empty list if group is unknown)."""
        g = self.db.query(Group).filter_by(name=group).first()
        if not g:
            return []
        return [u.username for u in g.users]
    def is_admin(self, username):
        """True if the user exists and has the admin flag set."""
        user = self.db.query(User).filter_by(username=username).first()
        return user and user.admin
    def group_permissions(self, package):
        """Map group names to permission lists for ``package``."""
        query = self.db.query(GroupPermission).filter_by(package=package)
        perms = {}
        for perm in query:
            perms[perm.groupname] = perm.permissions
        return perms
    def user_permissions(self, package):
        """Map usernames to permission lists for ``package``."""
        query = self.db.query(UserPermission).filter_by(package=package)
        perms = {}
        for perm in query:
            perms[perm.username] = perm.permissions
        return perms
    def user_package_permissions(self, username):
        """List {package, permissions} dicts for every package the user can access."""
        query = self.db.query(UserPermission).filter_by(username=username)
        packages = []
        for perm in query:
            packages.append({"package": perm.package, "permissions": perm.permissions})
        return packages
    def group_package_permissions(self, group):
        """List {package, permissions} dicts for every package the group can access."""
        query = self.db.query(GroupPermission).filter_by(groupname=group)
        packages = []
        for perm in query:
            packages.append({"package": perm.package, "permissions": perm.permissions})
        return packages
    def user_data(self, username=None):
        """Summarize all non-pending users, or one user (with group names)."""
        if username is None:
            query = self.db.query(User).filter_by(pending=False)
            users = []
            for user in query:
                users.append({"username": user.username, "admin": user.admin})
            return users
        else:
            user = (
                self.db.query(User).filter_by(username=username, pending=False).first()
            )
            if user is not None:
                return {
                    "username": user.username,
                    "admin": user.admin,
                    "groups": [g.name for g in user.groups],
                }
    def need_admin(self):
        """True when no admin user exists yet."""
        return self.db.query(User).filter_by(admin=True).first() is None
    def _register(self, username, password):
        """Insert a new (pending) user with an already-hashed password."""
        user = User(username, password)
        self.db.add(user)
    def pending_users(self):
        """List usernames awaiting approval."""
        query = self.db.query(User).filter_by(pending=True)
        return [u.username for u in query]
    def approve_user(self, username):
        """Clear the pending flag on a user, if present."""
        user = self.db.query(User).filter_by(username=username).first()
        if user is not None:
            user.pending = False
    def _set_password_hash(self, username, password_hash):
        """Replace the stored password hash for ``username``."""
        user = self.db.query(User).filter_by(username=username).first()
        if user is not None:
            user.password = password_hash
    def delete_user(self, username):
        """Remove a user and their group-membership rows."""
        self.db.query(User).filter_by(username=username).delete()
        clause = association_table.c.username == username
        self.db.execute(association_table.delete(clause))
    def set_user_admin(self, username, admin):
        """Set or clear the admin flag on a user, if present."""
        user = self.db.query(User).filter_by(username=username).first()
        if user is not None:
            user.admin = admin
    def edit_user_group(self, username, groupname, add):
        """Add (or remove, when ``add`` is falsy) a user to/from a group."""
        user = self.db.query(User).filter_by(username=username).first()
        group = self.db.query(Group).filter_by(name=groupname).first()
        if user is not None and group is not None:
            if add:
                user.groups.add(group)
            else:
                user.groups.remove(group)
    def create_group(self, group):
        """Insert a new, empty group."""
        self.db.add(Group(group))
    def delete_group(self, group):
        """Remove a group and its membership rows."""
        self.db.query(Group).filter_by(name=group).delete()
        clause = association_table.c.group == group
        self.db.execute(association_table.delete(clause))
    def edit_user_permission(self, package, username, perm, add):
        """Grant/revoke 'read' or 'write' on ``package`` for a user.

        Rows with neither flag set are deleted rather than kept empty.
        """
        record = (
            self.db.query(UserPermission)
            .filter_by(package=package, username=username)
            .first()
        )
        if record is None:
            if not add:
                return
            record = UserPermission(package, username)
            self.db.add(record)
        if perm == "read":
            record.read = add
        elif perm == "write":
            record.write = add
        else:
            raise ValueError("Unrecognized permission '%s'" % perm)
        if not record.read and not record.write:
            self.db.delete(record)
    def edit_group_permission(self, package, group, perm, add):
        """Grant/revoke 'read' or 'write' on ``package`` for a group.

        Rows with neither flag set are deleted rather than kept empty.
        """
        record = (
            self.db.query(GroupPermission)
            .filter_by(package=package, groupname=group)
            .first()
        )
        if record is None:
            if not add:
                return
            record = GroupPermission(package, group)
            self.db.add(record)
        if perm == "read":
            record.read = add
        elif perm == "write":
            record.write = add
        else:
            raise ValueError("Unrecognized permission '%s'" % perm)
        if not record.read and not record.write:
            self.db.delete(record)
    def check_health(self):
        """Return (healthy, message) by issuing a trivial query."""
        try:
            self.db.query(KeyVal).first()
        except SQLAlchemyError as e:
            return (False, str(e))
        else:
            return (True, "")
| {
"repo_name": "stevearc/pypicloud",
"path": "pypicloud/access/sql.py",
"copies": "1",
"size": "11243",
"license": "mit",
"hash": 3451530162464525300,
"line_mean": 29.5516304348,
"line_max": 88,
"alpha_frac": 0.5934359157,
"autogenerated": false,
"ratio": 4.08687749909124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.518031341479124,
"avg_score": null,
"num_lines": null
} |
"""
Support for the Microsoft Access database.
.. note::
The Access dialect is **non-functional as of SQLAlchemy 0.6**,
pending development efforts to bring it up-to-date.
"""
from sqlalchemy import sql, schema, types, exc, pool
from sqlalchemy.sql import compiler, expression
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import processors
class AcNumeric(types.Numeric):
    """Access NUMERIC type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "NUMERIC"
    def bind_processor(self, dialect):
        # Bind values as strings so they round-trip through the driver.
        return processors.to_str
    def result_processor(self, dialect, coltype):
        # No conversion on the way out.
        return None
class AcFloat(types.Float):
    """Access FLOAT type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "FLOAT"
    def bind_processor(self, dialect):
        """By converting to string, we can use Decimal types round-trip."""
        return processors.to_str
class AcInteger(types.Integer):
    """Access INTEGER type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "INTEGER"
class AcTinyInteger(types.Integer):
    """Access TINYINT type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "TINYINT"
class AcSmallInteger(types.SmallInteger):
    """Access SMALLINT type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "SMALLINT"
class AcDateTime(types.DateTime):
    """Access DATETIME type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "DATETIME"
class AcDate(types.Date):
    """Access date type; Access has no date-only column, so DATETIME is used."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "DATETIME"
class AcText(types.Text):
    """Access MEMO (long text) type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "MEMO"
class AcString(types.String):
    """Access TEXT type with optional length."""
    def get_col_spec(self):
        # Append "(n)" only when a length was given.
        return "TEXT" + (self.length and ("(%d)" % self.length) or "")
class AcUnicode(types.Unicode):
    """Access TEXT type for unicode; no bind/result conversion needed."""
    def get_col_spec(self):
        # Append "(n)" only when a length was given.
        return "TEXT" + (self.length and ("(%d)" % self.length) or "")
    def bind_processor(self, dialect):
        # Driver handles unicode natively.
        return None
    def result_processor(self, dialect, coltype):
        # Driver handles unicode natively.
        return None
class AcChar(types.CHAR):
    """Access TEXT type used for CHAR columns."""
    def get_col_spec(self):
        # Append "(n)" only when a length was given.
        return "TEXT" + (self.length and ("(%d)" % self.length) or "")
class AcBinary(types.LargeBinary):
    """Access BINARY type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "BINARY"
class AcBoolean(types.Boolean):
    """Access YESNO (boolean) type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "YESNO"
class AcTimeStamp(types.TIMESTAMP):
    """Access TIMESTAMP type."""
    def get_col_spec(self):
        """Return the DDL type name."""
        return "TIMESTAMP"
class AccessExecutionContext(default.DefaultExecutionContext):
    """Execution context that fetches auto-increment IDs via @@identity."""
    def _has_implicit_sequence(self, column):
        """True if ``column`` behaves as an implicit COUNTER (auto-increment)."""
        if column.primary_key and column.autoincrement:
            if isinstance(column.type, types.Integer) and \
                    not column.foreign_keys:
                if column.default is None or \
                        (isinstance(column.default, schema.Sequence) and \
                        column.default.optional):
                    return True
        return False
    def post_exec(self):
        """If we inserted into a row with a COUNTER column, fetch the ID"""
        if self.compiled.isinsert:
            tbl = self.compiled.statement.table
            # Cache the sequence column on the Table the first time through
            if not hasattr(tbl, 'has_sequence'):
                tbl.has_sequence = None
                for column in tbl.c:
                    if getattr(column, 'sequence', False) or \
                            self._has_implicit_sequence(column):
                        tbl.has_sequence = column
                        break
            if bool(tbl.has_sequence):
                # TBD: for some reason _last_inserted_ids doesn't exist here
                # (but it does at corresponding point in mssql???)
                #if not len(self._last_inserted_ids) or
                #       self._last_inserted_ids[0] is None:
                self.cursor.execute("SELECT @@identity AS lastrowid")
                row = self.cursor.fetchone()
                self._last_inserted_ids = [int(row[0])]
                #+ self._last_inserted_ids[1:]
                # print "LAST ROW ID", self._last_inserted_ids
        super(AccessExecutionContext, self).post_exec()
const, daoEngine = None, None
class AccessDialect(default.DefaultDialect):
    """SQLAlchemy dialect for MS Access via pyodbc, using DAO for reflection."""
    colspecs = {
        types.Unicode : AcUnicode,
        types.Integer : AcInteger,
        types.SmallInteger: AcSmallInteger,
        types.Numeric : AcNumeric,
        types.Float : AcFloat,
        types.DateTime : AcDateTime,
        types.Date : AcDate,
        types.String : AcString,
        types.LargeBinary : AcBinary,
        types.Boolean : AcBoolean,
        types.Text : AcText,
        types.CHAR: AcChar,
        types.TIMESTAMP: AcTimeStamp,
    }
    name = 'access'
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    ported_sqla_06 = False
    def type_descriptor(self, typeobj):
        """Adapt a generic type to its Access-specific implementation."""
        newobj = types.adapt_type(typeobj, self.colspecs)
        return newobj
    def __init__(self, **params):
        super(AccessDialect, self).__init__(**params)
        self.text_as_varchar = False
        self._dtbs = None
    @classmethod
    def dbapi(cls):
        """Locate a DAO engine COM object, then return pyodbc as the DBAPI."""
        import win32com.client, pythoncom
        global const, daoEngine
        if const is None:
            const = win32com.client.constants
            for suffix in (".36", ".35", ".30"):
                try:
                    daoEngine = win32com.client.\
                                gencache.\
                                EnsureDispatch("DAO.DBEngine" + suffix)
                    break
                except pythoncom.com_error:
                    pass
            else:
                raise exc.InvalidRequestError(
                    "Can't find a DB engine. Check "
                    "http://support.microsoft.com/kb/239114 for details.")
        import pyodbc as module
        return module
    def create_connect_args(self, url):
        """Build the ODBC connection string from the SQLAlchemy URL."""
        opts = url.translate_connect_args()
        connectors = ["Driver={Microsoft Access Driver (*.mdb)}"]
        connectors.append("Dbq=%s" % opts["database"])
        user = opts.get("username", None)
        if user:
            connectors.append("UID=%s" % user)
            connectors.append("PWD=%s" % opts.get("password", ""))
        return [[";".join(connectors)], {}]
    def last_inserted_ids(self):
        return self.context.last_inserted_ids
    def do_execute(self, cursor, statement, params, context=None):
        """Execute a statement, normalizing empty param dicts to tuples."""
        if params == {}:
            params = ()
        # BUG FIX: previously forwarded an undefined **kwargs name, which
        # raised NameError on every call; pass the context through instead.
        super(AccessDialect, self).\
            do_execute(cursor, statement, params, context=context)
    def _execute(self, c, statement, parameters):
        """Execute and record rowcount, wrapping driver errors as DBAPIError."""
        try:
            if parameters == {}:
                parameters = ()
            c.execute(statement, parameters)
            self.context.rowcount = c.rowcount
        # FIX: 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+
        except Exception as e:
            raise exc.DBAPIError.instance(statement, parameters, e)
    def has_table(self, connection, tablename, schema=None):
        """Probe for table existence with a throwaway SELECT."""
        # This approach seems to be more reliable that using DAO
        try:
            connection.execute('select top 1 * from [%s]' % tablename)
            return True
        except Exception:
            return False
    def reflecttable(self, connection, table, include_columns):
        """Reflect columns, keys and indexes of ``table`` through DAO."""
        # This is defined in the function, as it relies on win32com constants,
        # that aren't imported until dbapi method is called
        if not hasattr(self, 'ischema_names'):
            self.ischema_names = {
                const.dbByte: AcBinary,
                const.dbInteger: AcInteger,
                const.dbLong: AcInteger,
                const.dbSingle: AcFloat,
                const.dbDouble: AcFloat,
                const.dbDate: AcDateTime,
                const.dbLongBinary: AcBinary,
                const.dbMemo: AcText,
                const.dbBoolean: AcBoolean,
                const.dbText: AcUnicode,  # All Access strings are unicode
                const.dbCurrency: AcNumeric,
            }
        # A fresh DAO connection is opened for each reflection
        # This is necessary, so we get the latest updates
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
        try:
            for tbl in dtbs.TableDefs:
                if tbl.Name.lower() == table.name.lower():
                    break
            else:
                raise exc.NoSuchTableError(table.name)
            for col in tbl.Fields:
                coltype = self.ischema_names[col.Type]
                if col.Type == const.dbText:
                    coltype = coltype(col.Size)
                colargs = {
                    'nullable': not(col.Required or
                                    col.Attributes & const.dbAutoIncrField),
                }
                default = col.DefaultValue
                if col.Attributes & const.dbAutoIncrField:
                    colargs['default'] = schema.Sequence(col.Name + '_seq')
                elif default:
                    if col.Type == const.dbBoolean:
                        default = default == 'Yes' and '1' or '0'
                    colargs['server_default'] = \
                        schema.DefaultClause(sql.text(default))
                table.append_column(
                    schema.Column(col.Name, coltype, **colargs))
            # TBD: check constraints
            # Find primary key columns first
            for idx in tbl.Indexes:
                if idx.Primary:
                    for col in idx.Fields:
                        thecol = table.c[col.Name]
                        table.primary_key.add(thecol)
                        if isinstance(thecol.type, AcInteger) and \
                                not (thecol.default and
                                     isinstance(
                                         thecol.default.arg,
                                         schema.Sequence
                                     )):
                            thecol.autoincrement = False
            # Then add other indexes
            for idx in tbl.Indexes:
                if not idx.Primary:
                    if len(idx.Fields) == 1:
                        col = table.c[idx.Fields[0].Name]
                        if not col.primary_key:
                            col.index = True
                            col.unique = idx.Unique
                    else:
                        pass  # TBD: multi-column indexes
            for fk in dtbs.Relations:
                if fk.ForeignTable != table.name:
                    continue
                scols = [c.ForeignName for c in fk.Fields]
                rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields]
                table.append_constraint(
                    schema.ForeignKeyConstraint(scols, rcols,
                                                link_to_name=True))
        finally:
            dtbs.Close()
    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """List user tables, skipping system (MSys) and temp (~TMP) tables."""
        # A fresh DAO connection is opened for each reflection
        # This is necessary, so we get the latest updates
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
        names = [t.Name for t in dtbs.TableDefs
                 if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"]
        dtbs.Close()
        return names
class AccessCompiler(compiler.SQLCompiler):
    """Statement compiler emitting MS Access's SQL variant."""
    # Access's DatePart() field codes for EXTRACT-style expressions.
    extract_map = compiler.SQLCompiler.extract_map.copy()
    extract_map.update({
        'month': 'm', 'day': 'd', 'year': 'yyyy', 'second': 's',
        'hour': 'h', 'doy': 'y', 'minute': 'n', 'quarter': 'q',
        'dow': 'w', 'week': 'ww',
    })
    def visit_select_precolumns(self, select):
        """Access puts TOP, it's version of LIMIT here """
        if select.offset:
            raise exc.InvalidRequestError(
                'Access does not support LIMIT with an offset')
        prefix = "DISTINCT " if select.distinct else ""
        if select.limit:
            prefix += "TOP %s " % (select.limit)
        return prefix
    def limit_clause(self, select):
        """Limit in access is after the select keyword"""
        return ""
    def binary_operator_string(self, binary):
        """Access uses "mod" instead of "%" """
        return 'mod' if binary.operator == '%' else binary.operator
    def label_select_column(self, select, column, asfrom):
        # Function expressions must carry an explicit label in Access.
        if not isinstance(column, expression.Function):
            return super(AccessCompiler, self).label_select_column(
                select, column, asfrom)
        return column.label()
    function_rewrites = {'current_date': 'now',
                         'current_timestamp': 'now',
                         'length': 'len',
                         }
    def visit_function(self, func):
        """Access function names differ from the ANSI SQL names;
        rewrite common ones"""
        func.name = self.function_rewrites.get(func.name, func.name)
        return super(AccessCompiler, self).visit_function(func)
    def for_update_clause(self, select):
        """FOR UPDATE is not supported by Access; silently ignore"""
        return ''
    def visit_table(self, table, asfrom=False, **kwargs):
        # Access has no schema qualifiers; emit the bare quoted name
        # only in FROM position.
        if not asfrom:
            return ""
        return self.preparer.quote(table.name, table.quote)
    def visit_join(self, join, asfrom=False, **kwargs):
        # Access only distinguishes LEFT OUTER vs INNER joins.
        keyword = " LEFT OUTER JOIN " if join.isouter else " INNER JOIN "
        return "".join([
            self.process(join.left, asfrom=True),
            keyword,
            self.process(join.right, asfrom=True),
            " ON ",
            self.process(join.onclause),
        ])
    def visit_extract(self, extract, **kw):
        # Translate EXTRACT(field FROM expr) into Access's DATEPART().
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % \
                (field, self.process(extract.expr, **kw))
class AccessDDLCompiler(compiler.DDLCompiler):
    """DDL compiler for Access: COUNTER identity columns, bracketed
    DROP INDEX."""

    def get_column_specification(self, column, **kwargs):
        formatted = self.preparer.format_column(column)
        colspec = "%s %s" % (
            formatted,
            column.type.dialect_impl(self.dialect).get_col_spec())

        # Attach a sequence when the column is an implicit IDENTITY:
        # an autoincrementing integer primary key without foreign keys,
        # whose default is absent or an optional Sequence.
        implicit_identity = (
            not getattr(column.table, 'has_sequence', False)
            and column.primary_key
            and column.autoincrement
            and isinstance(column.type, types.Integer)
            and not column.foreign_keys)
        if implicit_identity:
            if column.default is None or \
                    (isinstance(column.default, schema.Sequence) and
                     column.default.optional):
                column.sequence = schema.Sequence(column.name + '_seq')

        if not column.nullable:
            colspec += " NOT NULL"

        if hasattr(column, 'sequence'):
            # Sequenced columns render as Access "counter" columns; record
            # which column owns the table's identity.
            column.table.has_sequence = column
            colspec = formatted + " counter"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        return colspec

    def visit_drop_index(self, drop):
        index = drop.element
        self.append("\nDROP INDEX [%s].[%s]" % (
            index.table.name, self._index_identifier(index.name)))
class AccessIdentifierPreparer(compiler.IdentifierPreparer):
    """Quote identifiers with Access-style brackets: [name]."""

    # Access reserves these beyond the standard word list.
    reserved_words = compiler.RESERVED_WORDS.copy()
    reserved_words.update(['value', 'text'])

    def __init__(self, dialect):
        super(AccessIdentifierPreparer, self).__init__(
            dialect, initial_quote='[', final_quote=']')
# Wire the concrete Access implementation classes onto the dialect class.
# The SingletonThreadPool keeps one ODBC/DAO connection per thread.
# NOTE(review): newer SQLAlchemy reads ``ddl_compiler`` (underscored);
# ``ddlcompiler`` here matches this legacy pre-0.6 dialect style — confirm
# against the SQLAlchemy version actually targeted.
dialect = AccessDialect
dialect.poolclass = pool.SingletonThreadPool
dialect.statement_compiler = AccessCompiler
dialect.ddlcompiler = AccessDDLCompiler
dialect.preparer = AccessIdentifierPreparer
dialect.execution_ctx_cls = AccessExecutionContext
| {
"repo_name": "aurofable/medhack-server",
"path": "venv/lib/python2.7/site-packages/sqlalchemy/dialects/access/base.py",
"copies": "2",
"size": "16060",
"license": "mit",
"hash": 6579272445243638000,
"line_mean": 34.6097560976,
"line_max": 84,
"alpha_frac": 0.5509339975,
"autogenerated": false,
"ratio": 4.319526627218935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5870460624718936,
"avg_score": null,
"num_lines": null
} |
"""
Support for the Microsoft Access database.
This dialect is *not* ported to SQLAlchemy 0.6 or 0.7.
This dialect is *not* tested on SQLAlchemy 0.6 or 0.7.
"""
from sqlalchemy import sql, schema, types, exc, pool
from sqlalchemy.sql import compiler, expression
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import processors
class AcNumeric(types.Numeric):
    """NUMERIC column; values are bound as strings for exact round-trips."""

    def get_col_spec(self):
        return "NUMERIC"

    def bind_processor(self, dialect):
        return processors.to_str

    def result_processor(self, dialect, coltype):
        # No conversion on the way out.
        return None


class AcFloat(types.Float):
    """FLOAT column."""

    def get_col_spec(self):
        return "FLOAT"

    def bind_processor(self, dialect):
        """By converting to string, we can use Decimal types round-trip."""
        return processors.to_str


class AcInteger(types.Integer):
    """INTEGER column."""

    def get_col_spec(self):
        return "INTEGER"


class AcTinyInteger(types.Integer):
    """TINYINT column."""

    def get_col_spec(self):
        return "TINYINT"


class AcSmallInteger(types.SmallInteger):
    """SMALLINT column."""

    def get_col_spec(self):
        return "SMALLINT"


class AcDateTime(types.DateTime):
    """DATETIME column."""

    def get_col_spec(self):
        return "DATETIME"


class AcDate(types.Date):
    """Dates are stored in Access DATETIME columns."""

    def get_col_spec(self):
        return "DATETIME"


class AcText(types.Text):
    """Unbounded text maps to the Access MEMO type."""

    def get_col_spec(self):
        return "MEMO"


class AcString(types.String):
    """TEXT column, with an optional length as TEXT(n)."""

    def get_col_spec(self):
        if self.length:
            return "TEXT(%d)" % self.length
        return "TEXT"


class AcUnicode(types.Unicode):
    """TEXT column; Access text is natively unicode, so no processors."""

    def get_col_spec(self):
        if self.length:
            return "TEXT(%d)" % self.length
        return "TEXT"

    def bind_processor(self, dialect):
        return None

    def result_processor(self, dialect, coltype):
        return None


class AcChar(types.CHAR):
    """Fixed-width CHAR also renders as TEXT(n)."""

    def get_col_spec(self):
        if self.length:
            return "TEXT(%d)" % self.length
        return "TEXT"


class AcBinary(types.LargeBinary):
    """BINARY column."""

    def get_col_spec(self):
        return "BINARY"


class AcBoolean(types.Boolean):
    """Booleans map to the Access YESNO type."""

    def get_col_spec(self):
        return "YESNO"


class AcTimeStamp(types.TIMESTAMP):
    """TIMESTAMP column."""

    def get_col_spec(self):
        return "TIMESTAMP"
class AccessExecutionContext(default.DefaultExecutionContext):
    """Execution context that recovers COUNTER (identity) values after
    an INSERT."""

    def _has_implicit_sequence(self, column):
        # An implicit identity: autoincrementing integer primary key,
        # no foreign keys and, at most, an optional Sequence default.
        return bool(
            column.primary_key
            and column.autoincrement
            and isinstance(column.type, types.Integer)
            and not column.foreign_keys
            and (column.default is None
                 or (isinstance(column.default, schema.Sequence)
                     and column.default.optional)))

    def post_exec(self):
        """If we inserted into a row with a COUNTER column, fetch the ID"""
        if self.compiled.isinsert:
            tbl = self.compiled.statement.table
            if not hasattr(tbl, 'has_sequence'):
                # Cache the identity column (or None) on the table object.
                tbl.has_sequence = next(
                    (col for col in tbl.c
                     if getattr(col, 'sequence', False)
                     or self._has_implicit_sequence(col)),
                    None)

            if bool(tbl.has_sequence):
                # TBD: for some reason _last_inserted_ids doesn't exist here
                # (but it does at corresponding point in mssql???)
                self.cursor.execute("SELECT @@identity AS lastrowid")
                row = self.cursor.fetchone()
                self._last_inserted_ids = [int(row[0])]

        super(AccessExecutionContext, self).post_exec()
# Populated lazily by AccessDialect.dbapi(): win32com constants and the
# shared DAO COM engine object.
const, daoEngine = None, None
class AccessDialect(default.DefaultDialect):
    """SQLAlchemy dialect for Microsoft Access (Jet).

    Statement execution goes through the "Microsoft Access Driver" via
    pyodbc; schema reflection is performed through the DAO COM engine
    (win32com), which is dispatched lazily in :meth:`dbapi`.
    """

    colspecs = {
        types.Unicode: AcUnicode,
        types.Integer: AcInteger,
        types.SmallInteger: AcSmallInteger,
        types.Numeric: AcNumeric,
        types.Float: AcFloat,
        types.DateTime: AcDateTime,
        types.Date: AcDate,
        types.String: AcString,
        types.LargeBinary: AcBinary,
        types.Boolean: AcBoolean,
        types.Text: AcText,
        types.CHAR: AcChar,
        types.TIMESTAMP: AcTimeStamp,
    }
    name = 'access'
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    ported_sqla_06 = False

    def type_descriptor(self, typeobj):
        """Adapt a generic type to its Access-specific implementation."""
        return types.adapt_type(typeobj, self.colspecs)

    def __init__(self, **params):
        super(AccessDialect, self).__init__(**params)
        self.text_as_varchar = False
        self._dtbs = None

    @classmethod
    def dbapi(cls):
        """Return the pyodbc module, initialising the DAO engine first.

        The win32com constants and the DAO engine are cached in module
        globals so COM dispatch happens only once per process.

        :raises exc.InvalidRequestError: if no DAO engine is installed.
        """
        import win32com.client
        import pythoncom

        global const, daoEngine
        if const is None:
            const = win32com.client.constants
            # Try DAO 3.6, 3.5 then 3.0 (Access 2000 / 97 era engines).
            for suffix in (".36", ".35", ".30"):
                try:
                    daoEngine = win32com.client.\
                        gencache.\
                        EnsureDispatch("DAO.DBEngine" + suffix)
                    break
                except pythoncom.com_error:
                    pass
            else:
                raise exc.InvalidRequestError(
                    "Can't find a DB engine. Check "
                    "http://support.microsoft.com/kb/239114 for details.")

        import pyodbc as module
        return module

    def create_connect_args(self, url):
        """Build the ODBC connection string from the SQLAlchemy URL."""
        opts = url.translate_connect_args()
        connectors = ["Driver={Microsoft Access Driver (*.mdb)}"]
        connectors.append("Dbq=%s" % opts["database"])
        user = opts.get("username", None)
        if user:
            connectors.append("UID=%s" % user)
            connectors.append("PWD=%s" % opts.get("password", ""))
        return [[";".join(connectors)], {}]

    def last_inserted_ids(self):
        return self.context.last_inserted_ids

    def do_execute(self, cursor, statement, params, context=None):
        # pyodbc rejects a bare {} for statements without parameters.
        if params == {}:
            params = ()
        # BUG FIX: the original forwarded ``**kwargs`` which was never
        # defined in this scope and raised NameError on every call; pass
        # the context keyword through instead.
        super(AccessDialect, self).do_execute(
            cursor, statement, params, context=context)

    def _execute(self, c, statement, parameters):
        try:
            if parameters == {}:
                parameters = ()
            c.execute(statement, parameters)
            self.context.rowcount = c.rowcount
        except Exception as e:
            # Wrap any DBAPI failure in SQLAlchemy's standard error type.
            raise exc.DBAPIError.instance(statement, parameters, e)

    def has_table(self, connection, tablename, schema=None):
        # Probing with a SELECT seems to be more reliable than using DAO.
        try:
            connection.execute('select top 1 * from [%s]' % tablename)
            return True
        except Exception:
            return False

    def reflecttable(self, connection, table, include_columns):
        """Reflect columns, keys and indexes for *table* via DAO."""
        # The type map is defined here because it relies on win32com
        # constants that aren't available until dbapi() has been called.
        if not hasattr(self, 'ischema_names'):
            self.ischema_names = {
                const.dbByte: AcBinary,
                const.dbInteger: AcInteger,
                const.dbLong: AcInteger,
                const.dbSingle: AcFloat,
                const.dbDouble: AcFloat,
                const.dbDate: AcDateTime,
                const.dbLongBinary: AcBinary,
                const.dbMemo: AcText,
                const.dbBoolean: AcBoolean,
                const.dbText: AcUnicode,  # All Access strings are unicode
                const.dbCurrency: AcNumeric,
            }

        # A fresh DAO connection is opened for each reflection so we see
        # the latest updates.
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
        try:
            # DAO table names are matched case-insensitively.
            for tbl in dtbs.TableDefs:
                if tbl.Name.lower() == table.name.lower():
                    break
            else:
                raise exc.NoSuchTableError(table.name)

            for col in tbl.Fields:
                coltype = self.ischema_names[col.Type]
                if col.Type == const.dbText:
                    coltype = coltype(col.Size)

                colargs = {
                    'nullable': not (col.Required or
                                     col.Attributes & const.dbAutoIncrField),
                }
                default = col.DefaultValue

                if col.Attributes & const.dbAutoIncrField:
                    colargs['default'] = schema.Sequence(col.Name + '_seq')
                elif default:
                    if col.Type == const.dbBoolean:
                        # DAO reports boolean defaults as 'Yes'/'No'.
                        default = '1' if default == 'Yes' else '0'
                    colargs['server_default'] = \
                        schema.DefaultClause(sql.text(default))

                table.append_column(
                    schema.Column(col.Name, coltype, **colargs))

                # TBD: check constraints

            # Find primary key columns first.
            for idx in tbl.Indexes:
                if idx.Primary:
                    for col in idx.Fields:
                        thecol = table.c[col.Name]
                        table.primary_key.add(thecol)
                        # Integer PKs without a Sequence default are not
                        # autoincrementing in Access.
                        if isinstance(thecol.type, AcInteger) and \
                                not (thecol.default and
                                     isinstance(thecol.default.arg,
                                                schema.Sequence)):
                            thecol.autoincrement = False

            # Then add the remaining (single-column) indexes.
            for idx in tbl.Indexes:
                if not idx.Primary:
                    if len(idx.Fields) == 1:
                        col = table.c[idx.Fields[0].Name]
                        if not col.primary_key:
                            col.index = True
                            col.unique = idx.Unique
                    else:
                        pass  # TBD: multi-column indexes

            # Foreign keys come from the database-level Relations.
            for fk in dtbs.Relations:
                if fk.ForeignTable != table.name:
                    continue
                scols = [c.ForeignName for c in fk.Fields]
                rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields]
                table.append_constraint(
                    schema.ForeignKeyConstraint(scols, rcols,
                                                link_to_name=True))
        finally:
            dtbs.Close()

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        # A fresh DAO connection is opened so we see the latest updates;
        # system ("MSys") and temporary ("~TMP") tables are excluded.
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
        names = [t.Name for t in dtbs.TableDefs
                 if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"]
        dtbs.Close()
        return names
class AccessCompiler(compiler.SQLCompiler):
    """Statement compiler for the Access dialect.

    Translates ANSI-ish constructs into Jet/Access syntax: TOP instead
    of LIMIT, DATEPART date fields, the "mod" operator, and Access
    spellings of common function names.
    """

    # Access DATEPART() field codes, overlaid on the standard map.
    extract_map = compiler.SQLCompiler.extract_map.copy()
    extract_map.update({
        'month': 'm',
        'day': 'd',
        'year': 'yyyy',
        'second': 's',
        'hour': 'h',
        'doy': 'y',
        'minute': 'n',
        'quarter': 'q',
        'dow': 'w',
        'week': 'ww'
    })

    # Access spellings for common ANSI function names.
    function_rewrites = {'current_date': 'now',
                         'current_timestamp': 'now',
                         'length': 'len',
                         }

    def visit_select_precolumns(self, select):
        """Access puts TOP (its version of LIMIT) before the columns."""
        prefix = "DISTINCT " if select.distinct else ""
        if select.limit:
            prefix += "TOP %s " % (select.limit)
        if select.offset:
            raise exc.InvalidRequestError(
                'Access does not support LIMIT with an offset')
        return prefix

    def limit_clause(self, select):
        """Rendered as TOP by visit_select_precolumns; nothing goes here."""
        return ""

    def binary_operator_string(self, binary):
        """Access uses "mod" instead of "%"."""
        if binary.operator == '%':
            return 'mod'
        return binary.operator

    def label_select_column(self, select, column, asfrom):
        # Function expressions are always labelled; everything else is
        # deferred to the base implementation.
        if isinstance(column, expression.Function):
            return column.label()
        return super(AccessCompiler, self).label_select_column(
            select, column, asfrom)

    def visit_function(self, func):
        """Rewrite ANSI SQL function names to their Access spellings."""
        func.name = self.function_rewrites.get(func.name, func.name)
        return super(AccessCompiler, self).visit_function(func)

    def for_update_clause(self, select):
        """FOR UPDATE is not supported by Access; silently ignore"""
        return ''

    def visit_table(self, table, asfrom=False, **kwargs):
        # Access has no schema qualifier: emit the bare quoted name in
        # FROM position, nothing otherwise.
        if asfrom:
            return self.preparer.quote(table.name, table.quote)
        return ""

    def visit_join(self, join, asfrom=False, **kwargs):
        join_kw = " LEFT OUTER JOIN " if join.isouter else " INNER JOIN "
        return (self.process(join.left, asfrom=True) + join_kw +
                self.process(join.right, asfrom=True) + " ON " +
                self.process(join.onclause))

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (
            field, self.process(extract.expr, **kw))
class AccessDDLCompiler(compiler.DDLCompiler):
    """DDL compiler for Access: COUNTER identity columns, bracketed
    DROP INDEX."""

    def get_column_specification(self, column, **kwargs):
        formatted = self.preparer.format_column(column)
        colspec = "%s %s" % (
            formatted,
            column.type.dialect_impl(self.dialect).get_col_spec())

        # Attach a sequence when the column is an implicit IDENTITY:
        # an autoincrementing integer primary key without foreign keys,
        # whose default is absent or an optional Sequence.
        implicit_identity = (
            not getattr(column.table, 'has_sequence', False)
            and column.primary_key
            and column.autoincrement
            and isinstance(column.type, types.Integer)
            and not column.foreign_keys)
        if implicit_identity:
            if column.default is None or \
                    (isinstance(column.default, schema.Sequence) and
                     column.default.optional):
                column.sequence = schema.Sequence(column.name + '_seq')

        if not column.nullable:
            colspec += " NOT NULL"

        if hasattr(column, 'sequence'):
            # Sequenced columns render as Access "counter" columns; record
            # which column owns the table's identity.
            column.table.has_sequence = column
            colspec = formatted + " counter"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        return colspec

    def visit_drop_index(self, drop):
        index = drop.element
        self.append("\nDROP INDEX [%s].[%s]" % (
            index.table.name, self._index_identifier(index.name)))
class AccessIdentifierPreparer(compiler.IdentifierPreparer):
    """Quote identifiers with Access-style brackets: [name]."""

    # Access reserves these beyond the standard word list.
    reserved_words = compiler.RESERVED_WORDS.copy()
    reserved_words.update(['value', 'text'])

    def __init__(self, dialect):
        super(AccessIdentifierPreparer, self).__init__(
            dialect, initial_quote='[', final_quote=']')
# Wire the concrete Access implementation classes onto the dialect class.
# The SingletonThreadPool keeps one ODBC/DAO connection per thread.
# NOTE(review): newer SQLAlchemy reads ``ddl_compiler`` (underscored);
# ``ddlcompiler`` here matches this legacy pre-0.6 dialect style — confirm
# against the SQLAlchemy version actually targeted.
dialect = AccessDialect
dialect.poolclass = pool.SingletonThreadPool
dialect.statement_compiler = AccessCompiler
dialect.ddlcompiler = AccessDDLCompiler
dialect.preparer = AccessIdentifierPreparer
dialect.execution_ctx_cls = AccessExecutionContext
| {
"repo_name": "awagnon/maraschino",
"path": "lib/sqlalchemy/dialects/access/base.py",
"copies": "37",
"size": "16036",
"license": "mit",
"hash": 4990536608219460000,
"line_mean": 34.6355555556,
"line_max": 84,
"alpha_frac": 0.5509478673,
"autogenerated": false,
"ratio": 4.310752688172043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
Support for the Microsoft Access database.
"""
from sqlalchemy import sql, schema, types, exc, pool
from sqlalchemy.sql import compiler, expression
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import processors
class AcNumeric(types.Numeric):
    """NUMERIC column; values are bound as strings for exact round-trips."""

    def get_col_spec(self):
        return "NUMERIC"

    def bind_processor(self, dialect):
        return processors.to_str

    def result_processor(self, dialect, coltype):
        # No conversion on the way out.
        return None


class AcFloat(types.Float):
    """FLOAT column."""

    def get_col_spec(self):
        return "FLOAT"

    def bind_processor(self, dialect):
        """By converting to string, we can use Decimal types round-trip."""
        return processors.to_str


class AcInteger(types.Integer):
    """INTEGER column."""

    def get_col_spec(self):
        return "INTEGER"


class AcTinyInteger(types.Integer):
    """TINYINT column."""

    def get_col_spec(self):
        return "TINYINT"


class AcSmallInteger(types.SmallInteger):
    """SMALLINT column."""

    def get_col_spec(self):
        return "SMALLINT"


class AcDateTime(types.DateTime):
    """DATETIME column."""

    def get_col_spec(self):
        return "DATETIME"


class AcDate(types.Date):
    """Dates are stored in Access DATETIME columns."""

    def get_col_spec(self):
        return "DATETIME"


class AcText(types.Text):
    """Unbounded text maps to the Access MEMO type."""

    def get_col_spec(self):
        return "MEMO"


class AcString(types.String):
    """TEXT column, with an optional length as TEXT(n)."""

    def get_col_spec(self):
        if self.length:
            return "TEXT(%d)" % self.length
        return "TEXT"


class AcUnicode(types.Unicode):
    """TEXT column; Access text is natively unicode, so no processors."""

    def get_col_spec(self):
        if self.length:
            return "TEXT(%d)" % self.length
        return "TEXT"

    def bind_processor(self, dialect):
        return None

    def result_processor(self, dialect, coltype):
        return None


class AcChar(types.CHAR):
    """Fixed-width CHAR also renders as TEXT(n)."""

    def get_col_spec(self):
        if self.length:
            return "TEXT(%d)" % self.length
        return "TEXT"


class AcBinary(types.LargeBinary):
    """BINARY column."""

    def get_col_spec(self):
        return "BINARY"


class AcBoolean(types.Boolean):
    """Booleans map to the Access YESNO type."""

    def get_col_spec(self):
        return "YESNO"


class AcTimeStamp(types.TIMESTAMP):
    """TIMESTAMP column."""

    def get_col_spec(self):
        return "TIMESTAMP"
class AccessExecutionContext(default.DefaultExecutionContext):
    """Execution context that fetches COUNTER identities via @@identity."""

    def get_lastrowid(self):
        cursor = self.cursor
        cursor.execute("SELECT @@identity AS lastrowid")
        return cursor.fetchone()[0]
class AccessCompiler(compiler.SQLCompiler):
    """Statement compiler emitting Jet/Access SQL syntax."""

    # Access DATEPART() field codes, overlaid on the standard map.
    extract_map = compiler.SQLCompiler.extract_map.copy()
    extract_map.update({
        'month': 'm',
        'day': 'd',
        'year': 'yyyy',
        'second': 's',
        'hour': 'h',
        'doy': 'y',
        'minute': 'n',
        'quarter': 'q',
        'dow': 'w',
        'week': 'ww'
    })

    # Access spellings for common ANSI function names.
    function_rewrites = {'current_date': 'now',
                         'current_timestamp': 'now',
                         'length': 'len',
                         }

    def visit_cast(self, cast, **kwargs):
        # Access has no CAST; compile the inner clause unchanged.
        return cast.clause._compiler_dispatch(self, **kwargs)

    def visit_select_precolumns(self, select):
        """Access puts TOP (its version of LIMIT) before the columns."""
        prefix = "DISTINCT " if select.distinct else ""
        if select.limit:
            prefix += "TOP %s " % (select.limit)
        if select.offset:
            raise exc.InvalidRequestError(
                'Access does not support LIMIT with an offset')
        return prefix

    def limit_clause(self, select):
        """Rendered as TOP by visit_select_precolumns; nothing goes here."""
        return ""

    def binary_operator_string(self, binary):
        """Access uses "mod" instead of "%"."""
        if binary.operator == '%':
            return 'mod'
        return binary.operator

    def visit_function(self, func, **kwargs):
        """Rewrite ANSI SQL function names to their Access spellings."""
        func.name = self.function_rewrites.get(func.name, func.name)
        return super(AccessCompiler, self).visit_function(func)

    def for_update_clause(self, select):
        """FOR UPDATE is not supported by Access; silently ignore"""
        return ''

    def visit_table(self, table, asfrom=False, **kwargs):
        # Access has no schema qualifier: emit the bare quoted name in
        # FROM position, nothing otherwise.
        if asfrom:
            return self.preparer.quote(table.name, table.quote)
        return ""

    def visit_join(self, join, asfrom=False, **kwargs):
        join_kw = " LEFT OUTER JOIN " if join.isouter else " INNER JOIN "
        return ('(' + self.process(join.left, asfrom=True) + join_kw +
                self.process(join.right, asfrom=True) + " ON " +
                self.process(join.onclause) + ')')

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (
            field, self.process(extract.expr, **kw))
class AccessDDLCompiler(compiler.DDLCompiler):
    """DDL compiler for Access: AUTOINCREMENT columns, bracketed
    DROP INDEX."""

    def get_column_specification(self, column, **kwargs):
        if column.table is None:
            raise exc.CompileError(
                "access requires Table-bound columns "
                "in order to generate DDL")

        colspec = self.preparer.format_column(column)

        if column.table._autoincrement_column is column:
            colspec += " AUTOINCREMENT"
        else:
            colspec += " " + self.dialect.type_compiler.process(column.type)

            # Spell nullability out explicitly except for primary keys
            # (implicitly NOT NULL) and columns with nullable=None.
            # The original's extra "or column.primary_key" test was dead:
            # the enclosing condition already excludes primary keys.
            if column.nullable is not None and not column.primary_key:
                colspec += " NULL" if column.nullable else " NOT NULL"

            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        return colspec

    def visit_drop_index(self, drop):
        index = drop.element
        self.append("\nDROP INDEX [%s].[%s]" % (
            index.table.name, self._index_identifier(index.name)))
class AccessIdentifierPreparer(compiler.IdentifierPreparer):
    """Quote identifiers with Access-style brackets: [name]."""

    # Access reserves these beyond the standard word list.
    reserved_words = compiler.RESERVED_WORDS.copy()
    reserved_words.update(['value', 'text'])

    def __init__(self, dialect):
        super(AccessIdentifierPreparer, self).__init__(
            dialect, initial_quote='[', final_quote=']')
class AccessDialect(default.DefaultDialect):
    """SQLAlchemy dialect for Microsoft Access over pyodbc."""

    colspecs = {
        types.Unicode: AcUnicode,
        types.Integer: AcInteger,
        types.SmallInteger: AcSmallInteger,
        types.Numeric: AcNumeric,
        types.Float: AcFloat,
        types.DateTime: AcDateTime,
        types.Date: AcDate,
        types.String: AcString,
        types.LargeBinary: AcBinary,
        types.Boolean: AcBoolean,
        types.Text: AcText,
        types.CHAR: AcChar,
        types.TIMESTAMP: AcTimeStamp,
    }
    name = 'access'
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False

    poolclass = pool.SingletonThreadPool
    statement_compiler = AccessCompiler
    ddl_compiler = AccessDDLCompiler
    preparer = AccessIdentifierPreparer
    execution_ctx_cls = AccessExecutionContext

    @classmethod
    def dbapi(cls):
        import pyodbc as module
        return module

    def create_connect_args(self, url):
        """Build the ODBC connection string from the SQLAlchemy URL."""
        opts = url.translate_connect_args()
        parts = ["Driver={Microsoft Access Driver (*.mdb)}",
                 "Dbq=%s" % opts["database"]]
        user = opts.get("username", None)
        if user:
            parts.append("UID=%s" % user)
            parts.append("PWD=%s" % opts.get("password", ""))
        return [[";".join(parts)], {}]

    def last_inserted_ids(self):
        return self.context.last_inserted_ids

    def has_table(self, connection, tablename, schema=None):
        # MSysObjects rows with type=1 are the tables in the database.
        count = connection.scalar(
            sql.text(
                "select count(*) from msysobjects where "
                "type=1 and name=:name"), name=tablename
        )
        return bool(count)

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        rows = connection.execute("select name from msysobjects where "
                                  "type=1 and name not like 'MSys%'")
        return [row[0] for row in rows]
| {
"repo_name": "luzfcb/sqlalchemy-access",
"path": "sqlalchemy_access/base.py",
"copies": "1",
"size": "8541",
"license": "mit",
"hash": 9214732004954154000,
"line_mean": 30.8694029851,
"line_max": 84,
"alpha_frac": 0.5967685283,
"autogenerated": false,
"ratio": 3.9985955056179776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5095364033917977,
"avg_score": null,
"num_lines": null
} |
"""Access BlueZ Media Player functionality"""
import dbus
# python-bluezero imports
from bluezero import constants
from bluezero import dbus_tools
from bluezero import tools
# Module-level logger, configured through bluezero's logging helper.
logger = tools.create_module_logger(__name__)
class MediaPlayerError(Exception):
    """Raised when a Bluetooth media player cannot be found or used."""
def _find_player_path(device_address):
    """Return the D-Bus object path of the media player on a device.

    :param device_address: Bluetooth address of the remote device.
    :return: D-Bus path exposing org.bluez MediaPlayer1.
    :raises MediaPlayerError: if the device has no media player.
    """
    # Snapshot the managed objects once.  The original fetched them twice
    # (once into a variable, once directly in the loop), doubling the
    # D-Bus round-trip and risking an inconsistent snapshot.
    mngd_objs = dbus_tools.get_managed_objects()
    for path, interfaces in mngd_objs.items():
        if not interfaces.get(constants.MEDIA_PLAYER_IFACE):
            continue
        if dbus_tools.get_device_address_from_dbus_path(path) == device_address:
            return path
    raise MediaPlayerError("No player found for the device")
class MediaPlayer:
    """Bluetooth MediaPlayer Class.

    This class instantiates an object that is able to interact with
    the player of a Bluetooth device and get audio from its source.
    """

    def __init__(self, device_addr):
        """Default initialiser.

        Creates the interface to the remote Bluetooth device.

        :param device_addr: Address of Bluetooth device player to use.
        """
        self.player_path = _find_player_path(device_addr)
        self.player_object = dbus_tools.get_dbus_obj(self.player_path)
        self.player_methods = dbus_tools.get_dbus_iface(
            constants.MEDIA_PLAYER_IFACE, self.player_object)
        self.player_props = dbus_tools.get_dbus_iface(
            dbus.PROPERTIES_IFACE, self.player_object)

    def _get_prop(self, prop_name):
        """Read a MediaPlayer1 property from the remote player."""
        return self.player_props.Get(constants.MEDIA_PLAYER_IFACE, prop_name)

    def _set_prop(self, prop_name, value):
        """Write a MediaPlayer1 property on the remote player."""
        self.player_props.Set(constants.MEDIA_PLAYER_IFACE, prop_name, value)

    @property
    def browsable(self):
        """If present indicates the player can be browsed using MediaFolder
        interface."""
        return self._get_prop('Browsable')

    @property
    def searchable(self):
        """If present indicates the player can be searched using
        MediaFolder interface."""
        return self._get_prop('Searchable')

    @property
    def track(self):
        """Return a dict of the track metadata."""
        return self._get_prop('Track')

    @property
    def device(self):
        """Return Device object path."""
        return self._get_prop('Device')

    @property
    def playlist(self):
        """Return the Playlist object path."""
        return self._get_prop('Playlist')

    @property
    def equalizer(self):
        """Return the equalizer value."""
        return self._get_prop('Equalizer')

    @equalizer.setter
    def equalizer(self, value):
        """Possible values: "off" or "on"."""
        self._set_prop('Equalizer', value)

    @property
    def name(self):
        """Return the player name."""
        return self._get_prop('Name')

    @property
    def repeat(self):
        """Return the repeat value."""
        return self._get_prop('Repeat')

    @repeat.setter
    def repeat(self, value):
        """Possible values: "off", "singletrack", "alltracks" or "group"."""
        self._set_prop('Repeat', value)

    @property
    def shuffle(self):
        """Return the shuffle value."""
        return self._get_prop('Shuffle')

    @shuffle.setter
    def shuffle(self, value):
        """Possible values: "off", "alltracks" or "group"."""
        self._set_prop('Shuffle', value)

    @property
    def status(self):
        """Return the status of the player.

        Possible status: "playing", "stopped", "paused",
        "forward-seek", "reverse-seek" or "error".
        """
        return self._get_prop('Status')

    @property
    def subtype(self):
        """Return the player subtype."""
        return self._get_prop('Subtype')

    def type(self, player_type):
        """Set the player type. Possible values are:

        * "Audio"
        * "Video"
        * "Audio Broadcasting"
        * "Video Broadcasting"
        """
        self._set_prop('Type', player_type)

    @property
    def position(self):
        """Return the playback position in milliseconds."""
        return self._get_prop('Position')

    def next(self):
        """Go to the next track and play it."""
        self.player_methods.Next()

    def play(self):
        """Resume the playback."""
        self.player_methods.Play()

    def pause(self):
        """Pause the track."""
        self.player_methods.Pause()

    def stop(self):
        """Stop the playback."""
        self.player_methods.Stop()

    def previous(self):
        """Go to the previous track and play it."""
        self.player_methods.Previous()

    def fast_forward(self):
        """Fast forward playback; this action is only stopped
        when another method in this interface is called.
        """
        self.player_methods.FastForward()

    def rewind(self):
        """Rewind playback; this action is only stopped
        when another method in this interface is called.
        """
        self.player_methods.Rewind()
| {
"repo_name": "ukBaz/python-bluezero",
"path": "bluezero/media_player.py",
"copies": "1",
"size": "5679",
"license": "mit",
"hash": 9015436617573692000,
"line_mean": 30.2032967033,
"line_max": 78,
"alpha_frac": 0.6207078711,
"autogenerated": false,
"ratio": 4.024805102763997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5145512973863997,
"avg_score": null,
"num_lines": null
} |
"""Access classess and identity for partitions.
Copyright (c) 2013 Clarinova. This file is licensed under the terms of
the Revised BSD License, included in this distribution as LICENSE.txt
"""
# import os
from sqlalchemy.orm import lazyload, noload
from sqlalchemy.orm.exc import NoResultFound
from six import iteritems
from ..identity import PartitionIdentity, PartitionNameQuery, NameQuery # , PartitionName
# from util.typecheck import accepts, returns
from ..util import Constant
class Partitions(object):
"""Container and manager for the set of partitions.
This object is always accessed from Bundle.partitions""
"""
STATE = Constant()
STATE.NEW = 'new'
STATE.BUILDING = 'building'
STATE.BUILT = 'built'
STATE.ERROR = 'error'
STATE.FINALIZED = 'finalized'
bundle = None
_partitions = None
def __init__(self, bundle):
self.bundle = bundle
self._partitions = {}
def partition(self, id_):
"""Get a partition by the id number.
Arguments:
id_ -- a partition id value
Returns:
A partitions.Partition object
Throws:
a Sqlalchemy exception if the partition either does not exist or
is not unique
Because this method works on the bundle, the id_ ( without version information )
is equivalent to the vid ( with version information )
"""
from ..orm import Partition as OrmPartition
from sqlalchemy import or_
from ..identity import PartialPartitionName
if isinstance(id_, PartitionIdentity):
id_ = id_.id_
elif isinstance(id_, PartialPartitionName):
id_ = id_.promote(self.bundle.identity.name)
session = self.bundle.dataset._database.session
q = session\
.query(OrmPartition)\
.filter(OrmPartition.d_vid == self.bundle.dataset.vid)\
.filter(or_(OrmPartition.id == str(id_).encode('ascii'),
OrmPartition.vid == str(id_).encode('ascii')))
try:
orm_partition = q.one()
return self.bundle.wrap_partition(orm_partition)
except NoResultFound:
orm_partition = None
if not orm_partition:
q = session\
.query(OrmPartition)\
.filter(OrmPartition.d_vid == self.bundle.dataset.vid)\
.filter(OrmPartition.name == str(id_).encode('ascii'))
try:
orm_partition = q.one()
return self.bundle.wrap_partition(orm_partition)
except NoResultFound:
orm_partition = None
return orm_partition # Always None
def _find_orm(self, pnq):
"""Return a Partition object from the database based on a PartitionId.
An ORM object is returned, so changes can be persisted.
"""
# import sqlalchemy.orm.exc
from ambry.orm import Partition as OrmPartition # , Table
from sqlalchemy.orm import joinedload # , joinedload_all
assert isinstance(pnq, PartitionNameQuery), "Expected PartitionNameQuery, got {}".format(type(pnq))
pnq = pnq.with_none()
q = self.bundle.dataset._database.session.query(OrmPartition)
if pnq.fqname is not NameQuery.ANY:
q = q.filter(OrmPartition.fqname == pnq.fqname)
elif pnq.vname is not NameQuery.ANY:
q = q.filter(OrmPartition.vname == pnq.vname)
elif pnq.name is not NameQuery.ANY:
q = q.filter(OrmPartition.name == str(pnq.name))
else:
if pnq.time is not NameQuery.ANY:
q = q.filter(OrmPartition.time == pnq.time)
if pnq.space is not NameQuery.ANY:
q = q.filter(OrmPartition.space == pnq.space)
if pnq.grain is not NameQuery.ANY:
q = q.filter(OrmPartition.grain == pnq.grain)
if pnq.format is not NameQuery.ANY:
q = q.filter(OrmPartition.format == pnq.format)
if pnq.segment is not NameQuery.ANY:
q = q.filter(OrmPartition.segment == pnq.segment)
if pnq.table is not NameQuery.ANY:
if pnq.table is None:
q = q.filter(OrmPartition.t_id is None)
else:
tr = self.bundle.table(pnq.table)
if not tr:
raise ValueError("Didn't find table named {} in {} bundle path = {}".format(
pnq.table, pnq.vname, self.bundle.database.path))
q = q.filter(OrmPartition.t_vid == tr.vid)
ds = self.bundle.dataset
q = q.filter(OrmPartition.d_vid == ds.vid)
q = q.order_by(
OrmPartition.vid.asc()).order_by(
OrmPartition.segment.asc())
q = q.options(joinedload(OrmPartition.table))
return q
def clean(self, session):
from ambry.orm import Partition as OrmPartition
session.query(OrmPartition).filter(OrmPartition.d_vid == self.bundle.dataset.vid).delete()
return self
def new_partition(self, name=None, data=None, **kwargs):
from ambry.identity import PartialPartitionName
import os
if name:
name_parts = [e[0] for e in PartialPartitionName._name_parts]
kwargs.update((k, str(v)) for k, v in iteritems(name.dict)
if k in name_parts)
assert 'table' in kwargs
p = self.bundle.dataset.new_partition(data=data, **kwargs)
# These are called from before_insert and before_update,
# but calling them here can avoid some requirements for early commits()
p._update_names()
return self.bundle.wrap_partition(p)
def get_or_new_partition(self, pname, data=None, **kwargs):
p = self.bundle.partitions.partition(pname)
if not p:
p = self.bundle.partitions.new_partition(pname, data=data, **kwargs)
self.bundle.commit()
assert p.d_vid == self.bundle.dataset.vid
return p
def __iter__(self):
    """Iterate over the type 'p' (union) partitions, skipping segment ('s') partitions."""
    from ambry.orm.partition import Partition

    query = (self.bundle.dataset.session.query(Partition)
             .filter(Partition.type == Partition.TYPE.UNION)
             .filter(Partition.d_vid == self.bundle.identity.vid))
    for orm_partition in query.all():
        yield self.bundle.wrap_partition(orm_partition)
def new_db_from_pandas(self, frame, table=None, data=None, load=True, **kwargs):
    """Create a new db partition from a pandas data frame.

    If the table does not exist, it will be created.

    :param frame: the pandas DataFrame to load.
    :param table: name of the destination table.
    :param data: opaque data dict stored with the partition.
    :param load: when True, insert the frame's rows into the partition.
    :return: the new partition.
    """
    from ..orm import Column

    # Create the table from the information in the data frame.
    with self.bundle.session:
        sch = self.bundle.schema
        t = sch.new_table(table)

        # Fall back to 'id' for unnamed indexes; the same name is reused
        # below when loading rows, so schema and data stay consistent.
        if frame.index.name:
            id_name = frame.index.name
        else:
            id_name = 'id'

        sch.add_column(t, id_name,
                       datatype=Column.convert_numpy_type(frame.index.dtype),
                       is_primary_key=True)

        # NOTE: convert_objects is deprecated in modern pandas; kept because
        # the rest of this code base targets the older API.
        for name, type_ in zip([row for row in frame.columns],
                               [row for row in frame.convert_objects(convert_numeric=True,
                                                                     convert_dates=True).dtypes]):
            sch.add_column(t, name, datatype=Column.convert_numpy_type(type_))
        sch.write_schema()

    p = self.new_partition(table=table, data=data, **kwargs)

    if load:
        # BUG FIX: previously this used frame.index.name directly, which is
        # None for unnamed indexes even though the schema column is 'id'.
        pk_name = id_name
        with p.inserter(table) as ins:
            for i, row in frame.iterrows():
                d = dict(row)
                d[pk_name] = i
                ins.insert(d)

    return p
def _repr_html_(self):
    """Return an HTML table of this bundle's partitions, for Jupyter display."""
    from tabulate import tabulate
    from ambry.util import drop_empty

    def record_gen():
        # First row is the header; one row per partition after that.
        for i, p in enumerate(self):
            if i == 0:
                yield ['vid', 'vname', 'table', 'time', 'space', 'grain', 'description', 'sub-desc']
            yield [
                p.vid, p.vname, p.table.name, p.time, p.space, p.grain, p.description, p.display.sub_description
            ]

    records = list(record_gen())

    # BUG FIX: with no partitions there is no header row and records[0]
    # below raised IndexError.
    if not records:
        return "<h2>Partitions in {} </h2>".format(self.bundle.identity.name)

    records = drop_empty(records)

    return "<h2>Partitions in {} </h2>".format(self.bundle.identity.name) \
           + tabulate(records[1:], headers=records[0], tablefmt="html")
| {
"repo_name": "CivicKnowledge/ambry",
"path": "ambry/bundle/partitions.py",
"copies": "1",
"size": "8776",
"license": "bsd-2-clause",
"hash": 7292074365437424000,
"line_mean": 31.2647058824,
"line_max": 115,
"alpha_frac": 0.5704193254,
"autogenerated": false,
"ratio": 4.106691623771643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5177110949171643,
"avg_score": null,
"num_lines": null
} |
"""Access classess and identity for partitions.
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
import os
from identity import PartitionIdentity, PartitionNameQuery, PartitionName, PartialPartitionName, NameQuery
from sqlalchemy.orm.exc import NoResultFound
from util.typecheck import accepts, returns
from dbexceptions import ConflictError
from util import Constant
class Partitions(object):
    '''Container and manager for the set of partitions.

    This object is always accessed from Bundle.partitions.
    '''

    # Lifecycle states recorded on partition records.
    STATE=Constant()
    STATE.NEW = 'new'
    STATE.BUILDING = 'building'
    STATE.BUILT = 'built'
    STATE.ERROR = 'error'

    bundle = None        # The owning Bundle, set in __init__.
    _partitions = None   # Cache of wrapped partitions, keyed by vid.

    def __init__(self, bundle):
        self.bundle = bundle
        self._partitions = {}
def partition(self, arg, **kwargs):
    '''Get a local partition object from either a Partition ORM object or
    a partition identifier.

    :param arg: an orm.Partition, an id/vid string, a PartitionNumber or a
        PartitionIdentity that identifies the partition.
    :param kwargs: passed through to new_partition() when the wrapper is built.
    :return: the wrapped partition, served from the per-bundle cache when possible.
    :raises ValueError: if *arg* is not one of the accepted types.
    '''
    from ambry.orm import Partition as OrmPartition
    from ambry.identity import PartitionNumber
    from identity import PartitionIdentity
    from sqlalchemy import or_
    from partition import new_partition

    session = self.bundle.database.session

    # Resolve the argument to an ORM Partition record.
    if isinstance(arg,OrmPartition):
        orm_partition = arg
    elif isinstance(arg, basestring):
        # An id or vid string; match either column.
        orm_partition = session.query(OrmPartition).filter(or_(OrmPartition.id_==arg,OrmPartition.vid==arg)).one()
    elif isinstance(arg, PartitionNumber):
        orm_partition = session.query(OrmPartition).filter(OrmPartition.id_==str(arg) ).one()
    elif isinstance(arg, PartitionIdentity):
        orm_partition = session.query(OrmPartition).filter(OrmPartition.id_==str(arg.id_) ).one()
    else:
        raise ValueError("Arg must be a Partition or PartitionNumber. Got {}".format(type(arg)))

    # Serve wrapped partitions from the cache, keyed by vid.
    vid = orm_partition.vid
    if vid in self._partitions:
        return self._partitions[vid]
    else:
        p = new_partition(self.bundle, orm_partition, **kwargs)
        self._partitions[vid] = p
        return p
@property
def count(self):
    """Number of partition records for this bundle's dataset."""
    from ambry.orm import Partition as OrmPartition

    query = self.bundle.database.session.query(OrmPartition)
    query = query.filter(OrmPartition.d_vid == self.bundle.dataset.vid)
    return query.count()
@property
def all(self):  # @ReservedAssignment
    '''Return a list of all partitions, ordered by vid then segment.

    :return: list of wrapped partition objects.
    '''
    from ambry.orm import Partition as OrmPartition

    # The previous implementation wrapped this in a try/except that only
    # re-raised OperationalError, followed by an unreachable 'return []'.
    ds = self.bundle.dataset
    q = (self.bundle.database.session.query(OrmPartition)
         .filter(OrmPartition.d_vid == ds.vid)
         .order_by(OrmPartition.vid.asc())
         .order_by(OrmPartition.segment.asc()))
    return [self.partition(op) for op in q.all()]
@property
def all_nocsv(self):  # @ReservedAssignment
    '''Return a list of all partitions, excluding CSV format partitions.'''
    from ambry.orm import Partition as OrmPartition

    # Dead try/except (re-raise only) and unreachable 'return []' removed.
    ds = self.bundle.dataset
    q = (self.bundle.database.session.query(OrmPartition)
         .filter(OrmPartition.d_vid == ds.vid)
         .filter(OrmPartition.format != 'csv')
         .order_by(OrmPartition.vid.asc())
         .order_by(OrmPartition.segment.asc()))
    return [self.partition(op) for op in q.all()]
def __iter__(self):
    """Iterate over all of this bundle's partitions."""
    for p in self.all:
        yield p
def close(self):
    """Close every cached partition and empty the cache."""
    cached, self._partitions = self._partitions, {}
    for p in cached.values():
        p.close()
def get(self, id_):
    '''Get a partition by id, vid or name.

    :param id_: a partition id value, or a PartitionIdentity.
    :return: a partitions.Partition object, or None if nothing matched.
    :raises: a SQLAlchemy exception if the match is not unique.

    Because this method works on the bundle, the id_ (without version
    information) is equivalent to the vid (with version information).
    '''
    from ambry.orm import Partition as OrmPartition
    from sqlalchemy import or_

    if isinstance(id_, PartitionIdentity):
        id_ = id_.identity.id_

    s = self.bundle.database.session

    # First try to match on id_ or vid.
    q = (s
         .query(OrmPartition)
         .filter(or_(
             OrmPartition.id_ == str(id_).encode('ascii'),
             OrmPartition.vid == str(id_).encode('ascii')
         )))

    try:
        orm_partition = q.one()
        return self.partition(orm_partition)
    except NoResultFound:
        orm_partition = None

    # Fall back to matching on the partition name.
    if not orm_partition:
        q = (s.query(OrmPartition)
             .filter(OrmPartition.name == id_.encode('ascii')))
        try:
            orm_partition = q.one()
            return self.partition(orm_partition)
        except NoResultFound:
            orm_partition = None

    return orm_partition
def find_table(self, table_name):
    '''Return the first partition that has the given table name, or None.'''
    matches = (p for p in self.all
               if p.table and p.table.name == table_name)
    return next(matches, None)
def find_id(self, id_):
    '''Find a partition ORM record from an id or vid, or None.'''
    from ambry.orm import Partition as OrmPartition
    from sqlalchemy import or_

    key = str(id_).encode('ascii')
    q = (self.bundle.database.session.query(OrmPartition)
         .filter(or_(OrmPartition.id_ == key,
                     OrmPartition.vid == key)))
    return q.first()
def find(self, pnq=None, use_library=False, **kwargs):
    '''Return a Partition object matching a PartitionNameQuery, or None.

    :param pnq: a PartitionNameQuery; built from kwargs when omitted.
    :param use_library: when True and the local partition database does not
        exist, try to fetch the partition from the library instead.
    :raises ResultCountError: when more than one partition matches.

    The object returned is immutable; changes are not persisted.
    '''
    import sqlalchemy.orm.exc

    if pnq is None:
        pnq = PartitionNameQuery(**kwargs)

    assert isinstance(pnq,PartitionNameQuery), "Expected NameQuery, got {}".format(type(pnq))

    try:
        partitions = [ self.partition(op, memory=kwargs.get('memory',False))
                       for op in self._find_orm(pnq).all()]

        if len(partitions) == 1:
            p = partitions.pop()

            if use_library and not p.database.exists:
                # Try to get it from the library, if it exists.
                b = self.bundle.library.get(p.identity.vname)

                if not b or not b.partition:
                    return p
                else:
                    return b.partition
            else:
                return p

        elif len(partitions) > 1 :
            from ambry.dbexceptions import ResultCountError
            rl = ";\n".join([p.identity.vname for p in partitions])
            raise ResultCountError("Got too many results: for {}\n{}".format(vars(pnq), rl))
        else:
            return None

    except sqlalchemy.orm.exc.NoResultFound:
        return None
def find_all(self, pnq=None, **kwargs):
    '''Return all Partition objects matching a PartitionNameQuery.

    The objects returned are immutable; changes are not persisted.
    '''
    # The unused 'from identity import Identity' was removed.
    if pnq is None:
        pnq = PartitionNameQuery(**kwargs)

    ops = self._find_orm(pnq).all()
    return [self.partition(op) for op in ops]
def _find_orm(self, pnq):
    '''Build a query for OrmPartition records matching a PartitionNameQuery.

    An ORM query is returned, so changes to the resulting records can be
    persisted.
    '''
    from ambry.orm import Partition as OrmPartition
    # The unused 'import sqlalchemy.orm.exc' was removed.

    assert isinstance(pnq, PartitionNameQuery), "Expected PartitionNameQuery, got {}".format(type(pnq))

    pnq = pnq.with_none()

    q = self.bundle.database.session.query(OrmPartition)

    # Most-specific name first: fqname beats vname beats name; otherwise
    # filter on the individual name parts.
    if pnq.fqname is not NameQuery.ANY:
        q = q.filter(OrmPartition.fqname == pnq.fqname)
    elif pnq.vname is not NameQuery.ANY:
        q = q.filter(OrmPartition.vname == pnq.vname)
    elif pnq.name is not NameQuery.ANY:
        q = q.filter(OrmPartition.name == pnq.name)
    else:
        if pnq.time is not NameQuery.ANY:
            q = q.filter(OrmPartition.time == pnq.time)
        if pnq.space is not NameQuery.ANY:
            q = q.filter(OrmPartition.space == pnq.space)
        if pnq.grain is not NameQuery.ANY:
            q = q.filter(OrmPartition.grain == pnq.grain)
        if pnq.format is not NameQuery.ANY:
            q = q.filter(OrmPartition.format == pnq.format)
        if pnq.segment is not NameQuery.ANY:
            q = q.filter(OrmPartition.segment == pnq.segment)
        if pnq.table is not NameQuery.ANY:
            if pnq.table is None:
                q = q.filter(OrmPartition.t_id == None)
            else:
                tr = self.bundle.schema.table(pnq.table)
                if not tr:
                    raise ValueError("Didn't find table named {} in {} bundle path = {}".format(pnq.table, pnq.vname, self.bundle.database.path))
                q = q.filter(OrmPartition.t_id == tr.id_)

    ds = self.bundle.dataset
    q = q.filter(OrmPartition.d_vid == ds.vid)

    q = q.order_by(OrmPartition.vid.asc()).order_by(OrmPartition.segment.asc())

    return q
def _new_orm_partition(self, pname, tables=None, data=None, memory = False):
    '''Create a new ORM Partition object.

    :param pname: a PartialPartitionName identifying the new partition.
    :param tables: optional list/set/tuple of table names stored in data['tables'].
    :param data: opaque data dict stored with the partition.
    :param memory: when True, build an uncommitted in-memory record with
        random ids instead of adding it to the session.
    :raises NotFoundError: when pname.table does not name a known table.
    :raises ConflictError: when committing the new record violates a
        uniqueness constraint.
    '''
    from ambry.orm import Partition as OrmPartition, Table
    from sqlalchemy.exc import IntegrityError

    assert type(pname) == PartialPartitionName, "Expected PartialPartitionName, got {}".format(type(pname))

    if tables and not isinstance(tables, (list, tuple, set)):
        raise ValueError("If specified, 'tables' must be a list, set or tuple")

    if not data:
        data = {}

    pname = pname.promote(self.bundle.identity)
    pname.is_valid()

    session = self.bundle.database.session

    # Resolve the named table, matching on either name or id.
    if pname.table:
        q = session.query(Table).filter( (Table.name==pname.table) | (Table.id_==pname.table) )
        try:
            table = q.one()
        except Exception:
            # Narrowed from a bare 'except:'; any lookup failure means the
            # table could not be resolved.
            from dbexceptions import NotFoundError
            raise NotFoundError('Failed to find table for name or id: {}'.format(pname.table))
    else:
        table = None

    # Ensure the partition's own table is included in the stored table list.
    if tables:
        tables = list(tables)

    if tables and pname and pname.table and pname.table not in tables:
        tables = list(tables)
        tables.append(pname.table)

    if tables:
        data['tables'] = tables

    d = pname.dict

    # Promoting to a PartitionName creates the PartitionName subclass from
    # the format, which is required to get the correct cache_key
    d['cache_key'] = pname.promote(self.bundle.identity.name).cache_key

    if 'format' not in d:
        d['format'] = 'db'

    # OrmPartition requires t_id instead of a 'table' entry; the 'dataset'
    # entry is supplied positionally. (pop replaces a del in try/except: pass.)
    d.pop('table', None)
    if 'dataset' in d:
        del d['dataset']

    # This code must have the session established in the context be active.
    op = OrmPartition(
        self.bundle.get_dataset(),
        t_id = table.id_ if table else None,
        data=data,
        state = Partitions.STATE.NEW,
        **d
    )

    if memory:
        # Memory partitions get random ids and are never committed.
        from random import randint
        from identity import ObjectNumber
        op.dataset = self.bundle.get_dataset()
        op.table = table
        op.set_ids(randint(100000,ObjectNumber.PARTMAXVAL))
        return op

    session.add(op)

    # We need to do this here to ensure that the before_commit()
    # routine is run, which sets the fqname and vid, which are needed later
    try:
        session.commit()
    except IntegrityError as e:
        from dbexceptions import ConflictError
        raise ConflictError('Integrity error in database {}, while creating partition {}\n{}\n{}'
            .format(self.bundle.database.dsn, str(pname), pname.cache_key, e.message))

    if not op.format:
        raise Exception("Must have a format!")

    return op
def clean(self, session):
    """Remove every partition record from the database."""
    from ambry.orm import Partition as OrmPartition

    session.query(OrmPartition).delete()
def _new_partition(self, ppn, tables=None, data=None, clean = False, create=True):
    '''Create a new OrmPartition record and return its wrapped partition.

    :param ppn: a PartialPartitionName for the new partition.
    :param tables: optional tables recorded on the partition.
    :param data: opaque data dict stored with the partition.
    :param clean: passed to create_with_tables() when creating.
    :param create: when True, create the partition's database as well.
    '''
    assert type(ppn) == PartialPartitionName, "Expected PartialPartitionName, got {}".format(type(ppn))

    with self.bundle.session as s:
        op = self._new_orm_partition(ppn, tables=tables, data=data)
        # Return the partition from the managed session, which prevents the
        # partition from being tied to a session that is closed.
        fqname = op.fqname
        partition = self.find(PartitionNameQuery(fqname=fqname))

        try:
            assert bool(partition), '''Failed to find partition that was just created'''
        except AssertionError:
            self.bundle.error("Failed to get partition for: created={}, fqname={}, database={} "
                              .format(ppn, fqname, self.bundle.database.dsn))
            raise

        if create:
            if tables and hasattr(partition, 'create_with_tables'):
                partition.create_with_tables(tables, clean)
            else:
                partition.create()

        partition.close()

    return partition
def _find_or_new(self, kwargs, clean=False, format=None, tables=None, data=None, create=True):
    '''Find a partition, creating it when it does not exist.

    :return: a (partition, found) tuple; found is True if the partition
        already existed, False if it was created.
    '''
    pnq = PartitionNameQuery(**kwargs)
    ppn = PartialPartitionName(**kwargs)

    # The partition's own table is always part of the recorded table set.
    if tables:
        tables = set(tables)

    if ppn.table:
        if not tables:
            tables = set()
        tables.add(ppn.table)

    if format:
        ppn.format = format
        pnq.format = format

    partition = self.find(pnq)
    if partition:
        return partition, True

    # BUG FIX: 'clean' was previously accepted but never forwarded.
    partition = self._new_partition(ppn, tables=tables, data=data, clean=clean, create=create)
    return partition, False
def new_partition(self, clean=False, tables=None, data=None, **kwargs):
    """Create a new database-format partition; delegates to new_db_partition."""
    return self.new_db_partition(tables=tables, data=data, clean=clean, **kwargs)
def find_or_new(self, clean = False, tables=None, data=None, **kwargs):
    """Find or create a database-format partition; delegates to find_or_new_db."""
    return self.find_or_new_db(clean=clean, tables=tables, data=data, **kwargs)
def new_db_partition(self, clean=False, tables=None, data=None, create=True, **kwargs):
    """Create a new 'db' format partition.

    :raises ConflictError: when the partition already exists.
    """
    # BUG FIX: 'clean' was previously hard-coded to False.
    p, found = self._find_or_new(kwargs, clean=clean, tables=tables, data=data, create=create, format='db')

    if found:
        raise ConflictError("Partition {} already exists".format(p.name))

    return p
def new_db_from_pandas(self, frame, table=None, data=None, load=True, **kwargs):
    '''Create a new db partition from a pandas data frame.

    If the table does not exist, it will be created.

    :param frame: the pandas DataFrame to load.
    :param table: name of the destination table.
    :param data: opaque data dict stored with the partition.
    :param load: when True, insert the frame's rows into the partition.
    '''
    from orm import Column
    # The unused 'import pandas as pd' and 'import numpy as np' were removed.

    # Create the table from the information in the data frame.
    with self.bundle.session:
        sch = self.bundle.schema
        t = sch.add_table(table)

        # BUG FIX: frame.index.name is None for unnamed indexes; fall back
        # to 'id' and reuse the same name when loading rows, matching the
        # newer implementation of this method.
        if frame.index.name:
            id_name = frame.index.name
        else:
            id_name = 'id'

        sch.add_column(t, id_name,
                       datatype = Column.convert_numpy_type(frame.index.dtype),
                       is_primary_key = True)

        for name, type_ in zip([row for row in frame.columns],
                               [row for row in frame.convert_objects(convert_numeric=True,
                                                                     convert_dates=True).dtypes]):
            sch.add_column(t, name, datatype=Column.convert_numpy_type(type_))
        sch.write_schema()

    p = self.new_partition(table=table, data=data, **kwargs)

    if load:
        pk_name = id_name
        with p.inserter(table) as ins:
            for i, row in frame.iterrows():
                d = dict(row)
                d[pk_name] = i
                ins.insert(d)

    return p
def find_or_new_db(self, clean = False, tables=None, data=None, **kwargs):
    '''Find a db-format partition, and if it does not exist, create it.

    Args:
        tables. String or array of tables to copy from the main partition
        data. add a data field to the partition in the database
        clean. Clean the database when it is created
        kwargs. time, space, grain, etc; parameters to name the partition
    '''
    # BUG FIX: 'clean' was previously hard-coded to False.
    p, _ = self._find_or_new(kwargs, clean=clean, tables=tables, data=data, format='db')

    return p
def new_hdf_partition(self, clean=False, tables=None, data=None, **kwargs):
    """Create a new 'hdf' format partition.

    :raises ConflictError: when the partition already exists.
    """
    # BUG FIX: 'tables' and 'clean' were previously accepted but dropped,
    # and the error message read "alread exists".
    p, found = self._find_or_new(kwargs, clean=clean, tables=tables, data=data, format='hdf')

    if found:
        raise ConflictError("Partition {} already exists".format(p.name))

    return p
def find_or_new_hdf(self, clean = False, tables=None, data=None, **kwargs):
    '''Find an hdf-format partition, and if it does not exist, create it.

    Args:
        tables String or array of tables to copy from the main partition
    '''
    # BUG FIX: 'clean' was previously hard-coded to False.
    p, _ = self._find_or_new(kwargs, clean=clean, tables=tables, data=data, create=False, format='hdf')

    return p
def new_csv_partition(self, pid=None, data=None, **kwargs):
    """Create a new 'csv' format partition.

    NOTE(review): the 'pid' parameter is accepted but unused here; kept for
    interface compatibility - confirm whether callers rely on it.

    :raises ConflictError: when the partition already exists.
    """
    p, found = self._find_or_new(kwargs, format='csv', data=data)

    if found:
        # BUG FIX: the message previously read "alread exists".
        raise ConflictError("Partition {} already exists".format(p.name))

    return p
def find_or_new_csv(self, clean = False, tables=None, data=None, **kwargs):
    '''Find a csv-format partition, and if it does not exist, create it.

    Args:
        tables String or array of tables to copy from the main partition
    '''
    # BUG FIX: 'clean' was previously hard-coded to False.
    p, _ = self._find_or_new(kwargs, clean=clean, tables=tables, data=data, format='csv')

    return p
def new_geo_partition(self, clean=False, tables=None, data=None, shape_file=None, **kwargs):
    """Create a new 'geo' format partition, optionally loading a shapefile.

    :raises ConflictError: when the partition already exists.
    """
    # The unused 'from sqlalchemy.orm.exc import NoResultFound' was removed.
    # BUG FIX: 'tables', 'data' and 'clean' were previously accepted but
    # dropped, and the message read "alread exists".
    p, found = self._find_or_new(kwargs, clean=clean, tables=tables, data=data, format='geo')

    if found:
        raise ConflictError("Partition {} already exists".format(p.name))

    if shape_file:
        p.load_shapefile(shape_file)

    return p
def find_or_new_geo(self, clean=False, tables=None, data=None,
                    create = False, shape_file=None, **kwargs):
    '''Find a geo-format partition, and if it does not exist, create it.

    Args:
        tables String or array of tables to copy from the main partition
        shape_file Optional shapefile loaded into the partition
    '''
    # BUG FIX: 'tables', 'data' and 'clean' were previously hard-coded to
    # None/False instead of forwarding the parameters.
    p, _ = self._find_or_new(kwargs, clean=clean, tables=tables,
                             data=data, create=create, format='geo')

    if shape_file:
        p.load_shapefile(shape_file)

    return p
def new_memory_partition(self, tables=None, data=None, **kwargs):
    '''Create an in-memory partition that is never committed to the database.

    :param tables: optional tables recorded on (and created in) the partition.
    :param data: opaque data dict stored with the partition.
    :param kwargs: name parts; 'format' selects the partition class (default 'db').
    '''
    from partition import partition_classes
    # The unused 'from partition.sqlite import SqlitePartition' was removed,
    # and the copy-pasted find_or_new docstring replaced.

    ppn = PartialPartitionName(**kwargs)

    # The partition's own table is always part of the recorded table set.
    if tables:
        tables = set(tables)

    if ppn.table:
        if not tables:
            tables = set()
        tables.add(ppn.table)

    op = self._new_orm_partition(ppn, tables=tables, data=data, memory = True)

    cls = partition_classes().partition_by_format[kwargs.get('format','db')]

    p = cls(self.bundle, op, memory=True, **kwargs)

    if tables:
        p.create_with_tables(tables)
    else:
        p.create()

    return p
def delete(self, partition):
    """Delete the database record for the given partition."""
    from ambry.orm import Partition as OrmPartition

    session = self.bundle.database.session
    (session.query(OrmPartition)
     .filter(OrmPartition.id_ == partition.identity.id_)
     .delete())
@property
def info(self):
    """Human-readable summary: the count, then one sname per line."""
    lines = ['Partitions: ' + str(self.count)]
    lines.extend(str(p.identity.sname) for p in self.all)
    return "\n".join(lines) + "\n"
def _repr_html_(self):
    """Return an HTML table of the partitions, for Jupyter display."""
    from identity import PartitionName

    # Collect the name parts actually used by any partition so the table only
    # shows meaningful columns. NOTE(review): 'partital_dict' appears to be
    # the (misspelled) attribute name defined on PartitionName elsewhere in
    # the project - confirm before renaming.
    active_parts = set()

    for p in self.all:
        active_parts |= set(p.name.partital_dict.keys())

    # Header row: fixed columns plus the active name parts, in canonical order.
    cols = ['Id','Name']
    for np, _, _ in PartitionName._name_parts:
        if np in active_parts:
            cols.append(np)

    rows = ["<tr>"+''.join([ '<th>{}</th>'.format(c) for c in cols])+"</tr>" ]

    # One row per partition, blank cells for missing name parts.
    for p in self.all:
        cols = []
        d = p.name.partital_dict
        cols.append(p.identity.id_)
        cols.append(p.identity.sname)
        for np, _, _ in PartitionName._name_parts:
            if np not in active_parts:
                continue
            cols.append(d[np] if np in d else '')
        rows.append("<tr>"+''.join([ '<td>{}</td>'.format(c) for c in cols])+"</tr>")

    return "<table>\n" + "\n".join(rows) + "\n</table>"
| {
"repo_name": "kball/ambry",
"path": "ambry/partitions.py",
"copies": "1",
"size": "23693",
"license": "bsd-2-clause",
"hash": 8105364617519946000,
"line_mean": 31.5453296703,
"line_max": 149,
"alpha_frac": 0.5462372853,
"autogenerated": false,
"ratio": 4.366568374493181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5412805659793182,
"avg_score": null,
"num_lines": null
} |
# Access control
import xpath
import base64
import random
import time
import urllib.parse
import jwt
import sys
import urllib.request, urllib.parse, urllib.error
from .vars import *
from .capability import *
from .checker import *
from .consistency import *
from .issuer import *
# Singleton token representing Igor itself; grants unrestricted internal access.
_igorSelfToken = IgorAccessToken()
_accessSelfToken = _igorSelfToken

# xxxjack temporary: push the self-token into the capability and issuer
# modules so they can perform privileged database operations.
from . import capability
capability._accessSelfToken = _accessSelfToken
from . import issuer
issuer._accessSelfToken = _accessSelfToken
def _combineTokens(token1, token2):
"""Return union of two tokens (which may be AccessToken, MultiAccessToken or None)"""
if token1 is None:
return token2
if token2 is None:
return token1
if token1 == token2:
return token1
if hasattr(token1, '_appendToken'):
token1._appendToken(token2)
return token1
return MultiAccessToken(tokenList=[token1, token2])
class OTPHandler:
    """Handle implementation of one-time-passwords (for passing tokens to plugins and scripts)"""

    def __init__(self):
        # Maps outstanding OTP strings to the tokens they stand in for.
        self._otp2token = {}

    def produceOTPForToken(self, token):
        """Produce a one-time-password form of this token, for use internally or for passing to a plugin script (to be used once)"""
        # The key format is carefully selected so it can be used as user:pass combination
        otp = '-otp-%d:%d' % (random.getrandbits(64), random.getrandbits(64))
        self._otp2token[otp] = token
        return otp

    def _consumeOTPForToken(self, otp):
        """Internal method - turns an OTP back into the token it refers to and invalidates the OTP"""
        # xxxjack should use a mutex here
        token = self._otp2token.get(otp)
        if not token:
            print('access: Invalid OTP presented: ', otp)
            self.igor.app.raiseHTTPError("498 Invalid OTP presented")
            return token
        del self._otp2token[otp]
        return token

    def invalidateOTPForToken(self, otp):
        """Invalidate an OTP, if it still exists. Used when a plugin script exits, in case it has not used its OTP"""
        self._otp2token.pop(otp, None)
class TokenStorage:
    """Handle storing and retrieving capabilities"""

    def __init__(self):
        # identifier -> token cache, plus the lazily-loaded default token.
        self._tokenCache = {}
        self._defaultTokenInstance = None

    def _clearTokenCaches(self):
        """Drop all cached tokens (e.g. after the database changed)."""
        self._tokenCache = {}
        self._defaultTokenInstance = None

    def _registerTokenWithIdentifier(self, identifier, token):
        """Remember *token* under *identifier* for later lookup."""
        self._tokenCache[identifier] = token

    def _loadTokenWithIdentifier(self, identifier):
        """Return the AccessToken with the given cid, from cache or database."""
        if identifier in self._tokenCache:
            return self._tokenCache[identifier]
        capNodeList = self.igor.database.getElements(f"//au:capability[cid='{identifier}']", 'get', _accessSelfToken, namespaces=NAMESPACES)
        if len(capNodeList) == 0:
            print(f'access: Warning: Cannot get token {identifier} because it is not in the database')
            self.igor.app.raiseHTTPError(f"500 Access: no capability with cid={identifier}")
        elif len(capNodeList) > 1:
            print(f'access: Error: Cannot get token {identifier} because it occurs {len(capNodeList)} times in the database')
            self.igor.app.raiseHTTPError(f"500 Access: multiple capabilities with cid={identifier}")
        capData = self.igor.database.tagAndDictFromElement(capNodeList[0])[1]
        return AccessToken(capData)

    def _defaultToken(self):
        """Internal method - returns token(s) for operations/users/plugins/etc that have no explicit tokens"""
        if self._defaultTokenInstance is None and self.igor.database:
            defaultContainer = self.igor.database.getElements('au:access/au:defaultCapabilities', 'get', _accessSelfToken, namespaces=NAMESPACES)
            if len(defaultContainer) != 1:
                self.igor.app.raiseHTTPError("501 Database should contain single au:access/au:defaultCapabilities")
            self._defaultTokenInstance = self._tokenForElement(defaultContainer[0])
        if self._defaultTokenInstance is None:
            print('access: _defaultToken() called but no database (or no default token in database)')
        return self._defaultTokenInstance

    def _tokenForUser(self, username):
        """Internal method - Return token(s) for a user with the given name"""
        if not username or '/' in username:
            self.igor.app.raiseHTTPError('401 Illegal username')
        elements = self.igor.database.getElements(f'identities/{username}', 'get', _accessSelfToken)
        if len(elements) != 1:
            # BUG FIX: the 'f' prefix was inside the string literal, so the
            # placeholders were never interpolated.
            self.igor.app.raiseHTTPError(f'501 Database error: {len(elements)} users named {username}')
        element = elements[0]
        # The user's own tokens, plus tokens shared by all users, plus defaults.
        token = self._tokenForElement(element, owner=f'identities/{username}')
        tokenForAllUsers = self._tokenForElement(element.parentNode)
        token = _combineTokens(token, tokenForAllUsers)
        return _combineTokens(token, self._defaultToken())

    def _tokenForElement(self, element, owner=None):
        """Internal method - returns token(s) that are stored in a given element (identity/action/plugindata/etc)"""
        nodelist = xpath.find("au:capability", element, namespaces=NAMESPACES)
        if not nodelist:
            return None
        tokenDataList = [self.igor.database.tagAndDictFromElement(e)[1] for e in nodelist]
        if len(tokenDataList) > 1:
            return MultiAccessToken(tokenDataList, owner=owner)
        rv = AccessToken(tokenDataList[0], owner=owner)
        return rv

    def tokensNeededByElement(self, element, optional=False):
        """Return a list of elements describing the tokens this element needs"""
        nodelist = xpath.find(".//au:needCapability", element, namespaces=NAMESPACES)
        if optional:
            nodelist += xpath.find(".//au:mayNeedCapability", element, namespaces=NAMESPACES)
        return nodelist
class RevokeList:
    """Handles revocation list"""

    def __init__(self):
        # BUG FIX: this was initialized to [], which made the 'is None'
        # lazy-load guards below dead code, so revocations persisted in the
        # database were never loaded. None triggers _loadRevokeList() on
        # first use, as the guards clearly intend.
        self._revokeList = None

    def _addToRevokeList(self, tokenId, nva=None):
        """Add given token to the revocation list"""
        if self._revokeList is None:
            self._loadRevokeList()
        if not tokenId in self._revokeList:
            self._revokeList.append(tokenId)
            revokeData = dict(cid=tokenId)
            if nva:
                revokeData['nva'] = nva
            # Persist the revocation in the database as well.
            element = self.igor.database.elementFromTagAndData("revokedCapability", revokeData, namespace=AU_NAMESPACE)
            parents = self.igor.database.getElements('au:access/au:revokedCapabilities', 'post', _accessSelfToken, namespaces=NAMESPACES)
            assert len(parents) == 1
            parents[0].appendChild(element)
            self.igor.database.setChanged()

    def _isTokenOnRevokeList(self, tokenId):
        """Check whether a given token is on the revoke list"""
        if self._revokeList is None:
            self._loadRevokeList()
        return tokenId in self._revokeList

    def _loadRevokeList(self):
        """Load the persisted revocation list from the database."""
        self._revokeList = self.igor.database.getValues('au:access/au:revokedCapabilities/au:revokedCapability/cid', _accessSelfToken, namespaces=NAMESPACES)
class UserPasswords:
    """Implements checking of passwords for users"""
    def __init__(self):
        pass

    def userAndPasswordCorrect(self, username, password):
        """Return True if username/password combination is valid"""
        assert self.igor
        assert self.igor.database
        # xxxjack this method should not be in the Access element
        if not username:
            if DEBUG: print('access: basic authentication: username missing')
            return False
        if '/' in username:
            self.igor.app.raiseHTTPError('401 Illegal username')
        encryptedPassword = self.igor.database.getValue(f'identities/{username}/encryptedPassword', _accessSelfToken)
        if not encryptedPassword:
            if DEBUG: print('access: basic authentication: no encryptedPassword for user', username)
            # NOTE(review): a user without a stored password is accepted with
            # ANY password here. If this is not a deliberate open-setup mode,
            # it is a security hole - confirm before relying on it.
            return True
        import passlib.hash
        import passlib.utils.binary
        # Re-hash the presented password with the salt taken from the stored
        # pbkdf2_sha256 hash, then compare the full hash strings.
        salt = encryptedPassword.split('$')[3]
        salt = passlib.utils.binary.ab64_decode(salt)
        passwordHash = passlib.hash.pbkdf2_sha256.using(salt=salt).hash(password)
        if encryptedPassword != passwordHash:
            if DEBUG: print('access: basic authentication: password mismatch for user', username)
            return False
        if DEBUG: print('access: basic authentication: login for user', username)
        return True

    def setUserPassword(self, username, password, token):
        """Change the password for the user"""
        assert self.igor
        assert self.igor.database
        import passlib.hash
        passwordHash = passlib.hash.pbkdf2_sha256.hash(password)
        element = self.igor.database.elementFromTagAndData('encryptedPassword', passwordHash)
        # Remove any existing password element before appending the new one.
        self.igor.database.delValues(f'identities/{username}/encryptedPassword', token)
        parentElements = self.igor.database.getElements(f'identities/{username}', 'post', token, postChild='encryptedPassword')
        if len(parentElements) == 0:
            self.igor.app.raiseHTTPError(f'404 User {username} not found')
        if len(parentElements) > 1:
            self.igor.app.raiseHTTPError(f'404 Multiple entries for user {username}')
        parentElement = parentElements[0]
        parentElement.appendChild(element)
        self.igor.database.setChanged()
class Access(OTPHandler, TokenStorage, RevokeList, IssuerInterface, UserPasswords):
def __init__(self, warnOnly=False):
    """Create the access-control singleton; igor is attached later via setIgor()."""
    for base in (OTPHandler, TokenStorage, RevokeList, IssuerInterface, UserPasswords):
        base.__init__(self)
    self.igor = None
    self.warnOnly = warnOnly
def _save(self):
    """Save database or capability store, if possible"""
    internal = self.igor.internal
    if internal:
        internal.save(_accessSelfToken)
def hasCapabilitySupport(self):
    """This access-control implementation supports capabilities."""
    return True
def setIgor(self, igor):
    """Inform Access singleton of main Igor object. Not passed on __init__ because of app initialization sequence."""
    # May only be called once; the issuer needs the igor reference too.
    assert self.igor is None
    self.igor = igor
    self._initIssuer()
def checkerForElement(self, element):
    """Return an AccessChecker guarding the given XML element.

    Falls back to a DefaultAccessChecker when the element is missing or
    does not map to an XPath under /data.
    """
    assert self.igor
    assert self.igor.database

    problem = None
    path = None
    if not element:
        problem = 'access: ERROR: attempt to get checkerForElement(None)'
    else:
        path = self.igor.database.getXPathForElement(element)
        if not path:
            problem = f'access: ERROR: attempt to get checkerForElement({repr(element)}) that has no XPath'
        elif not path.startswith('/data'):
            problem = f'access: ERROR: attempt to get checkerForElement({repr(element)}) with unexpected XPath: {path}'

    if problem is not None:
        print(problem)
        return DefaultAccessChecker(self)
    return AccessChecker(self, path)
def checkerForNewElement(self, path):
    """Return an AccessChecker for an element that does not exist yet (path is its future XPath)."""
    if path.startswith('/data'):
        return AccessChecker(self, path)
    print(f'access: ERROR: attempt to get checkerForNewElement() with unexpected XPath: {path}')
    return DefaultAccessChecker(self)
def checkerForEntrypoint(self, entrypoint):
    """Return an AccessChecker for an external entrypoint that is not a tree element."""
    malformed = (not entrypoint) or entrypoint[0] != '/' or entrypoint.startswith('/data')
    if malformed:
        print(f'access: ERROR: attempt to get checkerForEntrypoint({entrypoint})')
        return DefaultAccessChecker(self)
    return AccessChecker(self, entrypoint)
def _checkerDisallowed(self, **kwargs):
    """Report a failed access check.

    Returns True when the operation should be allowed anyway (warn-only
    mode), False otherwise.
    """
    if not kwargs.get('tentative'):
        # We don't report errors for tentative checking of access
        if kwargs.get('defaultChecker'):
            # BUG FIX: this referenced an undefined name 'operation',
            # raising NameError whenever this path was reported.
            print('\taccess: {} ???: no access allowed by default checker'.format(kwargs.get('operation', '???')))
        else:
            identifiers = kwargs.get('capID', [])
            print('\taccess: {} {}: no access allowed by {} tokens:'.format(kwargs.get('operation', '???'), kwargs.get('path', '???'), len(identifiers)))
            for i in identifiers:
                print(f'\t\t{i}')
        if 'requestPath' in kwargs:
            print('\taccess: On behalf of request to {}'.format(kwargs['requestPath']))
        if 'action' in kwargs:
            print('\taccess: On behalf of action {}'.format(kwargs['action']))
        if 'representing' in kwargs:
            print('\taccess: Representing {}'.format(kwargs['representing']))
        self.igor.internal._accessFailure(kwargs)
    if self.warnOnly:
        print('\taccess: allowed anyway because of --warncapabilities mode')
    # If Igor is running in warning-only mode we allow the operation anyway
    return self.warnOnly
def tokenForAction(self, element, token=None):
    """Return token(s) for an <action> element"""
    # Combine: caller-supplied token + the action's own tokens + tokens
    # shared by all actions + the defaults.
    own = self._tokenForElement(element)
    token = own if token is None else _combineTokens(token, own)
    token = _combineTokens(token, self._tokenForElement(element.parentNode))
    return _combineTokens(token, self._defaultToken())
def tokenForPlugin(self, pluginname, token=None):
    """Return token(s) for a plugin with the given pluginname"""
    assert self.igor
    assert self.igor.database

    elements = self.igor.database.getElements(f"plugindata/{pluginname}", 'get', _accessSelfToken)
    pluginToken = self._tokenForElement(elements[0]) if elements else None
    combined = _combineTokens(token, pluginToken)
    return _combineTokens(combined, self._defaultToken())
def tokenForIgor(self):
    """Return token for igor itself (use sparingly)"""
    # The unrestricted internal self-token; never hand this to external callers.
    return _igorSelfToken
    def tokenForAdminUser(self):
        """Return token for admin user of Igor itself (use sparingly).

        Delegates to ``_tokenForUser`` for the fixed username ``admin``.
        """
        return self._tokenForUser('admin')
    def tokenForRequest(self, headers):
        """Return token for the given incoming http(s) request.

        Checks, in order:

        1. ``Authorization: Bearer`` header — decoded as an external token.
        2. ``Authorization: Basic`` header — either a one-time-pad (payload
           starting with ``-otp-``) or a ``username:password`` pair; wrong
           credentials raise a 401.
        3. The ``user`` item in the current session.

        Falls back to the default token set when nothing matches.
        """
        token = None
        if 'HTTP_AUTHORIZATION' in headers:
            authHeader = headers['HTTP_AUTHORIZATION']
            authFields = authHeader.split()
            if authFields[0].lower() == 'bearer':
                decoded = authFields[1] # base64.b64decode(authFields[1])
                if DEBUG: print('access: tokenForRequest: returning token found in Authorization: Bearer header')
                token = self._externalAccessToken(decoded)
            elif authFields[0].lower() == 'basic':
                decoded = base64.b64decode(authFields[1]).decode('utf8')
                if decoded.startswith('-otp-'):
                    # This is a one time pad, not a username/password combination
                    if DEBUG: print('access: tokenForRequest: found OTP in Authorization: Basic header')
                    # OTP-token should already include the default set, so just return
                    return self._consumeOTPForToken(decoded)
                else:
                    username, password = decoded.split(':')
                    if DEBUG: print(f'access: tokenForRequest: searching for token for Authorization: Basic {username}:xxxxxx header')
                    if self.userAndPasswordCorrect(username, password):
                        # _tokenForUser already includes the default set, so just return.
                        return self._tokenForUser(username)
                    else:
                        self.igor.app.raiseHTTPError('401 Unauthorized', headers={'WWW_Authenticate' : 'Basic realm="igor"'})
            # Add more here for other methods
            # Reached for Bearer tokens (and unrecognised auth schemes, where
            # token is still None).
            return _combineTokens(token, self._defaultToken())
        user = self.igor.app.getSessionItem('user')
        if user:
            if DEBUG: print(f'access: tokenForRequest: returning token for session.user {user}')
            return self._tokenForUser(user)
        # xxxjack should we allow carrying tokens in cookies?
        if DEBUG: print('access: no token found for request {}'.format(headers.get('PATH_INFO', '???')), 'returning', self._defaultToken())
        return self._defaultToken()
    def externalTokenForHost(self, host, token=None):
        """If an external token for the given host is available (with the
        current token) return its external representation, else None.
        """
        # If the current token gives access to the plugindata for the plugin with this <host> field we also allow access.
        # xxxjack whether we should check for GET access or something else is open to discussion
        pluginElements = self.igor.database.getElements(f"/data/plugindata/*[host='{host}']", 'get', token)
        for pe in pluginElements:
            pluginName = pe.tagName
            token = self.tokenForPlugin(pluginName, token)
        # NOTE(review): if token is None and no plugin element matched, the
        # next line dereferences None — presumably callers always pass a
        # token; confirm.
        tid = token._hasExternalRepresentationFor(host)
        if not tid:
            print(f'access: WARNING: requested external token for request to {host} but not available')
            # Implicitly returns None.
            return
        extToken = token._getTokenWithIdentifier(tid)
        assert extToken
        rv = extToken._getExternalRepresentation()
        assert rv
        return rv
def tokensForSubject(self, sub, token):
"""Return list of token descriptions (accessible via token) valid for subject sub"""
# First get the list of all tokens valid for this subject (we filter later for accessible tokens)
idExpr = f"au:access/au:exportedCapabilities/au:capability[sub='{sub}']/cid"
idList = self.igor.database.getValues(idExpr, _accessSelfToken, namespaces=NAMESPACES)
# Now attempt to get each of these through the token we carry
rv = []
for _, tokId in idList:
tok = token._getTokenWithIdentifier(tokId)
if tok:
rv = rv + tok._getTokenDescription()
return rv
    def _externalAccessToken(self, data):
        """Internal method - Create a token from the given "Authorization: bearer" data.

        Rejects payloads without a ``cid`` and payloads whose ``cid`` is on
        the revocation list (both raise a 400).
        """
        content = self._decodeIncomingData(data)
        cid = content.get('cid')
        if not cid:
            print(f'access: ERROR: no cid on bearer token {content}')
            self.igor.app.raiseHTTPError('400 Missing cid on key')
        # NOTE(review): uses the module-level `singleton`, not `self` — looks
        # equivalent once the singleton is created; confirm.
        if singleton._isTokenOnRevokeList(cid):
            print(f'access: ERROR: token has been revoked: {content}')
            self.igor.app.raiseHTTPError('400 Revoked token')
        return ExternalAccessTokenImplementation(content)
def getTokenDescription(self, token, tokenId=None):
"""Returns a list of dictionaries which describe the tokens"""
if tokenId:
originalToken = token
token = token._getTokenWithIdentifier(tokenId)
if not token:
identifiers = originalToken.getIdentifiers()
print(f'\taccess: getTokenDescription: no such token ID: {tokenId}. Tokens:')
for i in identifiers:
print(f'\t\t{i}')
self.igor.app.raiseHTTPError(f'404 No such token: {tokenId}')
return token._getTokenDescription()
def newToken(self, token, tokenId, newOwner, newPath=None, **kwargs):
"""Create a new token based on an existing token. Returns ID of new token."""
assert self.igor
assert self.igor.database
#
# Split remaining args into rights and other content
#
newRights = {}
content = {}
for k, v in list(kwargs.items()):
# Note delegate right is checked implicitly, below.
if k in NORMAL_OPERATIONS:
newRights[k] = v
else:
content[k] = v
#
# Check that original token exists, and allows this delegation
#
originalToken = token
token = token._getTokenWithIdentifier(tokenId)
if newPath == None:
newPath = token._getObject()
if not token:
identifiers = originalToken.getIdentifiers()
print(f'\taccess: newToken: no such token ID: {tokenId}. Tokens:')
for i in identifiers:
print(f'\t\t{i}')
self.igor.app.raiseHTTPError(f'404 No such token: {tokenId}')
if not token._allowsDelegation(newPath, newRights, content.get('aud')):
self.igor.app.raiseHTTPError('401 Delegation not allowed')
#
# Check the new parent exists
#
parentElement = self.igor.database.getElements(newOwner, 'post', _accessSelfToken, namespaces=NAMESPACES)
if len(parentElement) != 1:
if DEBUG_DELEGATION: print(f'access: newToken: no unique destination {newOwner}')
self.igor.app.raiseNotfound()
parentElement = parentElement[0]
#
# Construct the data for the new token.
#
newId = 'c%d' % random.getrandbits(64)
token._addChild(newId)
tokenData = dict(cid=newId, obj=newPath, parent=tokenId)
moreData = token._getExternalContent()
for k, v in list(moreData.items()):
if not k in tokenData:
tokenData[k] = v
tokenData.update(newRights)
tokenData.update(content)
element = self.igor.database.elementFromTagAndData("capability", tokenData, namespace=AU_NAMESPACE)
#
# Insert into the tree
#
parentElement.appendChild(element)
self.igor.database.setChanged()
#
# Save
#
self._clearTokenCaches()
self._save()
#
# If the new token may affect actions we should update the actions
#
if newOwner.startswith('/data/actions') or newOwner.startswith('actions'):
self.igor.internal.queue('updateActions', _accessSelfToken)
#
# Return the ID
#
return newId
    def createTokensNeededByElement(self, needElementList, token):
        """Create tokens (if they don't exist yet) based on a list of
        au:needCapability elements.

        For each needCapability element: if the enclosing action/plugin
        already carries a compatible token, nothing is done; otherwise a new
        token is delegated from the carried *token* (401 if that is not
        possible).
        """
        toCreate = []
        for needElement in needElementList:
            parentElement = needElement.parentNode
            # xxxjack this is a hack. The au:needCapability will be in an <action> or in the plugindata for the element
            if parentElement.tagName == 'action':
                parentToken = self.tokenForAction(parentElement)
                newOwner = self.igor.database.getXPathForElement(parentElement)
            else:
                parentToken = self.tokenForPlugin(parentElement.tagName)
                newOwner = self.igor.database.getXPathForElement(parentElement)
            # need holds the rights/content dict; 'obj' is the target path.
            need = self.igor.database.tagAndDictFromElement(needElement)[1]
            path = need.pop('obj')
            if self.findCompatibleTokens(parentToken, path, **need):
                # The tokens in the parent of the needCapability element already allows it. Nothing to do.
                continue
            # Otherwise we have to create it from the tokens we are carrying
            compatibleTokenIDs = self.findCompatibleTokens(token, path, **need)
            if not compatibleTokenIDs:
                self.igor.app.raiseHTTPError(f"401 No rights to create capability for {self.igor.database.getXPathForElement(needElement)}")
            # Remember for later creation
            toCreate.append((compatibleTokenIDs[0], path, need, newOwner))
        # Now create all the needed capabilities
        if not toCreate:
            return
        for tokenId, newPath, need, newOwner in toCreate:
            self.newToken(token, tokenId, newOwner, newPath, **need)
        self._clearTokenCaches()
def findCompatibleTokens(self, token, newPath, **kwargs):
"""Return list of token IDs that allow the given operation."""
assert self.igor
assert self.igor.database
#
# Get rights from the args
#
newRights = {}
for k, v in list(kwargs.items()):
# Note delegate right is checked implicitly, below.
if k in NORMAL_OPERATIONS:
newRights[k] = v
rv = []
for tID in token.getIdentifiers():
t = token._getTokenWithIdentifier(tID)
if not t: continue
if t._allowsDelegation(newPath, newRights, kwargs.get('aud')):
rv = rv + t.getIdentifiers()
return rv
    def passToken(self, token, tokenId, newOwner):
        """Pass token ownership to a new owner. Token must be in the set of
        tokens that can be passed.

        Returns '' when the token already belongs to *newOwner*; otherwise
        returns None after moving it.  Raises 401 on any failure.
        """
        originalToken = token
        tokenToPass = token._getTokenWithIdentifier(tokenId)
        if not tokenToPass:
            identifiers = originalToken.getIdentifiers()
            print(f'\taccess: passToken: no such token ID: {tokenId}. Tokens:')
            for i in identifiers:
                print(f'\t\t{i}')
            self.igor.app.raiseHTTPError(f"401 No such token: {tokenId}")
        # NOTE(review): this only checks that the token HAS an owner, not that
        # the caller is that owner — confirm that is the intended contract.
        oldOwner = tokenToPass._getOwner()
        if not oldOwner:
            self.igor.app.raiseHTTPError(f"401 Not owner of token {tokenId}")
        if oldOwner == newOwner:
            return ''
        if not tokenToPass._setOwner(newOwner):
            self.igor.app.raiseHTTPError(f"401 Cannot move token {tokenId} to new owner {newOwner}")
        token._removeToken(tokenId)
        #
        # Save
        #
        self._clearTokenCaches()
        self._save()
def revokeToken(self, token, parentId, tokenId):
"""Revoke a token"""
parentToken = token._getTokenWithIdentifier(parentId)
if not parentToken:
identifiers = token.getIdentifiers()
print(f'\taccess: revokeToken: no such token ID: {parentId}. Tokens:')
for i in identifiers:
print(f'\t\t{i}')
self.igor.app.raiseHTTPError(f"404 No such parent token: {parentId}")
self._revokeRecursive(parentToken, tokenId, raiseError=True)
#
# Save
#
self._clearTokenCaches()
self._save()
def _revokeRecursive(self, parentToken, childTokenId, raiseError=False):
"""Helper for revoking a token"""
childToken = parentToken._getTokenWithIdentifier(childTokenId)
if not childToken:
print(f'\taccess: revokeToken: no such token ID: {childTokenId}. Tokens:')
for i in identifiers:
print(f'\t\t{i}')
if raiseError:
self.igor.app.raiseHTTPError(f"404 No such token: {childTokenId}")
print('Warning: ignored unknown token during recursive revoke')
return
# First do the recursion
grandChildren = childToken._getChildIdList()
for grandChildId in grandChildren:
self._revokeRecursive(childToken, grandChildId)
self._addToRevokeList(childTokenId, childToken.content.get('nva'))
childToken._revoke()
parentToken._delChild(childTokenId)
    def exportToken(self, token, tokenId, subject=None, lifetime=None, **kwargs):
        """Create an external representation of this token, destined for the
        given subject.

        Delegates a new child of *tokenId*, stores it under the exported
        capabilities element and returns its external (signed) representation.
        *lifetime* is in seconds and defaults to one year.
        """
        #
        # Add keys needed for external token
        #
        if subject:
            kwargs['sub'] = subject
        if not lifetime:
            lifetime = 60*60*24*365 # One year
        lifetime = int(lifetime)
        # Validity window: not-valid-before (1s in the past) / not-valid-after.
        kwargs['nvb'] = str(int(time.time())-1)
        kwargs['nva'] = str(int(time.time()) + lifetime)
        if 'aud' in kwargs:
            audience = kwargs['aud']
        else:
            audience = self.getSelfAudience()
        # NOTE(review): this overwrites a caller-supplied 'aud' with our own
        # audience even though `audience` above preserved it — looks
        # suspicious; confirm whether `kwargs['aud'] = audience` was intended.
        kwargs['aud'] = self.getSelfAudience()
        kwargs['iss'] = self.getSelfIssuer()
        #
        # Create the new token
        #
        # xxxjack we should check whehter the given external token already exists and
        # simply return the external representation if it does...
        #
        newTokenId = self.newToken(token, tokenId, self._getExternalTokenOwner(), **kwargs)
        tokenToExport = token._getTokenWithIdentifier(newTokenId)
        if not tokenToExport:
            # The new token is a grandchild of our token, so we may not be able to get it directly.
            # Try harder.
            parentToken = token._getTokenWithIdentifier(tokenId)
            tokenToExport = parentToken._getTokenWithIdentifier(newTokenId)
            if not tokenToExport:
                self.igor.app.raiseHTTPError(f'500 created token {newTokenId} but it does not exist')
        #
        # Create the external representation
        #
        assert tokenToExport
        assert tokenToExport._hasExternalRepresentationFor(audience)
        externalRepresentation = tokenToExport._getExternalRepresentation()
        #
        # Save
        #
        self._save()
        return externalRepresentation
def externalRepresentation(self, token, tokenId):
"""Return external representation for given token"""
tokenToExport = token._getTokenWithIdentifier(tokenId, recursive=True)
if not tokenToExport:
identifiers = token.getIdentifiers()
print(f'\taccess: externalRepresentation: no such token ID: {tokenId}. Tokens:')
for i in identifiers:
print(f'\t\t{i}')
self.igor.app.raiseHTTPError(f"401 No such token: {tokenId}")
assert tokenToExport._hasExternalRepresentationFor(self.getSelfAudience())
externalRepresentation = tokenToExport._getExternalRepresentation()
return externalRepresentation
    def _getExternalTokenOwner(self):
        """Return the database path where exported (external) tokens are stored."""
        return '/data/au:access/au:exportedCapabilities'
def consistency(self, token=None, fix=False, restart=False, extended=False):
assert self.igor
assert self.igor.database
assert self.igor.internal
if fix:
self.igor.internal.save(token)
checker = CapabilityConsistency(self.igor, fix, AU_NAMESPACE, _accessSelfToken, extended=extended)
nChanges, nErrors, rv = checker.check()
if nChanges:
self.igor.internal.save(token)
if restart:
rv += '\nRestarting Igor'
self.igor.internal.queue('restart', _accessSelfToken)
else:
rv += '\nRestart Igor to update capability data structures'
return rv
#
# Create a singleton Access object
#
singleton = None

def createSingleton(noCapabilities=False, warnOnly=False):
    """Create the module-level Access singleton (idempotent).

    With *noCapabilities* a dummy, non-checking implementation is installed;
    with *warnOnly* violations are reported but the operation is allowed.
    """
    global singleton
    if singleton:
        return
    if warnOnly:
        print('Warning: capability-based access control disabled, with warnings', file=sys.stderr)
        AccessChecker.WARN_ONLY = True
    if noCapabilities:
        print('Warning: capability-based access control disabled', file=sys.stderr)
        from . import dummyAccess
        dummyAccess.createSingleton(noCapabilities)
        singleton = dummyAccess.singleton
    else:
        singleton = Access(warnOnly=warnOnly)
    # Make the shared capability module point at whichever implementation won.
    capability.singleton = singleton
| {
"repo_name": "cwi-dis/igor",
"path": "igor/access/__init__.py",
"copies": "1",
"size": "31781",
"license": "mit",
"hash": 6476340921704707000,
"line_mean": 44.86002886,
"line_max": 157,
"alpha_frac": 0.6278908782,
"autogenerated": false,
"ratio": 4.319244359880402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009486842546975903,
"num_lines": 693
} |
"""Access control policy."""
from balrog import exceptions
class Policy(object):
    """Controls the access of a certain actor to a certain action on a resource."""

    def __init__(self, roles, get_identity, get_role):
        """Create and configure access control.

        :param roles: All roles of this access control.
        :param get_identity: Callable that returns the currently authenticated
            identity.
        :param get_role: Callable that returns the role name of the currently
            authenticated identity.
        """
        self._get_identity = get_identity
        self._get_role = get_role
        self.roles = {}
        self.permissions = {}
        for role in roles:
            assert role.name not in self.roles, (
                u'The role `{0}` is already registered.'.format(role.name)
            )
            self.roles[role.name] = role
            # First registration of a permission name wins across roles.
            for name, permission in role.permissions.items():
                self.permissions.setdefault(name, permission)

    def _check_permission(self, permission):
        """Ensure *permission* is known to this policy.

        :param permission: The permission to check.
        :raises: `PermissionNotFound` if the permission wasn't found.
        """
        if self.permissions and permission not in self.permissions:
            message = 'Permission {0} was not found in the list of all permissions.'.format(permission)
            raise exceptions.PermissionNotFound(message)

    def get_identity(self, *args, **kwargs):
        """Get current identity.

        :returns: An identity object which can be provided via a callback.
        """
        return self._get_identity(*args, **kwargs)

    def get_role(self, identity, *args, **kwargs):
        """Get identity role.

        :returns: Identity role object whose name is provided via a callback.
        :raises: `RoleNotFound` if no role found for this identity.
        """
        name = self._get_role(identity, *args, **kwargs)
        if name not in self.roles:
            raise exceptions.RoleNotFound(name)
        return self.roles[name]

    def check(self, permission, *args, **kwargs):
        """Check if the identity has the requested permission.

        :param permission: Permission name.
        :return: `True` if the identity's role has this permission.
        :raises: `RoleNotFound` if no role was found.
        :raises: `PermissionNotFound` when no permission is found.
        """
        self._check_permission(permission)
        identity = self.get_identity(*args, **kwargs)
        role = self.get_role(identity, *args, **kwargs)
        return role.check(identity, permission, *args, **kwargs)

    def filter(self, permission, objects, *args, **kwargs):
        """Filter objects according to the permission this identity has.

        :param permission: Permission name.
        :param objects: Objects to filter out.
        :returns: Filtered objects.
        :raises: `RoleNotFound` if no role found for this identity.
        :raises: `PermissionNotFound` when no permission is found that can
            filter the objects.
        """
        self._check_permission(permission)
        identity = self.get_identity(*args, **kwargs)
        role = self.get_role(identity, *args, **kwargs)
        return role.filter(identity, permission, objects, *args, **kwargs)
| {
"repo_name": "paylogic/balrog",
"path": "balrog/policy.py",
"copies": "1",
"size": "3434",
"license": "mit",
"hash": 2316811641338594000,
"line_mean": 36.7362637363,
"line_max": 97,
"alpha_frac": 0.6191030868,
"autogenerated": false,
"ratio": 4.603217158176943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005018417785963828,
"num_lines": 91
} |
"""Access control role."""
from balrog import exceptions
class Role(object):
    """Role, a set of permissions that identity can have access to."""

    def __init__(self, name, permissions):
        """Create a role.

        :param name: Unique role name within one policy.
        :param permissions: Permissions of the role.
        """
        self.name = name
        self.permissions = {}
        for permission in permissions:
            assert permission.name not in self.permissions, (
                'The permission `{0}` is already registered within this role.'.format(permission.name)
            )
            self.permissions[permission.name] = permission

    def check(self, identity, permission, *args, **kwargs):
        """Check if the identity has the requested permission.

        :param identity: Currently authenticated identity.
        :param permission: Permission name.
        :return: True if this role has the permission and it grants access.
        """
        found = self.permissions.get(permission)
        if found is None:
            # Unknown permission name simply means "not granted" here.
            return False
        return found.check(identity, *args, **kwargs)

    def filter(self, identity, permission, objects, *args, **kwargs):
        """Filter objects according to the permission this identity has.

        :param identity: Currently authenticated identity.
        :param permission: Permission name.
        :param objects: Objects to filter out.
        :returns: Filtered objects.
        :raises: `PermissionNotFound` when no permission is found that can
            filter the objects.
        """
        found = self.permissions.get(permission)
        if found is None:
            raise exceptions.PermissionNotFound()
        return found.filter(identity, objects, *args, **kwargs)
| {
"repo_name": "paylogic/balrog",
"path": "balrog/role.py",
"copies": "1",
"size": "1849",
"license": "mit",
"hash": 2544271925420795000,
"line_mean": 32.0178571429,
"line_max": 102,
"alpha_frac": 0.6192536506,
"autogenerated": false,
"ratio": 5.107734806629834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6226988457229834,
"avg_score": null,
"num_lines": null
} |
"""Access control tools"""
from tracked_model import serializer
from tracked_model.defs import TrackToken, ActionType, Field
def create_track_token(request):
    """Returns ``TrackToken``.

    ``TrackToken`` contains request and user making changes.
    It can be passed to ``TrackedModel.save`` instead of ``request``.
    It is intended to be used when passing ``request`` is not possible
    e.g. when ``TrackedModel.save`` will be called from celery task.
    """
    # Imported here, presumably to avoid a circular import with
    # tracked_model.models -- confirm.
    from tracked_model.models import RequestInfo
    request_pk = RequestInfo.create_or_get_from_request(request).pk
    user_pk = None
    # NOTE(review): `is_authenticated` is called (Django < 1.10 style); on
    # newer Django it is a property -- confirm the project's Django version.
    if request.user.is_authenticated():
        user_pk = request.user.pk
    return TrackToken(request_pk=request_pk, user_pk=user_pk)
class TrackedModelMixin:
    """Adds change-tracking functionality to models.

    Makes ``save`` method accept ``request`` or ``track_token`` keywords.
    If one of them is used, changes will be stored to database.
    Changes can be then accessed through model's
    ``tracked_model_history`` method.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Snapshot of field values at load/creation time; the baseline that
        # _tracked_model_diff() compares against.
        self._tracked_model_initial_state = serializer.dump_model(self)

    def _make_history(self, action):
        """Return an unsaved History row describing this instance/action."""
        from tracked_model.models import History
        hist = History()
        hist.model_name = self._meta.model.__name__
        hist.app_label = self._meta.app_label
        hist.table_name = self._meta.db_table
        hist.table_id = self.pk
        hist.action_type = action
        return hist

    @staticmethod
    def _fill_revision_source(hist, request, track_token):
        """Record who/what caused the change; ``request`` wins over token."""
        from tracked_model.models import RequestInfo
        if request:
            if request.user.is_authenticated():
                hist.revision_author = request.user
            req_info = RequestInfo.create_or_get_from_request(request)
            hist.revision_request = req_info
        elif track_token:
            hist.revision_author_id = track_token.user_pk
            hist.revision_request_id = track_token.request_pk

    def save(self, *args, **kwargs):
        """Saves changes made on model instance if ``request`` or
        ``track_token`` keyword are provided.
        """
        if self.pk:
            action = ActionType.UPDATE
            changes = None  # computed after save, via diff
        else:
            action = ActionType.CREATE
            changes = serializer.dump_model(self)
        request = kwargs.pop('request', None)
        track_token = kwargs.pop('track_token', None)
        super().save(*args, **kwargs)
        if not changes:
            changes = self._tracked_model_diff()
        if changes:
            hist = self._make_history(action)
            hist.change_log = serializer.to_json(changes)
            self._fill_revision_source(hist, request, track_token)
            hist.save()
        # Reset the baseline for subsequent diffs.
        self._tracked_model_initial_state = serializer.dump_model(self)

    def delete(self, *args, **kwargs):
        """Saves history of model instance deletion"""
        request = kwargs.pop('request', None)
        track_token = kwargs.pop('track_token', None)
        hist = self._make_history(ActionType.DELETE)
        # Store the full final state of the deleted row.
        hist.change_log = serializer.to_json(serializer.dump_model(self))
        self._fill_revision_source(hist, request, track_token)
        hist.save()
        super().delete(*args, **kwargs)

    def _tracked_model_diff(self):
        """Returns changes made to model instance.

        Returns None if no changes were made.
        """
        initial_state = self._tracked_model_initial_state
        current_state = serializer.dump_model(self)
        if current_state == initial_state:
            return None
        change_log = {}
        for field in initial_state:
            old_value = initial_state[field][Field.VALUE]
            new_value = current_state[field][Field.VALUE]
            if old_value == new_value:
                continue
            # Bug fix: the original did ``initial_state.copy()[field]``, which
            # copies only the OUTER dict — the inner dict was shared, so the
            # ``del`` below mutated the stored initial state.  Copy the inner
            # dict instead.
            field_data = dict(initial_state[field])
            del field_data[Field.VALUE]
            field_data[Field.OLD] = old_value
            field_data[Field.NEW] = new_value
            change_log[field] = field_data
        return change_log or None

    def tracked_model_history(self):
        """Returns history of a tracked object"""
        from tracked_model.models import History
        return History.objects.filter(
            table_name=self._meta.db_table, table_id=self.pk)
| {
"repo_name": "ojake/django-tracked-model",
"path": "tracked_model/control.py",
"copies": "1",
"size": "4797",
"license": "mit",
"hash": -806515202863928600,
"line_mean": 35.9,
"line_max": 74,
"alpha_frac": 0.6060037523,
"autogenerated": false,
"ratio": 4.135344827586207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5241348579886207,
"avg_score": null,
"num_lines": null
} |
"""Access data from json files as documents (paragraph level dicts)"""
# ---[ list ]-------------------------------------------------------------------
import os
def list_data_files():
    """Yield relative paths of all ``*.json`` files under the ``data`` directory."""
    for dirpath, _, filenames in os.walk('data'):
        for filename in filenames:
            if filename.endswith('.json'):
                yield os.path.join(dirpath, filename)
# ---[ convert ]----------------------------------------------------------------
import json
def json_to_docs(path):
    """Iterate over paragraph-level documents from one JSON paper file.

    Each yielded dict carries paper metadata (id, title, path, part,
    1-based text_id) plus the paragraph text, section, cited bibliography
    titles and table/figure captions.
    """
    # Bug fix: the original leaked the file handle (json.load(open(...))).
    with open(path, 'rb') as f:
        paper = json.load(f)
    bib_entries = paper.get('bib_entries', {})
    ref_entries = paper.get('ref_entries', {})
    text_id = 0
    for part in ['abstract', 'body_text']:
        if part not in paper:
            continue
        for x in paper[part]:
            text_id += 1
            doc = {}
            # metadata
            doc['paper_id'] = paper['paper_id']
            doc['paper_title'] = paper['metadata']['title']
            doc['path'] = path
            doc['part'] = part
            doc['text_id'] = text_id
            #
            doc['text'] = x['text']
            doc['section'] = x['section']
            # bib: some papers cite a ref_id missing from bib_entries — the
            # original crashed with KeyError here (marked "# ERROR"); skip them.
            doc['bib_titles'] = []
            for ref in x['cite_spans']:
                ref_id = ref['ref_id']
                if not ref_id:
                    continue
                entry = bib_entries.get(ref_id)
                if entry is not None:
                    doc['bib_titles'].append(entry['title'])
            # ref (tables and figures) — same defensive lookup as above.
            doc['tables'] = []
            doc['figures'] = []
            for ref in x['ref_spans']:
                ref_id = ref['ref_id']
                if not ref_id:
                    continue
                r = ref_entries.get(ref_id)
                if r is None:
                    continue
                if r['type'] == 'table':
                    doc['tables'].append(r['text'])
                elif r['type'] == 'figure':
                    doc['figures'].append(r['text'])
            yield doc
def doc_iter(limit=None):
    """Iterate over all documents (doc = single paragraph).

    *limit* caps the number of data FILES read, not the number of documents.
    """
    from itertools import islice
    for path in islice(list_data_files(), limit):
        for doc in json_to_docs(path):
            yield doc
def get_doc(path, text_id):
    """Return the single document (paragraph) with the given *text_id* from
    the file at *path*, or None if not found."""
    for doc in json_to_docs(path):
        if doc['text_id'] == text_id:
            return doc
def get_doc_by_meta(meta):
    """Return the document described by a metadata dict carrying the keys
    ``path`` and ``text_id``."""
    return get_doc(meta['path'], meta['text_id'])
# ------------------------------------------------------------------------------
# Ad-hoc analysis script: counts metadata rows with an empty sha and
# duplicate titles in data/metadata.csv.  NOTE: everything after the exit()
# call below is dead code (an older duplicate-title scan over the data files).
if __name__=="__main__":
    from pprint import pprint
    from itertools import islice
    from collections import Counter
    import csv
    cnt = Counter()
    empty = 0
    # NOTE(review): `all` shadows the builtin of the same name in this scope.
    all = 0
    with open('data/metadata.csv','r',encoding='utf8') as csv_f:
        reader = csv.DictReader(csv_f)
        for row in islice(reader,None):
            if row['sha']=='':
                empty += 1
            else:
                # Count occurrences per title (hashed to save memory).
                key = hash(row['title'])
                cnt[key] += 1
            all += 1
    print(empty,all)
    pprint(cnt.most_common(30))
    # Number of duplicated titles, and total rows covered by duplicates.
    print(sum([1 if c>1 else 0 for h,c in cnt.items()]))
    print(sum([c if c>1 else 0 for h,c in cnt.items()]))
    exit()
    # --- dead code below this point (kept for reference) -------------------
    from tqdm import tqdm
    cnt = {}
    files = tqdm(list_data_files())
    for path in files:
        doc = json.load(open(path,'rb'))
        t = doc['metadata']['title']
        if t not in cnt:
            cnt[t] = [path]
        else:
            cnt[t] += [path]
    by_cnt = list(cnt.items())
    by_cnt.sort(key=lambda x:len(x[1]),reverse=True)
    pprint(by_cnt[:10])
| {
"repo_name": "mobarski/sandbox",
"path": "covid19/data_old.py",
"copies": "2",
"size": "3014",
"license": "mit",
"hash": -3332769433119698400,
"line_mean": 25.9107142857,
"line_max": 80,
"alpha_frac": 0.5690112807,
"autogenerated": false,
"ratio": 2.9694581280788177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4538469408778818,
"avg_score": null,
"num_lines": null
} |
"""Access Galaxy NGLIMS functionality via the standard API.
"""
from six.moves import urllib
import json
import time
class GalaxyApiAccess:
    """Simple front end for accessing Galaxy's REST API.
    """
    def __init__(self, galaxy_url, api_key):
        self._base_url = galaxy_url
        self._key = api_key
        self._max_tries = 5

    def _make_url(self, rel_url, params=None):
        """Return (absolute url, urlencoded query) for rel_url, adding the API key."""
        if not params:
            params = dict()
        params['key'] = self._key
        vals = urllib.parse.urlencode(params)
        return ("%s%s" % (self._base_url, rel_url), vals)

    def _get(self, url, params=None):
        """GET *url* and return the decoded JSON body, retrying bad payloads."""
        url, params = self._make_url(url, params)
        num_tries = 0
        while 1:
            response = urllib.request.urlopen("%s?%s" % (url, params))
            try:
                out = json.loads(response.read())
                break
            except ValueError:
                # Garbled/partial response: retry up to _max_tries times.
                if num_tries > self._max_tries:
                    raise
                time.sleep(3)
                num_tries += 1
        return out

    def _post(self, url, data, params=None, need_return=True):
        """POST *data* as JSON; return the decoded JSON response.

        With need_return=False an unparsable response yields {} instead of
        raising.
        """
        url, params = self._make_url(url, params)
        # Bug fix: Python 3 urllib requires the request body as bytes;
        # json.dumps returns str, so encode it.
        request = urllib.request.Request("%s?%s" % (url, params),
                headers = {'Content-Type' : 'application/json'},
                data = json.dumps(data).encode("utf-8"))
        response = urllib.request.urlopen(request)
        try:
            data = json.loads(response.read())
        except ValueError:
            if need_return:
                raise
            else:
                data = {}
        return data

    def run_details(self, run_bc, run_date=None):
        """Next Gen LIMS specific API functionality.
        """
        try:
            details = self._get("/nglims/api_run_details", dict(run=run_bc))
        except ValueError:
            raise ValueError("Could not find information in Galaxy for run: %s" % run_bc)
        # Fall back to looking up by date when the barcode is unknown.
        if "error" in details and run_date is not None:
            try:
                details = self._get("/nglims/api_run_details", dict(run=run_date))
            except ValueError:
                raise ValueError("Could not find information in Galaxy for run: %s" % run_date)
        return details

    def sequencing_projects(self):
        """Next Gen LIMS: retrieve summary information of sequencing projects.
        """
        return self._get("/nglims/api_projects")

    def sqn_run_summary(self, run_info):
        """Next Gen LIMS: Upload sequencing run summary information.
        """
        return self._post("/nglims/api_upload_sqn_run_summary",
                data=run_info)

    def sqn_report(self, start_date, end_date):
        """Next Gen LIMS: report of items sequenced in a time period.
        """
        return self._get("/nglims/api_sqn_report",
                dict(start=start_date, end=end_date))
| {
"repo_name": "vladsaveliev/bcbio-nextgen",
"path": "bcbio/galaxy/api.py",
"copies": "4",
"size": "2858",
"license": "mit",
"hash": -623586393996913900,
"line_mean": 33.8536585366,
"line_max": 95,
"alpha_frac": 0.5521343597,
"autogenerated": false,
"ratio": 3.9529737206085755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6505108080308577,
"avg_score": null,
"num_lines": null
} |
"""Access Galaxy NGLIMS functionality via the standard API.
"""
import urllib
import urllib2
import json
import time
# NOTE: This is the Python 2 variant of this class (urllib2 and the
# `except ValueError, msg` syntax); it cannot run under Python 3.
class GalaxyApiAccess:
    """Simple front end for accessing Galaxy's REST API.
    """
    def __init__(self, galaxy_url, api_key):
        self._base_url = galaxy_url
        self._key = api_key
        self._max_tries = 5
    def _make_url(self, rel_url, params=None):
        # Returns (absolute url, urlencoded query), always adding the API key.
        if not params:
            params = dict()
        params['key'] = self._key
        vals = urllib.urlencode(params)
        return ("%s%s" % (self._base_url, rel_url), vals)
    def _get(self, url, params=None):
        # GET and decode JSON, retrying garbled payloads up to _max_tries
        # times with a 3 second pause.
        url, params = self._make_url(url, params)
        num_tries = 0
        while 1:
            response = urllib2.urlopen("%s?%s" % (url, params))
            try:
                out = json.loads(response.read())
                break
            except ValueError, msg:
                if num_tries > self._max_tries:
                    raise
                time.sleep(3)
                num_tries += 1
        return out
    def _post(self, url, data, params=None, need_return=True):
        # POST JSON-encoded data; with need_return=False an unparsable
        # response yields {} instead of raising.
        url, params = self._make_url(url, params)
        request = urllib2.Request("%s?%s" % (url, params),
                headers = {'Content-Type' : 'application/json'},
                data = json.dumps(data))
        response = urllib2.urlopen(request)
        try:
            data = json.loads(response.read())
        except ValueError:
            if need_return:
                raise
            else:
                data = {}
        return data
    def run_details(self, run_bc, run_date=None):
        """Next Gen LIMS specific API functionality.
        """
        try:
            details = self._get("/nglims/api_run_details", dict(run=run_bc))
        except ValueError:
            raise ValueError("Could not find information in Galaxy for run: %s" % run_bc)
        # Fall back to looking up by date when the barcode is unknown.
        if details.has_key("error") and run_date is not None:
            try:
                details = self._get("/nglims/api_run_details", dict(run=run_date))
            except ValueError:
                raise ValueError("Could not find information in Galaxy for run: %s" % run_date)
        return details
    def sequencing_projects(self):
        """Next Gen LIMS: retrieve summary information of sequencing projects.
        """
        return self._get("/nglims/api_projects")
    def sqn_run_summary(self, run_info):
        """Next Gen LIMS: Upload sequencing run summary information.
        """
        return self._post("/nglims/api_upload_sqn_run_summary",
                data=run_info)
    def sqn_report(self, start_date, end_date):
        """Next Gen LIMS: report of items sequenced in a time period.
        """
        return self._get("/nglims/api_sqn_report",
                dict(start=start_date, end=end_date))
| {
"repo_name": "mjafin/bcbio-nextgen",
"path": "bcbio/galaxy/api.py",
"copies": "10",
"size": "2835",
"license": "mit",
"hash": -1893774085017352200,
"line_mean": 33.156626506,
"line_max": 95,
"alpha_frac": 0.5495590829,
"autogenerated": false,
"ratio": 3.9320388349514563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01511179272354794,
"num_lines": 83
} |
# QGIS console script: iterate the features of the active layer (selected
# features if any, else all) and report geometry validity problems.
# Relies on the QGIS-provided global `iface`.
# Toggle: also fetch and validate each feature's geometry.
access_geom = True
# NOTE(review): os and QVariant are imported but unused in this script.
import os
from PyQt5.QtCore import QVariant
lyr = iface.activeLayer()
if lyr is None:
    raise Exception('select layer in TOC')
feat_cnt = 0
# Prefer the current selection; fall back to every feature in the layer.
feats = lyr.selectedFeatures() if lyr.selectedFeatureCount() > 0 else lyr.getFeatures()
for feat in feats:
    feat_cnt+=1
    #print('fid:', feat.id())
    # Progress marker every 1000 features.
    if feat_cnt % 1000 == 0:
        print('fid:', feat.id())
    if access_geom:
        geom = feat.geometry()
        #print(geom.type())
        #print(geom.boundingBox().toString())
        if not geom.isGeosValid ():
            print(feat.id(), 'NOT geos valid')
        #line = geom.asPolyline()
        #print(len(line))
        geom_errors = geom.validateGeometry()
        if len(geom_errors) > 0:
            print(feat.id(), 'geometry NOT valid:')
            for geom_error in geom_errors:
                print(geom_error.what())
            print('-------')
print('iterated features', feat_cnt)
print('layer feature count', lyr.featureCount())
print('dataprovider feature count', lyr.dataProvider().featureCount())
print('finished')
| {
"repo_name": "BergWerkGIS/gis-file-info",
"path": "QGIS/iterate-features.py",
"copies": "2",
"size": "1061",
"license": "mit",
"hash": -7794351535738653000,
"line_mean": 29.3142857143,
"line_max": 87,
"alpha_frac": 0.6098020735,
"autogenerated": false,
"ratio": 3.5966101694915253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5206412242991525,
"avg_score": null,
"num_lines": null
} |
""" Access GPIO pins via SysFS interface """
import os
class GPIO(object):
    """Access a GPIO pin via the Linux sysfs interface.

    Assumes the pin has already been exported so that
    ``/sys/class/gpio/gpioN`` exists.
    """

    def __init__(self, num):
        if not (2 <= num <= 117):
            raise ValueError('GPIO num must be in 2-117')
        self.sysfs = '/sys/class/gpio/gpio' + str(num)
        self.value_path = self.sysfs + "/value"
        self.direction_path = self.sysfs + "/direction"
        self.num = num
        # Last direction written via input()/output(); None until configured.
        self.direction = None

    def get_value(self):
        """Read the pin state and return it as 0 or 1.

        Approx 10kHz (0.10 ms per read), 70% faster than File.open()
        """
        fd = os.open(self.value_path, os.O_RDONLY)
        val = os.read(fd, 1)
        os.close(fd)
        # int() accepts str (Python 2) and bytes (Python 3); the original
        # ord(val[0]) - ord('0') raises TypeError on Python 3, where
        # os.read() returns bytes and val[0] is already an int.
        return int(val)

    def set_value(self, val):
        """Write 0 or 1 to the pin."""
        fd = os.open(self.value_path, os.O_WRONLY)
        # Encode for Python 3, where os.write() requires bytes.
        os.write(fd, (str(val) + '\n').encode())
        os.close(fd)

    value = property(get_value, set_value)

    def input(self):
        """Configure the pin as an input."""
        with open(self.direction_path, 'w') as f:
            f.write('in\n')
        self.direction = 'in'

    def output(self):
        """Configure the pin as an output."""
        with open(self.direction_path, 'w') as f:
            f.write('out\n')
        # Bug fix: the original recorded 'in' here even though the pin was
        # just configured as an output.
        self.direction = 'out'

    def __str__(self):
        return "GPIO %d (%s) " % (self.num, self.direction)
| {
"repo_name": "jschornick/pybbb",
"path": "bbb/gpio.py",
"copies": "1",
"size": "1184",
"license": "mit",
"hash": 8313038470929995000,
"line_mean": 27.1904761905,
"line_max": 76,
"alpha_frac": 0.5371621622,
"autogenerated": false,
"ratio": 3.226158038147139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4263320200347139,
"avg_score": null,
"num_lines": null
} |
""" Access GPIO pins via SysFS interface """
import os
import time
class GPIO(object):
    """Access a GPIO pin via the Linux sysfs interface.

    Assumes the pin has already been exported so that
    ``<base_dir>N`` (default ``/sys/class/gpio/gpioN``) exists.
    """

    def __init__(self, num, base_dir='/sys/class/gpio/gpio'):
        if not (2 <= num <= 117):
            raise ValueError('GPIO num must be in 2-117')
        self.num = num
        self.sysfs = base_dir + str(self.num)
        self.value_path = self.sysfs + "/value"
        self.direction_path = self.sysfs + "/direction"
        # Last direction written via input()/output(); None until configured.
        self.direction = None

    def get_value(self):
        """Read the pin state and return it as 0 or 1.

        Approx 10kHz (0.10 ms per read), 70% faster than File.open()
        """
        fd = os.open(self.value_path, os.O_RDONLY)
        val = os.read(fd, 1)
        os.close(fd)
        # int() accepts str (Python 2) and bytes (Python 3); the original
        # ord(val[0]) raises TypeError on Python 3, where val[0] is an int.
        return int(val)

    def set_value(self, val):
        """Write 0 or 1 to the pin."""
        fd = os.open(self.value_path, os.O_WRONLY)
        # Encode for Python 3, where os.write() requires bytes.
        os.write(fd, (str(val) + '\n').encode())
        os.close(fd)

    value = property(get_value, set_value)

    def input(self):
        """Configure the pin as an input."""
        with open(self.direction_path, 'w') as f:
            f.write('in\n')
        self.direction = 'in'

    def output(self):
        """Configure the pin as an output."""
        with open(self.direction_path, 'w') as f:
            f.write('out\n')
        self.direction = 'out'

    def __str__(self):
        return "GPIO #{}: value:{}, direction:{}".format(self.num, self.value,
                                                         self.direction)

    def pulse(self, duration=0.1):
        """Actuate GPIO pin for a given duration.

        :param duration: How long to keep the pin on (secs.).
        :type duration: float
        """
        # Use try-finally in case we are interrupted in sleep
        try:
            self.set_value(1)
            time.sleep(duration)
        finally:
            self.set_value(0)
| {
"repo_name": "IEEERobotics/pybbb",
"path": "bbb/gpio.py",
"copies": "1",
"size": "1691",
"license": "mit",
"hash": -5477589735185206000,
"line_mean": 28.1551724138,
"line_max": 78,
"alpha_frac": 0.5292726198,
"autogenerated": false,
"ratio": 3.5450733752620547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45743459950620546,
"avg_score": null,
"num_lines": null
} |
'''Accessing ArcObjects via python and comtypes.
...most definitely a work in progess...
Requirements:
Pathed comtypes module, https://github.com/enthought/comtypes/pull/75
Adapted from Mark Cederholm's "Using ArcObjects in Python":
http://www.pierssen.com/arcgis/misc.htm
Also see:
http://gis.stackexchange.com/questions/80/how-do-i-access-arcobjects-from-python/
'''
#**** Initialization ****
def GetLibPath():
    """Return location of ArcGIS type libraries as string"""
    # This will still work on 64-bit machines because Python runs in 32 bit mode
    import _winreg
    hklm = _winreg.HKEY_LOCAL_MACHINE
    esri_key = _winreg.OpenKey(hklm, "SOFTWARE\\ESRI")
    # Collect the version number of every installed Desktop release.
    versions = []
    for idx in xrange(_winreg.QueryInfoKey(esri_key)[0]):
        subkey = _winreg.EnumKey(esri_key, idx)
        if 'Desktop' in subkey:
            versions.append(float(subkey.replace('Desktop', '')))
    latest = 'Desktop{}'.format(sorted(versions)[-1])
    desktop_key = _winreg.OpenKey(hklm, "SOFTWARE\\ESRI\\{}".format(latest))
    return _winreg.QueryValueEx(desktop_key, "InstallDir")[0] + "com\\"
def GetModule(sModuleName):
    """Import ArcGIS module"""
    from comtypes.client import GetModule as _comtypes_GetModule
    _comtypes_GetModule(GetLibPath() + sModuleName)
def GetStandaloneModules():
    """Import commonly used ArcGIS libraries for standalone scripts"""
    for olb in ("esriSystem.olb", "esriGeometry.olb", "esriCarto.olb",
                "esriDisplay.olb", "esriGeoDatabase.olb",
                "esriDataSourcesGDB.olb", "esriDataSourcesFile.olb",
                "esriOutput.olb"):
        GetModule(olb)
def GetDesktopModules():
    """Import basic ArcGIS Desktop libraries"""
    for olb in ("esriFramework.olb", "esriArcMapUI.olb", "esriArcCatalogUI.olb"):
        GetModule(olb)
#**** Helper Functions ****
def NewObj(MyClass, MyInterface):
    """Creates a new comtypes POINTER object where\n\
    MyClass is the class to be instantiated,\n\
    MyInterface is the interface to be assigned.
    Returns None when creation fails.
    """
    from comtypes.client import CreateObject
    try:
        return CreateObject(MyClass, interface=MyInterface)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed as "creation failed".
        return None
def human_readable(num, suffix='B'):
    """Return human readable size from bytes
    http://stackoverflow.com/a/1094933/14420
    (orginal by Fred Cirera)
    """
    value = num
    for prefix in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(value) < 1024.0:
            return "%3.1f %s%s" % (value, prefix, suffix)
        value /= 1024.0
    # Fell through every prefix: report in yotta-units.
    return "%.1f %s%s" % (value, 'Y', suffix)
def CType(obj, interface):
    """Casts obj to interface and returns comtypes POINTER or None"""
    try:
        return obj.QueryInterface(interface)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a failed cast.
        return None
def CLSID(MyClass):
    """Return CLSID of MyClass as string"""
    clsid = MyClass._reg_clsid_
    return str(clsid)
def InitStandalone():
    """Init standalone ArcGIS license.

    Binds the process to ArcGIS Desktop 10.3 and checks out the most
    capable available license level.  Returns True on success.
    """
    # Set ArcObjects version
    import comtypes
    from comtypes.client import GetModule
    # GUID of the ArcGISVersion type library; generate its wrapper module.
    g = comtypes.GUID("{6FCCEDE0-179D-4D12-B586-58C88D26CA78}")
    GetModule((g, 1, 0))
    import comtypes.gen.ArcGISVersionLib as esriVersion
    import comtypes.gen.esriSystem as esriSystem
    pVM = NewObj(esriVersion.VersionManager, esriVersion.IArcGISVersion)
    # Hard-coded to Desktop 10.3; fails if that version is not installed.
    if not pVM.LoadVersion(esriVersion.esriArcGISDesktop, "10.3"):
        return False
    # Get license
    pInit = NewObj(esriSystem.AoInitialize, esriSystem.IAoInitialize)
    # Try product levels from most to least capable.
    ProductList = [esriSystem.esriLicenseProductCodeAdvanced, \
        esriSystem.esriLicenseProductCodeStandard, \
        esriSystem.esriLicenseProductCodeBasic]
    for eProduct in ProductList:
        licenseStatus = pInit.IsProductCodeAvailable(eProduct)
        if licenseStatus != esriSystem.esriLicenseAvailable:
            continue
        licenseStatus = pInit.Initialize(eProduct)
        return (licenseStatus == esriSystem.esriLicenseCheckedOut)
    # No product level was available.
    return False
def GetApp(app="ArcMap"):
    """In a standalone script, retrieves the first app session found.\n\
    app must be 'ArcMap' (default) or 'ArcCatalog'\n\
    Execute GetDesktopModules() first"""
    if not (app == "ArcMap" or app == "ArcCatalog"):
        print "app must be 'ArcMap' or 'ArcCatalog'"
        return None
    import comtypes.gen.esriFramework as esriFramework
    import comtypes.gen.esriArcMapUI as esriArcMapUI
    import comtypes.gen.esriCatalogUI as esriCatalogUI
    # AppROT = running object table of ArcGIS application sessions.
    pAppROT = NewObj(esriFramework.AppROT, esriFramework.IAppROT)
    iCount = pAppROT.Count
    if iCount == 0:
        # No running ArcGIS sessions at all.
        return None
    for i in range(iCount):
        pApp = pAppROT.Item(i)
        # Identify the session type by which UI interface it supports.
        if app == "ArcCatalog":
            if CType(pApp, esriCatalogUI.IGxApplication):
                return pApp
            continue
        if CType(pApp, esriArcMapUI.IMxApplication):
            return pApp
    # No session of the requested type found.
    return None
def GetCurrentApp():
    """Gets an IApplication handle to the current app.

    Must be run inside the app's Python window.
    Execute GetDesktopModules() first.
    """
    import comtypes.gen.esriFramework as esriFramework
    app_ref = NewObj(esriFramework.AppRef, esriFramework.IApplication)
    return app_ref
def Msg(message="Hello world", title="Python ArcObjects"):
    """Open message dialog box with ok button"""
    from ctypes import c_int, WINFUNCTYPE, windll
    from ctypes.wintypes import HWND, LPCSTR, UINT
    # Build a callable for the Win32 MessageBoxA entry point.
    proto = WINFUNCTYPE(c_int, HWND, LPCSTR, LPCSTR, UINT)
    message_box = proto(("MessageBoxA", windll.user32))
    return message_box(0, message, title, 0)
#**** Standalone ****
def Standalone_OpenFileGDB(gdb):
    """Open file geodatabase and return as esri workspace object.

    :param gdb: path to the .gdb folder.
    Returns None (after printing a message) if license init fails.
    """
    GetStandaloneModules()
    if not InitStandalone():
        # License checkout failed.
        print "We've got lumps of it 'round the back..."
        return
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    import comtypes.gen.esriDataSourcesGDB as esriDataSourcesGDB
    #sPath = "c:/apps/Demo/Montgomery_full.gdb"
    sPath = gdb
    pWSF = NewObj(esriDataSourcesGDB.FileGDBWorkspaceFactory, \
        esriGeoDatabase.IWorkspaceFactory)
    pWS = pWSF.OpenFromFile(sPath, 0)
    # Cast to IDataset just to report name/category.
    pDS = CType(pWS, esriGeoDatabase.IDataset)
    print "Workspace name: " + pDS.BrowseName
    print "Workspace category: " + pDS.Category
    return pWS
def Standalone_OpenSDE():
    """demo func, values are hardcoded.

    Opens an SDE (Oracle) workspace using OS authentication and returns it.
    """
    GetStandaloneModules()
    InitStandalone()
    import comtypes.gen.esriSystem as esriSystem
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    import comtypes.gen.esriDataSourcesGDB as esriDataSourcesGDB
    # Connection properties for the demo server.
    pPropSet = NewObj(esriSystem.PropertySet, esriSystem.IPropertySet)
    pPropSet.SetProperty("SERVER", "sunprod1")
    pPropSet.SetProperty("USER", "/")
    pPropSet.SetProperty("INSTANCE", "sde:oracle10g:/;LOCAL=PRODUCTION_TUCSON")
    pPropSet.SetProperty("AUTHENTICATION_MODE", "OSA")
    pPropSet.SetProperty("VERSION", "SDE.DEFAULT")
    pWSF = NewObj(esriDataSourcesGDB.SdeWorkspaceFactory, \
        esriGeoDatabase.IWorkspaceFactory)
    pWS = pWSF.Open(pPropSet, 0)
    # Cast to IDataset just to report name/category.
    pDS = CType(pWS, esriGeoDatabase.IDataset)
    print "Workspace name: " + pDS.BrowseName
    print "Workspace category: " + pDS.Category
    return pWS
def Standalone_QueryDBValues():
    """demo func, values are hardcoded.

    Queries one field of one row from a file geodatabase table and prints
    diagnostics; returns nothing.
    """
    GetStandaloneModules()
    InitStandalone()
    # NOTE(review): imports esriServer under the name esriSystem but never
    # uses it -- looks like a leftover; confirm before cleaning up.
    import comtypes.gen.esriServer as esriSystem
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    import comtypes.gen.esriDataSourcesGDB as esriDataSourcesGDB
    sPath = "c:/apps/Demo/Montgomery_full.gdb"
    sTabName = "Parcels"
    sWhereClause = "parcel_id = 6358"
    sFieldName = "zoning_s"
    pWSF = NewObj(esriDataSourcesGDB.FileGDBWorkspaceFactory, esriGeoDatabase.IWorkspaceFactory)
    pWS = pWSF.OpenFromFile(sPath, 0)
    pFWS = CType(pWS, esriGeoDatabase.IFeatureWorkspace)
    pTab = pFWS.OpenTable(sTabName)
    pQF = NewObj(esriGeoDatabase.QueryFilter, esriGeoDatabase.IQueryFilter)
    pQF.WhereClause = sWhereClause
    # Recycling cursor (True); only the first row is examined.
    pCursor = pTab.Search(pQF, True)
    pRow = pCursor.NextRow()
    if not pRow:
        print "Query returned no rows"
        return
    Val = pRow.Value(pTab.FindField(sFieldName))
    if Val is None:
        print "Null value"
def Standalone_CreateTable():
    """demo func, values are hardcoded.

    Creates a two-field table (OBJECTID + LUMBERJACK) in a file
    geodatabase and inserts one row.
    """
    GetStandaloneModules()
    InitStandalone()
    # NOTE(review): esriServer imported as esriSystem but unused -- leftover.
    import comtypes.gen.esriServer as esriSystem
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    import comtypes.gen.esriDataSourcesGDB as esriDataSourcesGDB
    sWSPath = "c:/apps/Demo/Temp.gdb"
    sTableName = "Test"
    pWSF = NewObj(esriDataSourcesGDB.FileGDBWorkspaceFactory, \
        esriGeoDatabase.IWorkspaceFactory)
    pWS = pWSF.OpenFromFile(sWSPath, 0)
    pFWS = CType(pWS, esriGeoDatabase.IFeatureWorkspace)
    # Build the field collection through the editable interface.
    pOutFields = NewObj(esriGeoDatabase.Fields, esriGeoDatabase.IFields)
    pFieldsEdit = CType(pOutFields, esriGeoDatabase.IFieldsEdit)
    pFieldsEdit._FieldCount = 2
    # Field 0: required OBJECTID.
    pNewField = NewObj(esriGeoDatabase.Field, esriGeoDatabase.IField)
    pFieldEdit = CType(pNewField, esriGeoDatabase.IFieldEdit)
    pFieldEdit._Name = "OBJECTID"
    pFieldEdit._Type = esriGeoDatabase.esriFieldTypeOID
    pFieldsEdit._Field[0] = pNewField
    # Field 1: 50-char string field.
    pNewField = NewObj(esriGeoDatabase.Field, esriGeoDatabase.IField)
    pFieldEdit = CType(pNewField, esriGeoDatabase.IFieldEdit)
    pFieldEdit._Name = "LUMBERJACK"
    pFieldEdit._Type = esriGeoDatabase.esriFieldTypeString
    pFieldEdit._Length = 50
    pFieldsEdit._Field[1] = pNewField
    pOutTable = pFWS.CreateTable(sTableName, pOutFields, \
        None, None, "")
    iField = pOutTable.FindField("LUMBERJACK")
    print "'LUMBERJACK' field index = ", iField
    # Insert a single demo row.
    pRow = pOutTable.CreateRow()
    pRow.Value[iField] = "I sleep all night and I work all day"
    pRow.Store()
def GetModifiedDate(gdb, tableName):
    """Return last modified date stamp (in seconds) for the Geodatabase table
    Courtesy of Micah Babinski
    https://geonet.esri.com/message/453427#453427
    """
    # Setup
    GetStandaloneModules()
    InitStandalone()
    import comtypes.gen.esriSystem as esriSystem
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    import comtypes.gen.esriDataSourcesGDB as esriDataSourcesGDB
    # Open the FGDB
    pWS = Standalone_OpenFileGDB(gdb)
    # Create empty Properties Set
    # NOTE(review): pPropSet is built but never used below -- likely leftover.
    pPropSet = NewObj(esriSystem.PropertySet, esriSystem.IPropertySet)
    pPropSet.SetProperty("database", gdb)
    # Cast the FGDB as IFeatureWorkspace
    pFW = CType(pWS, esriGeoDatabase.IFeatureWorkspace)
    # Open the table
    pTab = pFW.OpenTable(tableName)
    # Cast the table as a IDatasetFileStat
    pDFS = CType(pTab, esriGeoDatabase.IDatasetFileStat)
    # Get the date modified.  The magic 2 presumably selects the
    # "last modification" stat -- confirm against IDatasetFileStat docs.
    return pDFS.StatTime(2)
def GetFileSize(gdb, tableName, featureDataset):
    """Return gdb feature class size in human readable units (KB,MB,GB,TB)
    Courtesy of Micah Babinski
    https://geonet.esri.com/message/520828#520828

    :param featureDataset: "standalone" for a stand-alone table, anything
        else treats tableName as a feature class.
    """
    # Setup
    GetStandaloneModules()
    InitStandalone()
    import comtypes.gen.esriSystem as esriSystem
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    import comtypes.gen.esriDataSourcesGDB as esriDataSourcesGDB
    # Open the FGDB
    pWS = Standalone_OpenFileGDB(gdb)
    # Create empty Properties Set
    # NOTE(review): pPropSet is built but never used below -- likely leftover.
    pPropSet = NewObj(esriSystem.PropertySet, esriSystem.IPropertySet)
    pPropSet.SetProperty("database", gdb)
    # Cast the FGDB as IFeatureWorkspace
    pFW = CType(pWS, esriGeoDatabase.IFeatureWorkspace)
    # Get the info for a stand-alone table
    if featureDataset == "standalone":
        # Open the table
        pTab = pFW.OpenTable(tableName)
        # Cast the table to an IDatasetFileStat object
        pDFS = CType(pTab, esriGeoDatabase.IDatasetFileStat)
        # Bug fix: this branch called convert_bytes(), which is not defined
        # anywhere in this module (NameError at runtime); the feature-class
        # branch below already uses human_readable() for the same purpose.
        return human_readable(pDFS.StatSize)
    else:
        # Open the feature class
        pTab = pFW.OpenFeatureClass(tableName)
        # Cast the table as a IDatasetFileStat
        pDFS = CType(pTab, esriGeoDatabase.IDatasetFileStat)
        # Return the size
        return human_readable(pDFS.StatSize)
# ***************************************************************
# NOTE: The following examples, by default, expect to be run
# within ArcMap and ArcCatalog in the Python window. To run
# them in a standalone session, supply True as the argument.
# ***************************************************************
#**** ArcMap ****
def ArcMap_GetSelectedGeometry(bStandalone=False):
    """Return a copy of the first selected feature's geometry in ArcMap.

    Prints the geometry type; returns None if no app or no selection.
    """
    GetDesktopModules()
    if bStandalone:
        InitStandalone()
        pApp = GetApp()
    else:
        pApp = GetCurrentApp()
    if not pApp:
        print "We found this spoon, sir."
        return
    import comtypes.gen.esriFramework as esriFramework
    import comtypes.gen.esriArcMapUI as esriArcMapUI
    import comtypes.gen.esriSystem as esriSystem
    import comtypes.gen.esriCarto as esriCarto
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    import comtypes.gen.esriGeometry as esriGeometry
    # Get selected feature geometry
    pDoc = pApp.Document
    pMxDoc = CType(pDoc, esriArcMapUI.IMxDocument)
    pMap = pMxDoc.FocusMap
    pFeatSel = pMap.FeatureSelection
    pEnumFeat = CType(pFeatSel, esriGeoDatabase.IEnumFeature)
    pEnumFeat.Reset()
    # Only the first selected feature is examined.
    pFeat = pEnumFeat.Next()
    if not pFeat:
        print "No selection found."
        return
    # ShapeCopy returns a detached clone, safe to use after the cursor.
    pShape = pFeat.ShapeCopy
    eType = pShape.GeometryType
    if eType == esriGeometry.esriGeometryPoint:
        print "Geometry type = Point"
    elif eType == esriGeometry.esriGeometryPolyline:
        print "Geometry type = Line"
    elif eType == esriGeometry.esriGeometryPolygon:
        print "Geometry type = Poly"
    else:
        print "Geometry type = Other"
    return pShape
def ArcMap_AddTextElement(bStandalone=False):
    """Add a red, balloon-callout text element at the center of the focus
    map, select it, refresh the view, and print the element's width.

    In standalone mode COM objects must be created through the app's
    IObjectFactory (pFact) so they live in the app's process; inside the
    app's Python window NewObj() is sufficient.
    """
    GetDesktopModules()
    import comtypes.gen.esriFramework as esriFramework
    if bStandalone:
        InitStandalone()
        pApp = GetApp()
        pFact = CType(pApp, esriFramework.IObjectFactory)
    else:
        pApp = GetCurrentApp()
    import comtypes.gen.esriArcMapUI as esriArcMapUI
    import comtypes.gen.esriSystem as esriSystem
    import comtypes.gen.esriGeometry as esriGeometry
    import comtypes.gen.esriCarto as esriCarto
    import comtypes.gen.esriDisplay as esriDisplay
    import comtypes.gen.stdole as stdole
    # Get midpoint of focus map
    pDoc = pApp.Document
    pMxDoc = CType(pDoc, esriArcMapUI.IMxDocument)
    pMap = pMxDoc.FocusMap
    pAV = CType(pMap, esriCarto.IActiveView)
    pSD = pAV.ScreenDisplay
    pEnv = pAV.Extent
    dX = (pEnv.XMin + pEnv.XMax) / 2
    dY = (pEnv.YMin + pEnv.YMax) / 2
    if bStandalone:
        pUnk = pFact.Create(CLSID(esriGeometry.Point))
        pPt = CType(pUnk, esriGeometry.IPoint)
    else:
        pPt = NewObj(esriGeometry.Point, esriGeometry.IPoint)
    pPt.PutCoords(dX, dY)
    # Create text symbol: red, bold 24pt Arial with a balloon callout.
    if bStandalone:
        pUnk = pFact.Create(CLSID(esriDisplay.RgbColor))
        pColor = CType(pUnk, esriDisplay.IRgbColor)
    else:
        pColor = NewObj(esriDisplay.RgbColor, esriDisplay.IRgbColor)
    pColor.Red = 255
    if bStandalone:
        pUnk = pFact.Create(CLSID(stdole.StdFont))
        pFontDisp = CType(pUnk, stdole.IFontDisp)
    else:
        pFontDisp = NewObj(stdole.StdFont, stdole.IFontDisp)
    pFontDisp.Name = "Arial"
    pFontDisp.Bold = True
    if bStandalone:
        pUnk = pFact.Create(CLSID(esriDisplay.TextSymbol))
        pTextSymbol = CType(pUnk, esriDisplay.ITextSymbol)
    else:
        pTextSymbol = NewObj(esriDisplay.TextSymbol, esriDisplay.ITextSymbol)
    pTextSymbol.Font = pFontDisp
    pTextSymbol.Color = pColor
    pTextSymbol.Size = 24
    if bStandalone:
        pUnk = pFact.Create(CLSID(esriDisplay.BalloonCallout))
        pTextBackground = CType(pUnk, esriDisplay.ITextBackground)
    else:
        pTextBackground = NewObj(esriDisplay.BalloonCallout, esriDisplay.ITextBackground)
    pFormattedTS = CType(pTextSymbol, esriDisplay.IFormattedTextSymbol)
    pFormattedTS.Background = pTextBackground
    # Create text element and add it to map
    if bStandalone:
        pUnk = pFact.Create(CLSID(esriCarto.TextElement))
        pTextElement = CType(pUnk, esriCarto.ITextElement)
    else:
        pTextElement = NewObj(esriCarto.TextElement, esriCarto.ITextElement)
    pTextElement.Symbol = pTextSymbol
    pTextElement.Text = "Wink, wink, nudge, nudge,\nSay no more!"
    pElement = CType(pTextElement, esriCarto.IElement)
    pElement.Geometry = pPt
    pGC = CType(pMap, esriCarto.IGraphicsContainer)
    pGC.AddElement(pElement, 0)
    pGCSel = CType(pMap, esriCarto.IGraphicsContainerSelect)
    pGCSel.SelectElement(pElement)
    # Redraw only graphics + graphic selection, not the whole view.
    iOpt = esriCarto.esriViewGraphics + \
        esriCarto.esriViewGraphicSelection
    pAV.PartialRefresh(iOpt, None, None)
    # Get element width
    iCount = pGCSel.ElementSelectionCount
    pElement = pGCSel.SelectedElement(iCount - 1)
    pEnv = NewObj(esriGeometry.Envelope, esriGeometry.IEnvelope)
    pElement.QueryBounds(pSD, pEnv)
    print "Width = ", pEnv.Width
def ArcMap_GetEditWorkspace(bStandalone=False):
    """Print name/category of the workspace currently being edited in
    ArcMap (via the Editor extension); no-op when no edit session is open.
    """
    GetDesktopModules()
    if bStandalone:
        InitStandalone()
        pApp = GetApp()
    else:
        pApp = GetCurrentApp()
    GetModule("esriEditor.olb")
    import comtypes.gen.esriSystem as esriSystem
    import comtypes.gen.esriEditor as esriEditor
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    # Locate the Editor extension by its CLSID.
    pID = NewObj(esriSystem.UID, esriSystem.IUID)
    pID.Value = CLSID(esriEditor.Editor)
    pExt = pApp.FindExtensionByCLSID(pID)
    pEditor = CType(pExt, esriEditor.IEditor)
    if pEditor.EditState == esriEditor.esriStateEditing:
        pWS = pEditor.EditWorkspace
        pDS = CType(pWS, esriGeoDatabase.IDataset)
        print "Workspace name: " + pDS.BrowseName
        print "Workspace category: " + pDS.Category
    return
def ArcMap_GetSelectedTable(bStandalone=False):
    """Print the name of the table selected in the ArcMap table of
    contents; prints a message and returns None when nothing suitable
    is selected.
    """
    GetDesktopModules()
    if bStandalone:
        InitStandalone()
        pApp = GetApp()
    else:
        pApp = GetCurrentApp()
    import comtypes.gen.esriFramework as esriFramework
    import comtypes.gen.esriArcMapUI as esriArcMapUI
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    pDoc = pApp.Document
    pMxDoc = CType(pDoc, esriArcMapUI.IMxDocument)
    pUnk = pMxDoc.SelectedItem
    if not pUnk:
        print "Nothing selected."
        return
    # CType returns None when the selected item is not a table.
    pTable = CType(pUnk, esriGeoDatabase.ITable)
    if not pTable:
        print "No table selected."
        return
    pDS = CType(pTable, esriGeoDatabase.IDataset)
    print "Selected table: " + pDS.Name
#**** ArcCatalog ****
def ArcCatalog_GetSelectedTable(bStandalone=False):
    """Print the name of the dataset selected in ArcCatalog, provided it
    is a feature class or table; prints a message otherwise.
    """
    GetDesktopModules()
    if bStandalone:
        InitStandalone()
        pApp = GetApp("ArcCatalog")
    else:
        pApp = GetCurrentApp()
    import comtypes.gen.esriFramework as esriFramework
    import comtypes.gen.esriCatalogUI as esriCatalogUI
    import comtypes.gen.esriCatalog as esriCatalog
    import comtypes.gen.esriGeoDatabase as esriGeoDatabase
    pGxApp = CType(pApp, esriCatalogUI.IGxApplication)
    pGxObj = pGxApp.SelectedObject
    if not pGxObj:
        print "Nothing selected."
        return
    # CType returns None when the selection is not a dataset node.
    pGxDS = CType(pGxObj, esriCatalog.IGxDataset)
    if not pGxDS:
        print "No dataset selected."
        return
    eType = pGxDS.Type
    if not (eType == esriGeoDatabase.esriDTFeatureClass or eType == esriGeoDatabase.esriDTTable):
        print "No table selected."
        return
    pDS = pGxDS.Dataset
    pTable = CType(pDS, esriGeoDatabase.ITable)
    print "Selected table: " + pDS.Name
| {
"repo_name": "DougFirErickson/arcplus",
"path": "arcplus/ao/ao.py",
"copies": "1",
"size": "19731",
"license": "mit",
"hash": 5043446295871753000,
"line_mean": 33.9221238938,
"line_max": 97,
"alpha_frac": 0.6930211343,
"autogenerated": false,
"ratio": 3.3278799122954967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4520901046595497,
"avg_score": null,
"num_lines": null
} |
"""Accessing data in a consistent manner"""
__author__ = 'thorwhalen'
import ut.pfile.name as pfile_name
# from ut.khan_utils.encoding import to_unicode_or_bust
from ut.serialize.local import Local
from ut.serialize.s3 import S3
class DataAccessor(object):
    """Uniform facade over the local-disk and S3 serialization back ends.

    Constructor arguments are remembered so the caller can switch between
    LOCAL and S3 storage (use_local()/use_s3()) without re-specifying
    them.  Any unknown attribute access is forwarded as a method call to
    the currently active back end via __getattr__.
    """

    LOCATION_LOCAL = 'LOCAL'
    LOCATION_S3 = 'S3'

    def __init__(self, relative_root=None, mother_root=None, extension=None,
                 force_extension=False, encoding='UTF-8',
                 location=LOCATION_LOCAL, **kwargs):
        # remember these so we can switch between locations without resetting
        self.relative_root = relative_root
        self.extension = extension or ''
        self.force_extension = force_extension or False
        self.encoding = encoding
        self.mother_root = mother_root
        if location == self.LOCATION_LOCAL:
            self.use_local(**kwargs)
        elif location == self.LOCATION_S3:
            self.use_s3(**kwargs)
        else:
            raise AttributeError("Don't know that location (use 'LOCAL' or 'S3')")

    def use_local(self, relative_root=None, mother_root=None, extension=None,
                  force_extension=None, encoding='UTF-8', **kwargs):
        """Switch the accessor to the local-disk back end."""
        self.currently_using = self.LOCATION_LOCAL
        # replace self vars with any new vals passed in by client
        self.relative_root = relative_root or self.relative_root
        self.extension = extension or self.extension
        self.force_extension = force_extension or self.force_extension
        self.encoding = encoding or 'UTF-8'
        # TODO - we need to update Local to accept a mother_root argument
        self.local = Local(relative_root=self.relative_root,
                           extension=self.extension,
                           force_extension=self.force_extension,
                           encoding=self.encoding, **kwargs)
        self.dacc = self.local

    def use_s3(self, relative_root=None, mother_root=None, extension=None,
               force_extension=None, encoding=None, **kwargs):
        """Switch the accessor to the S3 back end."""
        self.currently_using = self.LOCATION_S3
        # replace self vars with any new vals passed in by client
        self.relative_root = relative_root or self.relative_root
        self.extension = extension or self.extension
        self.force_extension = force_extension or self.force_extension
        self.encoding = encoding or self.encoding
        self.mother_root = mother_root or self.mother_root
        if self.mother_root is None:
            # Derive the bucket (mother_root) from the first path component
            # of relative_root, then strip it from the relative path.
            if self.relative_root.startswith('/'):
                self.relative_root = self.relative_root[1:]
            self.mother_root = pfile_name.get_highest_level_folder(self.relative_root)
            self.relative_root = self.relative_root.replace(self.mother_root, '', 1)
        self.s3 = S3(self.mother_root, self.relative_root, self.extension,
                     self.force_extension, self.encoding, **kwargs)
        self.dacc = self.s3

    def load_excel(self, filename):
        """Load an excel file through the active back end.

        Bug fix: the original body was ``data = self.dacc.loado`` -- it
        bound the loader without calling it and implicitly returned None.
        """
        # NOTE(review): assumes the back end's generic object loader can
        # handle excel content -- confirm against Local/S3 implementations.
        return self.dacc.loado(filename)

    def __getattr__(self, name):
        """Forward unknown attributes as method calls on the active back end.

        :raises AttributeError: if the back end has no such attribute.
        """
        def _missing(*args, **kwargs):
            if self.currently_using == self.LOCATION_LOCAL:
                target = self.local
            elif self.currently_using == self.LOCATION_S3:
                target = self.s3
            else:
                raise RuntimeError("Need to implement method missing for {}.".format(self.currently_using))
            if name in dir(target):
                return getattr(target, name)(*args, **kwargs)
            else:
                raise AttributeError("{} does not exist in {}".format(name, target))
        return _missing
| {
"repo_name": "thorwhalen/ut",
"path": "serialize/data_accessor.py",
"copies": "1",
"size": "5471",
"license": "mit",
"hash": 4638621652976052000,
"line_mean": 33.6265822785,
"line_max": 158,
"alpha_frac": 0.5850849936,
"autogenerated": false,
"ratio": 3.839298245614035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4924383239214035,
"avg_score": null,
"num_lines": null
} |
# Accessing test statistics
#
# Examples of using The Grinder statistics API with standard
# statistics.
from net.grinder.script.Grinder import grinder
from net.grinder.script import Test
from net.grinder.plugin.http import HTTPRequest
class TestRunner:
    """Grinder worker-thread entry point demonstrating the statistics API."""

    def __call__(self):
        request = HTTPRequest(url = "http://localhost:7001")
        Test(1, "Basic request").record(request)
        # Example 1. You can get the time of the last test as follows.
        result = request.GET("index.html")
        grinder.logger.info("The last test took %d milliseconds" %
                            grinder.statistics.forLastTest.time)
        # Example 2. Normally test results are reported automatically
        # when the test returns. If you want to alter the statistics
        # after a test has completed, you must set delayReports = 1 to
        # delay the reporting before performing the test. This only
        # affects the current worker thread.
        grinder.statistics.delayReports = 1
        result = request.GET("index.html")
        if grinder.statistics.forLastTest.time > 5:
            # We set success = 0 to mark the test as a failure. The test
            # time will be reported to the data log, but not included
            # in the aggregate statistics sent to the console or the
            # summary table.
            grinder.statistics.forLastTest.success = 0
        # With delayReports = 1 you can call report() to explicitly.
        grinder.statistics.report()
        # You can also turn the automatic reporting back on.
        grinder.statistics.delayReports = 0
        # Example 3.
        # getForCurrentTest() accesses statistics for the current test.
        # getForLastTest() accesses statistics for the last completed test.
        def page(self):
            # Simulates one "page": several resource requests recorded
            # under a single composite Test.
            resourceRequest =HTTPRequest(url = "http://localhost:7001")
            Test(2, "Request resource").record(resourceRequest)
            resourceRequest.GET("index.html");
            resourceRequest.GET("foo.css");
            grinder.logger.info("GET foo.css returned a %d byte body" %
                                grinder.statistics.forLastTest.getLong(
                                    "httpplugin.responseLength"))
            grinder.logger.info("Page has taken %d ms so far" %
                                grinder.statistics.forCurrentTest.time)
            if grinder.statistics.forLastTest.time > 10:
                grinder.statistics.forCurrentTest.success = 0
            resourceRequest.GET("image.gif");
        # Instrument the page function itself as Test 3, then run it.
        instrumentedPage = page
        Test(3, "Page").record(instrumentedPage)
        instrumentedPage(self)
| {
"repo_name": "NineWoranop/loadtesting-kpi",
"path": "loadtesting/ThirdPartyTools/grinder-3.11/examples/statistics.py",
"copies": "1",
"size": "2669",
"license": "apache-2.0",
"hash": -5700283163081334000,
"line_mean": 35.5616438356,
"line_max": 75,
"alpha_frac": 0.6286998876,
"autogenerated": false,
"ratio": 4.4706867671691795,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01703791752590045,
"num_lines": 73
} |
"""Accessing the files."""
import io
import mimetypes
import os
import zipfile
from enum import Enum
from flask import Blueprint, current_app, render_template, send_file, Response
from sacredboard.app.data import NotFoundError, DataStorage
files = Blueprint("files", __name__)
class _FileType(Enum):
ARTIFACT = 1
SOURCE = 2
# Filename suffix used when naming the downloadable ZIP for each file type.
_filetype_suffices = {
    _FileType.ARTIFACT: "artifact",
    _FileType.SOURCE: "source",
}
def _get_binary_info(binary: bytes):
hex_data = ""
for i in range(0, 10):
if i > 0:
hex_data += " "
hex_data += hex(binary[i])
hex_data += " ..."
return "Binary data\nLength: {}\nFirst 10 bytes: {}".format(len(binary), hex_data)
def get_file(file_id: str, download):
    """
    Get a specific file from GridFS.

    Returns a binary stream response (download=True) or an HTML preview,
    or HTTP 404 if not found.
    """
    storage = current_app.config["data"]  # type: DataStorage
    file_dao = storage.get_files_dao()
    file_fp, filename, upload_date = file_dao.get(file_id)
    if not download:
        raw = file_fp.read()
        try:
            text = raw.decode('utf-8')
        except UnicodeDecodeError:
            # not decodable as utf-8
            text = _get_binary_info(raw)
        page = render_template("api/file_view.html", content=text)
        file_fp.close()
        return Response(page)
    mime = mimetypes.guess_type(filename)[0]
    if mime is None:
        # unknown type
        mime = "binary/octet-stream"
    basename = os.path.basename(filename)
    return send_file(file_fp, mimetype=mime, attachment_filename=basename,
                     as_attachment=True)
def get_files_zip(run_id: int, filetype: _FileType):
    """Send all artifacts or sources of a run as ZIP.

    The archive is built entirely in memory and streamed as an
    attachment named ``run<id>_<artifact|source>.zip``.
    """
    data = current_app.config["data"]
    dao_runs = data.get_run_dao()
    dao_files = data.get_files_dao()
    run = dao_runs.get(run_id)
    if filetype == _FileType.ARTIFACT:
        target_files = run['artifacts']
    elif filetype == _FileType.SOURCE:
        target_files = run['experiment']['sources']
    else:
        raise Exception("Unknown file type: %s" % filetype)
    memory_file = io.BytesIO()
    with zipfile.ZipFile(memory_file, 'w') as zf:
        for f in target_files:
            # source and artifact files use a different data structure:
            # artifacts carry a 'file_id' key; sources are presumably
            # (name, file_id) pairs, hence f[1] -- confirm against the
            # sacred run schema.
            file_id = f['file_id'] if 'file_id' in f else f[1]
            file, filename, upload_date = dao_files.get(file_id)
            # Preserve the original upload timestamp in the ZIP entry.
            data = zipfile.ZipInfo(filename, date_time=upload_date.timetuple())
            data.compress_type = zipfile.ZIP_DEFLATED
            zf.writestr(data, file.read())
    # Rewind so send_file streams from the start of the buffer.
    memory_file.seek(0)
    fn_suffix = _filetype_suffices[filetype]
    return send_file(memory_file, attachment_filename='run{}_{}.zip'.format(run_id, fn_suffix), as_attachment=True)
@files.route("/api/file/<string:file_id>")
def api_file(file_id):
    """Download a file (download=True forces an attachment response)."""
    return get_file(file_id, True)
@files.route("/api/fileview/<string:file_id>")
def api_fileview(file_id):
    """View a file inline (rendered via the file_view template)."""
    return get_file(file_id, False)
@files.route("/api/artifacts/<int:run_id>")
def api_artifacts(run_id):
    """Download all artifacts of a run as ZIP."""
    return get_files_zip(run_id, _FileType.ARTIFACT)
@files.route("/api/sources/<int:run_id>")
def api_sources(run_id):
    """Download all sources of a run as ZIP."""
    return get_files_zip(run_id, _FileType.SOURCE)
@files.errorhandler(NotFoundError)
def handle_not_found_error(e):
    """Return HTTP 404 when a requested file or run is not found."""
    return "Couldn't find resource:\n%s" % e, 404
def initialize(app, app_config):
    """Register the module in Flask."""
    # app_config is unused here -- presumably kept so every webapi module
    # shares the same initialize(app, app_config) signature; confirm.
    app.register_blueprint(files)
| {
"repo_name": "chovanecm/sacredboard",
"path": "sacredboard/app/webapi/files.py",
"copies": "1",
"size": "3711",
"license": "mit",
"hash": 8379502704148305000,
"line_mean": 27.9921875,
"line_max": 115,
"alpha_frac": 0.6308272703,
"autogenerated": false,
"ratio": 3.4682242990654206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45990515693654205,
"avg_score": null,
"num_lines": null
} |
# Amazon Product Advertising API credentials (redacted placeholders).
ACCESS_KEY_ID = "####"
SECRET_KEY = "#+##/##"
ASSOC_TAG = "##-##"
import bottlenose
from lxml import objectify
from amazon.api import AmazonAPI
import amazon_scraper
import json
from pprint import pprint
from textblob import TextBlob
from nltk import sent_tokenize

# NOTE: this is a Python 2 script (print statements, raw_input).
amazon = AmazonAPI(ACCESS_KEY_ID, SECRET_KEY, ASSOC_TAG)
# Product search term typed by the user on stdin.
search_item=raw_input()
#search_item='iphone 6'
products = amazon.search_n(5, Keywords=search_item, SearchIndex='All')
#products=products[0:10]
asinlist=[]
title_list=[]
print("Scanning these products...obtaining results")
# Collect ASINs and titles for the matching products.
for product in products:
    asinlist.append(product.asin)
    print product.title
    title_list.append(product.title)
#amazon.call(asinlist[0])
# Scrape reviews for all ASINs; amazon_scraper writes them to data.json.
amazon_scraper.ReadAsin(asinlist)
data=open('data.json','r')
reviews = json.loads(data.read())
data.close()
#pprint(reviews)
#a=reviews[1]['reviews'][0]
#print a['review_text']
review_list=[]
a=reviews[0]['reviews'][0]
#print reviews[4]['reviews'][4]['review_text']
count =0
# Flatten the per-product review lists into one list of review texts.
for i in range(5):
    for j in range(len(reviews[i]['reviews'])):
        #print count,reviews[i]['reviews'][j]['review_text']
        review_list.append(reviews[i]['reviews'][j]['review_text'])
        count=count+1
print "\n"*3
n=0
review_count=0
total=[]
try:
    # Score up to 8 reviews per product with TextBlob sentence polarity.
    # NOTE(review): the index arithmetic (index=7*k+i with i in range(8))
    # assumes a fixed number of reviews per product; the broad except
    # below silently ends processing on the resulting IndexError.
    for k in range(len(title_list)):
        print "\n"*3
        print title_list[k]
        print "\n"*2
        for i in range(8):
            index=7*k+i
            r=review_list[index]
            review=sent_tokenize(review_list[index])
            pos=[]
            count=0
            for line in review:
                line=TextBlob(line)
                count=count+1
                pos.append(line.sentiment.polarity)
            # Mean sentiment polarity over this review's sentences.
            polar_mean=0
            for j in pos:
                polar_mean=polar_mean+j
            polar_mean=polar_mean/count
            print r
            print ("Positivity for review"+ "= " + str(polar_mean*100) +"%"+"\n")
            total.append(polar_mean*100)
            n=n+1
    # Average positivity across every review scored so far.
    overall_score=0
    for i in total:
        overall_score=overall_score+i
    overall_score=overall_score/(n)
    print("Overall user satisfaction for this product: "+ str(overall_score) +"%")
except Exception as e:
    print("That's all folks!!!")
| {
"repo_name": "PramodShenoy/AmazoNLP",
"path": "bottle.py",
"copies": "1",
"size": "1978",
"license": "mit",
"hash": -2395205723540919300,
"line_mean": 23.1219512195,
"line_max": 80,
"alpha_frac": 0.6905965622,
"autogenerated": false,
"ratio": 2.758716875871688,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8698954786460371,
"avg_score": 0.0500717303222634,
"num_lines": 82
} |
"""Access keys and certificates stored in database"""
from pisces.spkilib import sexp, spki, database
from pisces import pwcrypt
import os
import string
import cPickle
import types
def getPrincipal(obj):
    """Return the principal for obj; a Hash object is its own principal."""
    return obj if isinstance(obj, spki.Hash) else obj.getPrincipal()
def checkType(pos, obj, *types):
    """Check the type and raise an exception if it doesn't match"""
    # Returns 1 as soon as obj matches any of the accepted types.
    for type in types:
        if isinstance(obj, type):
            return 1
    # No match: build a readable TypeError naming arg position, the
    # accepted type names, and the offending object's type.
    try:
        name = obj.__class__.__name__
    except AttributeError:
        name = `obj`
    valid = map(lambda x:x.__name__, types)
    valid = string.join(valid, ', ')
    # NOTE: Python 2 syntax throughout (backquotes, string.join,
    # statement-form raise).
    raise TypeError, "arg %d: expect %s, got %s" % (pos, valid, name)
class KeyStore:
    """High-level interface for managing stored keys and certs"""

    def __init__(self, path):
        """Open (or create) the key, private-key and cert databases under path."""
        self.path = path
        keyPath = os.path.join(path, 'keys')
        self.keys = database.PrincipalDatabase(keyPath, create=1)
        privPath = os.path.join(path, 'private')
        self.private = database.PrivateKeyDatabase(privPath, create=1)
        certPath = os.path.join(path, 'certs')
        self.certs = database.CertificateDatabase(certPath, create=1)
        self.default = None
        # dirty flag: set by every mutating method, cleared by save()
        self.needSave = 0

    def close(self):
        # Flush pending changes before shutting down.
        if self.needSave:
            self.save()

    def save(self):
        """Rewrite all three databases to disk and clear the dirty flag."""
        self.keys.rewrite()
        self.private.rewrite()
        self.certs.rewrite()
        self.needSave = 0

    def setDefaultKey(self, hash):
        """Mark the private key identified by hash as the default key."""
        self.private.setDefault(hash)
        self.needSave = 1

    def getDefaultKey(self):
        """Return the hash of the default private key."""
        return self.private.getDefault()

    def addPrivateKey(self, key, pub, pword, bogus=None):
        """Add private key with corresponding public key using password

        The arguments are the private key, its corresponding public
        key (or hash), and a password to use to encrypt it.
        """
        checkType(1, key, spki.PrivateKey)
        checkType(2, pub, spki.PublicKey, spki.Hash)
        # Only the password-encrypted form is stored.
        enc = spki.encryptWithPassword(key, pword, bogus=bogus)
        pub = getPrincipal(pub)
        self.private.add(pub, enc)
        self.needSave = 1

    def addPublicKey(self, key):
        """Store a public key."""
        checkType(1, key, spki.PublicKey)
        # NOTE(review): this computed hash is unused -- confirm whether the
        # database derives the principal itself.
        hash = key.getPrincipal()
        self.keys.add(key)
        self.needSave = 1

    def addCert(self, cert):
        """Store a certificate (or a sequence containing one)."""
        checkType(1, cert, spki.Sequence, spki.Cert)
        self.certs.add(cert)
        self.needSave = 1

    def addName(self, cert):
        # NOTE(review): identical to addCert; presumably kept as a
        # separate entry point for name certificates -- confirm.
        checkType(1, cert, spki.Sequence, spki.Cert)
        self.certs.add(cert)
        self.needSave = 1

    def lookupKey(self, hash):
        """Return the stored public key matching hash."""
        return self.keys.lookup(hash)

    def lookupPrivateKey(self, pub):
        """Return the (encrypted) private key for a public key or hash."""
        checkType(1, pub, spki.PublicKey, spki.Hash)
        pub = getPrincipal(pub)
        return self.private.lookup(pub)

    def lookupName(self, name, namespace=None):
        """Return certs for specified name

        The name can either be a SPKI name object or a simple string.
        If it is a string, the key for the namespace must be passed as
        the second argument.
        """
        if type(name) == types.StringType:
            checkType(2, namespace, spki.PublicKey, spki.Hash)
            if spki.isa(namespace, spki.PublicKey):
                p = namespace.getPrincipal()
            else:
                p = namespace
            name = spki.Name(p, name)
        checkType(1, name, spki.Name)
        certs = self.lookupCertByIssuer(name)
        names = []
        # Keep only name certificates; one matching cert inside a
        # sequence qualifies the whole sequence.
        for cert in certs:
            if isinstance(cert, spki.Sequence):
                for elt in cert:
                    if isinstance(elt, spki.Cert):
                        if elt.isNameCert():
                            names.append(cert)
                            break
            elif cert.isNameCert():
                names.append(cert)
        return names

    def lookupCertBySubject(self, subj):
        return self.certs.lookupBySubject(subj)

    def lookupCertByIssuer(self, iss):
        return self.certs.lookupByIssuer(iss)

    def listPublicKeys(self):
        return self.keys.getObjects()

    def listPrivateKeys(self):
        return self.private.listPublicKeys()

    def listCerts(self):
        return self.certs.getObjects()
class MultiKeyStore:
    """Wrapper around multiple KeyStore objects

    Intended to support the use of local and remote KeyStore instances
    at the same time. A user might use a local store for private keys
    and a second shared store of public keys and certs.
    """

    def __init__(self, readers=None, writers=None, both=None,
                 private=None):
        """Create a new object that fronts several backend KeyStores

        Each argument should be a sequence of KeyStore objects.
        Keyword argument invocation is recommended.

        Arguments:
        readers -- KeyStores that should only be used for lookups
        writers -- KeyStores that should only be used for adds
        both -- KeyStores that should be used for lookups and adds
        private -- KeyStores that should store private keys

        Private keys are only stored in keystores specified in the
        private argument. A private KeyStore is implicitly both.
        """
        self.readers = readers and list(readers) or []
        self.writers = writers and list(writers) or []
        for ks in both or []:
            self.readers.append(ks)
            self.writers.append(ks)
        # private stores are implicitly readable and writable
        self.private = private and list(private) or []
        for ks in self.private:
            self.readers.append(ks)
            self.writers.append(ks)
        # remember which writers support save() so save() can fan out
        self.saveWriters = []
        for ks in self.writers:
            if hasattr(ks, 'save'):
                self.saveWriters.append(ks)

    def addPrivateKey(self, key, pub, pword):
        # XXX is this a good invocation mechanism? by deferring
        # encryption to the underlying KeyStore, we run the risk of
        # passing both key and password around longer than necessary.
        for ks in self.private:
            ks.addPrivateKey(key, pub, pword)

    def setDefaultKey(self, hash):
        """Set hash as default key in every private store."""
        for ks in self.private:
            ks.setDefaultKey(hash)

    def getDefaultKey(self):
        # XXX does it make sense to have more than one private; if so,
        # do we simply assume that the all have the same default key
        ks = self.private[0]
        return ks.getDefaultKey()

    def addPublicKey(self, key):
        """Add the public key to every writable store."""
        for ks in self.writers:
            ks.addPublicKey(key)

    def lookupName(self, name, namespace=None):
        """Collect name certs from every readable store."""
        names = []
        for ks in self.readers:
            r = ks.lookupName(name, namespace)
            if r:
                names = names + r
        return names

    def lookupCertByIssuer(self, hash):
        """Collect certs issued by hash from every readable store."""
        certs = []
        for ks in self.readers:
            r = ks.lookupCertByIssuer(hash)
            if r:
                certs = certs + r
        return certs

    def listPublicKeys(self):
        """Concatenate the public keys of all readable stores."""
        keys = []
        for ks in self.readers:
            r = ks.listPublicKeys()
            if r:
                keys = keys + r
        return keys

    def listPrivateKeys(self):
        """Concatenate the private-key listings of all readable stores."""
        keys = []
        for ks in self.readers:
            r = ks.listPrivateKeys()
            if r:
                keys = keys + r
        return keys

    def listCerts(self):
        """Concatenate the certs of all readable stores."""
        certs = []
        for ks in self.readers:
            c = ks.listCerts()
            if c:
                certs = certs + c
        return certs

    def save(self):
        # Only writers that actually implement save() are flushed.
        for ks in self.saveWriters:
            ks.save()
| {
"repo_name": "danieljohnlewis/pisces",
"path": "pisces/spkilib/keystore.py",
"copies": "1",
"size": "7582",
"license": "mit",
"hash": -8149377914887268000,
"line_mean": 30.4605809129,
"line_max": 71,
"alpha_frac": 0.5911369032,
"autogenerated": false,
"ratio": 4.03083466241361,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.512197156561361,
"avg_score": null,
"num_lines": null
} |
"""Access_key types file."""
from pyramid.view import view_config
from pyramid.security import (
Allow,
Deny,
Authenticated,
Everyone,
)
from pyramid.settings import asbool
from .base import (
Item,
DELETED,
ONLY_ADMIN_VIEW,
)
from ..authentication import (
generate_password,
generate_user,
CRYPT_CONTEXT,
)
from snovault import (
collection,
load_schema,
)
from snovault.crud_views import (
collection_add,
item_edit,
)
from snovault.validators import (
validate_item_content_post,
)
from snovault.util import debug_log
@collection(
    name='access-keys',
    unique_key='access_key:access_key_id',
    properties={
        'title': 'Access keys',
        'description': 'Programmatic access keys',
    },
    acl=[
        # Any authenticated user may create keys; only admin-ish
        # principals may list the collection.
        (Allow, Authenticated, 'add'),
        (Allow, 'group.admin', 'list'),
        (Allow, 'group.read-only-admin', 'list'),
        (Allow, 'remoteuser.INDEXER', 'list'),
        (Allow, 'remoteuser.EMBED', 'list'),
        (Deny, Everyone, 'list'),
    ])
class AccessKey(Item):
    """Programmatic access key item."""

    item_type = 'access_key'
    schema = load_schema('encoded:schemas/access_key.json')
    name_key = 'access_key_id'
    embedded_list = []

    # Per-status ACLs: the owning user may view/edit current keys;
    # deleted keys fall back to the DELETED acl.
    STATUS_ACL = {
        'current': [(Allow, 'role.owner', ['view', 'edit'])] + ONLY_ADMIN_VIEW,
        'deleted': DELETED,
    }

    def __ac_local_roles__(self):
        """Grant the owning user the 'role.owner' local role."""
        owner = 'userid.%s' % self.properties['user']
        return {owner: 'role.owner'}

    def __json__(self, request):
        """Serialize the item without the secret access key hash."""
        # never expose the stored secret hash over the API
        properties = super(AccessKey, self).__json__(request)
        del properties['secret_access_key_hash']
        return properties

    def update(self, properties, sheets=None):
        """Update properties, preserving the stored secret hash on PUTs."""
        # make sure PUTs preserve the secret access key hash
        if 'secret_access_key_hash' not in properties:
            new_properties = self.properties.copy()
            new_properties.update(properties)
            properties = new_properties
        self._update(properties, sheets)

    class Collection(Item.Collection):
        pass
# access keys have view permissions for update so readonly admin and the like
# can create access keys to download files.
@view_config(context=AccessKey.Collection, request_method='POST',
             permission='add',
             validators=[validate_item_content_post])
@debug_log
def access_key_add(context, request):
    """Create an access key, filling in id, user and secret defaults.

    Returns the creation result plus the plaintext secret (only when it
    was generated here) so the caller can record it once.
    """
    crypt_context = request.registry[CRYPT_CONTEXT]

    if 'access_key_id' not in request.validated:
        request.validated['access_key_id'] = generate_user()

    if 'user' not in request.validated:
        # derive the owner from the authenticated 'userid.*' principal;
        # the single-element unpack asserts there is exactly one
        request.validated['user'], = [
            principal.split('.', 1)[1]
            for principal in request.effective_principals
            if principal.startswith('userid.')
        ]

    password = None
    if 'secret_access_key_hash' not in request.validated:
        password = generate_password()
        # only the hash is persisted, never the plaintext
        request.validated['secret_access_key_hash'] = crypt_context.encrypt(password)

    result = collection_add(context, request)

    if password is None:
        result['secret_access_key'] = None
    else:
        result['secret_access_key'] = password

    result['access_key_id'] = request.validated['access_key_id']
    result['description'] = request.validated.get('description', "")
    return result
@view_config(name='reset-secret', context=AccessKey,
             permission='add',
             request_method='POST', subpath_segments=0)
@debug_log
def access_key_reset_secret(context, request):
    """Generate a new secret for an existing key and return it once."""
    request.validated = context.properties.copy()
    crypt_context = request.registry[CRYPT_CONTEXT]
    password = generate_password()
    # store only the hash; the plaintext goes back to the caller once
    new_hash = crypt_context.encrypt(password)
    request.validated['secret_access_key_hash'] = new_hash
    result = item_edit(context, request, render=False)
    result['access_key_id'] = request.validated['access_key_id']
    result['secret_access_key'] = password
    return result
@view_config(context=AccessKey, permission='view_raw', request_method='GET',
             name='raw')
@debug_log
def access_key_view_raw(context, request):
    """Return raw item properties, stripped of the secret hash.

    The 'upgrade' query parameter (default true) controls whether the
    properties are run through the schema upgrader first.
    """
    if asbool(request.params.get('upgrade', True)):
        properties = context.upgrade_properties()
    else:
        properties = context.properties.copy()
    del properties['secret_access_key_hash']
    return properties
| {
"repo_name": "4dn-dcic/fourfront",
"path": "src/encoded/types/access_key.py",
"copies": "2",
"size": "4511",
"license": "mit",
"hash": 4536885216042693600,
"line_mean": 28.8741721854,
"line_max": 85,
"alpha_frac": 0.6360008867,
"autogenerated": false,
"ratio": 3.8654670094258785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5501467896125879,
"avg_score": null,
"num_lines": null
} |
#accessLogProcessing module
import argparse
import pandas as pd
import re
def get_command_line_arguments(argv=None):
    """Parse the command line for this tool.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]`` (generalization -- enables testing without
        touching the real command line; existing callers are unaffected).
    :return: the parsed argparse Namespace with ``input_log_files``.
    """
    parser = argparse.ArgumentParser(description='Parse Jive\'s Apache Access logs, and print out aggregated data from them.')
    parser.add_argument('input_log_files', metavar='files', nargs='+', action='store', default=[],
                        help='List of Apache access log files to parse')
    return parser.parse_args(argv)
def read_apache_log_to_dataframe(opened_file_to_read):
    """Parse an opened Apache access log into a pandas DataFrame.

    Each line that matches the expected access-log format becomes one
    row; non-matching lines are reported and skipped.

    Apache Log Format: "%{JiveClientIP}i %l %u %t \"%r\" %>s %b %T %k
    \"%{Referer}i\" \"%{User-Agent}i\" \"%{Content-Type}o\" %{JSESSIONID}C"
    The %r request is split into method, URI and HTTP version. The time
    to serve the request claims to be milliseconds, but for some Jive
    versions it is actually whole seconds.

    :param opened_file_to_read: an open, readable text file object.
    :return: DataFrame with one column per parsed field (all strings).
    """
    access_log_regex = r'(\S+) (\S+) (\S+) \[(\S+ \S+)\] "(\S+) (\S+) (\S+)" (\S+) (\S+) (\S+) (\S+) (".*?") (".*?") (".*?") (\S+)'
    compiled_regex = re.compile(access_log_regex)
    # Column names in the same order as the regex capture groups.
    field_names = ('ip_address', 'remote_username', 'remote_user',
                   'request_received_timestamp', 'http_request_method',
                   'http_request_uri', 'http_request_version', 'http_status',
                   'response_size_bytes', 'time_to_serve_request_milliseconds',
                   'keep_alive_requests', 'referer', 'user_agent',
                   'content_type', 'jsession_id')
    file_contents = []
    # BUG FIX: iterate over the function argument; the original read from
    # a global 'f', so the parameter was silently ignored.
    for line in opened_file_to_read:
        regex_match = compiled_regex.search(line)
        if regex_match is None:
            # Preserve the original diagnostics for unparseable lines.
            print('Attribute Error for:')
            print(line)
            continue
        file_contents.append(dict(zip(field_names, regex_match.groups())))
    return pd.DataFrame(file_contents)
def top_10_user_agents(data_frame):
    """Return request counts for the 10 most frequent user agents."""
    per_agent = data_frame['user_agent'].value_counts()
    return per_agent.head(10)
def top_10_ip_addresses(data_frame):
    """Return request counts for the 10 busiest client IP addresses."""
    per_ip = data_frame['ip_address'].value_counts()
    return per_ip.head(10)
def top_10_request_URIs(data_frame):
    """Return request counts for the 10 most requested URIs."""
    per_uri = data_frame['http_request_uri'].value_counts()
    return per_uri.head(10)
def top_10_referers(data_frame):
    """Return request counts for the 10 most common referers."""
    per_referer = data_frame['referer'].value_counts()
    return per_referer.head(10)
def top_10_slowest_requests(data_frame):
    """Return the 10 slowest requests with their context columns.

    Columns: request response time, request URI, HTTP status, response
    size, content type, timestamp, IP address.

    BUG FIX: ``DataFrame.sort(columns=...)`` was deprecated and then
    removed from pandas; ``sort_values(by=...)`` is the replacement.
    Note the fields are strings, so ordering is lexicographic, as before.
    """
    columns = ['time_to_serve_request_milliseconds', 'http_request_uri',
               'http_status', 'response_size_bytes', 'content_type',
               'request_received_timestamp', 'ip_address']
    ordered = data_frame[columns].sort_values(
        by='time_to_serve_request_milliseconds', ascending=False)
    return ordered.head(10)
def top_10_largest_response_sizes(data_frame):
    """Return the 10 largest responses with their context columns.

    Columns: response size, request URI, HTTP status, response time,
    content type, timestamp, IP address.

    BUG FIX: ``DataFrame.sort(columns=...)`` was deprecated and then
    removed from pandas; ``sort_values(by=...)`` is the replacement.
    Note the fields are strings, so ordering is lexicographic, as before.
    """
    columns = ['response_size_bytes', 'http_request_uri', 'http_status',
               'time_to_serve_request_milliseconds', 'content_type',
               'request_received_timestamp', 'ip_address']
    ordered = data_frame[columns].sort_values(
        by='response_size_bytes', ascending=False)
    return ordered.head(10)
def counts_by_status_code(data_frame):
    """Return the total request count for each HTTP status code."""
    status_series = data_frame['http_status']
    return status_series.value_counts()
def print_aggregates(data_frame):
    """Print every aggregate report for the parsed access log data.

    Sections appear in a fixed order, each followed by a blank line
    except the last one.
    """
    sections = [
        ('Top 10 User Agents\n', top_10_user_agents),
        ('Top 10 IP Addresses\n', top_10_ip_addresses),
        ('Top 10 Request URIs\n', top_10_request_URIs),
        ('Top 10 Referers\n', top_10_referers),
        ('Status code counts', counts_by_status_code),
        ('Top 10 Slowest Requests', top_10_slowest_requests),
        ('Top 10 Largest Response Sizes', top_10_largest_response_sizes),
    ]
    for position, (title, aggregate) in enumerate(sections):
        print(title)
        print(aggregate(data_frame))
        if position < len(sections) - 1:
            print()
    return
if __name__ == "__main__":
    # Parse each log file named on the command line and print its reports;
    # missing files are reported and skipped.
    args = get_command_line_arguments()
    for file in args.input_log_files:
        try:
            with open(file) as f:
                print('File: ' + file +'\n\n')
                data_frame = read_apache_log_to_dataframe(f)
                print_aggregates(data_frame)
        except FileNotFoundError:
            print('File not found: ' + file +'\n')
| {
"repo_name": "danielharada/Apache-Access-Log-Tools",
"path": "apacheAccessLogAggregator.py",
"copies": "1",
"size": "5540",
"license": "mit",
"hash": 658797506835919200,
"line_mean": 40.3432835821,
"line_max": 256,
"alpha_frac": 0.6332129964,
"autogenerated": false,
"ratio": 3.5467349551856593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4679947951585659,
"avg_score": null,
"num_lines": null
} |
"""Access log tailer and associated helpers."""
import logging
import os
import re
import time
class SimpleTailer(object):
    """A simple file tailer supporting basic log rotation detection."""

    def __init__(self,
                 filename,
                 rotation_check_idle_time_s=30,
                 rotation_check_period_s=10):
        """Create the tailer.

        Args:
            filename: filename of the log to read from.
            rotation_check_idle_time_s: seconds of idle time (no new log
              lines) before rotation checks begin (default: 30).
            rotation_check_period_s: minimum seconds between successive
              rotation checks (default: 10).
        """
        self._filename = filename
        self._rotation_check_idle_time_s = rotation_check_idle_time_s
        self._rotation_check_period_s = rotation_check_period_s
        # Open lazily; track the open file's inode to detect rotation.
        self._flog = None
        self._flog_ino = None
        self._last_read_time = None
        self._last_rotation_check_time = None

    def _maybe_rotate(self):
        """Re-open the log if its inode changed, at most once per period."""
        current_time = time.time()
        previous_check = self._last_rotation_check_time
        if (previous_check is not None
                and current_time < previous_check + self._rotation_check_period_s):
            # A full check period has not elapsed yet.
            return
        self._last_rotation_check_time = current_time
        try:
            fresh_ino = os.stat(self._filename).st_ino
        except OSError:
            # The log writer may not have created the new file yet.
            return
        if fresh_ino == self._flog_ino:
            return
        logging.info('Detected file rotation: reopening %s',
                     self._filename)
        self._flog.close()
        self._flog = open(self._filename, 'r')
        self._flog_ino = os.fstat(self._flog.fileno()).st_ino

    def get_lines(self):
        """Return the latest lines in the log file (possibly none).

        Returns:
            List of new log lines (possibly empty), or None if the log
            file cannot be opened.
        """
        if self._flog is None:
            try:
                handle = open(self._filename, 'r')
            except IOError as err:
                logging.warning('Could not open log file: %s', err)
                return None
            self._flog = handle
            self._flog_ino = os.fstat(handle.fileno()).st_ino
        fresh_lines = self._flog.readlines()
        if fresh_lines:
            self._last_read_time = time.time()
            return fresh_lines
        if self._last_read_time is None:
            # Nothing ever read: start rotation checks immediately.
            self._maybe_rotate()
        elif time.time() - self._last_read_time > self._rotation_check_idle_time_s:
            # Data seen before but idle too long: check for rotation.
            self._maybe_rotate()
        return fresh_lines
class NginxAccessLogTailer(object):
    """Tails the provided access log, passing parsed log lines to the consumer."""

    # Partial regex for a full nginx access log line.
    # NOTE: only GET/POST requests over HTTP/1.1 will match; other
    # methods or protocol versions are reported as unparseable.
    NGINX_ACCESS_LOG_RE = (
        r'(?P<ipaddress>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) - - '
        r'\[(?P<datetime>\d{2}\/[A-Z,a-z]{3}\/\d{4}:\d{2}:\d{2}:\d{2} '
        r'(\+|\-)\d{4})\] ((\"(GET|POST) )(?P<url>.+) (HTTP\/1\.1")) '
        r'(?P<statuscode>\d{3}) .*')

    def __init__(self, log_file, consumer, rotation_check_idle_time_s,
                 rotation_check_period_s):
        """Initialize the tailer.

        Args:
            log_file: path to the nginx access log
            consumer: the log consumer object, exporting both a record method,
              which takes a parsed access log line, and a commit method, which
              writes metrics to stackdriver.
            rotation_check_idle_time_s: min log idle time before starting
              rotation checks (see SimpleTailer).
            rotation_check_period_s: min period between rotation checks (see
              SimpleTailer).
        """
        self._tailer = SimpleTailer(
            log_file,
            rotation_check_idle_time_s=rotation_check_idle_time_s,
            rotation_check_period_s=rotation_check_period_s)
        self._consumer = consumer
        # Pre-compile once; used for every log line.
        self._re_parser = re.compile(self.NGINX_ACCESS_LOG_RE)

    def _parse_nginx_access_log(self, log_line):
        """Parse an nginx access log line.

        Args:
            log_line: log line from the access log

        Returns:
            dict mapping named groups to substrings (datetime and
            statuscode always present on a match), or None if the line
            does not match the expected format.
        """
        match = self._re_parser.match(log_line)
        if match:
            return match.groupdict()
        return None

    def watch(self, polling_period_s):
        """Watch the configured log file in perpetuity (never returns).

        Args:
            polling_period_s: number of seconds between tail checks
        """
        while True:
            t_start = time.time()
            lines = self._tailer.get_lines()
            if lines is None:
                logging.warning('Could not open log file.')
            else:
                for line in lines:
                    result = self._parse_nginx_access_log(line)
                    if result:
                        self._consumer.record(result)
                    else:
                        logging.warning('Could not parse log line: "%s"', line)
                self._consumer.commit()
            # Sleep whatever remains of the polling period, never negative.
            time.sleep(max(0, polling_period_s - time.time() + t_start))
| {
"repo_name": "swfrench/nginx-access-tailer",
"path": "nginx_access_tailer/nginx_access_log_tailer.py",
"copies": "1",
"size": "5862",
"license": "bsd-3-clause",
"hash": -6209718050913097000,
"line_mean": 37.5657894737,
"line_max": 82,
"alpha_frac": 0.5569771409,
"autogenerated": false,
"ratio": 4.105042016806722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5162019157706722,
"avg_score": null,
"num_lines": null
} |
"""Access Muonlab II hardware.
Contents
--------
:class:`MuonlabII`
Access Muonlab II hardware.
"""
from array import array
import random
import time
from pysparc.ftdi_chip import FtdiChip
from pysparc.util import map_setting
# FTDI device description string used to locate the Muonlab II interface.
DESCRIPTION = "USB <-> Serial"
# Scale factors applied to the raw 12-bit ADC values in the read_*_data
# methods below. Units are presumably nanoseconds per ADC count -- confirm
# against the Muonlab II hardware documentation.
LIFETIME_SCALE = 6.25
COINCIDENCE_TIMEDELTA_SCALE = 6.25 / 12
class MuonlabII(object):
    """Access Muonlab II hardware.

    Instantiate this class to get access to connected Muonlab II hardware.
    The hardware device is opened during instantiation.
    """

    _device = None

    # Register addresses for each hardware setting.
    # Yes, really, HV1 and 2 are reversed
    _address = {'HV_1': 2,
                'HV_2': 1,
                'THR_1': 3,
                'THR_2': 4,
                'MEAS': 5}

    def __init__(self):
        self._device = FtdiChip(DESCRIPTION)

    def __del__(self):
        """Cleanly shut down muonlab hardware."""
        # Drop both PMT high voltages to zero before releasing the device.
        if self._device and not self._device.closed:
            self.set_pmt1_voltage(0)
            self.set_pmt2_voltage(0)
            self._device.close()

    def _write_setting(self, setting, data):
        """Write setting to device.

        :param setting: string specifying the setting to write. Must be
            one of HV_1 (for PMT 1 high voltage), HV_2 (for PMT 2 high
            voltage), THR_1 (for PMT 1 threshold), THR_2 (for PMT 2
            threshold) or MEAS (for type of measurement).
        :param data: the raw data byte to write to the device.

        For a high voltage setting, the data byte values (in the range of
        0x00 - 0xff) are mapped linearly to 300 - 1500 V. For a threshold
        setting, the data byte values (0x00 - 0xff) are linearly mapped to
        0 mV - 1200 mV.

        The type of measurement can be set using only 4 bits. They must
        be 0b1111 for a muon lifetime measurement. Any other value will
        select a coincidence time difference measurement.
        """
        if setting not in self._address:
            raise TypeError("Unkown setting: %s" % setting)
        else:
            address_bits = self._address[setting]
        # Commands are two bytes: the high byte sets its marker bit (bit 7)
        # and carries the address plus the data's high nibble; the low byte
        # repeats the address with the data's low nibble.
        high_byte = (1 << 7) | (address_bits << 4) | ((data & 0xf0) >> 4)
        low_byte = (address_bits << 4) | (data & 0x0f)
        if setting == 'MEAS':
            # Measurement type can be selected using only 1 byte
            command = chr(low_byte)
        else:
            # NOTE(review): Python 2-era API; array.tostring() was removed
            # in Python 3.9 in favour of tobytes().
            command = array('B', [high_byte, low_byte]).tostring()
        self._device.write(command)

    def set_pmt1_voltage(self, voltage):
        """Set high voltage for PMT 1.

        :param voltage: integer. Values are clipped to a 300 - 1500 V
            range.
        """
        voltage_byte = map_setting(voltage, 300, 1500, 0x00, 0xff)
        self._write_setting('HV_1', voltage_byte)

    def set_pmt2_voltage(self, voltage):
        """Set high voltage for PMT 2.

        :param voltage: integer. Values are clipped to a 300 - 1500 V
            range.
        """
        voltage_byte = map_setting(voltage, 300, 1500, 0x00, 0xff)
        self._write_setting('HV_2', voltage_byte)

    def set_pmt1_threshold(self, threshold):
        """Set threshold for PMT 1.

        Events with a signal strength below the specified threshold will
        be ignored as noise.

        :param threshold: integer. Values are clipped to a 0 - 1200 mV
            range.
        """
        threshold_byte = map_setting(threshold, 0, 1200, 0x00, 0xff)
        self._write_setting('THR_1', threshold_byte)

    def set_pmt2_threshold(self, threshold):
        """Set threshold for PMT 2.

        Events with a signal strength below the specified threshold will
        be ignored as noise.

        :param threshold: integer. Values are clipped to a 0 - 1200 mV
            range.
        """
        threshold_byte = map_setting(threshold, 0, 1200, 0x00, 0xff)
        self._write_setting('THR_2', threshold_byte)

    def select_lifetime_measurement(self):
        """Select lifetime measurement mode."""
        self._write_setting('MEAS', 0xff)

    def select_coincidence_measurement(self):
        """Select coincidence time difference measurement mode."""
        self._write_setting('MEAS', 0x00)

    def flush_device(self):
        """Flush device output buffers.

        To completely clear out outdated measurements when changing
        parameters, call this method. All data received after this method
        was called is really newly measured.
        """
        self._device.flush()

    def read_lifetime_data(self):
        """Read lifetime data from detector.

        Raises ValueError when corrupt data is received.

        :returns: list of lifetime measurements
        """
        data = self._device.read()
        if data:
            lifetimes = []
            #for word_value in data[::2]:
            for i in range(0, len(data), 2):
                high_byte, low_byte = ord(data[i]), ord(data[i + 1])
                # sanity checks: high bytes must have bit 7 set, low
                # bytes must not
                if not (high_byte & 0x80):
                    raise ValueError(
                        "Corrupt lifetime data (high byte bit flag not set)")
                if (low_byte & 0x80):
                    raise ValueError(
                        "Corrupt lifetime data (low byte bit flag set)")
                # Reassemble the 12-bit ADC value from two 6-bit halves.
                adc_value = ((high_byte & 0x3f) << 6) | (low_byte & 0x3f)
                lifetime = LIFETIME_SCALE * adc_value
                lifetimes.append(lifetime)
            return lifetimes
        else:
            return []

    def read_coincidence_data(self):
        """Read coincidence data from detector.

        Raises ValueError when corrupt data is received.

        :returns: list of coincidence time difference measurements
        """
        data = self._device.read()
        if data:
            deltatimes = []
            #for word_value in data[::2]:
            for i in range(0, len(data), 2):
                high_byte, low_byte = ord(data[i]), ord(data[i + 1])
                # bit 6 of each byte flags which detector was hit first
                det1_isfirsthit = bool(high_byte & 0x40)
                det2_isfirsthit = bool(low_byte & 0x40)
                # sanity checks
                if not (high_byte & 0x80):
                    raise ValueError("Corrupt coincidence data "
                                     "(high byte bit flag not set)")
                if (low_byte & 0x80):
                    raise ValueError(
                        "Corrupt coincidence data (low byte bit flag set)")
                if not det1_isfirsthit and not det2_isfirsthit:
                    raise ValueError(
                        "Corrupt coincidence data (no hit first flag set)")
                adc_value = ((high_byte & 0x3f) << 6) | (low_byte & 0x3f)
                deltatime = COINCIDENCE_TIMEDELTA_SCALE * adc_value
                # A negative delta means detector 2 fired first.
                if det2_isfirsthit and not det1_isfirsthit:
                    deltatime *= -1
                deltatimes.append((deltatime, det1_isfirsthit,
                                   det2_isfirsthit))
            return deltatimes
        else:
            return []
class FakeMuonlabII(object):
    """Access FAKE Muonlab II hardware.

    Instantiate this class to test an application without needing to
    connect actual hardware. This class does very little.
    """

    def set_pmt1_voltage(self, voltage):
        """Ignore the requested PMT 1 high voltage."""
        pass

    def set_pmt2_voltage(self, voltage):
        """Ignore the requested PMT 2 high voltage."""
        pass

    def set_pmt1_threshold(self, threshold):
        """Ignore the requested PMT 1 threshold."""
        pass

    def set_pmt2_threshold(self, threshold):
        """Ignore the requested PMT 2 threshold."""
        pass

    def select_lifetime_measurement(self):
        """No-op: pretend to select lifetime mode."""
        pass

    def select_coincidence_measurement(self):
        """No-op: pretend to select coincidence mode."""
        pass

    def flush_device(self):
        """No-op: nothing to flush."""
        pass

    def read_lifetime_data(self):
        """Return FAKE lifetime data matching a 2.2 us lifetime at 2 Hz."""
        time.sleep(.5)
        return [random.expovariate(1. / 2200)]

    def read_coincidence_data(self):
        """Return FAKE coincidence data matching a 10 ns sigma at 2 Hz."""
        time.sleep(.5)
        return [random.normalvariate(0, 10.)]
| {
"repo_name": "CosmicLaserShow/CosmicLaserShow",
"path": "pysparc/pysparc/muonlab/muonlab_ii.py",
"copies": "2",
"size": "8000",
"license": "mit",
"hash": -5377863381279140000,
"line_mean": 29.0751879699,
"line_max": 77,
"alpha_frac": 0.568125,
"autogenerated": false,
"ratio": 3.992015968063872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5560140968063872,
"avg_score": null,
"num_lines": null
} |
"""Access objects for the file system within a bundle and for filesystem caches
used by the download processes and the library.
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
import os
import io
from ambry.orm import File
import zipfile
import urllib
import ambry.util
logger = ambry.util.get_logger(__name__)
#import logging; logger.setLevel(logging.DEBUG)
##makedirs
## Monkey Patch!
## Need to patch zipfile.testzip b/c it doesn't close file descriptors in 2.7.3
## The bug apparently exists in several other versions of python
## http://bugs.python.org/issue16408
def testzip(self):
    """Read all the files and check the CRC.

    Replacement for zipfile.ZipFile.testzip that also closes the
    underlying file object of each member, working around the fd leak
    described in http://bugs.python.org/issue16408.
    """
    chunk_size = 2 ** 20
    for zinfo in self.filelist:
        try:
            # Read by chunks, to avoid an OverflowError or a
            # MemoryError with very large embedded files.
            f = self.open(zinfo.filename, "r")
            while f.read(chunk_size):     # Check CRC-32
                pass
            f.close()
            f._fileobj.close() # This shouldn't be necessary, but it is.
        except zipfile.BadZipfile:
            # Mirror the stdlib contract: return the first bad member name.
            return zinfo.filename

# Install the patched implementation over the stdlib method.
zipfile.ZipFile.testzip = testzip
class DownloadFailedError(Exception):
    """Raised when a download cannot be retrieved or fails its test function."""
    pass
class FileRef(File):
    '''Extends the File orm class with awareness of the filesystem'''

    def __init__(self, bundle):
        # Keep a handle on the bound super() proxy; legacy idiom.
        self.super_ = super(FileRef, self)
        self.super_.__init__()
        self.bundle = bundle

    @property
    def abs_path(self):
        # Absolute path of this file within the bundle's filesystem.
        return self.bundle.filesystem.path(self.path)

    @property
    def changed(self):
        # True when the on-disk file is newer than the recorded mtime.
        return os.path.getmtime(self.abs_path) > self.modified

    def update(self):
        # Refresh the stored mtime and content hash from disk, then persist.
        # NOTE(review): file_hash is defined on BundleFilesystem, not on
        # Filesystem -- confirm this attribute lookup actually resolves.
        self.modified = os.path.getmtime(self.abs_path)
        self.content_hash = Filesystem.file_hash(self.abs_path)
        self.bundle.database.session.commit()
class Filesystem(object):
    """Base accessor for configured filesystem caches."""

    def __init__(self, config):
        self.config = config

    def get_cache_by_name(self, name):
        """Return a new cache object for the named filesystem configuration.

        Raises ConfigurationError if no filesystem entry exists for `name`.
        """
        from dbexceptions import ConfigurationError
        from cache import new_cache

        config = self.config.filesystem(name)

        if not config:
            raise ConfigurationError('No filesystem cache by name of {}'.format(name))

        return new_cache(config)

    @classmethod
    def find_f(cls, config, key, value):
        '''Find a filesystem entry where the key `key` equals `value`'''
        # NOTE(review): unimplemented stub -- the body is only this
        # docstring, so the method always returns None.

    @classmethod
    def rm_rf(cls, d):
        """Recursively delete directory `d`; no-op if it does not exist."""
        if not os.path.exists(d):
            return

        for path in (os.path.join(d,f) for f in os.listdir(d)):
            if os.path.isdir(path):
                cls.rm_rf(path)
            else:
                os.unlink(path)

        os.rmdir(d)
class BundleFilesystem(Filesystem):
    """Filesystem accessor rooted at a bundle's source directory.

    Provides path construction under the bundle root, download/unzip helpers
    backed by the configured caches, and File/FileRef database records.
    """

    BUILD_DIR = 'build'
    META_DIR = 'meta'

    def __init__(self, bundle, root_directory = None):
        super(BundleFilesystem, self).__init__(bundle.config)
        self.bundle = bundle
        if root_directory:
            self.root_directory = root_directory
        else:
            # Walk upward from the startup script to find bundle.yaml.
            self.root_directory = Filesystem.find_root_dir()

        # Make sure the build directory exists.
        if not os.path.exists(self.path(BundleFilesystem.BUILD_DIR)):
            os.makedirs(self.path(BundleFilesystem.BUILD_DIR),0755)

    @staticmethod
    def find_root_dir(testFile='bundle.yaml', start_dir = None):
        '''Find the parent directory that contains the bundle.yaml file.

        Returns the directory path, or None if no ancestor contains it.
        '''
        import sys

        if start_dir is not None:
            d = start_dir
        else:
            # Directory of the startup script.
            d = sys.path[0]

        while os.path.isdir(d) and d != '/':
            test = os.path.normpath(d+'/'+testFile)

            if(os.path.isfile(test)):
                return d
            d = os.path.dirname(d)

        return None

    @property
    def root_dir(self):
        '''Returns the root directory of the bundle '''
        return self.root_directory

    def ref(self,rel_path):
        """Return the FileRef record for a bundle-relative path.

        Raises sqlalchemy.orm.exc.NoResultFound when no record exists.
        """
        s = self.bundle.database.session
        import sqlalchemy.orm.exc
        try:
            o = s.query(FileRef).filter(FileRef.path==rel_path).one()
            o.bundle = self.bundle
            return o
        except sqlalchemy.orm.exc.NoResultFound as e:
            raise e

    def path(self, *args):
        '''Resolve a path that is relative to the bundle root into an
        absolute path, creating the parent directory if needed.'''

        if len(args) == 0:
            raise ValueError("must supply at least one argument")

        args = (self.root_directory,) +args

        try:
            p = os.path.normpath(os.path.join(*args))
        except AttributeError as e:
            raise ValueError("Path arguments aren't valid when generating path:"+ e.message)

        dir_ = os.path.dirname(p)
        if not os.path.exists(dir_):
            try:
                os.makedirs(dir_) # Multiple processes may try to make, so it could already exist
            except Exception as e: #@UnusedVariable
                pass

            if not os.path.exists(dir_):
                raise Exception("Couldn't create directory "+dir_)

        return p

    def build_path(self, *args):
        """Like path(), but rooted at the bundle's build directory."""
        if len(args) > 0 and args[0] == self.BUILD_DIR:
            raise ValueError("Adding build to existing build path "+os.path.join(*args))

        args = (self.bundle.build_dir,) + args

        return self.path(*args)

    def meta_path(self, *args):
        """Like path(), but rooted at the bundle's meta directory."""
        if len(args) > 0 and args[0] == self.META_DIR:
            raise ValueError("Adding meta to existing meta path "+os.path.join(*args))

        args = (self.META_DIR,) + args

        return self.path(*args)

    def directory(self, rel_path):
        '''Resolve a path that is relative to the bundle root into
        an absolute path, creating the directory if it does not exist.'''
        abs_path = self.path(rel_path)
        if(not os.path.isdir(abs_path) ):
            os.makedirs(abs_path)
        return abs_path

    @staticmethod
    def file_hash(path):
        '''Compute the md5 hash of a file, reading it in chunks.'''
        import hashlib

        md5 = hashlib.md5()

        with open(path,'rb') as f:
            for chunk in iter(lambda: f.read(8192), b''):
                md5.update(chunk)

        return md5.hexdigest()

    def _get_unzip_file(self, cache, tmpdir, zf, path, name):
        '''Look for a member of a zip file in the cache, and if it does not exist,
        extract and cache it. Returns the cached absolute path.'''

        # Sanitize the member name: strip parent-dir traversal and a
        # leading slash so extraction stays inside tmpdir.
        name = name.replace('..','')

        if name[0] == '/':
            name = name[1:]

        base = os.path.basename(path)

        # Cache key: quoted archive name / quoted member name.
        rel_path = (urllib.quote_plus(base.replace('/','_'),'_')+'/'+
                    urllib.quote_plus(name.replace('/','_'),'_') )

        # Check if it is already in the cache
        cached_file = cache.get(rel_path)

        if cached_file:
            return cached_file

        # Not in cache, extract it.
        tmp_abs_path = os.path.join(tmpdir, name)

        if not os.path.exists(tmp_abs_path):
            zf.extract(name,tmpdir )

        # Store it in the cache.
        abs_path = cache.put(tmp_abs_path, rel_path)

        # There have been zip files that have been truncated, but I don't know
        # why. This is a stab in the dark to catch it.
        if self.file_hash(tmp_abs_path) != self.file_hash(abs_path):
            raise Exception('Zip file extract error: md5({}) != md5({})'
                            .format(tmp_abs_path,abs_path ))

        return abs_path

    def unzip(self,path, regex=None):
        '''Extract a single file from a zip archive into the extracts cache
        and return its path; the temporary extraction dir is always removed.

        With no regex the first (assumed only) member is used; with a regex,
        the first member whose name matches.
        '''
        import tempfile, uuid

        if isinstance(regex, basestring):
            import re
            regex = re.compile(regex)

        cache = self.get_cache_by_name('extracts')

        tmpdir = os.path.join(cache.cache_dir,'tmp',str(uuid.uuid4()))

        if not os.path.isdir(tmpdir):
            os.makedirs(tmpdir)

        try:
            with zipfile.ZipFile(path) as zf:
                abs_path = None

                if regex is None:
                    name = iter(zf.namelist()).next() # Assume only one file in zip archive.
                    abs_path = self._get_unzip_file(cache, tmpdir, zf, path, name)
                else:
                    for name in zf.namelist():
                        if regex.match(name):
                            abs_path = self._get_unzip_file(cache, tmpdir, zf, path, name)
                            break

                return abs_path
        except zipfile.BadZipfile:
            self.bundle.error("Error processing supposed zip file: '{}' You may want to delete it and try again. ".format(path))
            raise
        finally:
            # Always clear the temporary extraction directory.
            self.rm_rf(tmpdir)

        return None

    def unzip_dir(self,path, regex=None):
        '''Generator that yields the files from a zip file.

        Yield all the files in the zip, unless a regex is specified, in which case it yields only
        files with names that match the pattern.'''
        import tempfile, uuid

        cache = self.get_cache_by_name('extracts')

        tmpdir = os.path.join(cache.cache_dir,'tmp',str(uuid.uuid4()))

        if not os.path.isdir(tmpdir):
            os.makedirs(tmpdir)

        rtrn = True

        try:
            with zipfile.ZipFile(path) as zf:
                abs_path = None
                for name in zf.namelist():
                    # Every member is extracted and cached; only matching
                    # names are yielded when a regex is given.
                    abs_path = self._get_unzip_file(cache, tmpdir, zf, path, name)
                    if regex and regex.match(name) or not regex:
                        yield abs_path
        except Exception as e:
            # A bad archive is removed so a later run will re-fetch it.
            self.bundle.error("File '{}' can't be unzipped, removing it: {}".format(path, e))
            os.remove(path)
            raise
        finally:
            self.rm_rf(tmpdir)

    def download(self,url, test_f=None):
        '''Download a file into the 'downloads' cache and return its path.

        url may also be a key for the build.sources configuration.

        test_f is an optional validator called with the downloaded path; the
        string 'zip' selects a built-in zip integrity check. Up to three
        attempts are made before the last error is re-raised.
        '''
        import tempfile
        import urlparse
        import urllib2
        import stat

        cache = self.get_cache_by_name('downloads')

        parsed = urlparse.urlparse(url)

        # Allow the url argument to be a key into build.sources.
        if ( not parsed.scheme and
             self.bundle.config.build.get('sources') and
             url in self.bundle.config.build.sources):
            url = self.bundle.config.build.sources.get(url)
            parsed = urlparse.urlparse(url)

        file_path = parsed.netloc+'/'+urllib.quote_plus(parsed.path.replace('/','_'),'_')

        # We download to a temp file, then move it into place when
        # done. This allows the code to detect and correct partial
        # downloads.
        download_path = os.path.join(tempfile.gettempdir(),file_path+".download")

        def test_zip_file(f):
            # Built-in validator: True when the archive's CRCs all pass.
            if not os.path.exists(f):
                raise Exception("Test zip file does not exist: {} ".format(f))
            try:
                with zipfile.ZipFile(f) as zf:
                    return zf.testzip() is None
            except zipfile.BadZipfile:
                return False

        if test_f == 'zip':
            test_f = test_zip_file

        for attempts in range(3):
            if attempts > 0:
                self.bundle.error("Retrying download of {}".format(url))

            cached_file = None
            out_file = None
            excpt = None

            try:
                cached_file = cache.get(file_path)
                size = os.stat(cached_file).st_size if cached_file else None
                if cached_file and size:
                    # Cache hit; still validate before accepting it.
                    out_file = cached_file

                    if test_f and not test_f(out_file):
                        cache.remove(file_path, True)
                        raise DownloadFailedError("Cached Download didn't pass test function "+url)
                else:
                    self.bundle.log("Downloading "+url)
                    self.bundle.log(" --> "+file_path)
                    resp = urllib2.urlopen(url)
                    headers = resp.info() #@UnusedVariable

                    if resp.getcode() is not None and resp.getcode() != 200:
                        raise DownloadFailedError("Failed to download {}: code: {} ".format(url, resp.getcode()))

                    try:
                        out_file = cache.put(resp, file_path)
                    except:
                        self.bundle.error("Caught exception, deleting download file")
                        cache.remove(file_path, propagate = True)
                        raise

                    if test_f and not test_f(out_file):
                        cache.remove(file_path, propagate = True)
                        raise DownloadFailedError("Download didn't pass test function "+url)

                # Success: leave the retry loop.
                break
            except KeyboardInterrupt:
                print "\nRemoving Files! \n Wait for deletion to complete! \n"
                cache.remove(file_path, propagate = True)
                raise
            except DownloadFailedError as e:
                self.bundle.error("Failed: "+str(e))
                excpt = e
            except IOError as e:
                self.bundle.error("Failed to download "+url+" to "+file_path+" : "+str(e))
                excpt = e
            except urllib.ContentTooShortError as e:
                self.bundle.error("Content too short for "+url)
                excpt = e
            except zipfile.BadZipfile as e:
                # Code that uses the yield value -- like the filesystem.unzip method
                # can throw exceptions that will propagate to here. Unexpected, but very useful.
                # We should probably create a FileNotValueError, but I'm lazy.
                self.bundle.error("Got an invalid zip file for "+url)
                cache.remove(file_path, propagate = True)
                excpt = e
            except Exception as e:
                self.bundle.error("Unexpected download error '"+str(e)+"' when downloading "+url)
                cache.remove(file_path, propagate = True)
                raise

        # Clean up any leftover partial-download temp file.
        if download_path and os.path.exists(download_path):
            os.remove(download_path)

        if excpt:
            raise excpt

        return out_file

    def read_csv(self, f, key = None):
        """Read a CSV into a dictionary of dicts or list of dicts

        Args:
            f    a string or file object ( a FLO with read() )
            key  column or columns to use as the key. If None, return a list

        """
        import os.path

        opened = False
        if isinstance(f, basestring):
            if not os.path.exists(f):
                f = self.path(f) # Assume it is relative to the bundle filesystem

            f = open(f,'rb')
            opened = True

        import csv
        reader = csv.DictReader(f)

        if key is None:
            out = []
        else:
            if isinstance(key, (list,tuple)):
                def make_key(row):
                    # Composite key: tuple of stripped values; blanks become None.
                    return tuple([ str(row[i].strip()) if row[i].strip() else None for i in key])
            else:
                def make_key(row):
                    return row[key]

            out = {}

        for row in reader:
            if key is None:
                out.append(row)
            else:
                out[make_key(row)] = row

        if opened:
            f.close  # NOTE(review): missing call parentheses -- the file is never actually closed here.

        return out

    def download_shapefile(self, url):
        """Downloads a shapefile, unzips it, and returns the .shp file path"""
        import os
        import re

        zip_file = self.download(url)

        if not zip_file or not os.path.exists(zip_file):
            raise Exception("Failed to download: {} ".format(url))

        # Drain the generator; the last matching member wins.
        for file_ in self.unzip_dir(zip_file,
                                    regex=re.compile('.*\.shp$')): pass # Should only be one

        if not file_ or not os.path.exists(file_):
            raise Exception("Failed to unzip {} and get .shp file ".format(zip_file))

        return file_

    def load_yaml(self,*args):
        """Load a yaml file from the bundle file system. Arguments are passed to self.path()

        And if the first path element is not absolute, pre-pends the bundle path.

        Returns an AttrDict of the results.

        This will load yaml files the same way as RunConfig files.
        """
        from ambry.util import AttrDict

        f = self.path(*args)

        ad = AttrDict()
        ad.update_yaml(f)

        return ad

    def read_yaml(self,*args):
        """Straight-to-object reading of a YAML file.
        """
        import yaml

        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; consider yaml.safe_load for untrusted files.
        with open(self.path(*args),'rb') as f:
            return yaml.load(f)

    def write_yaml(self, o, *args):
        """Serialize `o` as YAML to the path built from self.path(*args)."""
        import yaml

        with open(self.path(*args),'wb') as f:
            return yaml.safe_dump( o, f,default_flow_style=False, indent=4, encoding='utf-8' )

    def get_url(self,source_url, create=False):
        '''Return a database record for a file, looked up by source_url.

        Returns None when no record exists and create is False.
        '''

        import sqlalchemy.orm.exc

        s = self.bundle.database.session

        try:
            o = (s.query(File).filter(File.source_url==source_url).one())
        except sqlalchemy.orm.exc.NoResultFound:
            if create:
                o = File(source_url=source_url,path=source_url,process='none' )
                s.add(o)
                s.commit()
            else:
                return None

        o.session = s # Files have SavableMixin
        return o

    def get_or_new_url(self, source_url):
        """get_url() that always creates a missing record."""
        return self.get_url(source_url, True)

    def add_file(self, rel_path):
        """Create (or fetch) the File record for a bundle-relative path."""
        return self.filerec(rel_path, True)

    def filerec(self, rel_path, create=False):
        '''Return a database record for a file, looked up by path.

        Sets o._is_new to indicate whether the record was just created.
        '''

        import sqlalchemy.orm.exc

        s = self.bundle.database.session

        if not rel_path:
            raise ValueError('Must supply rel_path')

        try:
            o = (s.query(File).filter(File.path==rel_path).one())
            o._is_new = False
        except sqlalchemy.orm.exc.NoResultFound as e:
            if not create:
                raise e

            # NOTE(review): this class has no `filesystem` attribute --
            # possibly should be self.path(rel_path); confirm.
            a_path = self.filesystem.path(rel_path)

            o = File(path=rel_path,
                     content_hash=Filesystem.file_hash(a_path),
                     modified=os.path.getmtime(a_path),
                     process='none'
                     )

            s.add(o)
            s.commit()
            o._is_new = True

        except Exception as e:
            # NOTE(review): silently swallows all other errors and returns None.
            return None

        return o
# Stolen from : https://bitbucket.org/fabian/filechunkio/src/79ba1388ee96/LICENCE?at=default
SEEK_SET = getattr(io, 'SEEK_SET', 0)
SEEK_CUR = getattr(io, 'SEEK_CUR', 1)
SEEK_END = getattr(io, 'SEEK_END', 2)

# A File like object that operates on a subset of another file. For use in Boto
# multipart uploads.
class FileChunkIO(io.FileIO):
    """
    A file-like object that exposes only a chunk of an underlying file.

    Positions reported by tell() and accepted by seek() are relative to the
    start of the chunk, and reads never return data beyond the chunk's end.
    """
    def __init__(self, name, mode='r', closefd=True, offset=0, bytes_=None,
                 *args, **kwargs):
        """
        Open a file chunk. The mode can only be 'r' for reading. `offset`
        is the number of bytes the chunk starts after the real file's first
        byte. `bytes_` is the chunk length; pass None to extend the chunk
        to the last byte of the real file.
        """
        if not mode.startswith('r'):
            raise ValueError("Mode string must begin with 'r'")
        self.offset = offset
        self.bytes = bytes_
        if bytes_ is None:
            self.bytes = os.stat(name).st_size - self.offset
        super(FileChunkIO, self).__init__(name, mode, closefd, *args, **kwargs)
        # Position the underlying file at the start of the chunk.
        self.seek(0)

    def seek(self, offset, whence=SEEK_SET):
        """
        Move to a new chunk-relative position.
        """
        if whence == SEEK_SET:
            super(FileChunkIO, self).seek(self.offset + offset)
        elif whence == SEEK_CUR:
            self.seek(self.tell() + offset)
        elif whence == SEEK_END:
            self.seek(self.bytes + offset)

    def tell(self):
        """
        Current position, relative to the start of the chunk.
        """
        return super(FileChunkIO, self).tell() - self.offset

    def read(self, n=-1):
        """
        Read and return at most n bytes, never past the end of the chunk.
        A negative n reads everything remaining in the chunk.
        """
        if n >= 0:
            # Clamp to the chunk boundary. The max(0, ...) guard keeps a
            # position past the chunk end from producing a negative count,
            # which FileIO.read would interpret as "read to EOF".
            max_n = max(0, self.bytes - self.tell())
            n = min(n, max_n)
            return super(FileChunkIO, self).read(n)
        else:
            return self.readall()

    def readall(self):
        """
        Read all remaining data in the chunk.
        """
        # max(0, ...): a position past the chunk end must read zero bytes,
        # not recurse through read(-1) indefinitely.
        return self.read(max(0, self.bytes - self.tell()))

    def readinto(self, b):
        """
        Same as RawIOBase.readinto(): fill `b` with up to len(b) bytes and
        return the number of bytes read.
        """
        data = self.read(len(b))
        n = len(data)
        try:
            b[:n] = data
        except TypeError as err:
            import array
            if not isinstance(b, array.array):
                raise err
            # 'b' (not b'b'): array typecodes must be str on Python 3.
            b[:n] = array.array('b', data)
        return n
| {
"repo_name": "kball/ambry",
"path": "ambry/filesystem.py",
"copies": "1",
"size": "22133",
"license": "bsd-2-clause",
"hash": -177954941764191780,
"line_mean": 30.7546628407,
"line_max": 128,
"alpha_frac": 0.523471739,
"autogenerated": false,
"ratio": 4.286848731357738,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5310320470357738,
"avg_score": null,
"num_lines": null
} |
""" Accessor for creating progress records, which show the history of the building of
a bundle.
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of
the Revised BSD License, included in this distribution as LICENSE.txt
"""
import os
import platform
from ambry.orm import Process
from six import string_types
class ProgressLoggingError(Exception):
    """Raised when progress records cannot be written, e.g. the section is
    already closed or marked done."""
    pass
class ProgressSection(object):
    """A handler of the records for a single routine or phase"""

    def __init__(self, parent, session, phase, stage, logger, **kwargs):
        self._parent = parent
        self._pid = os.getpid()
        self._hostname = platform.node()
        self._session = session
        self._logger = logger

        self._phase = phase
        self._stage = stage

        self.rec = None
        self._ai_rec_id = None # record for add_update
        self._group = None
        self._start = None

        # The opening 'start' record; its id groups every record this
        # section writes.
        self._group = self.add(log_action='start', state='running', **kwargs)

        assert self._session

    def __enter__(self):
        assert self._session
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        from ambry.util import qualified_name
        import traceback

        assert self._session

        if exc_val:
            # Don't want to trigger another exception
            if hasattr(exc_val, 'details') and callable(exc_val.details):
                message = exc_val.details()
            else:
                message = str(exc_val.message.replace('{',''))

            # Record the exception, then close the section.
            self.add(
                message = message,
                exception_class = qualified_name(exc_type),
                exception_trace = str(traceback.format_exc(exc_tb)),
            )

            self.done("Failed in context with exception")
            # False lets the exception propagate to the caller.
            return False
        else:
            self.done("Successful context exit")
            return True

    @property
    def start(self):
        """The Process record that opened this section, or None."""
        from sqlalchemy.orm.exc import NoResultFound
        if not self._start:
            try:
                self._start = self._session.query(Process).filter(Process.id == self._group).one()
            except NoResultFound:
                self._start = None

        return self._start

    def augment_args(self, args, kwargs):
        """Fold positional args and this section's context into `kwargs`, in place."""
        kwargs['pid'] = self._pid
        kwargs['d_vid'] = self._parent._d_vid
        kwargs['hostname'] = self._hostname
        kwargs['phase'] = self._phase
        kwargs['stage'] = self._stage
        kwargs['group'] = self._group

        if self._session is None:
            raise ProgressLoggingError("Progress logging section is already closed")

        # Any bare string positional argument becomes the log message.
        for arg in args:
            if isinstance(arg, string_types):
                kwargs['message'] = arg

        # The sqlite driver has a separate database, it can't deal with the objects
        # from a different database, so we convert them to vids. For postgres,
        # the objects are in the same database, but they would have been attached to another
        # session, so we'd have to either detach them, or do the following anyway
        for table, vid in (('source', 's_vid'), ('table', 't_vid'), ('partition', 'p_vid')):
            if table in kwargs:
                kwargs[vid] = kwargs[table].vid
                del kwargs[table]

    def add(self, *args, **kwargs):
        """Add a new record to the section"""

        if self.start and self.start.state == 'done' and kwargs.get('log_action') != 'done':
            raise ProgressLoggingError("Can't add -- process section is done")

        self.augment_args(args, kwargs)

        kwargs['log_action'] = kwargs.get('log_action', 'add')

        rec = Process(**kwargs)
        self._session.add(rec)

        self.rec = rec

        if self._logger:
            self._logger.info(self.rec.log_str)

        self._session.commit()

        # Any explicit add invalidates the add_update bookkeeping.
        self._ai_rec_id = None

        return self.rec.id

    def update(self, *args, **kwargs):
        """Update the last section record"""

        self.augment_args(args, kwargs)

        kwargs['log_action'] = kwargs.get('log_action', 'update')

        if not self.rec:
            # Nothing to update yet; fall back to creating a record.
            return self.add(**kwargs)
        else:
            for k, v in kwargs.items():
                # Don't update object; use whatever was set in the original record
                if k not in ('source', 's_vid', 'table', 't_vid', 'partition', 'p_vid'):
                    setattr(self.rec, k, v)

            self._session.merge(self.rec)

            if self._logger:
                self._logger.info(self.rec.log_str)

            self._session.commit()

            self._ai_rec_id = None
            return self.rec.id

    def add_update(self, *args, **kwargs):
        """A record is added on the first call, then updated on subsequent calls"""
        if not self._ai_rec_id:
            self._ai_rec_id = self.add(*args, **kwargs)
        else:
            # add()/update() clear _ai_rec_id; preserve it across the update.
            au_save = self._ai_rec_id
            self.update(*args, **kwargs)
            self._ai_rec_id = au_save

        return self._ai_rec_id

    def update_done(self, *args, **kwargs):
        """Clear out the previous update"""
        kwargs['state'] = 'done'
        self.update(*args, **kwargs)
        self.rec = None

    def done(self, *args, **kwargs):
        """Mark the whole ProgressSection as done"""

        kwargs['state'] = 'done'
        pr_id = self.add(*args, log_action='done', **kwargs)

        # Flip every record in this group, including the start record, to 'done'.
        self._session.query(Process).filter(Process.group == self._group).update({Process.state: 'done'})
        self.start.state = 'done'
        self._session.commit()

        return pr_id

    def get(self, id_):
        """Fetch a Process record by primary key."""
        return self._session.query(Process).get(id_)
class ProcessLogger(object):
    """Database connection and access object for recording build progress and build state"""

    def __init__(self, dataset, logger=None, new_connection=True, new_sqlite_db=True):
        import os.path

        self._vid = dataset.vid
        self._d_vid = dataset.vid
        self._logger = logger
        self._buildstate = None
        self._new_connection = new_connection

        db = dataset._database

        schema = db._schema

        if db.driver == 'sqlite' and new_sqlite_db:
            # Create an entirely new database. Sqlite does not like concurrent access,
            # even from multiple connections in the same process.
            from ambry.orm import Database

            if db.dsn == 'sqlite://':
                # in memory database
                dsn = 'sqlite://'
            else:
                # create progress db near library db.
                parts = os.path.split(db.dsn)
                dsn = '/'.join(parts[:-1] + ('progress.db',))

            self._db = Database(dsn, foreign_keys=False)
            self._db.create() # falls through if already exists
            self._engine = self._db.engine

            self._connection = self._db.connection

            self._session = self._db.session
            # Copy the dataset row into the progress database.
            self._session.merge(dataset)
            self._session.commit()

        elif new_connection: # For postgres, by default, create a new db connection
            # Make a new connection to the existing database
            self._db = db
            self._connection = self._db.engine.connect()
            self._session = self._db.Session(bind=self._connection, expire_on_commit=False)
        else: # When not building, ok to use existing connection
            self._db = db
            self._connection = db.connection
            self._session = db.session

        if schema:
            self._session.execute('SET search_path TO {}'.format(schema))

    def __del__(self):
        # Sqlite progress databases own their connection; close the whole db.
        if self._db.driver == 'sqlite':
            self._db.close()
        else:
            self.close()

    def close(self):
        # Only close connections this object opened itself.
        if self._connection and self._new_connection:
            self._connection.close()

    @property
    def dataset(self):
        # Reload the dataset row through this logger's own session.
        from ambry.orm import Dataset
        return self._session.query(Dataset).filter(Dataset.vid == self._d_vid).one()

    def start(self, phase, stage, **kwargs):
        """Start a new routine, stage or phase"""
        return ProgressSection(self, self._session, phase, stage, self._logger, **kwargs)

    @property
    def records(self):
        """Return all process records for this dataset"""
        return (self._session.query(Process)
                .filter(Process.d_vid == self._d_vid)).all()

    @property
    def starts(self):
        """Return all start records for this the dataset, grouped by the start record"""
        return (self._session.query(Process)
                .filter(Process.d_vid == self._d_vid)
                .filter(Process.log_action == 'start')
                ).all()

    @property
    def query(self):
        """Base query over this dataset's process records"""
        return self._session.query(Process).filter(Process.d_vid == self._d_vid)

    @property
    def exceptions(self):
        """Return all records that captured an exception, oldest first"""
        return (self._session.query(Process)
                .filter(Process.d_vid == self._d_vid)
                .filter(Process.exception_class != None)
                .order_by(Process.modified)).all()

    def clean(self):
        """Delete all of the records"""

        # Deleting seems to be really weird and unreliable.
        self._session \
            .query(Process) \
            .filter(Process.d_vid == self._d_vid) \
            .delete(synchronize_session='fetch')

        # Belt and braces: individually delete anything the bulk delete missed.
        for r in self.records:
            self._session.delete(r)

        self._session.commit()

    def delete(self):
        """Delete the sqlite database file, if it exists"""
        if self._db.dsn.startswith("sqlite"):
            self._db.delete()

    def commit(self):
        assert self._new_connection
        self._session.commit()

    @property
    def build(self):
        """Access build configuration values as attributes. See self.process
        for a usage example"""
        from ambry.orm.config import BuildConfigGroupAccessor

        # It is a lightweight object, so no need to cache
        return BuildConfigGroupAccessor(self.dataset, 'buildstate', self._session)

    def bundle_process_logs(self, show_all=None):
        """Return a table (first row headers) of recent process records.

        By default hides records that are done or older than two minutes;
        pass show_all to include everything.
        """
        import time
        from collections import OrderedDict
        from sqlalchemy.sql import and_
        from ambry.util import drop_empty

        records = []

        def append(pr, edit=None):
            if not isinstance(pr, dict):
                pr = pr.dict

            # Keep a stable subset of columns, truncating long values.
            d = OrderedDict((k, str(v).strip()[:60]) for k, v in pr.items() if k in
                            ['id', 'group', 'state', 'd_vid', 's_vid', 'hostname', 'pid',
                             'phase', 'stage', 'modified', 'item_count',
                             'message'])

            # Age in seconds relative to now (negative means in the past).
            d['modified'] = round(float(d['modified']) - time.time(), 1)

            if edit:
                for k, v in edit.items():
                    d[k] = v(d[k])

            if not records:
                records.append(d.keys())

            records.append(d.values())

        q = self.query.order_by(Process.modified.desc())

        for pr in q.all():
            # Don't show reports that are done or older than 2 minutes.
            if show_all or (pr.state != 'done' and pr.modified > time.time() - 120):
                append(pr)

        # Add old running rows, which may indicate a dead process.
        q = (self.query.filter(Process.s_vid != None)
             .filter(and_(Process.state == 'running', Process.modified < time.time() - 60))
             .filter(Process.group != None))

        for pr in q.all():
            append(pr, edit={'modified': lambda e: (str(e) + ' (dead?)')})

        records = drop_empty(records)

        return records

    def stats(self):
        """Count sources, partitions and segments by state.

        Returns (headers, rows) suitable for tabular display.
        """
        from collections import defaultdict
        from itertools import groupby
        from ambry.orm import Partition

        ds = self.dataset

        key_f = key = lambda e: e.state
        states = set()

        d = defaultdict(lambda: defaultdict(int))

        for state, sources in groupby(sorted(ds.sources, key=key_f), key_f):
            d['Sources'][state] = sum(1 for _ in sources) or None
            states.add(state)

        key_f = key = lambda e: (e.state, e.type)

        for (state, type), partitions in groupby(sorted(ds.partitions, key=key_f), key_f):
            states.add(state)
            # Union partitions are the real partitions; others are segments.
            if type == Partition.TYPE.UNION:
                d['Partitions'][state] = sum(1 for _ in partitions) or None
            else:
                d['Segments'][state] = sum(1 for _ in partitions) or None

        headers = sorted(states)

        rows = []
        for r in ('Sources', 'Partitions', 'Segments'):
            row = [r]
            for state in headers:
                row.append(d[r].get(state, ''))
            rows.append(row)

        return headers, rows
class CallInterval(object):
    """Wrap a callback so it fires at most once per `freq` seconds.

    Keyword arguments given at construction are merged into every
    invocation; on a key collision the construction-time value wins.
    The first invocation is only forwarded once `freq` seconds have
    elapsed since construction.
    """

    def __init__(self, f, freq, **kwargs):
        import time
        self._f = f
        self._freq = freq
        self._next = time.time() + self._freq
        self._kwargs = kwargs

    def __call__(self, *args, **kwargs):
        import time
        if time.time() <= self._next:
            # Still inside the quiet period; drop this call silently.
            return
        merged = dict(kwargs)
        merged.update(self._kwargs)
        self._f(*args, **merged)
        self._next = time.time() + self._freq
def call_interval(freq, **kwargs):
    """Decorator form of CallInterval: limit the decorated function to at
    most one call per `freq` seconds."""
    def wrapper(f):
        return CallInterval(f, freq, **kwargs)
    return wrapper
| {
"repo_name": "CivicKnowledge/ambry",
"path": "ambry/bundle/process.py",
"copies": "1",
"size": "13647",
"license": "bsd-2-clause",
"hash": 2063807387936982800,
"line_mean": 30.3004587156,
"line_max": 105,
"alpha_frac": 0.5602696563,
"autogenerated": false,
"ratio": 4.192626728110599,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252896384410599,
"avg_score": null,
"num_lines": null
} |
# Accessor functions for control properties
from Controls import *
import struct
# These needn't go through this module, but are here for completeness
def SetControlData_Handle(control, part, selector, data):
    """Set control data via the Handle-based API; no struct encoding step."""
    control.SetControlData_Handle(part, selector, data)
def GetControlData_Handle(control, part, selector):
    """Get control data via the Handle-based API; no struct decoding step."""
    return control.GetControlData_Handle(part, selector)
# Selectors that use a custom (setter, getter) pair instead of the
# generic struct-based encode/decode path.
_accessdict = {
    kControlPopupButtonMenuHandleTag: (SetControlData_Handle, GetControlData_Handle),
}

# selector -> (struct format, encoder, decoder); None means "pass through".
_codingdict = {
    kControlPushButtonDefaultTag : ("b", None, None),
    kControlEditTextTextTag: (None, None, None),
    kControlEditTextPasswordTag: (None, None, None),

    kControlPopupButtonMenuIDTag: ("h", None, None),

    kControlListBoxDoubleClickTag: ("b", None, None),
}
def SetControlData(control, part, selector, data):
    """Encode `data` for `selector` and store it on `control`.

    Custom accessors from _accessdict take precedence; otherwise the value
    is encoded per _codingdict. Raises KeyError for unknown selectors.
    """
    if _accessdict.has_key(selector):
        setfunc, getfunc = _accessdict[selector]
        setfunc(control, part, selector, data)
        return
    if not _codingdict.has_key(selector):
        raise KeyError, ('Unknown control selector', selector)
    structfmt, coder, decoder = _codingdict[selector]
    if coder:
        data = coder(data)
    if structfmt:
        data = struct.pack(structfmt, data)
    control.SetControlData(part, selector, data)
def GetControlData(control, part, selector):
if _accessdict.has_key(selector):
setfunc, getfunc = _accessdict[selector]
return getfunc(control, part, selector, data)
if not _codingdict.has_key(selector):
raise KeyError, ('Unknown control selector', selector)
structfmt, coder, decoder = _codingdict[selector]
data = control.GetControlData(part, selector)
if structfmt:
data = struct.unpack(structfmt, data)
if decoder:
data = decoder(data)
if type(data) == type(()) and len(data) == 1:
data = data[0]
return data
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.6/Lib/plat-mac/Carbon/ControlAccessor.py",
"copies": "81",
"size": "1873",
"license": "mit",
"hash": 1165284151252792800,
"line_mean": 32.4464285714,
"line_max": 85,
"alpha_frac": 0.6951414842,
"autogenerated": false,
"ratio": 3.7162698412698414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Accessors for Amber TI datasets.
"""
from os.path import dirname, join
from glob import glob
from .. import Bunch
def load_bace_improper():
    """Load Amber Bace improper solvated vdw example

    Returns
    -------
    data: Bunch
        Dictionary-like object, the interesting attributes are:

        - 'data' : the data files for improper solvated vdw alchemical leg

    """
    base = dirname(__file__)
    vdw_files = glob(join(base, 'bace_improper/solvated/vdw/*/ti-*.out.bz2'))
    with open(join(base, 'bace_improper', 'descr.rst')) as rst_file:
        description = rst_file.read()
    return Bunch(data=dict(vdw=vdw_files), DESCR=description)
def load_bace_example():
    """Load the Amber Bace example perturbation.

    Returns
    -------
    data: Bunch
        Dictionary-like object, the interesting attributes are:

        - 'data' : the data files by system and alchemical leg

    """
    root = dirname(__file__)
    complex_legs = {
        'decharge': glob(join(root, 'bace_CAT-13d~CAT-17a/complex/decharge/*/ti-*.out.bz2')),
        'recharge': glob(join(root, 'bace_CAT-13d~CAT-17a/complex/recharge/*/ti-*.out.bz2')),
        'vdw': glob(join(root, 'bace_CAT-13d~CAT-17a/complex/vdw/*/ti-*.out.bz2')),
    }
    solvated_legs = {
        'decharge': glob(join(root, 'bace_CAT-13d~CAT-17a/solvated/decharge/*/ti-*.out.bz2')),
        'recharge': glob(join(root, 'bace_CAT-13d~CAT-17a/solvated/recharge/*/ti-*.out.bz2')),
        'vdw': glob(join(root, 'bace_CAT-13d~CAT-17a/solvated/vdw/*/ti-*.out.bz2')),
    }
    with open(join(root, 'bace_CAT-13d~CAT-17a', 'descr.rst')) as rst_file:
        description = rst_file.read()
    return Bunch(data={'complex': complex_legs, 'solvated': solvated_legs},
                 DESCR=description)
def load_simplesolvated():
    """Load the Amber solvated dataset.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:

        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset

    """
    base = dirname(__file__)
    legs = {
        'charge': glob(join(base, 'simplesolvated/charge/*/ti-*.out')),
        'vdw': glob(join(base, 'simplesolvated/vdw/*/ti-*.out')),
    }
    with open(join(base, 'simplesolvated', 'descr.rst')) as rst_file:
        description = rst_file.read()
    return Bunch(data=legs, DESCR=description)
def load_invalidfiles():
    """Load the invalid files.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:

        - 'data' : the example of invalid data files
        - 'DESCR': the full description of the dataset

    """
    base = dirname(__file__)
    # A single group of invalid files, wrapped in a one-element list.
    invalid = [glob(join(base, 'invalidfiles/*.out.bz2'))]
    with open(join(base, 'invalidfiles', 'descr.rst')) as rst_file:
        description = rst_file.read()
    return Bunch(data=invalid, DESCR=description)
| {
"repo_name": "alchemistry/alchemtest",
"path": "src/alchemtest/amber/access.py",
"copies": "1",
"size": "3102",
"license": "bsd-3-clause",
"hash": 4152233049004272600,
"line_mean": 27.7222222222,
"line_max": 110,
"alpha_frac": 0.5876853643,
"autogenerated": false,
"ratio": 3.3680781758957656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44557635401957657,
"avg_score": null,
"num_lines": null
} |
"""Accessors for Gromacs datasets.
"""
from os.path import dirname, join
from glob import glob
from .. import Bunch
def load_benzene():
    """Load the Gromacs benzene dataset.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:

        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset

    """
    here = dirname(__file__)
    coulomb_files = sorted(glob(join(here, 'benzene', 'Coulomb', '*', 'dhdl.xvg.bz2')))
    vdw_files = sorted(glob(join(here, 'benzene', 'VDW', '*', 'dhdl.xvg.bz2')))
    with open(join(here, 'benzene', 'descr.rst')) as rst_file:
        description = rst_file.read()
    return Bunch(data={'Coulomb': coulomb_files, 'VDW': vdw_files},
                 DESCR=description)
def load_ABFE():
    """Load the Gromacs ABFE dataset.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset
    """
    here = dirname(__file__)
    data = {
        leg: sorted(glob(join(here, 'ABFE', leg, 'dhdl_*.xvg')))
        for leg in ('complex', 'ligand')
    }
    with open(join(here, 'ABFE', 'descr.rst')) as handle:
        description = handle.read()
    return Bunch(data=data, DESCR=description)
def load_expanded_ensemble_case_1():
    """Load the Gromacs Host CB7 Guest C3 expanded ensemble dataset, case 1 (single simulation visits all states).

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset
    """
    case_dir = join(dirname(__file__), 'expanded_ensemble', 'case_1')
    # a single trajectory file holds all states
    files = glob(join(case_dir, 'CB7_Guest3_dhdl.xvg.gz'))
    with open(join(case_dir, 'descr.rst')) as handle:
        description = handle.read()
    return Bunch(data={'AllStates': files}, DESCR=description)
def load_expanded_ensemble_case_2():
    """Load the Gromacs Host CB7 Guest C3 expanded ensemble dataset, case 2 (two simulations visit all states independently).

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset
    """
    case_dir = join(dirname(__file__), 'expanded_ensemble', 'case_2')
    files = sorted(glob(join(case_dir, '*.xvg.gz')))
    with open(join(case_dir, 'descr.rst')) as handle:
        description = handle.read()
    return Bunch(data={'AllStates': files}, DESCR=description)
def load_expanded_ensemble_case_3():
    """Load the Gromacs Host CB7 Guest C3 REX dataset, case 3.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset
    """
    case_dir = join(dirname(__file__), 'expanded_ensemble', 'case_3')
    files = sorted(glob(join(case_dir, '*.xvg.gz')))
    with open(join(case_dir, 'descr.rst')) as handle:
        description = handle.read()
    return Bunch(data={'AllStates': files}, DESCR=description)
def load_water_particle_without_energy():
    """Load the Gromacs water particle without energy dataset.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset
    """
    here = dirname(__file__)
    pattern = join(here, 'water_particle', 'without_energy', '*.xvg.bz2')
    data = {'AllStates': sorted(glob(pattern))}
    # descr.rst is shared by all water_particle variants
    with open(join(here, 'water_particle', 'descr.rst')) as handle:
        description = handle.read()
    return Bunch(data=data, DESCR=description)
def load_water_particle_with_potential_energy():
    """Load the Gromacs water particle with potential energy dataset.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset
    """
    here = dirname(__file__)
    pattern = join(here, 'water_particle', 'with_potential_energy', '*.xvg.bz2')
    data = {'AllStates': sorted(glob(pattern))}
    # descr.rst is shared by all water_particle variants
    with open(join(here, 'water_particle', 'descr.rst')) as handle:
        description = handle.read()
    return Bunch(data=data, DESCR=description)
def load_water_particle_with_total_energy():
    """Load the Gromacs water particle with total energy dataset.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset
    """
    here = dirname(__file__)
    pattern = join(here, 'water_particle', 'with_total_energy', '*.xvg.bz2')
    data = {'AllStates': sorted(glob(pattern))}
    # descr.rst is shared by all water_particle variants
    with open(join(here, 'water_particle', 'descr.rst')) as handle:
        description = handle.read()
    return Bunch(data=data, DESCR=description)
| {
"repo_name": "alchemistry/alchemtest",
"path": "src/alchemtest/gmx/access.py",
"copies": "1",
"size": "5479",
"license": "bsd-3-clause",
"hash": -8964200016877289000,
"line_mean": 26.9540816327,
"line_max": 125,
"alpha_frac": 0.6114254426,
"autogenerated": false,
"ratio": 3.6141160949868074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9715171980184971,
"avg_score": 0.0020739114803671093,
"num_lines": 196
} |
"""Accessors for NAMD FEP datasets.
"""
from os.path import dirname, join
from glob import glob
from .. import Bunch
def load_tyr2ala():
    """Load the NAMD tyrosine to alanine mutation dataset.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset
    """
    here = dirname(__file__)
    data = {
        direction: glob(join(here, 'tyr2ala/in-aqua/%s/*.fepout.bz2' % direction))
        for direction in ('forward', 'backward')
    }
    with open(join(here, 'tyr2ala', 'descr.rst')) as handle:
        description = handle.read()
    return Bunch(data=data, DESCR=description)
def load_idws():
    """Load the NAMD IDWS dataset.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        - 'data' : the data files by alchemical leg
        - 'DESCR': the full description of the dataset
    """
    here = dirname(__file__)
    datafile = join(here, 'idws', 'quickidws.fepout.bz2')
    with open(join(here, 'idws', 'descr.rst')) as handle:
        description = handle.read()
    return Bunch(data={'forward': [datafile]}, DESCR=description)
| {
"repo_name": "alchemistry/alchemtest",
"path": "src/alchemtest/namd/access.py",
"copies": "1",
"size": "1352",
"license": "bsd-3-clause",
"hash": -4586040539015673000,
"line_mean": 24.5094339623,
"line_max": 89,
"alpha_frac": 0.6072485207,
"autogenerated": false,
"ratio": 3.440203562340967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9535065453146013,
"avg_score": 0.002477325978990796,
"num_lines": 53
} |
"""Access point for the HomematicIP Cloud component."""
import asyncio
import logging
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
COMPONENTS, HMIPC_AUTHTOKEN, HMIPC_HAPID, HMIPC_NAME, HMIPC_PIN)
from .errors import HmipcConnectionError
_LOGGER = logging.getLogger(__name__)
class HomematicipAuth:
    """Manages HomematicIP client registration."""
    def __init__(self, hass, config):
        """Initialize HomematicIP Cloud client registration."""
        self.hass = hass
        self.config = config
        self.auth = None
    async def async_setup(self):
        """Connect to HomematicIP for registration.

        Returns True on success, False when the cloud is unreachable.
        """
        try:
            self.auth = await self.get_auth(
                self.hass,
                self.config.get(HMIPC_HAPID),
                self.config.get(HMIPC_PIN)
            )
            return True
        except HmipcConnectionError:
            return False
    async def async_checkbutton(self):
        """Check whether the blue button has been pressed."""
        from homematicip.base.base_connection import HmipConnectionError
        try:
            await self.auth.isRequestAcknowledged()
            return True
        except HmipConnectionError:
            return False
    async def async_register(self):
        """Register client at HomematicIP.

        Returns the new auth token, or False on connection failure.
        """
        from homematicip.base.base_connection import HmipConnectionError
        try:
            authtoken = await self.auth.requestAuthToken()
            await self.auth.confirmAuthToken(authtoken)
            return authtoken
        except HmipConnectionError:
            return False
    async def get_auth(self, hass, hapid, pin):
        """Create a HomematicIP auth object for the access point.

        Returns False (not an exception) when the connection request fails.
        """
        from homematicip.aio.auth import AsyncAuth
        from homematicip.base.base_connection import HmipConnectionError
        auth = AsyncAuth(hass.loop, async_get_clientsession(hass))
        # NOTE: a leftover debug print(auth) was removed here.
        try:
            await auth.init(hapid)
            if pin:
                auth.pin = pin
            await auth.connectionRequest('HomeAssistant')
        except HmipConnectionError:
            return False
        return auth
class HomematicipHAP:
    """Manages HomematicIP HTTP and WebSocket connection."""
    def __init__(self, hass, config_entry):
        """Initialize HomematicIP Cloud connection."""
        self.hass = hass
        self.config_entry = config_entry
        self.home = None
        # Set when async_reset() wants the reconnect loop to exit.
        self._ws_close_requested = False
        self._retry_task = None
        self._tries = 0
        self._accesspoint_connected = True
        self._retry_setup = None
    async def async_setup(self, tries=0):
        """Initialize connection.

        On connection failure, schedules itself again with exponential
        backoff (capped at 2**6 seconds) and returns False.
        """
        try:
            self.home = await self.get_hap(
                self.hass,
                self.config_entry.data.get(HMIPC_HAPID),
                self.config_entry.data.get(HMIPC_AUTHTOKEN),
                self.config_entry.data.get(HMIPC_NAME)
            )
        except HmipcConnectionError:
            retry_delay = 2 ** min(tries + 1, 6)
            _LOGGER.error("Error connecting to HomematicIP with HAP %s. "
                          "Retrying in %d seconds",
                          self.config_entry.data.get(HMIPC_HAPID), retry_delay)
            async def retry_setup(_now):
                """Retry setup."""
                if await self.async_setup(tries + 1):
                    self.config_entry.state = config_entries.ENTRY_STATE_LOADED
            self._retry_setup = self.hass.helpers.event.async_call_later(
                retry_delay, retry_setup)
            return False
        _LOGGER.info("Connected to HomematicIP with HAP %s",
                     self.config_entry.data.get(HMIPC_HAPID))
        for component in COMPONENTS:
            self.hass.async_create_task(
                self.hass.config_entries.async_forward_entry_setup(
                    self.config_entry, component)
            )
        return True
    @callback
    def async_update(self, *args, **kwargs):
        """Async update the home device.

        Triggered when the HMIP HOME_CHANGED event has fired.
        There are several occasions for this event to happen.
        We are only interested to check whether the access point
        is still connected. If not, device state changes cannot
        be forwarded to hass. So if access point is disconnected all devices
        are set to unavailable.
        """
        if not self.home.connected:
            _LOGGER.error(
                "HMIP access point has lost connection with the cloud")
            self._accesspoint_connected = False
            self.set_all_to_unavailable()
        elif not self._accesspoint_connected:
            # Explicitly getting an update as device states might have
            # changed during access point disconnect.
            job = self.hass.async_create_task(self.get_state())
            job.add_done_callback(self.get_state_finished)
    async def get_state(self):
        """Update HMIP state and tell Home Assistant."""
        await self.home.get_current_state()
        self.update_all()
    def get_state_finished(self, future):
        """Execute when get_state coroutine has finished."""
        from homematicip.base.base_connection import HmipConnectionError
        try:
            future.result()
        except HmipConnectionError:
            # Somehow connection could not recover. Will disconnect and
            # so reconnect loop is taking over.
            _LOGGER.error(
                "Updating state after HMIP access point reconnect failed")
            self.hass.async_create_task(self.home.disable_events())
    def set_all_to_unavailable(self):
        """Set all devices to unavailable and tell Home Assistant."""
        for device in self.home.devices:
            device.unreach = True
        self.update_all()
    def update_all(self):
        """Signal all devices to update their state."""
        for device in self.home.devices:
            device.fire_update_event()
    async def _handle_connection(self):
        """Handle websocket connection."""
        # NOTE(review): not referenced inside this class; kept for
        # backward compatibility with possible external callers.
        from homematicip.base.base_connection import HmipConnectionError
        try:
            await self.home.get_current_state()
        except HmipConnectionError:
            return
        hmip_events = await self.home.enable_events()
        try:
            await hmip_events
        except HmipConnectionError:
            return
    async def async_connect(self):
        """Start WebSocket connection.

        Runs until async_reset() is called or the retry sleep is
        cancelled; reconnects with exponential backoff on errors.
        """
        from homematicip.base.base_connection import HmipConnectionError
        tries = 0
        while True:
            try:
                await self.home.get_current_state()
                hmip_events = await self.home.enable_events()
                tries = 0
                await hmip_events
            except HmipConnectionError:
                pass
            if self._ws_close_requested:
                break
            self._ws_close_requested = False
            tries += 1
            retry_delay = 2 ** min(tries + 1, 6)
            _LOGGER.error("Error connecting to HomematicIP with HAP %s. "
                          "Retrying in %d seconds",
                          self.config_entry.data.get(HMIPC_HAPID), retry_delay)
            try:
                self._retry_task = self.hass.async_create_task(asyncio.sleep(
                    retry_delay, loop=self.hass.loop))
                await self._retry_task
            except asyncio.CancelledError:
                break
    async def async_reset(self):
        """Close the websocket connection."""
        self._ws_close_requested = True
        if self._retry_setup is not None:
            self._retry_setup.cancel()
        if self._retry_task is not None:
            self._retry_task.cancel()
        # Bug fix: disable_events() is a coroutine (see
        # get_state_finished, which schedules it as a task); calling it
        # without awaiting meant it never actually ran.
        await self.home.disable_events()
        _LOGGER.info("Closed connection to HomematicIP cloud server")
        for component in COMPONENTS:
            await self.hass.config_entries.async_forward_entry_unload(
                self.config_entry, component)
        return True
    async def get_hap(self, hass, hapid, authtoken, name):
        """Create a HomematicIP access point object.

        Raises HmipcConnectionError when the cloud is unreachable.
        """
        from homematicip.aio.home import AsyncHome
        from homematicip.base.base_connection import HmipConnectionError
        home = AsyncHome(hass.loop, async_get_clientsession(hass))
        home.name = name
        home.label = 'Access Point'
        home.modelType = 'HmIP-HAP'
        home.set_auth_token(authtoken)
        try:
            await home.init(hapid)
            await home.get_current_state()
        except HmipConnectionError:
            raise HmipcConnectionError
        home.on_update(self.async_update)
        hass.loop.create_task(self.async_connect())
        return home
| {
"repo_name": "aronsky/home-assistant",
"path": "homeassistant/components/homematicip_cloud/hap.py",
"copies": "2",
"size": "8951",
"license": "apache-2.0",
"hash": -776444036433894000,
"line_mean": 34.1019607843,
"line_max": 79,
"alpha_frac": 0.597028265,
"autogenerated": false,
"ratio": 4.420246913580247,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6017275178580247,
"avg_score": null,
"num_lines": null
} |
"""Access point for the HomematicIP Cloud component."""
import asyncio
import logging
from homematicip.aio.auth import AsyncAuth
from homematicip.aio.home import AsyncHome
from homematicip.base.base_connection import HmipConnectionError
from homematicip.base.enums import EventType
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import HomeAssistantType
from .const import COMPONENTS, HMIPC_AUTHTOKEN, HMIPC_HAPID, HMIPC_NAME, HMIPC_PIN
from .errors import HmipcConnectionError
_LOGGER = logging.getLogger(__name__)
class HomematicipAuth:
    """Manages HomematicIP client registration."""
    def __init__(self, hass, config) -> None:
        """Initialize HomematicIP Cloud client registration."""
        self.hass = hass
        self.config = config
        self.auth = None
    async def async_setup(self) -> bool:
        """Connect to HomematicIP for registration.

        Returns True on success, False when the cloud is unreachable.
        """
        try:
            self.auth = await self.get_auth(
                self.hass, self.config.get(HMIPC_HAPID), self.config.get(HMIPC_PIN)
            )
            return True
        except HmipcConnectionError:
            return False
    async def async_checkbutton(self) -> bool:
        """Check whether the blue button has been pressed."""
        try:
            return await self.auth.isRequestAcknowledged()
        except HmipConnectionError:
            return False
    async def async_register(self):
        """Register client at HomematicIP.

        Returns the new auth token, or False on connection failure.
        """
        try:
            authtoken = await self.auth.requestAuthToken()
            await self.auth.confirmAuthToken(authtoken)
            return authtoken
        except HmipConnectionError:
            return False
    async def get_auth(self, hass: HomeAssistantType, hapid, pin):
        """Create a HomematicIP auth object for the access point.

        Returns False (not an exception) when the connection request fails.
        """
        auth = AsyncAuth(hass.loop, async_get_clientsession(hass))
        try:
            await auth.init(hapid)
            if pin:
                auth.pin = pin
            await auth.connectionRequest("HomeAssistant")
        except HmipConnectionError:
            return False
        return auth
class HomematicipHAP:
    """Manages HomematicIP HTTP and WebSocket connection."""
    def __init__(self, hass: HomeAssistantType, config_entry: ConfigEntry) -> None:
        """Initialize HomematicIP Cloud connection."""
        self.hass = hass
        self.config_entry = config_entry
        self.home = None
        # Set when async_reset() wants the reconnect loop to exit.
        self._ws_close_requested = False
        self._retry_task = None
        self._tries = 0
        self._accesspoint_connected = True
        self.hmip_device_by_entity_id = {}
        self.reset_connection_listener = None
    async def async_setup(self, tries: int = 0) -> bool:
        """Initialize connection.

        Raises ConfigEntryNotReady on connection failure so Home
        Assistant retries the setup for us.
        """
        try:
            self.home = await self.get_hap(
                self.hass,
                self.config_entry.data.get(HMIPC_HAPID),
                self.config_entry.data.get(HMIPC_AUTHTOKEN),
                self.config_entry.data.get(HMIPC_NAME),
            )
        except HmipcConnectionError:
            raise ConfigEntryNotReady
        _LOGGER.info(
            "Connected to HomematicIP with HAP %s", self.config_entry.unique_id
        )
        for component in COMPONENTS:
            self.hass.async_create_task(
                self.hass.config_entries.async_forward_entry_setup(
                    self.config_entry, component
                )
            )
        return True
    @callback
    def async_update(self, *args, **kwargs) -> None:
        """Async update the home device.

        Triggered when the HMIP HOME_CHANGED event has fired.
        There are several occasions for this event to happen.
        1. We are interested to check whether the access point
        is still connected. If not, device state changes cannot
        be forwarded to hass. So if access point is disconnected all devices
        are set to unavailable.
        2. We need to update home including devices and groups after a reconnect.
        3. We need to update home without devices and groups in all other cases.
        """
        if not self.home.connected:
            _LOGGER.error("HMIP access point has lost connection with the cloud")
            self._accesspoint_connected = False
            self.set_all_to_unavailable()
        elif not self._accesspoint_connected:
            # Now the HOME_CHANGED event has fired indicating the access
            # point has reconnected to the cloud again.
            # Explicitly getting an update as device states might have
            # changed during access point disconnect.
            job = self.hass.async_create_task(self.get_state())
            job.add_done_callback(self.get_state_finished)
            self._accesspoint_connected = True
        else:
            # Update home with the given json from arg[0],
            # without devices and groups.
            self.home.update_home_only(args[0])
    @callback
    def async_create_entity(self, *args, **kwargs) -> None:
        """Create a device or a group."""
        is_device = EventType(kwargs["event_type"]) == EventType.DEVICE_ADDED
        self.hass.async_create_task(self.async_create_entity_lazy(is_device))
    async def async_create_entity_lazy(self, is_device=True) -> None:
        """Delay entity creation to allow the user to enter a device name."""
        if is_device:
            # 30 s grace period before reloading the config entry.
            await asyncio.sleep(30)
        await self.hass.config_entries.async_reload(self.config_entry.entry_id)
    async def get_state(self) -> None:
        """Update HMIP state and tell Home Assistant."""
        await self.home.get_current_state()
        self.update_all()
    def get_state_finished(self, future) -> None:
        """Execute when get_state coroutine has finished."""
        try:
            future.result()
        except HmipConnectionError:
            # Somehow connection could not recover. Will disconnect and
            # so reconnect loop is taking over.
            _LOGGER.error("Updating state after HMIP access point reconnect failed")
            self.hass.async_create_task(self.home.disable_events())
    def set_all_to_unavailable(self) -> None:
        """Set all devices to unavailable and tell Home Assistant."""
        for device in self.home.devices:
            device.unreach = True
        self.update_all()
    def update_all(self) -> None:
        """Signal all devices to update their state."""
        for device in self.home.devices:
            device.fire_update_event()
    async def async_connect(self) -> None:
        """Start WebSocket connection.

        Runs until async_reset() is called or the retry sleep is
        cancelled; reconnects with exponential backoff (max 2**8 s).
        """
        tries = 0
        while True:
            retry_delay = 2 ** min(tries, 8)
            try:
                await self.home.get_current_state()
                hmip_events = await self.home.enable_events()
                tries = 0
                await hmip_events
            except HmipConnectionError:
                _LOGGER.error(
                    "Error connecting to HomematicIP with HAP %s. "
                    "Retrying in %d seconds",
                    self.config_entry.unique_id,
                    retry_delay,
                )
            if self._ws_close_requested:
                break
            self._ws_close_requested = False
            tries += 1
            try:
                self._retry_task = self.hass.async_create_task(
                    asyncio.sleep(retry_delay)
                )
                await self._retry_task
            except asyncio.CancelledError:
                break
    async def async_reset(self) -> bool:
        """Close the websocket connection."""
        self._ws_close_requested = True
        if self._retry_task is not None:
            self._retry_task.cancel()
        await self.home.disable_events()
        _LOGGER.info("Closed connection to HomematicIP cloud server")
        for component in COMPONENTS:
            await self.hass.config_entries.async_forward_entry_unload(
                self.config_entry, component
            )
        self.hmip_device_by_entity_id = {}
        return True
    async def get_hap(
        self, hass: HomeAssistantType, hapid: str, authtoken: str, name: str
    ) -> AsyncHome:
        """Create a HomematicIP access point object.

        Raises HmipcConnectionError when the cloud is unreachable.
        """
        home = AsyncHome(hass.loop, async_get_clientsession(hass))
        home.name = name
        home.label = "Access Point"
        home.modelType = "HmIP-HAP"
        home.set_auth_token(authtoken)
        try:
            await home.init(hapid)
            await home.get_current_state()
        except HmipConnectionError:
            raise HmipcConnectionError
        home.on_update(self.async_update)
        home.on_create(self.async_create_entity)
        hass.loop.create_task(self.async_connect())
        return home
| {
"repo_name": "Teagan42/home-assistant",
"path": "homeassistant/components/homematicip_cloud/hap.py",
"copies": "1",
"size": "9004",
"license": "apache-2.0",
"hash": -6684912212817594000,
"line_mean": 35.4534412955,
"line_max": 84,
"alpha_frac": 0.6075077743,
"autogenerated": false,
"ratio": 4.330928330928331,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.543843610522833,
"avg_score": null,
"num_lines": null
} |
"""Access points with isi_sdk.NamespaceApi."""
import urllib3
import isi_sdk_8_1_1 as isi_sdk
import test_constants
urllib3.disable_warnings()
def main():
    """Exercise access-point CRUD against an Isilon cluster.

    Creates an access point and a test user, sets and reads back an
    ACL, then removes everything it created. Requires a reachable
    cluster configured via test_constants.
    """
    # configure username and password
    configuration = isi_sdk.Configuration()
    configuration.username = test_constants.USERNAME
    configuration.password = test_constants.PASSWORD
    configuration.verify_ssl = test_constants.VERIFY_SSL
    configuration.host = test_constants.HOST
    # configure client connection
    api_client = isi_sdk.ApiClient(configuration)
    api = isi_sdk.NamespaceApi(api_client)
    auth_api = isi_sdk.AuthApi(api_client)
    # get list of access points
    print('Access points: {}'.format(api.list_access_points().namespaces))
    # get list of access point versions
    versions = api.list_access_points(versions=True).versions
    print('Protocol versions of namespace access server: {}'.format(versions))
    # create access point
    ap_path = isi_sdk.AccessPointCreateParams(path='/ifs/home')
    api.create_access_point('user1', access_point=ap_path)
    print('Access points: {}'.format(api.list_access_points().namespaces))
    # create test user
    auth_user = isi_sdk.AuthUserCreateParams(
        name='user1', password='user1', home_directory='/ifs/home/user1')
    auth_api.create_auth_user(auth_user)
    # set ACL for user
    acl_body = isi_sdk.NamespaceAcl(
        authoritative='acl',
        acl=[
            isi_sdk.AclObject(
                trustee={'name': 'user1', 'type': 'user'},
                accesstype='allow',
                accessrights=['file_read'],
                op='add'
            )
        ]
    )
    api.set_acl('user1', acl=True, nsaccess=True, namespace_acl=acl_body)
    # get access control list
    print('ACL: {}'.format(api.get_acl('user1', acl=True, nsaccess=True)))
    # clean up test access point
    api.delete_access_point('user1')
    # clean up test user
    auth_api.delete_auth_user('user1')
    # NOTE(review): namespace paths have no leading slash here
    # ('ifs/home/user1' vs '/ifs/home/user1' above) — presumably the
    # namespace API takes cluster-relative paths; verify against the SDK.
    api.delete_directory('ifs/home/user1', recursive=True)
    print('Successful clean up')
if __name__ == '__main__':
    main()
| {
"repo_name": "Isilon/isilon_sdk",
"path": "tests/test_access_points.py",
"copies": "1",
"size": "2097",
"license": "mit",
"hash": 3561238929823196000,
"line_mean": 30.2985074627,
"line_max": 78,
"alpha_frac": 0.6533142585,
"autogenerated": false,
"ratio": 3.744642857142857,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9897957115642857,
"avg_score": 0,
"num_lines": 67
} |
""" Access PWM devices via SysFS interface """
class PWM(object):
    """One PWM channel exposed through the kernel SysFS interface.

    Reads the current duty cycle, period and polarity at construction
    time and caches them as plain attributes; the set_* methods update
    both the cache and the sysfs files.
    """
    def __init__(self, num):
        if not (1 <= num <= 7):
            raise ValueError('PWM num must be 1-7')
        self.num = num
        self.sysfs = '/sys/class/pwm/pwm' + str(num)
        self.duty = self._read_int('duty_ns')
        self.period = self._read_int('period_ns')
        self.polarity = self._read_int('polarity')
    def _read_int(self, attr):
        # Read one integer from a sysfs attribute file.
        with open(self.sysfs + '/' + attr, 'r') as handle:
            return int(handle.read())
    def _write(self, attr, text):
        # Write a newline-terminated value to a sysfs attribute file.
        with open(self.sysfs + '/' + attr, 'w') as handle:
            handle.write(text)
    def __str__(self):
        return "PWM #%d: %d/%d, pol:%d" % (self.num, self.duty, self.period, self.polarity)
    def set_duty(self, val):
        self.duty = val
        self._write('duty_ns', str(val) + '\n')
    def set_period(self, val):
        self.period = val
        self._write('period_ns', str(val) + '\n')
    def set_polarity(self, val):
        self.polarity = val
        # verify that the stop/start is actually necessary
        self.stop()
        self._write('polarity', str(val) + '\n')
        self.start()
    def stop(self):
        self._write('run', '0\n')
    def start(self):
        self._write('run', '1\n')
| {
"repo_name": "jschornick/pybbb",
"path": "bbb/pwm.py",
"copies": "1",
"size": "1424",
"license": "mit",
"hash": -9027898577892919000,
"line_mean": 29.9565217391,
"line_max": 90,
"alpha_frac": 0.5,
"autogenerated": false,
"ratio": 3.109170305676856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8993559371988119,
"avg_score": 0.023122186737747377,
"num_lines": 46
} |
""" Access PWM devices via SysFS interface """
class PWM(object):
    """One PWM channel driven through the kernel SysFS interface.

    duty, period and polarity are properties that read from and write
    to the sysfs attribute files on every access, so they never go
    stale. base_dir may be overridden (e.g. for testing).
    """
    def __init__(self, num, base_dir='/sys/class/pwm/pwm'):
        if not (1 <= num <= 7):
            raise ValueError('PWM num must be 1-7')
        self.num = num
        self.sysfs = base_dir + str(self.num)
        # Prime the channel: assigning through the properties below
        # reads each current value and writes it straight back.
        self.duty = self._read('duty_ns')
        self.period = self._read('period_ns')
        self.polarity = self._read('polarity')
    def _read(self, attr):
        # Read one integer from a sysfs attribute file.
        with open(self.sysfs + '/' + attr, 'r') as handle:
            return int(handle.read())
    def _write(self, attr, val):
        # Write a newline-terminated value to a sysfs attribute file.
        with open(self.sysfs + '/' + attr, 'w') as handle:
            handle.write(str(val) + '\n')
    def __str__(self):
        return "PWM #{}: {}/{}, pol:{}".format(self.num, self.duty,
                                               self.period, self.polarity)
    def set_duty(self, val):
        self._write('duty_ns', val)
    def get_duty(self):
        return self._read('duty_ns')
    duty = property(get_duty, set_duty)
    def set_period(self, val):
        self._write('period_ns', val)
    def get_period(self):
        return self._read('period_ns')
    period = property(get_period, set_period)
    def set_polarity(self, val):
        # verify that the stop/start is actually necessary
        self.stop()
        self._write('polarity', val)
        self.start()
    def get_polarity(self):
        return self._read('polarity')
    polarity = property(get_polarity, set_polarity)
    def stop(self):
        self._write('run', 0)
    def start(self):
        self._write('run', 1)
| {
"repo_name": "IEEERobotics/pybbb",
"path": "bbb/pwm.py",
"copies": "1",
"size": "1889",
"license": "mit",
"hash": -2952998767444767000,
"line_mean": 29.9672131148,
"line_max": 74,
"alpha_frac": 0.5071466384,
"autogenerated": false,
"ratio": 3.1747899159663864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4181936554366386,
"avg_score": null,
"num_lines": null
} |
from sqlalchemy import sql, schema, types, exceptions, pool
from sqlalchemy.sql import compiler, expression
from sqlalchemy.engine import default, base
class AcNumeric(types.Numeric):
    """Access NUMERIC type; values are bound to the driver as strings."""
    def result_processor(self, dialect):
        # No conversion needed when reading values back.
        return None
    def bind_processor(self, dialect):
        def process(value):
            # Pass None through; stringify everything else so Decimal
            # values round-trip through the ODBC driver.
            return value if value is None else str(value)
        return process
    def get_col_spec(self):
        return "NUMERIC"
class AcFloat(types.Float):
    """Access FLOAT type."""
    def get_col_spec(self):
        return "FLOAT"
    def bind_processor(self, dialect):
        """By converting to string, we can use Decimal types round-trip."""
        def process(value):
            if value is None:
                return None
            return str(value)
        return process
class AcInteger(types.Integer):
    def get_col_spec(self):
        # DDL type name emitted by the Access dialect.
        return "INTEGER"
class AcTinyInteger(types.Integer):
    def get_col_spec(self):
        # DDL type name emitted by the Access dialect.
        return "TINYINT"
class AcSmallInteger(types.Smallinteger):
    def get_col_spec(self):
        # DDL type name emitted by the Access dialect.
        return "SMALLINT"
class AcDateTime(types.DateTime):
    def __init__(self, *a, **kw):
        # Passes False positionally to the base type (the timezone flag
        # in this SQLAlchemy version — TODO confirm); any caller-given
        # arguments are deliberately ignored.
        super(AcDateTime, self).__init__(False)
    def get_col_spec(self):
        return "DATETIME"
class AcDate(types.Date):
    def __init__(self, *a, **kw):
        # Passes False positionally to the base type (the timezone flag
        # in this SQLAlchemy version — TODO confirm); any caller-given
        # arguments are deliberately ignored.
        super(AcDate, self).__init__(False)
    def get_col_spec(self):
        # Dates map to DATETIME as well — same column spec as AcDateTime.
        return "DATETIME"
class AcText(types.Text):
    def get_col_spec(self):
        # Unbounded text maps to Access MEMO columns.
        return "MEMO"
class AcString(types.String):
    def get_col_spec(self):
        """Return TEXT, with an explicit length suffix when one is set."""
        spec = "TEXT"
        if self.length:
            spec += "(%d)" % self.length
        return spec
class AcUnicode(types.Unicode):
    def get_col_spec(self):
        """Return TEXT, with an explicit length suffix when one is set."""
        if self.length:
            return "TEXT(%d)" % self.length
        return "TEXT"
    def bind_processor(self, dialect):
        # No bind-side conversion required.
        return None
    def result_processor(self, dialect):
        # No result-side conversion required.
        return None
class AcChar(types.CHAR):
    def get_col_spec(self):
        """Return TEXT, with an explicit length suffix when one is set."""
        if self.length:
            return "TEXT(%d)" % self.length
        return "TEXT"
class AcBinary(types.Binary):
    def get_col_spec(self):
        # DDL type name emitted by the Access dialect.
        return "BINARY"
class AcBoolean(types.Boolean):
    """Access YESNO column; coerces between Python bool and 0/1."""
    def get_col_spec(self):
        return "YESNO"
    def result_processor(self, dialect):
        def process(value):
            # Preserve NULL; collapse everything else to True/False.
            if value is None:
                return None
            return bool(value)
        return process
    def bind_processor(self, dialect):
        def process(value):
            if value is None:
                return None
            if value is True:
                return 1
            if value is False:
                return 0
            # Non-bool truthy/falsy values are normalized to bool.
            return bool(value)
        return process
class AcTimeStamp(types.TIMESTAMP):
    def get_col_spec(self):
        # DDL type name emitted by the Access dialect.
        return "TIMESTAMP"
def descriptor():
    """Return the descriptor dict identifying this dialect and its
    connection arguments (user, password, database file path)."""
    return {
        'name': 'access',
        'description': 'Microsoft Access',
        'arguments': [
            ('user', "Database user name", None),
            ('password', "Database password", None),
            ('db', "Path to database file", None),
        ],
    }
class AccessExecutionContext(default.DefaultExecutionContext):
    """Execution context that recovers autoincrement (COUNTER) ids
    after INSERT statements."""
    def _has_implicit_sequence(self, column):
        # True when the column behaves as an implicit COUNTER: an
        # autoincrementing integer primary key with no foreign key and
        # no explicit default (or only an optional Sequence default).
        if column.primary_key and column.autoincrement:
            if isinstance(column.type, types.Integer) and not column.foreign_key:
                if column.default is None or (isinstance(column.default, schema.Sequence) and \
                    column.default.optional):
                    return True
        return False
    def post_exec(self):
        """If we inserted into a row with a COUNTER column, fetch the ID"""
        if self.compiled.isinsert:
            tbl = self.compiled.statement.table
            # Cache the sequence column on the table object the first
            # time this table is seen.
            if not hasattr(tbl, 'has_sequence'):
                tbl.has_sequence = None
                for column in tbl.c:
                    if getattr(column, 'sequence', False) or self._has_implicit_sequence(column):
                        tbl.has_sequence = column
                        break
            if bool(tbl.has_sequence):
                # TBD: for some reason _last_inserted_ids doesn't exist here
                # (but it does at corresponding point in mssql???)
                #if not len(self._last_inserted_ids) or self._last_inserted_ids[0] is None:
                # @@identity returns the last autoincrement value
                # generated on this connection.
                self.cursor.execute("SELECT @@identity AS lastrowid")
                row = self.cursor.fetchone()
                self._last_inserted_ids = [int(row[0])] #+ self._last_inserted_ids[1:]
        super(AccessExecutionContext, self).post_exec()
# Lazily-populated module globals: the win32com constants object and the
# DAO engine COM object, filled in by AccessDialect.dbapi() on first use.
const, daoEngine = None, None
class AccessDialect(default.DefaultDialect):
    """SQLAlchemy 0.4 dialect for Microsoft Access (Jet).

    SQL execution goes through pyodbc; schema reflection uses a
    COM-dispatched DAO engine (see ``dbapi`` below).
    """
    # Map generic SQLAlchemy types onto the Access-specific Ac* types.
    colspecs = {
        types.Unicode : AcUnicode,
        types.Integer : AcInteger,
        types.Smallinteger: AcSmallInteger,
        types.Numeric : AcNumeric,
        types.Float : AcFloat,
        types.DateTime : AcDateTime,
        types.Date : AcDate,
        types.String : AcString,
        types.Binary : AcBinary,
        types.Boolean : AcBoolean,
        types.Text : AcText,
        types.CHAR: AcChar,
        types.TIMESTAMP: AcTimeStamp,
    }
    # The Access ODBC driver cannot report reliable rowcounts.
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    def type_descriptor(self, typeobj):
        """Return the Access-specific type object for a generic type."""
        newobj = types.adapt_type(typeobj, self.colspecs)
        return newobj
    def __init__(self, **params):
        super(AccessDialect, self).__init__(**params)
        self.text_as_varchar = False
        self._dtbs = None
    def dbapi(cls):
        """Load pyodbc, first locating a DAO engine for reflection.

        The win32com imports happen here (not at module level) so the
        module can at least be imported on non-Windows platforms.
        """
        import win32com.client, pythoncom
        global const, daoEngine
        if const is None:
            const = win32com.client.constants
            # Try DAO 3.6, then 3.5, then 3.0.
            for suffix in (".36", ".35", ".30"):
                try:
                    daoEngine = win32com.client.gencache.EnsureDispatch("DAO.DBEngine" + suffix)
                    break
                except pythoncom.com_error:
                    pass
            else:
                raise exceptions.InvalidRequestError("Can't find a DB engine. Check http://support.microsoft.com/kb/239114 for details.")
        import pyodbc as module
        return module
    dbapi = classmethod(dbapi)
    def create_connect_args(self, url):
        """Build the ODBC connection string from the SQLAlchemy URL."""
        opts = url.translate_connect_args()
        connectors = ["Driver={Microsoft Access Driver (*.mdb)}"]
        connectors.append("Dbq=%s" % opts["database"])
        user = opts.get("username", None)
        if user:
            connectors.append("UID=%s" % user)
            connectors.append("PWD=%s" % opts.get("password", ""))
        return [[";".join(connectors)], {}]
    def create_execution_context(self, *args, **kwargs):
        return AccessExecutionContext(self, *args, **kwargs)
    def last_inserted_ids(self):
        return self.context.last_inserted_ids
    def do_execute(self, cursor, statement, params, **kwargs):
        # pyodbc rejects an empty dict of parameters; pass an empty tuple.
        if params == {}:
            params = ()
        super(AccessDialect, self).do_execute(cursor, statement, params, **kwargs)
    def _execute(self, c, statement, parameters):
        try:
            if parameters == {}:
                parameters = ()
            c.execute(statement, parameters)
            self.context.rowcount = c.rowcount
        except Exception, e:
            raise exceptions.DBAPIError.instance(statement, parameters, e)
    def has_table(self, connection, tablename, schema=None):
        """Probe for *tablename* by selecting from it."""
        # This approach seems to be more reliable that using DAO
        try:
            connection.execute('select top 1 * from [%s]' % tablename)
            return True
        except Exception, e:
            return False
    def reflecttable(self, connection, table, include_columns):
        """Populate *table* with columns, keys and indexes via DAO."""
        # This is defined in the function, as it relies on win32com constants,
        # that aren't imported until dbapi method is called
        if not hasattr(self, 'ischema_names'):
            self.ischema_names = {
                const.dbByte: AcBinary,
                const.dbInteger: AcInteger,
                const.dbLong: AcInteger,
                const.dbSingle: AcFloat,
                const.dbDouble: AcFloat,
                const.dbDate: AcDateTime,
                const.dbLongBinary: AcBinary,
                const.dbMemo: AcText,
                const.dbBoolean: AcBoolean,
                const.dbText: AcUnicode, # All Access strings are unicode
            }
        # A fresh DAO connection is opened for each reflection
        # This is necessary, so we get the latest updates
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
        try:
            # Case-insensitive lookup of the table definition.
            for tbl in dtbs.TableDefs:
                if tbl.Name.lower() == table.name.lower():
                    break
            else:
                raise exceptions.NoSuchTableError(table.name)
            for col in tbl.Fields:
                coltype = self.ischema_names[col.Type]
                if col.Type == const.dbText:
                    coltype = coltype(col.Size)
                colargs = \
                    {
                        'nullable': not(col.Required or col.Attributes & const.dbAutoIncrField),
                    }
                default = col.DefaultValue
                if col.Attributes & const.dbAutoIncrField:
                    colargs['default'] = schema.Sequence(col.Name + '_seq')
                elif default:
                    if col.Type == const.dbBoolean:
                        default = default == 'Yes' and '1' or '0'
                    colargs['default'] = schema.PassiveDefault(sql.text(default))
                table.append_column(schema.Column(col.Name, coltype, **colargs))
                # TBD: check constraints
            # Find primary key columns first
            for idx in tbl.Indexes:
                if idx.Primary:
                    for col in idx.Fields:
                        thecol = table.c[col.Name]
                        table.primary_key.add(thecol)
                        if isinstance(thecol.type, AcInteger) and \
                                not (thecol.default and isinstance(thecol.default.arg, schema.Sequence)):
                            thecol.autoincrement = False
            # Then add other indexes
            for idx in tbl.Indexes:
                if not idx.Primary:
                    if len(idx.Fields) == 1:
                        col = table.c[idx.Fields[0].Name]
                        if not col.primary_key:
                            col.index = True
                            col.unique = idx.Unique
                    else:
                        pass # TBD: multi-column indexes
            for fk in dtbs.Relations:
                if fk.ForeignTable != table.name:
                    continue
                scols = [c.ForeignName for c in fk.Fields]
                rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields]
                table.append_constraint(schema.ForeignKeyConstraint(scols, rcols))
        finally:
            dtbs.Close()
    def table_names(self, connection, schema):
        """List user tables, skipping system (MSys*) and temp (~TMP*) ones."""
        # A fresh DAO connection is opened for each reflection
        # This is necessary, so we get the latest updates
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
        names = [t.Name for t in dtbs.TableDefs if t.Name[:4] != "MSys" and t.Name[:4] <> "~TMP"]
        dtbs.Close()
        return names
class AccessCompiler(compiler.DefaultCompiler):
    """Statement compiler implementing Access-specific SQL syntax."""
    def visit_select_precolumns(self, select):
        """Access puts TOP, it's version of LIMIT here """
        s = select.distinct and "DISTINCT " or ""
        if select.limit:
            s += "TOP %s " % (select.limit)
        if select.offset:
            raise exceptions.InvalidRequestError('Access does not support LIMIT with an offset')
        return s
    def limit_clause(self, select):
        """Limit in access is after the select keyword"""
        return ""
    def binary_operator_string(self, binary):
        """Access uses "mod" instead of "%" """
        return binary.operator == '%' and 'mod' or binary.operator
    def label_select_column(self, select, column, asfrom):
        # Functions must always be labeled so Access can reference them.
        if isinstance(column, expression._Function):
            return column.label()
        else:
            return super(AccessCompiler, self).label_select_column(select, column, asfrom)
    function_rewrites = {'current_date': 'now',
                         'current_timestamp': 'now',
                         'length': 'len',
                         }
    def visit_function(self, func):
        """Access function names differ from the ANSI SQL names; rewrite common ones"""
        func.name = self.function_rewrites.get(func.name, func.name)
        # Bug fix: the compiled SQL string must be returned; the original
        # discarded super()'s return value, yielding None for function
        # expressions (the later 0.6 port of this dialect has the return).
        return super(AccessCompiler, self).visit_function(func)
    def for_update_clause(self, select):
        """FOR UPDATE is not supported by Access; silently ignore"""
        return ''
    # Strip schema
    def visit_table(self, table, asfrom=False, **kwargs):
        if asfrom:
            return self.preparer.quote(table, table.name)
        else:
            return ""
class AccessSchemaGenerator(compiler.SchemaGenerator):
    """Generates CREATE TABLE DDL with Access-specific column handling."""
    def get_column_specification(self, column, **kwargs):
        """Render one column clause, mapping implicit identity columns
        to Access's ``counter`` type."""
        colspec = self.preparer.format_column(column) + " " + column.type.dialect_impl(self.dialect, _for_ddl=column).get_col_spec()
        # install a sequence if we have an implicit IDENTITY column
        if (not getattr(column.table, 'has_sequence', False)) and column.primary_key and \
                column.autoincrement and isinstance(column.type, types.Integer) and not column.foreign_key:
            if column.default is None or (isinstance(column.default, schema.Sequence) and column.default.optional):
                column.sequence = schema.Sequence(column.name + '_seq')
        if not column.nullable:
            colspec += " NOT NULL"
        if hasattr(column, 'sequence'):
            # Identity column: re-render entirely as "<name> counter".
            column.table.has_sequence = column
            colspec = self.preparer.format_column(column) + " counter"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
        return colspec
class AccessSchemaDropper(compiler.SchemaDropper):
    """Emits Access-flavoured DDL for dropping schema constructs."""
    def visit_index(self, index):
        """Emit and run a DROP INDEX statement for *index*."""
        ddl = "\nDROP INDEX [%s].[%s]" % (index.table.name, index.name)
        self.append(ddl)
        self.execute()
class AccessDefaultRunner(base.DefaultRunner):
    """Column-default executor; the stock behavior suffices for Access."""
    pass
class AccessIdentifierPreparer(compiler.IdentifierPreparer):
    """Quotes identifiers with Access's square brackets and extends the
    reserved-word list with Access-specific keywords."""
    reserved_words = compiler.RESERVED_WORDS.copy()
    reserved_words.update(['value', 'text'])
    def __init__(self, dialect):
        super(AccessIdentifierPreparer, self).__init__(dialect, initial_quote='[', final_quote=']')
# Wire the Access-specific implementation classes into the dialect; SQLAlchemy
# discovers everything through this module-level ``dialect`` attribute.
dialect = AccessDialect
dialect.poolclass = pool.SingletonThreadPool
dialect.statement_compiler = AccessCompiler
dialect.schemagenerator = AccessSchemaGenerator
dialect.schemadropper = AccessSchemaDropper
dialect.preparer = AccessIdentifierPreparer
dialect.defaultrunner = AccessDefaultRunner
| {
"repo_name": "santisiri/popego",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.3-py2.5.egg/sqlalchemy/databases/access.py",
"copies": "2",
"size": "15448",
"license": "bsd-3-clause",
"hash": -6805307869279271000,
"line_mean": 34.9255813953,
"line_max": 137,
"alpha_frac": 0.5780683584,
"autogenerated": false,
"ratio": 4.283971159179146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005731862355094436,
"num_lines": 430
} |
"""
Support for the Microsoft Access database.
This dialect is *not* ported to SQLAlchemy 0.6.
This dialect is *not* tested on SQLAlchemy 0.6.
"""
from sqlalchemy import sql, schema, types, exc, pool
from sqlalchemy.sql import compiler, expression
from sqlalchemy.engine import default, base
from sqlalchemy import processors
class AcNumeric(types.Numeric):
    """NUMERIC column type for Access."""
    def get_col_spec(self):
        return "NUMERIC"
    def bind_processor(self, dialect):
        # Bind values as strings so they round-trip through the ODBC driver.
        return processors.to_str
    def result_processor(self, dialect, coltype):
        # No conversion needed on fetch.
        return None
class AcFloat(types.Float):
    """FLOAT column type for Access."""
    def get_col_spec(self):
        return "FLOAT"
    def bind_processor(self, dialect):
        """By converting to string, we can use Decimal types round-trip."""
        return processors.to_str
class AcInteger(types.Integer):
    """INTEGER column type for Access."""
    def get_col_spec(self):
        return "INTEGER"
class AcTinyInteger(types.Integer):
    """TINYINT column type for Access."""
    def get_col_spec(self):
        return "TINYINT"
class AcSmallInteger(types.SmallInteger):
    """SMALLINT column type for Access."""
    def get_col_spec(self):
        return "SMALLINT"
class AcDateTime(types.DateTime):
    """DATETIME column type for Access."""
    def __init__(self, *a, **kw):
        # Positional False presumably disables the timezone flag of the
        # generic DateTime type -- TODO confirm against the base signature.
        super(AcDateTime, self).__init__(False)
    def get_col_spec(self):
        return "DATETIME"
class AcDate(types.Date):
    """Date column type; Access has no pure DATE type, so DATETIME is used."""
    def __init__(self, *a, **kw):
        # Positional False presumably disables the timezone flag -- TODO confirm.
        super(AcDate, self).__init__(False)
    def get_col_spec(self):
        return "DATETIME"
class AcText(types.Text):
    """Unbounded text column; maps to Access's MEMO type."""
    def get_col_spec(self):
        return "MEMO"
class AcString(types.String):
    """Variable-length string column; renders as TEXT or TEXT(n)."""
    def get_col_spec(self):
        """Append a length specifier only when one is configured."""
        if self.length:
            return "TEXT" + ("(%d)" % self.length)
        return "TEXT"
class AcUnicode(types.Unicode):
    """Unicode text column; Access TEXT columns are inherently Unicode,
    so no bind/result conversion is installed."""
    def get_col_spec(self):
        """Append a length specifier only when one is configured."""
        if self.length:
            return "TEXT" + ("(%d)" % self.length)
        return "TEXT"
    def bind_processor(self, dialect):
        # No bind-side conversion required.
        return None
    def result_processor(self, dialect, coltype):
        # No result-side conversion required.
        return None
class AcChar(types.CHAR):
    """Fixed-width character column; Access renders it as TEXT/TEXT(n)."""
    def get_col_spec(self):
        """Append a length specifier only when one is configured."""
        if self.length:
            return "TEXT" + ("(%d)" % self.length)
        return "TEXT"
class AcBinary(types.LargeBinary):
    """Binary blob column type for Access."""
    def get_col_spec(self):
        return "BINARY"
class AcBoolean(types.Boolean):
    """Boolean column; maps to Access's YESNO type."""
    def get_col_spec(self):
        return "YESNO"
class AcTimeStamp(types.TIMESTAMP):
    """TIMESTAMP column type for Access."""
    def get_col_spec(self):
        return "TIMESTAMP"
class AccessExecutionContext(default.DefaultExecutionContext):
    """Execution context that recovers the last COUNTER (identity) value
    after an INSERT via ``SELECT @@identity``."""
    def _has_implicit_sequence(self, column):
        """Return True when *column* will be rendered as an implicit
        identity (counter) column."""
        if column.primary_key and column.autoincrement:
            if isinstance(column.type, types.Integer) and not column.foreign_keys:
                if column.default is None or (isinstance(column.default, schema.Sequence) and \
                        column.default.optional):
                    return True
        return False
    def post_exec(self):
        """If we inserted into a row with a COUNTER column, fetch the ID"""
        if self.compiled.isinsert:
            tbl = self.compiled.statement.table
            if not hasattr(tbl, 'has_sequence'):
                # Cache the identity column (or None) on the table object.
                tbl.has_sequence = None
                for column in tbl.c:
                    if getattr(column, 'sequence', False) or self._has_implicit_sequence(column):
                        tbl.has_sequence = column
                        break
            if bool(tbl.has_sequence):
                # TBD: for some reason _last_inserted_ids doesn't exist here
                # (but it does at corresponding point in mssql???)
                #if not len(self._last_inserted_ids) or self._last_inserted_ids[0] is None:
                self.cursor.execute("SELECT @@identity AS lastrowid")
                row = self.cursor.fetchone()
                self._last_inserted_ids = [int(row[0])] #+ self._last_inserted_ids[1:]
                # print "LAST ROW ID", self._last_inserted_ids
        super(AccessExecutionContext, self).post_exec()
# Module-level handles for the win32com constant namespace and the DAO COM
# engine; both are populated lazily the first time AccessDialect.dbapi() runs.
const, daoEngine = None, None
class AccessDialect(default.DefaultDialect):
    """Partially 0.6-ported dialect for Microsoft Access (Jet).

    SQL execution goes through pyodbc; schema reflection uses a
    COM-dispatched DAO engine (see ``dbapi`` below).
    """
    # Map generic SQLAlchemy types onto the Access-specific Ac* types.
    colspecs = {
        types.Unicode : AcUnicode,
        types.Integer : AcInteger,
        types.SmallInteger: AcSmallInteger,
        types.Numeric : AcNumeric,
        types.Float : AcFloat,
        types.DateTime : AcDateTime,
        types.Date : AcDate,
        types.String : AcString,
        types.LargeBinary : AcBinary,
        types.Boolean : AcBoolean,
        types.Text : AcText,
        types.CHAR: AcChar,
        types.TIMESTAMP: AcTimeStamp,
    }
    name = 'access'
    # The Access ODBC driver cannot report reliable rowcounts.
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    ported_sqla_06 = False
    def type_descriptor(self, typeobj):
        """Return the Access-specific type object for a generic type."""
        newobj = types.adapt_type(typeobj, self.colspecs)
        return newobj
    def __init__(self, **params):
        super(AccessDialect, self).__init__(**params)
        self.text_as_varchar = False
        self._dtbs = None
    def dbapi(cls):
        """Load pyodbc, first locating a DAO engine for reflection.

        The win32com imports happen here (not at module level) so the
        module can at least be imported on non-Windows platforms.
        """
        import win32com.client, pythoncom
        global const, daoEngine
        if const is None:
            const = win32com.client.constants
            # Try DAO 3.6, then 3.5, then 3.0.
            for suffix in (".36", ".35", ".30"):
                try:
                    daoEngine = win32com.client.gencache.EnsureDispatch("DAO.DBEngine" + suffix)
                    break
                except pythoncom.com_error:
                    pass
            else:
                raise exc.InvalidRequestError("Can't find a DB engine. Check http://support.microsoft.com/kb/239114 for details.")
        import pyodbc as module
        return module
    dbapi = classmethod(dbapi)
    def create_connect_args(self, url):
        """Build the ODBC connection string from the SQLAlchemy URL."""
        opts = url.translate_connect_args()
        connectors = ["Driver={Microsoft Access Driver (*.mdb)}"]
        connectors.append("Dbq=%s" % opts["database"])
        user = opts.get("username", None)
        if user:
            connectors.append("UID=%s" % user)
            connectors.append("PWD=%s" % opts.get("password", ""))
        return [[";".join(connectors)], {}]
    def last_inserted_ids(self):
        return self.context.last_inserted_ids
    def do_execute(self, cursor, statement, params, **kwargs):
        # pyodbc rejects an empty dict of parameters; pass an empty tuple.
        if params == {}:
            params = ()
        super(AccessDialect, self).do_execute(cursor, statement, params, **kwargs)
    def _execute(self, c, statement, parameters):
        try:
            if parameters == {}:
                parameters = ()
            c.execute(statement, parameters)
            self.context.rowcount = c.rowcount
        except Exception, e:
            raise exc.DBAPIError.instance(statement, parameters, e)
    def has_table(self, connection, tablename, schema=None):
        """Probe for *tablename* by selecting from it."""
        # This approach seems to be more reliable that using DAO
        try:
            connection.execute('select top 1 * from [%s]' % tablename)
            return True
        except Exception, e:
            return False
    def reflecttable(self, connection, table, include_columns):
        """Populate *table* with columns, keys and indexes via DAO."""
        # This is defined in the function, as it relies on win32com constants,
        # that aren't imported until dbapi method is called
        if not hasattr(self, 'ischema_names'):
            self.ischema_names = {
                const.dbByte: AcBinary,
                const.dbInteger: AcInteger,
                const.dbLong: AcInteger,
                const.dbSingle: AcFloat,
                const.dbDouble: AcFloat,
                const.dbDate: AcDateTime,
                const.dbLongBinary: AcBinary,
                const.dbMemo: AcText,
                const.dbBoolean: AcBoolean,
                const.dbText: AcUnicode, # All Access strings are unicode
                const.dbCurrency: AcNumeric,
            }
        # A fresh DAO connection is opened for each reflection
        # This is necessary, so we get the latest updates
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
        try:
            # Case-insensitive lookup of the table definition.
            for tbl in dtbs.TableDefs:
                if tbl.Name.lower() == table.name.lower():
                    break
            else:
                raise exc.NoSuchTableError(table.name)
            for col in tbl.Fields:
                coltype = self.ischema_names[col.Type]
                if col.Type == const.dbText:
                    coltype = coltype(col.Size)
                colargs = \
                    {
                        'nullable': not(col.Required or col.Attributes & const.dbAutoIncrField),
                    }
                default = col.DefaultValue
                if col.Attributes & const.dbAutoIncrField:
                    colargs['default'] = schema.Sequence(col.Name + '_seq')
                elif default:
                    if col.Type == const.dbBoolean:
                        default = default == 'Yes' and '1' or '0'
                    colargs['server_default'] = schema.DefaultClause(sql.text(default))
                table.append_column(schema.Column(col.Name, coltype, **colargs))
                # TBD: check constraints
            # Find primary key columns first
            for idx in tbl.Indexes:
                if idx.Primary:
                    for col in idx.Fields:
                        thecol = table.c[col.Name]
                        table.primary_key.add(thecol)
                        if isinstance(thecol.type, AcInteger) and \
                                not (thecol.default and isinstance(thecol.default.arg, schema.Sequence)):
                            thecol.autoincrement = False
            # Then add other indexes
            for idx in tbl.Indexes:
                if not idx.Primary:
                    if len(idx.Fields) == 1:
                        col = table.c[idx.Fields[0].Name]
                        if not col.primary_key:
                            col.index = True
                            col.unique = idx.Unique
                    else:
                        pass # TBD: multi-column indexes
            for fk in dtbs.Relations:
                if fk.ForeignTable != table.name:
                    continue
                scols = [c.ForeignName for c in fk.Fields]
                rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields]
                table.append_constraint(schema.ForeignKeyConstraint(scols, rcols, link_to_name=True))
        finally:
            dtbs.Close()
    def table_names(self, connection, schema):
        """List user tables, skipping system (MSys*) and temp (~TMP*) ones."""
        # A fresh DAO connection is opened for each reflection
        # This is necessary, so we get the latest updates
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
        names = [t.Name for t in dtbs.TableDefs if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"]
        dtbs.Close()
        return names
class AccessCompiler(compiler.SQLCompiler):
    """Statement compiler implementing Access-specific SQL syntax."""
    # EXTRACT field names mapped to Access's DATEPART interval codes.
    extract_map = compiler.SQLCompiler.extract_map.copy()
    extract_map.update ({
            'month': 'm',
            'day': 'd',
            'year': 'yyyy',
            'second': 's',
            'hour': 'h',
            'doy': 'y',
            'minute': 'n',
            'quarter': 'q',
            'dow': 'w',
            'week': 'ww'
    })
    def visit_select_precolumns(self, select):
        """Access puts TOP, it's version of LIMIT here """
        s = select.distinct and "DISTINCT " or ""
        if select.limit:
            s += "TOP %s " % (select.limit)
        if select.offset:
            raise exc.InvalidRequestError('Access does not support LIMIT with an offset')
        return s
    def limit_clause(self, select):
        """Limit in access is after the select keyword"""
        return ""
    def binary_operator_string(self, binary):
        """Access uses "mod" instead of "%" """
        return binary.operator == '%' and 'mod' or binary.operator
    def label_select_column(self, select, column, asfrom):
        # Functions must always be labeled so Access can reference them.
        if isinstance(column, expression.Function):
            return column.label()
        else:
            return super(AccessCompiler, self).label_select_column(select, column, asfrom)
    function_rewrites = {'current_date': 'now',
                         'current_timestamp': 'now',
                         'length': 'len',
                         }
    def visit_function(self, func):
        """Access function names differ from the ANSI SQL names; rewrite common ones"""
        func.name = self.function_rewrites.get(func.name, func.name)
        return super(AccessCompiler, self).visit_function(func)
    def for_update_clause(self, select):
        """FOR UPDATE is not supported by Access; silently ignore"""
        return ''
    # Strip schema
    def visit_table(self, table, asfrom=False, **kwargs):
        if asfrom:
            return self.preparer.quote(table.name, table.quote)
        else:
            return ""
    def visit_join(self, join, asfrom=False, **kwargs):
        """Render joins; Access has no plain JOIN keyword, only
        INNER/LEFT OUTER JOIN."""
        return (self.process(join.left, asfrom=True) + (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN ") + \
            self.process(join.right, asfrom=True) + " ON " + self.process(join.onclause))
    def visit_extract(self, extract):
        """Render EXTRACT(...) as Access's DATEPART function."""
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (field, self.process(extract.expr))
class AccessDDLCompiler(compiler.DDLCompiler):
    """DDL compiler with Access-specific column and index handling."""
    def get_column_specification(self, column, **kwargs):
        """Render one column clause, mapping implicit identity columns
        to Access's ``counter`` type."""
        colspec = self.preparer.format_column(column) + " " + column.type.dialect_impl(self.dialect).get_col_spec()
        # install a sequence if we have an implicit IDENTITY column
        if (not getattr(column.table, 'has_sequence', False)) and column.primary_key and \
                column.autoincrement and isinstance(column.type, types.Integer) and not column.foreign_keys:
            if column.default is None or (isinstance(column.default, schema.Sequence) and column.default.optional):
                column.sequence = schema.Sequence(column.name + '_seq')
        if not column.nullable:
            colspec += " NOT NULL"
        if hasattr(column, 'sequence'):
            # Identity column: re-render entirely as "<name> counter".
            column.table.has_sequence = column
            colspec = self.preparer.format_column(column) + " counter"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
        return colspec
    def visit_drop_index(self, drop):
        """Emit Access's table-qualified DROP INDEX form."""
        index = drop.element
        self.append("\nDROP INDEX [%s].[%s]" % (index.table.name, self._validate_identifier(index.name, False)))
class AccessIdentifierPreparer(compiler.IdentifierPreparer):
    """Quotes identifiers with Access's square brackets and extends the
    reserved-word list with Access-specific keywords."""
    reserved_words = compiler.RESERVED_WORDS.copy()
    reserved_words.update(['value', 'text'])
    def __init__(self, dialect):
        super(AccessIdentifierPreparer, self).__init__(dialect, initial_quote='[', final_quote=']')
# Wire the Access-specific implementation classes into the dialect; SQLAlchemy
# discovers everything through this module-level ``dialect`` attribute.
dialect = AccessDialect
dialect.poolclass = pool.SingletonThreadPool
dialect.statement_compiler = AccessCompiler
dialect.ddlcompiler = AccessDDLCompiler
dialect.preparer = AccessIdentifierPreparer
dialect.execution_ctx_cls = AccessExecutionContext
| {
"repo_name": "obeattie/sqlalchemy",
"path": "lib/sqlalchemy/dialects/access/base.py",
"copies": "1",
"size": "15155",
"license": "mit",
"hash": -197028102134500640,
"line_mean": 35.3429256595,
"line_max": 130,
"alpha_frac": 0.5813922798,
"autogenerated": false,
"ratio": 4.132806108535588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004090275959630432,
"num_lines": 417
} |
"""
Support for Microsoft Access via pyodbc.
pyodbc is available at:
http://pypi.python.org/pypi/pyodbc/
Connecting
^^^^^^^^^^
Examples of pyodbc connection string URLs:
* ``access+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``.
"""
from .base import AccessExecutionContext, AccessDialect
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, util
import decimal
class _AccessNumeric_pyodbc(sqltypes.Numeric):
    """Turns Decimals with adjusted() < 0 or > 7 into strings.
    The routines here are needed for older pyodbc versions
    as well as current mxODBC versions.
    """
    def bind_processor(self, dialect):
        """Return a bind function that stringifies very small or very
        large Decimals before delegating to the parent processor."""
        super_process = super(_AccessNumeric_pyodbc, self).\
            bind_processor(dialect)
        if not dialect._need_decimal_fix:
            return super_process
        def process(value):
            if self.asdecimal and \
                    isinstance(value, decimal.Decimal):
                adjusted = value.adjusted()
                # adjusted() is the exponent of the most significant digit;
                # outside [0, 7] the driver mishandles the value, so format
                # it to a plain (non-scientific) string ourselves.
                if adjusted < 0:
                    return self._small_dec_to_string(value)
                elif adjusted > 7:
                    return self._large_dec_to_string(value)
            if super_process:
                return super_process(value)
            else:
                return value
        return process
    # these routines needed for older versions of pyodbc.
    # as of 2.1.8 this logic is integrated.
    def _small_dec_to_string(self, value):
        # Magnitude < 1: emit "0.<leading zeros><digits>" with explicit sign.
        return "%s0.%s%s" % (
                (value < 0 and '-' or ''),
                '0' * (abs(value.adjusted()) - 1),
                "".join([str(nint) for nint in value.as_tuple()[1]]))
    def _large_dec_to_string(self, value):
        # Large magnitude: expand scientific notation to a plain string.
        _int = value.as_tuple()[1]
        if 'E' in str(value):
            # Scientific form: pad the digit string with trailing zeros.
            result = "%s%s%s" % (
                    (value < 0 and '-' or ''),
                    "".join([str(s) for s in _int]),
                    "0" * (value.adjusted() - (len(_int)-1)))
        else:
            if (len(_int) - 1) > value.adjusted():
                # Digits extend past the decimal point: split around it.
                result = "%s%s.%s" % (
                        (value < 0 and '-' or ''),
                        "".join(
                            [str(s) for s in _int][0:value.adjusted() + 1]),
                        "".join(
                            [str(s) for s in _int][value.adjusted() + 1:]))
            else:
                result = "%s%s" % (
                        (value < 0 and '-' or ''),
                        "".join(
                            [str(s) for s in _int][0:value.adjusted() + 1]))
        return result
class AccessExecutionContext_pyodbc(AccessExecutionContext):
    """pyodbc-specific execution context; no behavior beyond the base."""
    pass
class AccessDialect_pyodbc(PyODBCConnector, AccessDialect):
    """Access dialect wired to the pyodbc DBAPI, with the Decimal
    string-formatting workaround installed for Numeric columns."""
    execution_ctx_cls = AccessExecutionContext_pyodbc
    pyodbc_driver_name = 'Microsoft Access'
    colspecs = util.update_copy(
        AccessDialect.colspecs,
        {
            sqltypes.Numeric:_AccessNumeric_pyodbc
        }
    )
| {
"repo_name": "luzfcb/sqlalchemy-access",
"path": "sqlalchemy_access/pyodbc.py",
"copies": "1",
"size": "3203",
"license": "mit",
"hash": -6761128357402641000,
"line_mean": 28.3853211009,
"line_max": 84,
"alpha_frac": 0.5482360287,
"autogenerated": false,
"ratio": 4.039092055485498,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011184961217167572,
"num_lines": 109
} |
"""Access Rules Classes."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.policy_services.accesspolicies import AccessPolicies
from fmcapi.api_objects.policy_services.intrusionpolicies import IntrusionPolicies
from fmcapi.api_objects.object_services.variablesets import VariableSets
from fmcapi.api_objects.object_services.securityzones import SecurityZones
from fmcapi.api_objects.object_services.vlantags import VlanTags
from fmcapi.api_objects.object_services.portobjectgroups import PortObjectGroups
from fmcapi.api_objects.object_services.protocolportobjects import ProtocolPortObjects
from fmcapi.api_objects.object_services.fqdns import FQDNS
from fmcapi.api_objects.object_services.networkgroups import NetworkGroups
from fmcapi.api_objects.object_services.networkaddresses import NetworkAddresses
from fmcapi.api_objects.policy_services.filepolicies import FilePolicies
from fmcapi.api_objects.object_services.isesecuritygrouptags import ISESecurityGroupTags
from fmcapi.api_objects.helper_functions import (
get_networkaddress_type,
true_false_checker,
)
from fmcapi.api_objects.object_services.applications import Applications
from fmcapi.api_objects.object_services.applicationfilters import ApplicationFilters
from fmcapi.api_objects.object_services.urlgroups import URLGroups
from fmcapi.api_objects.object_services.urls import URLs
import logging
import sys
class AccessRules(APIClassTemplate):
"""
The AccessRules Object in the FMC.
"""
VALID_JSON_DATA = [
"id",
"name",
"type",
"action",
"enabled",
"sendEventsToFMC",
"logFiles",
"logBegin",
"logEnd",
"variableSet",
"originalSourceNetworks",
"vlanTags",
"sourceNetworks",
"destinationNetworks",
"sourcePorts",
"destinationPorts",
"ipsPolicy",
"urls",
"sourceZones",
"destinationZones",
"applications",
"filePolicy",
"sourceSecurityGroupTags",
"destinationSecurityGroupTags",
"enableSyslog",
"newComments",
"commentHistoryList",
]
VALID_FOR_KWARGS = VALID_JSON_DATA + [
"acp_id",
"acp_name",
"insertBefore",
"insertAfter",
"section",
"category",
]
PREFIX_URL = "/policy/accesspolicies"
REQUIRED_FOR_POST = ["name", "acp_id"]
REQUIRED_FOR_GET = ["acp_id"]
VALID_FOR_ACTION = [
"ALLOW",
"TRUST",
"BLOCK",
"MONITOR",
"BLOCK_RESET",
"BLOCK_INTERACTIVE",
"BLOCK_RESET_INTERACTIVE",
]
VALID_CHARACTERS_FOR_NAME = """[.\w\d_\- ]"""
@property
def URL_SUFFIX(self):
"""
Add the URL suffixes for categories, insertBefore and insertAfter
NOTE: You must specify these at the time the object is initialized (created) for this feature
to work correctly. Example:
This works:
new_rule = AccessRules(fmc=fmc, acp_name='acp1', insertBefore=2)
This does not:
new_rule = AccessRules(fmc=fmc, acp_name='acp1')
new_rule.insertBefore = 2
"""
url = "?"
if "category" in self.__dict__:
url = f"{url}category={self.category}&"
if "insertBefore" in self.__dict__:
url = f"{url}insertBefore={self.insertBefore}&"
if "insertAfter" in self.__dict__:
url = f"{url}insertAfter={self.insertAfter}&"
if "insertBefore" in self.__dict__ and "insertAfter" in self.__dict__:
logging.warning("ACP rule has both insertBefore and insertAfter params")
if "section" in self.__dict__:
url = f"{url}section={self.section}&"
return url[:-1]
    # Boolean rule flags: each setter normalizes its input (e.g. "true"/"True")
    # to a real bool via true_false_checker() before storing it.
    @property
    def enabled(self):
        # Whether this rule is enabled.
        return self._enabled
    @enabled.setter
    def enabled(self, value=False):
        self._enabled = true_false_checker(value)
    @property
    def logBegin(self):
        # Log at connection start.
        return self._logBegin
    @logBegin.setter
    def logBegin(self, value=False):
        self._logBegin = true_false_checker(value)
    @property
    def logEnd(self):
        # Log at connection end.
        return self._logEnd
    @logEnd.setter
    def logEnd(self, value=False):
        self._logEnd = true_false_checker(value)
    @property
    def sendEventsToFMC(self):
        # Forward connection events to the FMC.
        return self._sendEventsToFMC
    @sendEventsToFMC.setter
    def sendEventsToFMC(self, value=False):
        self._sendEventsToFMC = true_false_checker(value)
    @property
    def enableSyslog(self):
        # Forward events to syslog.
        return self._enableSyslog
    @enableSyslog.setter
    def enableSyslog(self, value=False):
        self._enableSyslog = true_false_checker(value)
    # Comment fields are read-only properties; they are only populated via
    # kwargs in parse_kwargs().
    @property
    def newComments(self):
        return self._newComments
    @property
    def commentHistoryList(self):
        return self._commentHistoryList
    def __init__(self, fmc, **kwargs):
        """
        Initialize AccessRules object.
        Set self.type to "AccessRule", parse the kwargs, and set up the self.URL.
        :param fmc: (object) FMC object
        :param kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for AccessRules class.")
        self.type = "AccessRule"
        # Backing fields for the properties; their setters normalize values.
        self._enabled = False
        self._logBegin = False
        self._logEnd = False
        self._sendEventsToFMC = False
        self._enableSyslog = False
        self._newComments = []
        self._commentHistoryList = []
        self.parse_kwargs(**kwargs)
        # URL_SUFFIX only reflects kwargs given at construction time.
        self.URL = f"{self.URL}{self.URL_SUFFIX}"
def format_data(self, filter_query=""):
"""
Gather all the data in preparation for sending to API in JSON format.
:param filter_query: (str) 'all' or 'kwargs'
:return: (dict) json_data
"""
json_data = super().format_data(filter_query=filter_query)
logging.debug("In format_data() for AccessRules class.")
if "sourceNetworks" in self.__dict__:
json_data["sourceNetworks"] = {"objects": self.sourceNetworks["objects"]}
json_data["sourceNetworks"]["literals"] = [
{"type": v, "value": k}
for k, v in self.sourceNetworks["literals"].items()
]
if "destinationNetworks" in self.__dict__:
json_data["destinationNetworks"] = {
"objects": self.destinationNetworks["objects"]
}
json_data["destinationNetworks"]["literals"] = [
{"type": v, "value": k}
for k, v in self.destinationNetworks["literals"].items()
]
if "action" in self.__dict__:
if self.action not in self.VALID_FOR_ACTION:
logging.warning(f"Action {self.action} is not a valid action.")
logging.warning(f"\tValid actions are: {self.VALID_FOR_ACTION}.")
return json_data
    def parse_kwargs(self, **kwargs):
        """
        Parse the kwargs and set self variables to match.
        :return: None
        """
        super().parse_kwargs(**kwargs)
        logging.debug("In parse_kwargs() for AccessRules class.")
        # Either acp_id or acp_name resolves the parent policy and the URL.
        if "acp_id" in kwargs:
            self.acp(id=kwargs["acp_id"])
        if "acp_name" in kwargs:
            self.acp(name=kwargs["acp_name"])
        if "action" in kwargs:
            if kwargs["action"] in self.VALID_FOR_ACTION:
                self.action = kwargs["action"]
            else:
                logging.warning(f"Action {kwargs['action']} is not a valid action.")
                logging.warning(f"\tValid actions are: {self.VALID_FOR_ACTION}.")
        # Networks come in API form (lists); store literals as a
        # {value: type} dict for easy add/remove (format_data reverses this).
        if "sourceNetworks" in kwargs:
            self.sourceNetworks = {"objects": [], "literals": {}}
            if kwargs["sourceNetworks"].get("objects"):
                self.sourceNetworks["objects"] = kwargs["sourceNetworks"]["objects"]
            if kwargs["sourceNetworks"].get("literals"):
                for literal in kwargs["sourceNetworks"]["literals"]:
                    self.sourceNetworks["literals"][literal["value"]] = literal["type"]
        if "destinationNetworks" in kwargs:
            self.destinationNetworks = {"objects": [], "literals": {}}
            if kwargs["destinationNetworks"].get("objects"):
                self.destinationNetworks["objects"] = kwargs["destinationNetworks"][
                    "objects"
                ]
            if kwargs["destinationNetworks"].get("literals"):
                for literal in kwargs["destinationNetworks"]["literals"]:
                    self.destinationNetworks["literals"][literal["value"]] = literal[
                        "type"
                    ]
        # Boolean flags are routed through their property setters.
        if "enableSyslog" in kwargs:
            self.enableSyslog = kwargs["enableSyslog"]
        if "logBegin" in kwargs:
            self.logBegin = kwargs["logBegin"]
        if "logEnd" in kwargs:
            self.logEnd = kwargs["logEnd"]
        if "enabled" in kwargs:
            self.enabled = kwargs["enabled"]
        if "sendEventsToFMC" in kwargs:
            self.sendEventsToFMC = kwargs["sendEventsToFMC"]
        if "newComments" in kwargs:
            self._newComments = kwargs["newComments"]
        if "commentHistoryList" in kwargs:
            self._commentHistoryList = kwargs["commentHistoryList"]
def acp(self, name="", id=""):
"""
Associate an AccessPolicies object with this AccessRule object.
:param name: (str) Name of ACP.
:param id: (str) ID of ACP.
:return: None
"""
# either name or id of the ACP should be given
logging.debug("In acp() for AccessRules class.")
if id != "":
self.acp_id = id
self.URL = f"{self.fmc.configuration_url}{self.PREFIX_URL}/{id}/accessrules"
elif name != "":
acp1 = AccessPolicies(fmc=self.fmc)
acp1.get(name=name)
if "id" in acp1.__dict__:
self.acp_id = acp1.id
self.URL = f"{self.fmc.configuration_url}{self.PREFIX_URL}/{acp1.id}/accessrules"
else:
logging.warning(
f"Access Control Policy {name} not found. Cannot set up accessPolicy for AccessRules."
)
else:
logging.error("No accessPolicy name or ID was provided.")
def intrusion_policy(self, action, name=""):
"""
Add/remove name of ipsPolicy field of AccessRules object.
:param action: (str) 'set', or 'clear'
:param name: (str) Name of intrusion policy in FMC.
:return: None
"""
logging.debug("In intrusion_policy() for AccessRules class.")
if action == "clear":
if "ipsPolicy" in self.__dict__:
del self.ipsPolicy
logging.info("Intrusion Policy removed from this AccessRules object.")
elif action == "set":
ips = IntrusionPolicies(fmc=self.fmc, name=name)
ips.get()
self.ipsPolicy = {"name": ips.name, "id": ips.id, "type": ips.type}
logging.info(
f'Intrusion Policy set to "{name}" for this AccessRules object.'
)
def variable_set(self, action, name="Default-Set"):
"""
Add/remove name of variableSet field of AccessRules object.
:param action: (str) 'set', or 'clear'
:param name: (str) Name of variable set in FMC.
:return: None
"""
logging.debug("In variable_set() for AccessRules class.")
if action == "clear":
if "variableSet" in self.__dict__:
del self.variableSet
logging.info("Variable Set removed from this AccessRules object.")
elif action == "set":
vs = VariableSets(fmc=self.fmc)
vs.get(name=name)
self.variableSet = {"name": vs.name, "id": vs.id, "type": vs.type}
logging.info(f'VariableSet set to "{name}" for this AccessRules object.')
def file_policy(self, action, name="None"):
"""
Add/remove name of filePolicy field of AccessRules object.
:param action: (str) 'set', or 'clear'
:param name: (str) Name of file policy in FMC.
:return: None
"""
logging.debug("In file_policy() for ACPRule class.")
if action == "clear":
if "filePolicy" in self.__dict__:
del self.filePolicy
logging.info("file_policy removed from this AccessRules object.")
elif action == "set":
fp = FilePolicies(fmc=self.fmc)
fp.get(name=name)
self.filePolicy = {"name": fp.name, "id": fp.id, "type": fp.type}
logging.info(f'file_policy set to "{name}" for this AccessRules object.')
    def vlan_tags(self, action, name=""):
        """
        Add/modify name to vlanTags field of AccessRules object.

        :param action: (str) 'add', 'remove', or 'clear'
        :param name: (str) Name of VLAN tag in FMC.
        :return: None
        """
        logging.debug("In vlan_tags() for AccessRules class.")
        if action == "add":
            # Look up the VLAN tag in FMC; a successful get() populates 'id'.
            vlantag = VlanTags(fmc=self.fmc)
            vlantag.get(name=name)
            if "id" in vlantag.__dict__:
                if "vlanTags" in self.__dict__:
                    new_vlan = {
                        "name": vlantag.name,
                        "id": vlantag.id,
                        "type": vlantag.type,
                    }
                    # Skip the append if an entry with the same name exists.
                    duplicate = False
                    for obj in self.vlanTags["objects"]:
                        if obj["name"] == new_vlan["name"]:
                            duplicate = True
                            break
                    if not duplicate:
                        self.vlanTags["objects"].append(new_vlan)
                        logging.info(
                            f'Adding "{name}" to vlanTags for this AccessRules.'
                        )
                else:
                    # First VLAN tag on this rule: create the vlanTags structure.
                    self.vlanTags = {
                        "objects": [
                            {
                                "name": vlantag.name,
                                "id": vlantag.id,
                                "type": vlantag.type,
                            }
                        ]
                    }
                    logging.info(f'Adding "{name}" to vlanTags for this AccessRules.')
            else:
                logging.warning(
                    f'VLAN Tag, "{name}", not found. Cannot add to AccessRules.'
                )
        elif action == "remove":
            vlantag = VlanTags(fmc=self.fmc)
            vlantag.get(name=name)
            if "id" in vlantag.__dict__:
                if "vlanTags" in self.__dict__:
                    # Rebuild the objects list without the entry matching 'name'.
                    objects = []
                    for obj in self.vlanTags["objects"]:
                        if obj["name"] != name:
                            objects.append(obj)
                    self.vlanTags["objects"] = objects
                    logging.info(
                        f'Removed "{name}" from vlanTags for this AccessRules.'
                    )
                else:
                    logging.info(
                        "vlanTags doesn't exist for this AccessRules. Nothing to remove."
                    )
            else:
                logging.warning(
                    f"VLAN Tag, {name}, not found. Cannot remove from AccessRules."
                )
        elif action == "clear":
            # Drop the attribute entirely so it is omitted from the posted JSON.
            if "vlanTags" in self.__dict__:
                del self.vlanTags
                logging.info("All VLAN Tags removed from this AccessRules object.")
def source_zone(self, action, name=""):
"""
Add/modify name to sourceZones field of AccessRules object.
:param action: (str) 'add', 'remove', or 'clear'
:param name: (str) Name of Security Zone in FMC.
:return: None
"""
logging.debug("In source_zone() for AccessRules class.")
if action == "add":
sz = SecurityZones(fmc=self.fmc)
sz.get(name=name)
if "id" in sz.__dict__:
if "sourceZones" in self.__dict__:
new_zone = {"name": sz.name, "id": sz.id, "type": sz.type}
duplicate = False
for obj in self.sourceZones["objects"]:
if obj["name"] == new_zone["name"]:
duplicate = True
break
if not duplicate:
self.sourceZones["objects"].append(new_zone)
logging.info(
f'Adding "{name}" to sourceZones for this AccessRules.'
)
else:
self.sourceZones = {
"objects": [{"name": sz.name, "id": sz.id, "type": sz.type}]
}
logging.info(
f'Adding "{name}" to sourceZones for this AccessRules.'
)
else:
logging.warning(
'Security Zone, "{name}", not found. Cannot add to AccessRules.'
)
elif action == "remove":
sz = SecurityZones(fmc=self.fmc)
sz.get(name=name)
if "id" in sz.__dict__:
if "sourceZones" in self.__dict__:
objects = []
for obj in self.sourceZones["objects"]:
if obj["name"] != name:
objects.append(obj)
self.sourceZones["objects"] = objects
logging.info(
f'Removed "{name}" from sourceZones for this AccessRules.'
)
else:
logging.info(
"sourceZones doesn't exist for this AccessRules. Nothing to remove."
)
else:
logging.warning(
f'Security Zone, "{name}", not found. Cannot remove from AccessRules.'
)
elif action == "clear":
if "sourceZones" in self.__dict__:
del self.sourceZones
logging.info("All Source Zones removed from this AccessRules object.")
def destination_zone(self, action, name=""):
"""
Add/modify name to destinationZones field of AccessRules object.
:param action: (str) 'add', 'remove', or 'clear'
:param name: (str) Name of Security Zone in FMC.
:return: None
"""
logging.debug("In destination_zone() for AccessRules class.")
if action == "add":
sz = SecurityZones(fmc=self.fmc)
sz.get(name=name)
if "id" in sz.__dict__:
if "destinationZones" in self.__dict__:
new_zone = {"name": sz.name, "id": sz.id, "type": sz.type}
duplicate = False
for obj in self.destinationZones["objects"]:
if obj["name"] == new_zone["name"]:
duplicate = True
break
if not duplicate:
self.destinationZones["objects"].append(new_zone)
logging.info(
f'Adding "{name}" to destinationZones for this AccessRules.'
)
else:
self.destinationZones = {
"objects": [{"name": sz.name, "id": sz.id, "type": sz.type}]
}
logging.info(
f'Adding "{name}" to destinationZones for this AccessRules.'
)
else:
logging.warning(
f'Security Zone, "{name}", not found. Cannot add to AccessRules.'
)
elif action == "remove":
sz = SecurityZones(fmc=self.fmc)
sz.get(name=name)
if "id" in sz.__dict__:
if "destinationZones" in self.__dict__:
objects = []
for obj in self.destinationZones["objects"]:
if obj["name"] != name:
objects.append(obj)
self.destinationZones["objects"] = objects
logging.info(
'Removed "{name}" from destinationZones for this AccessRules.'
)
else:
logging.info(
"destinationZones doesn't exist for this AccessRules. Nothing to remove."
)
else:
logging.warning(
f"Security Zone, {name}, not found. Cannot remove from AccessRules."
)
elif action == "clear":
if "destinationZones" in self.__dict__:
del self.destinationZones
logging.info(
"All Destination Zones removed from this AccessRules object."
)
    def source_port(self, action, name=""):
        """
        Add/modify name to sourcePorts field of AccessRules object.

        :param action: (str) 'add', 'addgroup', 'remove', or 'clear'
        :param name: (str) Name of Port in FMC.
        :return: None
        """
        logging.debug("In source_port() for AccessRules class.")
        if action == "add":
            # Try a Protocol Port object first; fall back to a Port Object Group.
            pport_json = ProtocolPortObjects(fmc=self.fmc)
            pport_json.get(name=name)
            if "id" in pport_json.__dict__:
                item = pport_json
            else:
                item = PortObjectGroups(fmc=self.fmc)
                item.get(name=name)
            if "id" in item.__dict__:
                if "sourcePorts" in self.__dict__:
                    new_port = {"name": item.name, "id": item.id, "type": item.type}
                    duplicate = False
                    # sourcePorts may exist without an 'objects' list; create it
                    # before iterating.
                    if "objects" not in self.sourcePorts:
                        self.__dict__["sourcePorts"]["objects"] = []
                    for obj in self.sourcePorts["objects"]:
                        if obj["name"] == new_port["name"]:
                            duplicate = True
                            break
                    if not duplicate:
                        self.sourcePorts["objects"].append(new_port)
                        logging.info(
                            f'Adding "{name}" to sourcePorts for this AccessRules.'
                        )
                else:
                    # First port on this rule: create the sourcePorts structure.
                    self.sourcePorts = {
                        "objects": [
                            {"name": item.name, "id": item.id, "type": item.type}
                        ]
                    }
                    logging.info(
                        f'Adding "{name}" to sourcePorts for this AccessRules.'
                    )
            else:
                logging.warning(
                    f'Protocol Port or Protocol Port Group: "{name}", '
                    f"not found. Cannot add to AccessRules."
                )
        elif action == "addgroup":
            # Same as "add" but only considers Port Object Groups.
            item = PortObjectGroups(fmc=self.fmc)
            item.get(name=name)
            if "id" in item.__dict__:
                if "sourcePorts" in self.__dict__:
                    new_port = {"name": item.name, "id": item.id, "type": item.type}
                    duplicate = False
                    if "objects" not in self.sourcePorts:
                        self.__dict__["sourcePorts"]["objects"] = []
                    for obj in self.sourcePorts["objects"]:
                        if obj["name"] == new_port["name"]:
                            duplicate = True
                            break
                    if not duplicate:
                        self.sourcePorts["objects"].append(new_port)
                        logging.info(
                            f'Adding "{name}" to sourcePorts for this AccessRules.'
                        )
                else:
                    self.sourcePorts = {
                        "objects": [
                            {"name": item.name, "id": item.id, "type": item.type}
                        ]
                    }
                    logging.info(
                        f'Adding "{name}" to sourcePorts for this AccessRules.'
                    )
            else:
                # NOTE(review): "Port Port" below looks like a typo in this log
                # message ("Port Object Group" was probably intended).
                logging.warning(
                    f'Protocol Port Port Group: "{name}", '
                    f"not found. Cannot add to AccessRules."
                )
        elif action == "remove":
            pport_json = ProtocolPortObjects(fmc=self.fmc)
            pport_json.get(name=name)
            if "id" in pport_json.__dict__:
                item = pport_json
            else:
                item = PortObjectGroups(fmc=self.fmc)
                item.get(name=name)
            if "id" in item.__dict__:
                if "sourcePorts" in self.__dict__:
                    # Rebuild the objects list without the entry matching 'name'.
                    objects = []
                    for obj in self.sourcePorts["objects"]:
                        if obj["name"] != name:
                            objects.append(obj)
                    self.sourcePorts["objects"] = objects
                    logging.info(
                        f'Removed "{name}" from sourcePorts for this AccessRules.'
                    )
                else:
                    logging.info(
                        "sourcePorts doesn't exist for this AccessRules. Nothing to remove."
                    )
            else:
                logging.warning(
                    f'Protocol Port or Protocol Port Group: "{name}", '
                    f"not found. Cannot add to AccessRules."
                )
        elif action == "clear":
            # Drop the attribute entirely so it is omitted from the posted JSON.
            if "sourcePorts" in self.__dict__:
                del self.sourcePorts
                logging.info("All Source Ports removed from this AccessRules object.")
def destination_port(self, action, name=""):
"""
Add/modify name to destinationPorts field of AccessRules object.
:param action: (str) 'add', 'addgroup', 'remove', or 'clear'
:param name: (str) Name of Port in FMC.
:return: None
"""
logging.debug("In destination_port() for AccessRules class.")
if action == "add":
pport_json = ProtocolPortObjects(fmc=self.fmc)
pport_json.get(name=name)
if "id" in pport_json.__dict__:
item = pport_json
else:
item = PortObjectGroups(fmc=self.fmc)
item.get(name=name)
if "id" in item.__dict__:
if "destinationPorts" in self.__dict__:
new_port = {"name": item.name, "id": item.id, "type": item.type}
duplicate = False
if "objects" not in self.destinationPorts:
self.__dict__["destinationPorts"]["objects"] = []
for obj in self.destinationPorts["objects"]:
if obj["name"] == new_port["name"]:
duplicate = True
break
if not duplicate:
self.destinationPorts["objects"].append(new_port)
logging.info(
f'Adding "{name}" to destinationPorts for this AccessRules.'
)
else:
self.destinationPorts = {
"objects": [
{"name": item.name, "id": item.id, "type": item.type}
]
}
logging.info(
f'Adding "{name}" to destinationPorts for this AccessRules.'
)
else:
logging.warning(
f'Protocol Port or Protocol Port Group: "{name}", '
f"not found. Cannot add to AccessRules."
)
if action == "addgroup":
item = PortObjectGroups(fmc=self.fmc)
item.get(name=name)
if "id" in item.__dict__:
if "destinationPorts" in self.__dict__:
new_port = {"name": item.name, "id": item.id, "type": item.type}
duplicate = False
if "objects" not in self.destinationPorts:
self.__dict__["destinationPorts"]["objects"] = []
for obj in self.destinationPorts["objects"]:
if obj["name"] == new_port["name"]:
duplicate = True
break
if not duplicate:
self.destinationPorts["objects"].append(new_port)
logging.info(
f'Adding "{name}" to destinationPorts for this AccessRules.'
)
else:
self.destinationPorts = {
"objects": [
{"name": item.name, "id": item.id, "type": item.type}
]
}
logging.info(
f'Adding "{name}" to destinationPorts for this AccessRules.'
)
else:
logging.warning(
f'Protocol Port Port Group: "{name}", '
f"not found. Cannot add to AccessRules."
)
elif action == "remove":
pport_json = ProtocolPortObjects(fmc=self.fmc)
pport_json.get(name=name)
if "id" in pport_json.__dict__:
item = pport_json
else:
item = PortObjectGroups(fmc=self.fmc)
item.get(name=name)
if "id" in item.__dict__:
if "destinationPorts" in self.__dict__:
objects = []
for obj in self.destinationPorts["objects"]:
if obj["name"] != name:
objects.append(obj)
self.destinationPorts["objects"] = objects
logging.info(
f'Removed "{name}" from destinationPorts for this AccessRules.'
)
else:
logging.info(
"destinationPorts doesn't exist for this AccessRules. Nothing to remove."
)
else:
logging.warning(
f'Protocol Port or Protocol Port Group: "{name}", '
f"not found. Cannot add to AccessRules."
)
elif action == "clear":
if "destinationPorts" in self.__dict__:
del self.destinationPorts
logging.info(
"All Destination Ports removed from this AccessRules object."
)
    def source_network(self, action, name="", literal=None):
        """
        Add/modify name/literal to sourceNetworks field of AccessRules object.

        :param action: (str) the action to be done 'add', 'remove', 'clear'
        :param name: (str) name of the object in question
        :param literal: (dict) the literal in question {value:<>, type:<>}
        :return: None
        """
        # using dict() as default value is dangerous here, any thoughts/workarounds on this?
        logging.debug("In source_network() for AccessRules class.")
        # name and literal are mutually exclusive ways to specify a network.
        if literal and name != "":
            raise ValueError(
                "Only one of literals or name (object name) should be set while creating a source network"
            )
        # Make sure the attribute exists with both sub-containers before use.
        if not hasattr(self, "sourceNetworks"):
            self.sourceNetworks = {"objects": [], "literals": {}}
        if action == "add":
            if literal:
                type_ = get_networkaddress_type(literal)
                self.sourceNetworks["literals"][literal] = type_
                logging.info(
                    f'Adding literal "{literal}" of type "{type_}" to sourceNetworks for this AccessRules.'
                )
            else:
                # Search hosts/networks, network groups, and FQDNs by name.
                ipaddresses_json = NetworkAddresses(fmc=self.fmc).get()
                networkgroup_json = NetworkGroups(fmc=self.fmc).get()
                fqdns_json = FQDNS(fmc=self.fmc).get()
                items = (
                    ipaddresses_json.get("items", [])
                    + networkgroup_json.get("items", [])
                    + fqdns_json.get("items", [])
                )
                new_net = None
                for item in items:
                    if item["name"] == name:
                        new_net = {
                            "name": item["name"],
                            "id": item["id"],
                            "type": item["type"],
                        }
                        break
                if new_net is None:
                    logging.warning(
                        f'Network "{name}" is not found in FMC. Cannot add to sourceNetworks.'
                    )
                else:
                    if "sourceNetworks" in self.__dict__:
                        # thus either some objects are already present in sourceNetworks,
                        # or only literals are present in sourceNetworks
                        if "objects" in self.__dict__["sourceNetworks"]:
                            # some objects are already present
                            duplicate = False
                            # see if its a duplicate or not. If not, append to the list of
                            # existing objects in sourceNetworks
                            for obj in self.sourceNetworks["objects"]:
                                if obj["name"] == new_net["name"]:
                                    duplicate = True
                                    break
                            if not duplicate:
                                self.sourceNetworks["objects"].append(new_net)
                                logging.info(
                                    f'Adding "{name}" to sourceNetworks for this AccessRules.'
                                )
                        else:
                            # this means no objects were present in sourceNetworks,
                            # and sourceNetworks contains literals only
                            self.sourceNetworks.update({"objects": [new_net]})
                            # So update the sourceNetworks dict which contained 'literals' key initially
                            # to have a 'objects' key as well
                            logging.info(
                                f'Adding "{name}" to sourceNetworks for this AccessRules.'
                            )
                    else:
                        # None of literals or objects are present in sourceNetworks,
                        # so initialize it with objects and update the provided object
                        self.sourceNetworks = {"objects": [new_net]}
                        logging.info(
                            f'Adding "{name}" to sourceNetworks for this AccessRules.'
                        )
        elif action == "remove":
            if "sourceNetworks" in self.__dict__:
                if name != "":
                    # an object's name has been provided to be removed
                    # NOTE(review): assumes the 'objects' key exists; removing by
                    # name when only literals are present would raise KeyError.
                    objects = []
                    for obj in self.sourceNetworks["objects"]:
                        if obj["name"] != name:
                            objects.append(obj)
                    if len(objects) == 0:
                        # it was the last object which was deleted now
                        # NOTE(review): deleting the attribute here also discards
                        # any literals still stored under it.
                        del self.sourceNetworks
                        logging.info(
                            f'Removed "{name}" from sourceNetworks for this AccessRules'
                        )
                        logging.info(
                            "All Source Networks removed from this AccessRules object."
                        )
                    else:
                        self.sourceNetworks["objects"] = objects
                        logging.info(
                            f'Removed "{name}" from sourceNetworks for this AccessRules.'
                        )
                else:
                    # a literal value has been provided to be removed
                    type_ = self.sourceNetworks["literals"].get(literal)
                    if type_:
                        self.sourceNetworks["literals"].pop(literal)
                        logging.info(
                            f'Removed literal "{literal}" of type '
                            f'"{type_}" from sourceNetworks for this AccessRules.'
                        )
                    else:
                        logging.info(
                            f'Unable to removed literal "{literal}" from sourceNetworks as it was not found'
                        )
            else:
                logging.info(
                    "sourceNetworks doesn't exist for this AccessRules. Nothing to remove."
                )
        elif action == "clear":
            # Drop the attribute entirely so it is omitted from the posted JSON.
            if "sourceNetworks" in self.__dict__:
                del self.sourceNetworks
                logging.info(
                    "All Source Networks removed from this AccessRules object."
                )
    def destination_network(self, action, name="", literal=None):
        """
        Add/modify name/literal to destinationNetworks field of AccessRules object.

        :param action: (str) the action to be done 'add', 'remove', 'clear'
        :param name: (str) name of the object in question
        :param literal: (dict) the literal in question {value:<>, type:<>}
        :return: None
        """
        # using dict() as default value is dangerous here, any thoughts/workarounds on this?
        logging.debug("In destination_network() for ACPRule class.")
        # name and literal are mutually exclusive ways to specify a network.
        if literal and name != "":
            raise ValueError(
                "Only one of literals or name (object name) should be set while creating a source network"
            )
        # Make sure the attribute exists with both sub-containers before use.
        if not hasattr(self, "destinationNetworks"):
            self.destinationNetworks = {"objects": [], "literals": {}}
        if action == "add":
            if literal:
                type_ = get_networkaddress_type(literal)
                self.destinationNetworks["literals"][literal] = type_
                logging.info(
                    f'Adding literal "{literal}" of type "{type_}" '
                    f"to destinationNetworks for this AccessRules."
                )
            else:
                # Search hosts/networks, network groups, and (6.4+) FQDNs by name.
                ipaddresses_json = NetworkAddresses(fmc=self.fmc).get()
                networkgroup_json = NetworkGroups(fmc=self.fmc).get()
                if self.fmc.serverVersion >= "6.4":
                    fqdns_json = FQDNS(fmc=self.fmc).get()
                else:
                    fqdns_json = {"items": []}
                items = (
                    ipaddresses_json.get("items", [])
                    + networkgroup_json.get("items", [])
                    + fqdns_json.get("items", [])
                )
                new_net = None
                for item in items:
                    if item["name"] == name:
                        new_net = {
                            "name": item["name"],
                            "id": item["id"],
                            "type": item["type"],
                        }
                        break
                if new_net is None:
                    logging.warning(
                        f'Network "{name}" is not found in FMC. Cannot add to destinationNetworks.'
                    )
                else:
                    if "destinationNetworks" in self.__dict__:
                        # thus either some objects are already present in destinationNetworks,
                        # or only literals are present in destinationNetworks
                        if "objects" in self.__dict__["destinationNetworks"]:
                            # some objects are already present
                            duplicate = False
                            for obj in self.destinationNetworks["objects"]:
                                if obj["name"] == new_net["name"]:
                                    duplicate = True
                                    break
                            if not duplicate:
                                self.destinationNetworks["objects"].append(new_net)
                                logging.info(
                                    f'Adding "{name}" to destinationNetworks for this AccessRules.'
                                )
                        else:
                            # this means no objects were present in destinationNetworks,
                            # and destinationNetworks contains literals only
                            self.destinationNetworks.update({"objects": [new_net]})
                            # So update the destinationNetworks dict which contained 'literals' key initially
                            # to have a 'objects' key as well
                            logging.info(
                                f'Adding "{name}" to destinationNetworks for this AccessRules.'
                            )
                    else:
                        # None of literals or objects are present in destinationNetworks,
                        # so initialize it with objects and update the provided object
                        self.destinationNetworks = {"objects": [new_net]}
                        logging.info(
                            f'Adding "{name}" to destinationNetworks for this AccessRules.'
                        )
        elif action == "remove":
            if "destinationNetworks" in self.__dict__:
                if name != "":
                    # an object's name has been provided to be removed
                    # NOTE(review): assumes the 'objects' key exists; removing by
                    # name when only literals are present would raise KeyError.
                    objects = []
                    for obj in self.destinationNetworks["objects"]:
                        if obj["name"] != name:
                            objects.append(obj)
                    if len(objects) == 0:
                        # it was the last object which was deleted now
                        # NOTE(review): deleting the attribute here also discards
                        # any literals still stored under it.
                        del self.destinationNetworks
                        logging.info(
                            f'Removed "{name}" from destinationNetworks for this AccessRules'
                        )
                        logging.info(
                            "All Destination Networks removed from this AccessRules object."
                        )
                    else:
                        self.destinationNetworks["objects"] = objects
                        logging.info(
                            f'Removed "{name}" from destinationNetworks for this AccessRules.'
                        )
                else:
                    # a literal value has been provided to be removed
                    type_ = self.destinationNetworks["literals"].get(literal)
                    if type_:
                        self.destinationNetworks["literals"].pop(literal)
                        logging.info(
                            f'Removed literal "{literal}" of '
                            f'type "{type_}" from destinationNetworks for this AccessRules.'
                        )
                    else:
                        logging.info(
                            f'Unable to removed literal "{literal}" '
                            f"from destinationNetworks as it was not found"
                        )
            else:
                logging.info(
                    "destinationNetworks doesn't exist for this AccessRules. Nothing to remove."
                )
        elif action == "clear":
            # Drop the attribute entirely so it is omitted from the posted JSON.
            if "destinationNetworks" in self.__dict__:
                del self.destinationNetworks
                logging.info(
                    "All Destination Networks removed from this AccessRules object."
                )
def source_sgt(self, action, name="", literal=None):
"""
Add/modify name/literal to the sourceSecurityGroupTags field of AccessRules object.
:param action: (str) 'add', 'remove', or 'clear'
:param name: (str) Name of SGT in FMC.
:param literal: (dict) {value:<>, type:<>}
:return: None
"""
# using dict() as default value is dangerous here, any thoughts/workarounds on this?
logging.debug("In source_sgt() for ACPRule class.")
if literal and name != "":
raise ValueError(
"Only one of literals or name (object name) should be set while creating a source sgt"
)
if not hasattr(self, "sourceSecurityGroupTags"):
self.sourceSecurityGroupTags = {"objects": [], "literals": {}}
if action == "add":
if literal:
type_ = "ISESecurityGroupTag"
self.sourceSecurityGroupTags["literals"][literal] = type_
logging.info(
f'Adding literal "{literal}" of type "{type_}" '
f"to sourceSecurityGroupTags for this AccessRules."
)
else:
# Query FMC for all SGTs and iterate through them to see if our name matches 1 of them.
sgt = ISESecurityGroupTags(fmc=self.fmc)
sgt.get(name=name)
if "id" in sgt.__dict__:
item = sgt
else:
item = {}
new_sgt = None
if item.name == name:
new_sgt = {"name": item.name, "tag": item.tag, "type": item.type}
if new_sgt is None:
logging.warning(
f'SecurityGroupTag "{name}" is not found in FMC. '
f"Cannot add to sourceSecurityGroupTags."
)
else:
if "sourceSecurityGroupTags" in self.__dict__:
# thus either some objects are already present in sourceSecurityGroupTags,
# or only literals are present in sourceSecurityGroupTags
if "objects" in self.__dict__["sourceSecurityGroupTags"]:
# some objects are already present
duplicate = False
for obj in self.sourceSecurityGroupTags["objects"]:
if obj["name"] == new_sgt["name"]:
duplicate = True
break
if not duplicate:
self.sourceSecurityGroupTags["objects"].append(new_sgt)
logging.info(
f'Adding "{name}" to sourceSecurityGroupTags for this AccessRules.'
)
else:
# this means no objects were present in sourceSecurityGroupTags,
# and sourceSecurityGroupTags contains literals only
self.sourceSecurityGroupTags.update({"objects": [new_sgt]})
# So update the sourceSecurityGroupTags dict which contained 'literals' key initially
# to have a 'objects' key as well
logging.info(
f'Adding "{name}" to sourceSecurityGroupTags for this AccessRules.'
)
else:
# None of literals or objects are present in sourceSecurityGroupTags,
# so initialize it with objects and update the provided object
self.sourceSecurityGroupTags = {"objects": [new_sgt]}
logging.info(
f'Adding "{name}" to sourceSecurityGroupTags for this AccessRules.'
)
elif action == "remove":
if "sourceSecurityGroupTags" in self.__dict__:
if name != "":
# an object's name has been provided to be removed
objects = []
for obj in self.sourceSecurityGroupTags["objects"]:
if obj["name"] != name:
objects.append(obj)
if len(objects) == 0:
# it was the last object which was deleted now
del self.sourceSecurityGroupTags
logging.info(
f'Removed "{name}" from sourceSecurityGroupTags for this AccessRules'
)
logging.info(
"All source security group tags are removed from this AccessRules object."
)
else:
self.sourceSecurityGroupTags["objects"] = objects
logging.info(
f'Removed "{name}" from sourceSecurityGroupTags for this AccessRules.'
)
else:
# a literal value has been provided to be removed
type_ = self.sourceSecurityGroupTags["literals"].get(literal)
if type_:
self.sourceSecurityGroupTags["literals"].pop(literal)
logging.info(
f'Removed literal "{literal}" of '
f'type "{type_}" from sourceSecurityGroupTags for this AccessRules.'
)
else:
logging.info(
f'Unable to removed literal "{literal}" '
f"from sourceSecurityGroupTags as it was not found"
)
else:
logging.info(
"No sourceSecurityGroupTags exist for this AccessRules. Nothing to remove."
)
elif action == "clear":
if "sourceSecurityGroupTags" in self.__dict__:
del self.sourceSecurityGroupTags
logging.info(
"All source security group tags are removed from this AccessRules object."
)
    def destination_sgt(self, action, name="", literal=None):
        """
        Add/modify name/literal to the destinationSecurityGroupTags field of AccessRules object.

        :param action: (str) 'add', 'remove', or 'clear'
        :param name: (str) Name of SGT in FMC.
        :param literal: (dict) {value:<>, type:<>}
        :return: None
        """
        # NOTE(review): not implemented -- calling this is a silent no-op.
        # See source_sgt() for the intended shape of the implementation.
        pass
def application(self, action, name=""):
"""
Add/modify name to applications field of AccessRules object.
:param action: (str) 'add', 'remove', or 'clear'
:param name: (str) Name of Application in FMC.
:return: None
"""
logging.debug("In application() for AccessRules class.")
if action == "add":
app = Applications(fmc=self.fmc)
app.get(name=name)
if "id" in app.__dict__:
if "applications" in self.__dict__:
new_app = {"name": app.name, "id": app.id, "type": app.type}
duplicate = False
if "applications" not in self.applications:
self.__dict__["applications"]["applications"] = []
for obj in self.applications["applications"]:
if obj["name"] == new_app["name"]:
duplicate = True
break
if not duplicate:
self.applications["applications"].append(new_app)
logging.info(
f'Adding "{name}" to applications for this AccessRules.'
)
else:
self.applications = {
"applications": [
{"name": app.name, "id": app.id, "type": app.type}
]
}
logging.info(
f'Adding "{name}" to applications for this AccessRules.'
)
else:
logging.warning(
f'Application: "{name}", ' f"not found. Cannot add to AccessRules."
)
elif action == "addappfilter":
app = ApplicationFilters(fmc=self.fmc)
app.get(name=name)
if "id" in app.__dict__:
if "applicationFilters" in self.__dict__:
new_app = {"name": app.name, "id": app.id, "type": app.type}
duplicate = False
if "applicationFilters" not in self.applications:
self.__dict__["applicationFilters"]["applicationFilters"] = []
for obj in self.applications["applicationFilters"]:
if obj["name"] == new_app["name"]:
duplicate = True
break
if not duplicate:
self.applications["applicationFilters"].append(new_app)
logging.info(
f'Adding "{name}" to applications for this AccessRules.'
)
else:
self.applications = {
"applicationFilters": [
{"name": app.name, "id": app.id, "type": app.type}
]
}
logging.info(
f'Adding "{name}" application filter to applications for this AccessRules.'
)
else:
logging.warning(
f'Application Filter: "{name}", '
f"not found. Cannot add to AccessRules."
)
elif action == "remove":
app = Applications(fmc=self.fmc)
app.get(name=name)
if "id" in app.__dict__:
if "applicationFilters" in self.__dict__:
applications = []
for obj in self.applications["applications"]:
if obj["name"] != name:
applications.append(obj)
self.applications["applicationFilters"] = applications
logging.info(
f'Removed "{name}" from applications for this AccessRules.'
)
else:
logging.info(
"Application doesn't exist for this AccessRules. Nothing to remove."
)
else:
logging.warning(
f"Application, {name}, not found. Cannot remove from AccessRules."
)
elif action == "removeappfilter":
app = ApplicationFilters(fmc=self.fmc)
app.get(name=name)
if "id" in app.__dict__:
if "applications" in self.__dict__:
applications = []
for obj in self.applications["applicationFilters"]:
if obj["name"] != name:
applications.append(obj)
self.applications["applicationFilters"] = applications
logging.info(
f'Removed "{name}" application filter from applications for this AccessRules.'
)
else:
logging.info(
"Application filter doesn't exist for this AccessRules. Nothing to remove."
)
else:
logging.warning(
f"Application filter, {name}, not found. Cannot remove from AccessRules."
)
elif action == "clear":
if "applications" in self.__dict__:
del self.applications
logging.info("All Applications removed from this AccessRules object.")
    def urls_info(self, action, name=""):
        """
        Add/modify name to URLs field of AccessRules object.

        :param action: (str) 'add', 'remove', or 'clear'
        :param name: (str) Name of URLs in FMC.
        :return: None
        """
        logging.debug("In urls() for AccessRules class.")
        if action == "add":
            # Try a URL object first; fall back to a URL Group.
            urlobj_json = URLs(fmc=self.fmc)
            urlobj_json.get(name=name)
            if "id" in urlobj_json.__dict__:
                item = urlobj_json
            else:
                item = URLGroups(fmc=self.fmc)
                item.get(name=name)
            if "id" in item.__dict__:
                if "urls" in self.__dict__:
                    # NOTE(review): unlike the other rule fields, url entries
                    # carry no "type" key -- confirm the FMC API accepts this.
                    new_url = {"name": item.name, "id": item.id}
                    duplicate = False
                    # urls may exist without an 'objects' list; create it first.
                    if "objects" not in self.urls:
                        self.__dict__["urls"]["objects"] = []
                    for obj in self.urls["objects"]:
                        if obj["name"] == new_url["name"]:
                            duplicate = True
                            break
                    if not duplicate:
                        self.urls["objects"].append(new_url)
                        logging.info(
                            f'Adding URLs "{name}" to URLs for this AccessRules.'
                        )
                else:
                    # First URL on this rule: create the urls structure.
                    self.urls = {"objects": [{"name": item.name, "id": item.id}]}
                    logging.info(f'Adding URLs "{name}" to URLs for this AccessRules.')
            else:
                logging.warning(
                    f'URL Object or URL Object Group: "{name}", '
                    f"not found. Cannot add to AccessRules."
                )
        elif action == "remove":
            urlobj_json = URLs(fmc=self.fmc)
            urlobj_json.get(name=name)
            if "id" in urlobj_json.__dict__:
                item = urlobj_json
            else:
                item = URLGroups(fmc=self.fmc)
                item.get(name=name)
            if "id" in item.__dict__:
                if "urls" in self.__dict__:
                    # Rebuild the objects list without the entry matching 'name'.
                    objects = []
                    for obj in self.urls["objects"]:
                        if obj["name"] != name:
                            objects.append(obj)
                    self.urls["objects"] = objects
                    logging.info(
                        f'Removed URLs "{name}" from URLs for this AccessRules.'
                    )
                else:
                    logging.info(
                        "URLs doesn't exist for this AccessRules. Nothing to remove."
                    )
            else:
                logging.warning(
                    f'URL Object or URL Object Group: "{name}", '
                    f"not found. Cannot add to AccessRules."
                )
        elif action == "clear":
            # Drop the attribute entirely so it is omitted from the posted JSON.
            if "urls" in self.__dict__:
                del self.urls
                logging.info("All URLs removed from this AccessRules object.")
def new_comments(self, action, value):
"""
Add a comment to the comment list
Args:
action (str): Add, remove or clear
value (str): Comment value to add
"""
if action == "add":
self._newComments.append(value)
if action == "remove":
self._newComments.remove(value)
if action == "clear":
self._newComments = []
    class Bulk(object):
        """
        Send many JSON objects in one API call.
        This is specific to the AccessRules() method.
        """

        # FMC bulk-POST limits enforced/checked by post() below.
        MAX_SIZE_QTY = 1000
        MAX_SIZE_IN_BYTES = 2048000
        REQUIRED_FOR_POST = []

        @property
        def URL_SUFFIX(self):
            """
            Add the URL suffixes for section, categories, insertBefore and insertAfter.

            :return (str): url
            """
            url = "?"
            if "category" in self.__dict__:
                url = f"{url}category={self.category}&"
            if "insertBefore" in self.__dict__:
                url = f"{url}insertBefore={self.insertBefore}&"
            if "insertAfter" in self.__dict__:
                url = f"{url}insertAfter={self.insertAfter}&"
            if "insertBefore" in self.__dict__ and "insertAfter" in self.__dict__:
                logging.warning("ACP rule has both insertBefore and insertAfter params")
            if "section" in self.__dict__:
                url = f"{url}section={self.section}&"
            # Drop the trailing '&' (or the lone '?' when no params were added).
            return url[:-1]

        def __init__(self, fmc, url="", **kwargs):
            """
            Initialize Bulk object.

            :param fmc (object): FMC object
            :param url (str): Base URL used for API action.
            :param **kwargs: Pass any/all variables for self.
            :return: None
            """
            logging.debug("In __init__() for Bulk class.")
            self.fmc = fmc
            self.items = []
            self.URL = url
            self.parse_kwargs(**kwargs)

        def parse_kwargs(self, **kwargs):
            """
            Add/modify variables in self.

            :return: None
            """
            logging.debug("In parse_kwargs() for Bulk class.")
            if "category" in kwargs:
                self.category = kwargs["category"]
            if "insertBefore" in kwargs:
                self.insertBefore = kwargs["insertBefore"]
            if "insertAfter" in kwargs:
                self.insertAfter = kwargs["insertAfter"]
            if "section" in kwargs:
                self.section = kwargs["section"]

        def add(self, item):
            """
            :param item: (str) Add JSON string to list of items to send to FMC.
            :return: None
            """
            self.items.append(item)
            logging.info(f"Adding {item} to bulk items list.")

        def clear(self):
            """
            Clear self.items -- Empty out list of JSON strings to send to FMC.

            :return: None
            """
            logging.info(f"Clearing bulk items list.")
            self.items = []

        def post(self):
            """
            Send list of self.items to FMC as a bulk import.

            :return: (str) requests response from FMC
            """
            # Build URL
            # NOTE(review): self.URL is mutated in place, so calling post() a
            # second time appends the suffix again -- confirm post() is
            # intended to be single-use.
            self.URL = f"{self.URL}{self.URL_SUFFIX}&bulk=true"
            # Break up the items into MAX_BULK_POST_SIZE chunks.
            chunks = [
                self.items[i * self.MAX_SIZE_QTY : (i + 1) * self.MAX_SIZE_QTY]
                for i in range(
                    (len(self.items) + self.MAX_SIZE_QTY - 1) // self.MAX_SIZE_QTY
                )
            ]
            # Post the chunks
            for item in chunks:
                # I'm not sure what to do about the max bytes right now so I'll just throw a warning message.
                if sys.getsizeof(item, 0) > self.MAX_SIZE_IN_BYTES:
                    logging.warning(
                        f"This chunk of the post is too large. Please submit less items to be bulk posted."
                    )
                response = self.fmc.send_to_api(method="post", url=self.URL, json_data=item)
                logging.info(f"Posting to bulk items.")
            # NOTE(review): only the response for the final chunk is returned.
            return response
| {
"repo_name": "daxm/fmcapi",
"path": "fmcapi/api_objects/policy_services/accessrules.py",
"copies": "1",
"size": "63397",
"license": "bsd-3-clause",
"hash": 6600217757393744000,
"line_mean": 42.0393754243,
"line_max": 113,
"alpha_frac": 0.476315914,
"autogenerated": false,
"ratio": 4.956375576577281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.593269149057728,
"avg_score": null,
"num_lines": null
} |
# Maps a ps_object's numeric 'access' attribute to the PostScript access
# keyword emitted after the value; 0 means unrestricted (no keyword).
_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"}
class ps_object(object):
    """Base class for PostScript interpreter objects.

    The ``type`` attribute is derived from the class name by stripping the
    ``ps_`` prefix and appending ``type`` (e.g. ps_object -> "objecttype").
    """

    literal = 1   # literal (data) by default; executable subclasses set 0
    access = 0    # index into _accessstrings; 0 = unrestricted
    value = None

    def __init__(self, value):
        self.value = value
        self.type = type(self).__name__[3:] + "type"

    def __repr__(self):
        kind = type(self).__name__[3:]
        return "<%s %s>" % (kind, repr(self.value))
class ps_operator(ps_object):
    """Wrapper for a built-in PostScript operator (always executable)."""
    literal = 0

    def __init__(self, name, function):
        # Operators carry their name plus the Python callable implementing
        # them; the type string is derived exactly as in ps_object.
        self.type = type(self).__name__[3:] + "type"
        self.name = name
        self.function = function

    def __repr__(self):
        return "<operator {}>".format(self.name)
class ps_procedure(ps_object):
    """Wrapper for an executable PostScript procedure ({ ... })."""
    literal = 0

    def __repr__(self):
        return "<procedure>"

    def __str__(self):
        # Join the space-separated string forms of the contained objects in
        # one pass instead of quadratic repeated string concatenation.
        return '{' + ' '.join(str(obj) for obj in self.value) + '}'
class ps_name(ps_object):
    """Wrapper for a PostScript name object.

    The class default is executable (``literal = 0``); instances flagged
    literal print with a leading slash.
    """
    literal = 0

    def __str__(self):
        return '/' + self.value if self.literal else self.value
class ps_literal(ps_object):
    # Literal names always print with the leading slash.
    def __str__(self):
        return '/' + self.value
class ps_array(ps_object):
    """Wrapper for a PostScript array ([ ... ])."""

    def __str__(self):
        # Render each element followed by its access modifier (if any),
        # joined with single spaces -- replaces the original quadratic
        # string concatenation, output is identical.
        parts = []
        for item in self.value:
            access = _accessstrings[item.access]
            parts.append(str(item) + (' ' + access if access else ''))
        return '[' + ' '.join(parts) + ']'

    def __repr__(self):
        return "<array>"
# Canonical emission order for the top-level Type 1 font dict keys that
# precede the eexec-encrypted section (see ps_font.__str__).
_type1_pre_eexec_order = [
    "FontInfo",
    "FontName",
    "Encoding",
    "PaintType",
    "FontType",
    "FontMatrix",
    "FontBBox",
    "UniqueID",
    "Metrics",
    "StrokeWidth"
]
# Canonical key order for the FontInfo sub-dictionary of a Type 1 font.
_type1_fontinfo_order = [
    "version",
    "Notice",
    "FullName",
    "FamilyName",
    "Weight",
    "ItalicAngle",
    "isFixedPitch",
    "UnderlinePosition",
    "UnderlineThickness"
]
# Keys emitted after 'currentfile eexec', i.e. inside the encrypted part
# of a Type 1 font program.
_type1_post_eexec_order = [
    "Private",
    "CharStrings",
    "FID"
]
def _type1_item_repr(key, value):
    """Render one Type 1 font dict entry as a PostScript definition.

    CharStrings and Encoding entries get special-cased serializations;
    everything else becomes a plain ``/key value [access ]def`` line.
    """
    access = _accessstrings[value.access]
    if access:
        access = access + ' '
    if key == 'CharStrings':
        return "/%s %s def\n" % (key, _type1_CharString_repr(value.value))
    if key == 'Encoding':
        return _type1_Encoding_repr(value, access)
    return "/%s %s %sdef\n" % (str(key), str(value), access)
def _type1_Encoding_repr(encoding, access):
encoding = encoding.value
psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"
for i in range(256):
name = encoding[i].value
if name != '.notdef':
psstring = psstring + "dup %d /%s put\n" % (i, name)
return psstring + access + "def\n"
def _type1_CharString_repr(charstrings):
    # NOTE(review): unfinished stub -- the sorted items are computed but
    # never used, and a placeholder string is returned instead of a real
    # CharStrings serialization.
    items = sorted(charstrings.items())
    return 'xxx'
class ps_font(ps_object):
    # Serializes a Type 1 font dict: pre-eexec keys in canonical order,
    # then any remaining keys, then the post-eexec section, ending with
    # the standard 512 zeros (8 lines of 64) and cleartomark trailer.
    def __str__(self):
        psstring = "%d dict dup begin\n" % len(self.value)
        for key in _type1_pre_eexec_order:
            try:
                value = self.value[key]
            except KeyError:
                # Optional keys are simply skipped.
                pass
            else:
                psstring = psstring + _type1_item_repr(key, value)
        items = sorted(self.value.items())
        for key, value in items:
            # Skip keys already emitted above or reserved for the
            # post-eexec section.
            if key not in _type1_pre_eexec_order + _type1_post_eexec_order:
                psstring = psstring + _type1_item_repr(key, value)
        psstring = psstring + "currentdict end\ncurrentfile eexec\ndup "
        for key in _type1_post_eexec_order:
            try:
                value = self.value[key]
            except KeyError:
                pass
            else:
                psstring = psstring + _type1_item_repr(key, value)
        return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \
            8 * (64 * '0' + '\n') + 'cleartomark' + '\n'
    def __repr__(self):
        return '<font>'
class ps_file(ps_object):
    # Wrapper for a file/tokenizer object; inherits all behavior.
    pass
class ps_dict(ps_object):
    """Wrapper for a PostScript dictionary."""

    def __str__(self):
        # Emit entries sorted by key, each with its access modifier, using
        # a single join instead of quadratic string concatenation; output
        # is byte-identical to the original loop.
        pieces = ["%d dict dup begin\n" % len(self.value)]
        for key, value in sorted(self.value.items()):
            access = _accessstrings[value.access]
            if access:
                access = access + ' '
            pieces.append("/%s %s %sdef\n" % (str(key), str(value), access))
        return ''.join(pieces) + 'end '

    def __repr__(self):
        return "<dict>"
class ps_mark(ps_object):
    # Stack mark object; consumed by operators such as cleartomark.
    def __init__(self):
        self.value = 'mark'
        self.type = self.__class__.__name__[3:] + "type"
class ps_procmark(ps_object):
    # Mark pushed when a procedure body ('{') is opened.
    def __init__(self):
        self.value = 'procmark'
        self.type = self.__class__.__name__[3:] + "type"
class ps_null(ps_object):
    # PostScript null object; its value stays None (class default).
    def __init__(self):
        self.type = self.__class__.__name__[3:] + "type"
class ps_boolean(ps_object):
    """Wrapper for a PostScript boolean; prints as 'true'/'false'."""

    def __str__(self):
        return 'true' if self.value else 'false'
class ps_string(ps_object):
    # repr() of the Python string yields backslash-escaped content; the
    # surrounding quotes are sliced off and replaced by parentheses.
    def __str__(self):
        return "(%s)" % repr(self.value)[1:-1]
class ps_integer(ps_object):
    # Integers print exactly as Python's repr of the int.
    def __str__(self):
        return repr(self.value)
class ps_real(ps_object):
    # Reals print exactly as Python's repr of the float.
    def __str__(self):
        return repr(self.value)
class PSOperators(object):
    """Implementations of PostScript operators for a PS interpreter.

    Each ``ps_<name>`` method implements the PostScript operator
    ``<name>`` by popping its operands from ``self.stack`` and pushing
    results back.  The interpreter that mixes this class in is expected
    to provide ``push``/``pop``, ``stack``, ``dictstack``,
    ``call_procedure``, ``resolve_name``, ``handle_object``,
    ``tokenizer`` and ``mark`` (none of which are defined here).
    """
    def ps_def(self):
        # def: bind name -> obj in the current (topmost) dictionary.
        obj = self.pop()
        name = self.pop()
        self.dictstack[-1][name.value] = obj
    def ps_bind(self):
        # bind: replace operator names inside a procedure with the
        # operator objects themselves.
        proc = self.pop('proceduretype')
        self.proc_bind(proc)
        self.push(proc)
    def proc_bind(self, proc):
        # Recursive helper for ps_bind; rewrites proc.value in place.
        for i in range(len(proc.value)):
            item = proc.value[i]
            if item.type == 'proceduretype':
                self.proc_bind(item)
            else:
                if not item.literal:
                    try:
                        obj = self.resolve_name(item.value)
                    except:
                        # Unresolvable names are left untouched (they may
                        # get defined before execution time).
                        pass
                    else:
                        if obj.type == 'operatortype':
                            proc.value[i] = obj
    def ps_exch(self):
        # exch: swap the two topmost stack entries.
        if len(self.stack) < 2:
            raise RuntimeError('stack underflow')
        obj1 = self.pop()
        obj2 = self.pop()
        self.push(obj1)
        self.push(obj2)
    def ps_dup(self):
        # dup: duplicate the top entry (same object, not a copy).
        if not self.stack:
            raise RuntimeError('stack underflow')
        self.push(self.stack[-1])
    def ps_exec(self):
        # exec: run a procedure; any other object is re-handled.
        obj = self.pop()
        if obj.type == 'proceduretype':
            self.call_procedure(obj)
        else:
            self.handle_object(obj)
    def ps_count(self):
        # count: push the current operand stack depth.
        self.push(ps_integer(len(self.stack)))
    def ps_eq(self):
        # eq: value equality of the two topmost objects.
        any1 = self.pop()
        any2 = self.pop()
        self.push(ps_boolean(any1.value == any2.value))
    def ps_ne(self):
        # ne: value inequality of the two topmost objects.
        any1 = self.pop()
        any2 = self.pop()
        self.push(ps_boolean(any1.value != any2.value))
    def ps_cvx(self):
        # cvx: mark the top object as executable.
        obj = self.pop()
        obj.literal = 0
        self.push(obj)
    def ps_matrix(self):
        # matrix: push the identity transformation matrix.
        matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)]
        self.push(ps_array(matrix))
    def ps_string(self):
        # string: allocate a string of NUL characters of the given length.
        num = self.pop('integertype').value
        self.push(ps_string('\0' * num))
    def ps_type(self):
        # type: push the type name of the top object (as a string here).
        obj = self.pop()
        self.push(ps_string(obj.type))
    def ps_store(self):
        # store: replace the topmost existing definition of the name.
        value = self.pop()
        key = self.pop()
        name = key.value
        for i in range(len(self.dictstack)-1, -1, -1):
            if name in self.dictstack[i]:
                self.dictstack[i][name] = value
                break
        # NOTE(review): this final assignment also runs when the name was
        # found and replaced above; per PostScript 'store' semantics it
        # should only apply when the name was NOT found -- confirm whether
        # a for/else was intended here.
        self.dictstack[-1][name] = value
    def ps_where(self):
        # where: stub -- always reports the name as not found.
        name = self.pop()
        # XXX
        self.push(ps_boolean(0))
    def ps_systemdict(self):
        self.push(ps_dict(self.dictstack[0]))
    def ps_userdict(self):
        self.push(ps_dict(self.dictstack[1]))
    def ps_currentdict(self):
        self.push(ps_dict(self.dictstack[-1]))
    def ps_currentfile(self):
        self.push(ps_file(self.tokenizer))
    def ps_eexec(self):
        # eexec: switch the input file into encrypted (eexec) mode.
        f = self.pop('filetype').value
        f.starteexec()
    def ps_closefile(self):
        f = self.pop('filetype').value
        f.skipwhite()
        f.stopeexec()
    def ps_cleartomark(self):
        # cleartomark: pop until the mark object itself has been popped.
        obj = self.pop()
        while obj != self.mark:
            obj = self.pop()
    def ps_readstring(self,
                      ps_boolean=ps_boolean,
                      len=len):
        # readstring; the default arguments are a CPython speed hack
        # (local-name lookups instead of global lookups).
        s = self.pop('stringtype')
        oldstr = s.value
        f = self.pop('filetype')
        #pad = file.value.read(1)
        # for StringIO, this is faster
        f.value.pos = f.value.pos + 1
        newstr = f.value.read(len(oldstr))
        s.value = newstr
        self.push(s)
        # Success flag: true only if a full-length string was read.
        self.push(ps_boolean(len(oldstr) == len(newstr)))
    def ps_known(self):
        # known: membership test for a key in a dict or font.
        key = self.pop()
        d = self.pop('dicttype', 'fonttype')
        self.push(ps_boolean(key.value in d.value))
    def ps_if(self):
        proc = self.pop('proceduretype')
        if self.pop('booleantype').value:
            self.call_procedure(proc)
    def ps_ifelse(self):
        proc2 = self.pop('proceduretype')
        proc1 = self.pop('proceduretype')
        if self.pop('booleantype').value:
            self.call_procedure(proc1)
        else:
            self.call_procedure(proc2)
    def ps_readonly(self):
        # readonly: raise access level to at least 1; never lowers it.
        obj = self.pop()
        if obj.access < 1:
            obj.access = 1
        self.push(obj)
    def ps_executeonly(self):
        # executeonly: raise access level to at least 2.
        obj = self.pop()
        if obj.access < 2:
            obj.access = 2
        self.push(obj)
    def ps_noaccess(self):
        # noaccess: raise access level to the maximum, 3.
        obj = self.pop()
        if obj.access < 3:
            obj.access = 3
        self.push(obj)
    def ps_not(self):
        # not: logical not for booleans, bitwise complement for integers.
        obj = self.pop('booleantype', 'integertype')
        if obj.type == 'booleantype':
            self.push(ps_boolean(not obj.value))
        else:
            self.push(ps_integer(~obj.value))
    def ps_print(self):
        str = self.pop('stringtype')
        print('PS output --->', str.value)
    def ps_anchorsearch(self):
        # anchorsearch: if the string starts with 'seek', split it into
        # (remainder, match, true); otherwise push (string, false).
        seek = self.pop('stringtype')
        s = self.pop('stringtype')
        seeklen = len(seek.value)
        if s.value[:seeklen] == seek.value:
            self.push(ps_string(s.value[seeklen:]))
            self.push(seek)
            self.push(ps_boolean(1))
        else:
            self.push(s)
            self.push(ps_boolean(0))
    def ps_array(self):
        # array: allocate an array of the given length, filled with None.
        num = self.pop('integertype')
        array = ps_array([None] * num.value)
        self.push(array)
    def ps_astore(self):
        # astore: fill an array from the stack, last element popped first.
        array = self.pop('arraytype')
        for i in range(len(array.value)-1, -1, -1):
            array.value[i] = self.pop()
        self.push(array)
    def ps_load(self):
        self.push(self.resolve_name(self.pop().value)) if False else None
        # (kept original below)
    def ps_put(self):
        # put: container[index/key] = value for arrays/procs/dicts/strings.
        obj1 = self.pop()
        obj2 = self.pop()
        obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype')
        tp = obj3.type
        if tp == 'arraytype' or tp == 'proceduretype':
            obj3.value[obj2.value] = obj1
        elif tp == 'dicttype':
            obj3.value[obj2.value] = obj1
        elif tp == 'stringtype':
            # Python strings are immutable: rebuild with one byte replaced.
            index = obj2.value
            obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index+1:]
    def ps_get(self):
        obj1 = self.pop()
        if obj1.value == "Encoding":
            # Leftover debugging hook; has no effect.
            pass
        obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype')
        tp = obj2.type
        if tp in ('arraytype', 'proceduretype'):
            self.push(obj2.value[obj1.value])
        elif tp in ('dicttype', 'fonttype'):
            self.push(obj2.value[obj1.value])
        elif tp == 'stringtype':
            # String indexing yields the character code as an integer.
            self.push(ps_integer(ord(obj2.value[obj1.value])))
        else:
            assert False, "shouldn't get here"
    def ps_getinterval(self):
        # getinterval: slice of length obj1 starting at index obj2.
        obj1 = self.pop('integertype')
        obj2 = self.pop('integertype')
        obj3 = self.pop('arraytype', 'stringtype')
        tp = obj3.type
        if tp == 'arraytype':
            self.push(ps_array(obj3.value[obj2.value:obj2.value + obj1.value]))
        elif tp == 'stringtype':
            self.push(ps_string(obj3.value[obj2.value:obj2.value + obj1.value]))
    def ps_putinterval(self):
        # putinterval: overwrite a slice of obj3 with obj1 at offset obj2.
        obj1 = self.pop('arraytype', 'stringtype')
        obj2 = self.pop('integertype')
        obj3 = self.pop('arraytype', 'stringtype')
        tp = obj3.type
        if tp == 'arraytype':
            obj3.value[obj2.value:obj2.value + len(obj1.value)] = obj1.value
        elif tp == 'stringtype':
            newstr = obj3.value[:obj2.value]
            newstr = newstr + obj1.value
            newstr = newstr + obj3.value[obj2.value + len(obj1.value):]
            obj3.value = newstr
    def ps_cvn(self):
        # cvn: convert a string to a name object.
        self.push(ps_name(self.pop('stringtype').value))
    def ps_index(self):
        # index: copy the n-th stack element (0 = top) onto the top.
        n = self.pop('integertype').value
        if n < 0:
            raise RuntimeError('index may not be negative')
        self.push(self.stack[-1-n])
    def ps_for(self):
        # for: counted loop; pushes the control variable before each call.
        proc = self.pop('proceduretype')
        limit = self.pop('integertype', 'realtype').value
        increment = self.pop('integertype', 'realtype').value
        i = self.pop('integertype', 'realtype').value
        while 1:
            if increment > 0:
                if i > limit:
                    break
            else:
                if i < limit:
                    break
            # Preserve int vs. float type of the control variable.
            if type(i) == type(0.0):
                self.push(ps_real(i))
            else:
                self.push(ps_integer(i))
            self.call_procedure(proc)
            i = i + increment
    def ps_forall(self):
        # forall: iterate a container; dicts push key and value per call.
        proc = self.pop('proceduretype')
        obj = self.pop('arraytype', 'stringtype', 'dicttype')
        tp = obj.type
        if tp == 'arraytype':
            for item in obj.value:
                self.push(item)
                self.call_procedure(proc)
        elif tp == 'stringtype':
            for item in obj.value:
                self.push(ps_integer(ord(item)))
                self.call_procedure(proc)
        elif tp == 'dicttype':
            for key, value in obj.value.items():
                self.push(ps_name(key))
                self.push(value)
                self.call_procedure(proc)
    def ps_definefont(self):
        # definefont: register the dict as a font in FontDirectory and
        # leave the font object on the stack.
        font = self.pop('dicttype')
        name = self.pop()
        font = ps_font(font.value)
        self.dictstack[0]['FontDirectory'].value[name.value] = font
        self.push(font)
    def ps_findfont(self):
        name = self.pop()
        font = self.dictstack[0]['FontDirectory'].value[name.value]
        self.push(font)
    def ps_pop(self):
        self.pop()
    def ps_dict(self):
        # dict: the requested capacity is ignored; dicts grow dynamically.
        self.pop('integertype')
        self.push(ps_dict({}))
    def ps_begin(self):
        # begin: push a dictionary onto the dictionary stack.
        self.dictstack.append(self.pop('dicttype').value)
    def ps_end(self):
        # end: pop the dictionary stack; systemdict/userdict must remain.
        if len(self.dictstack) > 2:
            del self.dictstack[-1]
        else:
            raise RuntimeError('dictstack underflow')
# Name of the default (empty) glyph.
notdef = '.notdef'
from fontTools.encodings.StandardEncoding import StandardEncoding
# The 256-entry Adobe StandardEncoding vector, wrapped as ps_name objects.
ps_StandardEncoding = list(map(ps_name, StandardEncoding))
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/misc/psOperators.py",
"copies": "5",
"size": "12810",
"license": "apache-2.0",
"hash": -5815410608739100000,
"line_mean": 22.8547486034,
"line_max": 99,
"alpha_frac": 0.6371584699,
"autogenerated": false,
"ratio": 2.6417818106826148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5778940280582615,
"avg_score": null,
"num_lines": null
} |
""" Access the cluster.
Easy to use functions to make use of the cluster facilities.
This checks the available slots on the requested queue, creates the
scripts to submit, submits the jobs, and cleans up afterwards.
Example usage::
>>> import qsub
>>> qsub.submit_job('touch /data/hisparc/test', 'job_1', 'express')
"""
import logging
import os
from cax import config
import subprocess
import tempfile
from distutils.spawn import find_executable
def which(program):
    """Check that a command line program is available on the ``PATH``.

    :param program: name of the program to check for, e.g. 'wget'.
    :raises Exception: if the program is not available.
    :return: None
    """
    # shutil.which is the supported stdlib replacement for
    # distutils.spawn.find_executable (distutils is deprecated and was
    # removed in Python 3.12).
    import shutil
    if not shutil.which(program):
        raise Exception('The program %s is not available.' % program)
def submit_job(script, extra=''):
    """Submit a job to the cluster scheduler via sbatch.

    :param script: contents of the script to run.
    :param extra: optional extra arguments for the sbatch command.
    :return: None
    """
    which('sbatch')
    fileobj = create_script(script)
    # Effect of the arguments for sbatch:
    # http://slurm.schedmd.com/sbatch.html
    sbatch = ('sbatch {extra} {script}'
              .format(script=fileobj.name,
                      extra=extra))
    try:
        result = subprocess.check_output(sbatch,
                                         stderr=subprocess.STDOUT,
                                         shell=True,
                                         timeout=120)
        logging.info(result)
    except subprocess.TimeoutExpired as e:
        logging.error("Process timeout")
    except Exception as e:
        logging.exception(e)
    # Closing the temp file deletes it; presumably sbatch has read the
    # script by the time check_output returned -- TODO confirm.
    delete_script(fileobj)
def create_script(script):
    """Write *script* to a named temporary file and make it executable.

    The file is line-buffered and removed automatically once closed;
    callers reference it on disk via its ``.name`` attribute.

    :param script: the script contents to write.
    :return: the open NamedTemporaryFile object.
    """
    tmp = tempfile.NamedTemporaryFile(mode='wt',
                                      suffix='.sh',
                                      buffering=1,
                                      delete=True)
    tmp.write(script)
    # rwxrwxr-- so the scheduler can execute the script.
    os.chmod(tmp.name, 0o774)
    return tmp
def delete_script(fileobj):
    """Close the temporary script file after submitting it to the cluster.

    Closing the NamedTemporaryFile (created with ``delete=True``) removes
    it from disk.

    :param fileobj: the open temporary file returned by ``create_script``.
    """
    fileobj.close()
def get_number_in_queue(host=config.get_hostname(), partition=''):
    """Return the number of queued jobs for the user on *host*.

    NOTE(review): the *host* default is evaluated once at import time,
    not per call -- confirm that is the intended behaviour.
    """
    # print (len(get_queue(host, partition)), host, partition)
    return len(get_queue(host, partition))
def get_queue(host=config.get_hostname(), partition=''):
    """Get list of jobs in queue

    :param host: hostname selecting the per-cluster squeue defaults.
    :param partition: optional partition overriding the default.
    :return: list of queued job names; empty on unknown host or error.
    """
    # Per-cluster defaults; unknown hosts have no queue to inspect.
    if host == "midway-login1":
        args = {'partition': 'sandyb', 'user': config.get_user()}
    elif host == 'tegner-login-1':
        args = {'partition': 'main', 'user': 'bobau'}
    else:
        return []
    if partition:
        args['partition'] = partition
        command = 'squeue --partition={partition} --user={user} -o "%.30j"'.format(**args)
    else:
        command = 'squeue --user={user} -o "%.30j"'.format(**args)
    try:
        queue = subprocess.check_output(command, shell=True, timeout=120)
    except subprocess.TimeoutExpired:
        logging.error("Process timeout")
        return []
    except Exception as e:
        logging.exception(e)
        return []
    # The first whitespace-separated token is the squeue header; drop it.
    entries = queue.rstrip().decode('ascii').split()
    return entries[1:] if len(entries) > 1 else []
| {
"repo_name": "XENON1T/cax",
"path": "cax/qsub.py",
"copies": "1",
"size": "3490",
"license": "isc",
"hash": 5392750664664035000,
"line_mean": 26.4803149606,
"line_max": 90,
"alpha_frac": 0.5739255014,
"autogenerated": false,
"ratio": 4.276960784313726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0029472709248399053,
"num_lines": 127
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.