repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/io/json/test_normalize.py | 14 | 11514 | import pytest
import numpy as np
import json
import pandas.util.testing as tm
from pandas import compat, Index, DataFrame
from pandas.io.json import json_normalize
from pandas.io.json.normalize import nested_to_record
@pytest.fixture
def deep_nested():
    """Fixture: two countries, each with nested states and city records."""
    usa = {
        'country': 'USA',
        'states': [
            {'name': 'California',
             'cities': [{'name': 'San Francisco', 'pop': 12345},
                        {'name': 'Los Angeles', 'pop': 12346}]},
            {'name': 'Ohio',
             'cities': [{'name': 'Columbus', 'pop': 1234},
                        {'name': 'Cleveland', 'pop': 1236}]},
        ],
    }
    germany = {
        'country': 'Germany',
        'states': [
            {'name': 'Bayern',
             'cities': [{'name': 'Munich', 'pop': 12347}]},
            {'name': 'Nordrhein-Westfalen',
             'cities': [{'name': 'Duesseldorf', 'pop': 1238},
                        {'name': 'Koeln', 'pop': 1239}]},
        ],
    }
    return [usa, germany]
@pytest.fixture
def state_data():
    """Fixture: two state records with counties, governor info and shortname."""
    florida = {
        'counties': [{'name': 'Dade', 'population': 12345},
                     {'name': 'Broward', 'population': 40000},
                     {'name': 'Palm Beach', 'population': 60000}],
        'info': {'governor': 'Rick Scott'},
        'shortname': 'FL',
        'state': 'Florida',
    }
    ohio = {
        'counties': [{'name': 'Summit', 'population': 1234},
                     {'name': 'Cuyahoga', 'population': 1337}],
        'info': {'governor': 'John Kasich'},
        'shortname': 'OH',
        'state': 'Ohio',
    }
    return [florida, ohio]
class TestJSONNormalize(object):
    """Tests for json_normalize record-path expansion and meta handling."""
    def test_simple_records(self):
        # Flat records need no flattening; result must match the plain
        # DataFrame constructor output.
        recs = [{'a': 1, 'b': 2, 'c': 3},
                {'a': 4, 'b': 5, 'c': 6},
                {'a': 7, 'b': 8, 'c': 9},
                {'a': 10, 'b': 11, 'c': 12}]
        result = json_normalize(recs)
        expected = DataFrame(recs)
        tm.assert_frame_equal(result, expected)
    def test_simple_normalize(self, state_data):
        # A single record: the 'counties' list expands to one row each.
        result = json_normalize(state_data[0], 'counties')
        expected = DataFrame(state_data[0]['counties'])
        tm.assert_frame_equal(result, expected)
        # Multiple records: county rows of all states are concatenated.
        result = json_normalize(state_data, 'counties')
        expected = []
        for rec in state_data:
            expected.extend(rec['counties'])
        expected = DataFrame(expected)
        tm.assert_frame_equal(result, expected)
        # meta='state' broadcasts the parent value onto each county row
        # (3 Florida counties, 2 Ohio counties).
        result = json_normalize(state_data, 'counties', meta='state')
        expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
        tm.assert_frame_equal(result, expected)
    def test_empty_array(self):
        # An empty record list yields an empty frame rather than an error.
        result = json_normalize([])
        expected = DataFrame()
        tm.assert_frame_equal(result, expected)
    def test_simple_normalize_with_separator(self, deep_nested):
        # GH 14883
        # Default separator is '.'.
        result = json_normalize({'A': {'A': 1, 'B': 2}})
        expected = DataFrame([[1, 2]], columns=['A.A', 'A.B'])
        tm.assert_frame_equal(result.reindex_like(expected), expected)
        result = json_normalize({'A': {'A': 1, 'B': 2}}, sep='_')
        expected = DataFrame([[1, 2]], columns=['A_A', 'A_B'])
        tm.assert_frame_equal(result.reindex_like(expected), expected)
        # Non-ASCII separators are accepted too.
        result = json_normalize({'A': {'A': 1, 'B': 2}}, sep=u'\u03c3')
        expected = DataFrame([[1, 2]], columns=[u'A\u03c3A', u'A\u03c3B'])
        tm.assert_frame_equal(result.reindex_like(expected), expected)
        # The separator also applies to flattened meta column names.
        result = json_normalize(deep_nested, ['states', 'cities'],
                                meta=['country', ['states', 'name']],
                                sep='_')
        expected = Index(['name', 'pop',
                          'country', 'states_name']).sort_values()
        assert result.columns.sort_values().equals(expected)
    def test_more_deeply_nested(self, deep_nested):
        # Two-level record_path with meta drawn from both levels.
        result = json_normalize(deep_nested, ['states', 'cities'],
                                meta=['country', ['states', 'name']])
        # meta_prefix={'states': 'state_'})
        ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
                   'states.name': ['California', 'California', 'Ohio', 'Ohio',
                                   'Bayern', 'Nordrhein-Westfalen',
                                   'Nordrhein-Westfalen'],
                   'name': ['San Francisco', 'Los Angeles', 'Columbus',
                            'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
                   'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
        expected = DataFrame(ex_data, columns=result.columns)
        tm.assert_frame_equal(result, expected)
    def test_shallow_nested(self):
        data = [{'state': 'Florida',
                 'shortname': 'FL',
                 'info': {
                     'governor': 'Rick Scott'
                 },
                 'counties': [{'name': 'Dade', 'population': 12345},
                              {'name': 'Broward', 'population': 40000},
                              {'name': 'Palm Beach', 'population': 60000}]},
                {'state': 'Ohio',
                 'shortname': 'OH',
                 'info': {
                     'governor': 'John Kasich'
                 },
                 'counties': [{'name': 'Summit', 'population': 1234},
                              {'name': 'Cuyahoga', 'population': 1337}]}]
        # Nested meta path ['info', 'governor'] flattens to 'info.governor'.
        result = json_normalize(data, 'counties',
                                ['state', 'shortname',
                                 ['info', 'governor']])
        ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
                            'Cuyahoga'],
                   'state': ['Florida'] * 3 + ['Ohio'] * 2,
                   'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
                   'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
                   'population': [12345, 40000, 60000, 1234, 1337]}
        expected = DataFrame(ex_data, columns=result.columns)
        tm.assert_frame_equal(result, expected)
    def test_meta_name_conflict(self):
        data = [{'foo': 'hello',
                 'bar': 'there',
                 'data': [{'foo': 'something', 'bar': 'else'},
                          {'foo': 'something2', 'bar': 'else2'}]}]
        # Meta keys that clash with record keys must raise without a prefix.
        with pytest.raises(ValueError):
            json_normalize(data, 'data', meta=['foo', 'bar'])
        # meta_prefix disambiguates the conflicting columns.
        result = json_normalize(data, 'data', meta=['foo', 'bar'],
                                meta_prefix='meta')
        for val in ['metafoo', 'metabar', 'foo', 'bar']:
            assert val in result
    def test_record_prefix(self, state_data):
        result = json_normalize(state_data[0], 'counties')
        expected = DataFrame(state_data[0]['counties'])
        tm.assert_frame_equal(result, expected)
        # record_prefix is applied to record columns, not to meta columns.
        result = json_normalize(state_data, 'counties',
                                meta='state',
                                record_prefix='county_')
        expected = []
        for rec in state_data:
            expected.extend(rec['counties'])
        expected = DataFrame(expected)
        expected = expected.rename(columns=lambda x: 'county_' + x)
        expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
        tm.assert_frame_equal(result, expected)
    def test_non_ascii_key(self):
        # Non-ASCII keys must survive flattening on both Python 2 and 3;
        # the JSON text is built per-interpreter to yield the same unicode.
        if compat.PY3:
            testjson = (
                b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' +
                b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
            ).decode('utf8')
        else:
            testjson = ('[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
                        '{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]')
        testdata = {
            u'sub.A': [1, 3],
            u'sub.B': [2, 4],
            b"\xc3\x9cnic\xc3\xb8de".decode('utf8'): [0, 1]
        }
        expected = DataFrame(testdata)
        result = json_normalize(json.loads(testjson))
        tm.assert_frame_equal(result, expected)
class TestNestedToRecord(object):
    """Tests for the nested_to_record flattening helper."""
    def test_flat_stays_flat(self):
        # Already-flat records are returned unchanged.
        recs = [dict(flat1=1, flat2=2),
                dict(flat1=3, flat2=4),
                ]
        result = nested_to_record(recs)
        expected = recs
        assert result == expected
    def test_one_level_deep_flattens(self):
        # Nested keys are joined with '.' by default.
        data = dict(flat1=1,
                    dict1=dict(c=1, d=2))
        result = nested_to_record(data)
        expected = {'dict1.c': 1,
                    'dict1.d': 2,
                    'flat1': 1}
        assert result == expected
    def test_nested_flattens(self):
        # Flattening recurses through arbitrarily deep nesting.
        data = dict(flat1=1,
                    dict1=dict(c=1, d=2),
                    nested=dict(e=dict(c=1, d=2),
                                d=2))
        result = nested_to_record(data)
        expected = {'dict1.c': 1,
                    'dict1.d': 2,
                    'flat1': 1,
                    'nested.d': 2,
                    'nested.e.c': 1,
                    'nested.e.d': 2}
        assert result == expected
    def test_json_normalize_errors(self):
        # GH14583: If meta keys are not always present
        # a new option to set errors='ignore' has been implemented
        # (the second trade below lacks 'trade_version' on purpose).
        i = {
            "Trades": [{
                "general": {
                    "tradeid": 100,
                    "trade_version": 1,
                    "stocks": [{
                        "symbol": "AAPL",
                        "name": "Apple",
                        "price": "0"
                    }, {
                        "symbol": "GOOG",
                        "name": "Google",
                        "price": "0"
                    }
                    ]
                }
            }, {
                "general": {
                    "tradeid": 100,
                    "stocks": [{
                        "symbol": "AAPL",
                        "name": "Apple",
                        "price": "0"
                    }, {
                        "symbol": "GOOG",
                        "name": "Google",
                        "price": "0"
                    }
                    ]
                }
            }
            ]
        }
        # errors='ignore' leaves NaN (filled to '' below) where the meta
        # key is missing instead of raising.
        j = json_normalize(data=i['Trades'],
                           record_path=[['general', 'stocks']],
                           meta=[['general', 'tradeid'],
                                 ['general', 'trade_version']],
                           errors='ignore')
        expected = {'general.trade_version': {0: 1.0, 1: 1.0, 2: '', 3: ''},
                    'general.tradeid': {0: 100, 1: 100, 2: 100, 3: 100},
                    'name': {0: 'Apple', 1: 'Google', 2: 'Apple', 3: 'Google'},
                    'price': {0: '0', 1: '0', 2: '0', 3: '0'},
                    'symbol': {0: 'AAPL', 1: 'GOOG', 2: 'AAPL', 3: 'GOOG'}}
        assert j.fillna('').to_dict() == expected
        # errors='raise' must fail on the missing meta key.
        pytest.raises(KeyError,
                      json_normalize, data=i['Trades'],
                      record_path=[['general', 'stocks']],
                      meta=[['general', 'tradeid'],
                            ['general', 'trade_version']],
                      errors='raise'
                      )
| mit |
nfallen/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/mux_client_for_testing.py | 457 | 25761 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket client utility for testing mux extension.
This code should be independent from mod_pywebsocket. See the comment of
client_for_testing.py.
NOTE: This code is far from robust like client_for_testing.py.
"""
import Queue
import base64
import collections
import email
import email.parser
import logging
import math
import os
import random
import socket
import struct
import threading
from mod_pywebsocket import util
from test import client_for_testing
# Channel id 0 carries mux control blocks; the channel implicitly opened
# by the physical handshake uses id 1.
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
# Mux control block opcodes (encoded in the top 3 bits of the first byte,
# see _parse_control_blocks / _create_* below).
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
class _ControlBlock:
    """Parsed representation of one mux control block.

    Only ``opcode`` is set here; opcode-specific attributes (channel_id,
    send_quota, encoded_handshake, ...) are attached dynamically by
    _parse_control_blocks.
    """
    def __init__(self, opcode):
        self.opcode = opcode
def _parse_handshake_response(response):
status_line, header_lines = response.split('\r\n', 1)
words = status_line.split(' ')
if len(words) < 3:
raise ValueError('Bad Status-Line syntax %r' % status_line)
[version, response_code] = words[:2]
if version != 'HTTP/1.1':
raise ValueError('Bad response version %r' % version)
if response_code != '101':
raise ValueError('Bad response code %r ' % response_code)
headers = email.parser.Parser().parsestr(header_lines)
return headers
def _parse_channel_id(data, offset=0):
length = len(data)
remaining = length - offset
if remaining <= 0:
raise Exception('No channel id found')
channel_id = ord(data[offset])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining < 4:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!L',
data[offset:offset+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining < 3:
raise Exception('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', data[offset+1:offset+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining < 2:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!H', data[offset:offset+2])[0] & 0x3fff
channel_id_length = 2
return channel_id, channel_id_length
def _parse_number(data, offset=0):
first_byte = ord(data[offset])
if (first_byte & 0x80) != 0:
raise Exception('The MSB of number field must be unset')
first_byte = first_byte & 0x7f
if first_byte == 127:
if offset + 9 > len(data):
raise Exception('Invalid number')
return struct.unpack('!Q', data[offset+1:offset+9])[0], 9
if first_byte == 126:
if offset + 3 > len(data):
raise Exception('Invalid number')
return struct.unpack('!H', data[offset+1:offset+3])[0], 3
return first_byte, 1
def _parse_size_and_contents(data, offset=0):
    """Read a length-prefixed field; return (contents, total bytes consumed).

    Raises Exception when the declared size runs past the end of *data*.
    """
    size, advance = _parse_number(data, offset)
    start = offset + advance
    end = start + size
    if len(data) < end:
        raise Exception('Invalid size of control block (%d < %d)' % (
            len(data), end))
    return data[start:end], size + advance
def _parse_control_blocks(data):
    """Parse a control-channel payload into a list of _ControlBlock objects.

    Raises Exception on truncated input or an opcode this test client does
    not handle (AddChannelRequest is never expected from a server).
    """
    blocks = []
    length = len(data)
    pos = 0
    while pos < length:
        first_byte = ord(data[pos])
        pos += 1
        # The opcode occupies the top three bits of the first byte.
        opcode = (first_byte >> 5) & 0x7
        block = _ControlBlock(opcode)
        # TODO(bashi): Support more opcode
        if opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
            # Low 2 bits: handshake encoding; bit 4: rejected (R) flag.
            block.encode = first_byte & 3
            block.rejected = (first_byte >> 4) & 1
            channel_id, advance = _parse_channel_id(data, pos)
            block.channel_id = channel_id
            pos += advance
            encoded_handshake, advance = _parse_size_and_contents(data, pos)
            block.encoded_handshake = encoded_handshake
            pos += advance
            blocks.append(block)
        elif opcode == _MUX_OPCODE_DROP_CHANNEL:
            # Bit 4: mux-error flag.
            block.mux_error = (first_byte >> 4) & 1
            channel_id, advance = _parse_channel_id(data, pos)
            block.channel_id = channel_id
            pos += advance
            reason, advance = _parse_size_and_contents(data, pos)
            # The reason field is either empty or a 2-byte code + message.
            if len(reason) == 0:
                block.drop_code = None
                block.drop_message = ''
            elif len(reason) >= 2:
                block.drop_code = struct.unpack('!H', reason[:2])[0]
                block.drop_message = reason[2:]
            else:
                raise Exception('Invalid DropChannel')
            pos += advance
            blocks.append(block)
        elif opcode == _MUX_OPCODE_FLOW_CONTROL:
            channel_id, advance = _parse_channel_id(data, pos)
            block.channel_id = channel_id
            pos += advance
            send_quota, advance = _parse_number(data, pos)
            block.send_quota = send_quota
            pos += advance
            blocks.append(block)
        elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
            # Bit 0 of the first byte is the fallback (F) flag.
            fallback = first_byte & 1
            slots, advance = _parse_number(data, pos)
            pos += advance
            send_quota, advance = _parse_number(data, pos)
            pos += advance
            if fallback == 1 and (slots != 0 or send_quota != 0):
                raise Exception('slots and send_quota must be zero if F bit '
                                'is set')
            block.fallback = fallback
            block.slots = slots
            block.send_quota = send_quota
            blocks.append(block)
        else:
            raise Exception(
                'Unsupported mux opcode %d received' % opcode)
    return blocks
def _encode_channel_id(channel_id):
    """Encode a channel id into the mux variable-length wire format.

    Inverse of _parse_channel_id: 1 byte for ids < 2**7, 2 bytes (top bit
    set) for < 2**14, 3 bytes (0xc0 prefix) for < 2**21, 4 bytes (0xe0
    prefix) for < 2**29. Returns a Python 2 byte string.
    """
    if channel_id < 0:
        raise ValueError('Channel id %d must not be negative' % channel_id)
    if channel_id < 2 ** 7:
        return chr(channel_id)
    if channel_id < 2 ** 14:
        return struct.pack('!H', 0x8000 + channel_id)
    if channel_id < 2 ** 21:
        first = chr(0xc0 + (channel_id >> 16))
        return first + struct.pack('!H', channel_id & 0xffff)
    if channel_id < 2 ** 29:
        return struct.pack('!L', 0xe0000000 + channel_id)
    raise ValueError('Channel id %d is too large' % channel_id)
def _encode_number(number):
if number <= 125:
return chr(number)
elif number < (1 << 16):
return chr(0x7e) + struct.pack('!H', number)
elif number < (1 << 63):
return chr(0x7f) + struct.pack('!Q', number)
else:
raise Exception('Invalid number')
def _create_add_channel_request(channel_id, encoded_handshake,
                                encoding=0):
    """Serialize an AddChannelRequest control block (py2 byte string).

    Layout: first byte (opcode in the top 3 bits, encoding in the low
    bits), channel id, handshake length, handshake bytes.
    """
    first_byte = (_MUX_OPCODE_ADD_CHANNEL_REQUEST << 5) | encoding
    parts = [chr(first_byte),
             _encode_channel_id(channel_id),
             _encode_number(len(encoded_handshake)),
             encoded_handshake]
    return ''.join(parts)
def _create_flow_control(channel_id, replenished_quota):
    """Serialize a FlowControl control block (py2 byte string)."""
    header = chr(_MUX_OPCODE_FLOW_CONTROL << 5)
    return ''.join([header,
                    _encode_channel_id(channel_id),
                    _encode_number(replenished_quota)])
class _MuxReaderThread(threading.Thread):
    """Mux reader thread.

    Reads frames and passes them to the mux client. This thread accesses
    private functions/variables of the mux client.
    """

    def __init__(self, mux):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self._mux = mux
        self._stop_requested = False

    def _receive_message(self):
        """Reassemble one physical message.

        Collects fragments until a frame with the FIN bit arrives and
        returns (first_opcode, payload); returns (None, None) if a stop
        was requested mid-message.
        """
        first_opcode = None
        pending_payload = []
        while not self._stop_requested:
            fin, rsv1, rsv2, rsv3, opcode, payload_length = (
                client_for_testing.read_frame_header(self._mux._socket))
            if not first_opcode:
                if opcode == client_for_testing.OPCODE_TEXT:
                    raise Exception('Received a text message on physical '
                                    'connection')
                if opcode == client_for_testing.OPCODE_CONTINUATION:
                    raise Exception('Received an intermediate frame but '
                                    'fragmentation was not started')
                # BUG FIX: the original listed OPCODE_PONG twice and never
                # accepted OPCODE_PING, rejecting valid ping frames.
                if (opcode == client_for_testing.OPCODE_BINARY or
                    opcode == client_for_testing.OPCODE_PING or
                    opcode == client_for_testing.OPCODE_PONG or
                    opcode == client_for_testing.OPCODE_CLOSE):
                    first_opcode = opcode
                else:
                    raise Exception('Received an undefined opcode frame: %d' %
                                    opcode)
            elif opcode != client_for_testing.OPCODE_CONTINUATION:
                raise Exception('Received a new opcode before '
                                'terminating fragmentation')
            payload = client_for_testing.receive_bytes(
                self._mux._socket, payload_length)
            if self._mux._incoming_frame_filter is not None:
                payload = self._mux._incoming_frame_filter.filter(payload)
            pending_payload.append(payload)
            if fin:
                break
        if self._stop_requested:
            return None, None
        message = ''.join(pending_payload)
        return first_opcode, message

    def request_stop(self):
        # Ask run() to terminate at the next frame boundary.
        self._stop_requested = True

    def run(self):
        try:
            while not self._stop_requested:
                # opcode is OPCODE_BINARY or a control opcode when a
                # message is successfully received.
                opcode, message = self._receive_message()
                if not opcode:
                    return
                if opcode == client_for_testing.OPCODE_BINARY:
                    channel_id, advance = _parse_channel_id(message)
                    self._mux._dispatch_frame(channel_id, message[advance:])
                else:
                    self._mux._process_control_message(opcode, message)
        finally:
            # Always tell the client the reader is gone so it can close.
            self._mux._notify_reader_done()
class _InnerFrame(object):
def __init__(self, fin, rsv1, rsv2, rsv3, opcode, payload):
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.payload = payload
class _LogicalChannelData(object):
    """Per-logical-channel state: received frames plus flow-control quotas."""
    def __init__(self):
        # Frames received on this channel, consumed by assert_receive*().
        self.queue = Queue.Queue()
        # send_quota: bytes we may still send; receive_quota: bytes the
        # server may still send to us before violating flow control.
        self.send_quota = 0
        self.receive_quota = 0
class MuxClient(object):
"""WebSocket mux client.
Note that this class is NOT thread-safe. Do not access an instance of this
class from multiple threads at a same time.
"""
def __init__(self, options):
self._logger = util.get_class_logger(self)
self._options = options
self._options.enable_mux()
self._stream = None
self._socket = None
self._handshake = client_for_testing.WebSocketHandshake(self._options)
self._incoming_frame_filter = None
self._outgoing_frame_filter = None
self._is_active = False
self._read_thread = None
self._control_blocks_condition = threading.Condition()
self._control_blocks = []
self._channel_slots = collections.deque()
self._logical_channels_condition = threading.Condition();
self._logical_channels = {}
self._timeout = 2
self._physical_connection_close_event = None
self._physical_connection_close_message = None
def _parse_inner_frame(self, data):
if len(data) == 0:
raise Exception('Invalid encapsulated frame received')
first_byte = ord(data[0])
fin = (first_byte << 7) & 1
rsv1 = (first_byte << 6) & 1
rsv2 = (first_byte << 5) & 1
rsv3 = (first_byte << 4) & 1
opcode = first_byte & 0xf
if self._outgoing_frame_filter:
payload = self._outgoing_frame_filter.filter(
data[1:])
else:
payload = data[1:]
return _InnerFrame(fin, rsv1, rsv2, rsv3, opcode, payload)
def _process_mux_control_blocks(self):
for block in self._control_blocks:
if block.opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
# AddChannelResponse will be handled in add_channel().
continue
elif block.opcode == _MUX_OPCODE_FLOW_CONTROL:
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
raise Exception('Invalid flow control received for '
'channel id %d' % block.channel_id)
self._logical_channels[block.channel_id].send_quota += (
block.send_quota)
self._logical_channels_condition.notify()
finally:
self._logical_channels_condition.release()
elif block.opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
self._channel_slots.extend([block.send_quota] * block.slots)
def _dispatch_frame(self, channel_id, payload):
if channel_id == _CONTROL_CHANNEL_ID:
try:
self._control_blocks_condition.acquire()
self._control_blocks += _parse_control_blocks(payload)
self._process_mux_control_blocks()
self._control_blocks_condition.notify()
finally:
self._control_blocks_condition.release()
else:
try:
self._logical_channels_condition.acquire()
if not channel_id in self._logical_channels:
raise Exception('Received logical frame on channel id '
'%d, which is not established' %
channel_id)
inner_frame = self._parse_inner_frame(payload)
self._logical_channels[channel_id].receive_quota -= (
len(inner_frame.payload))
if self._logical_channels[channel_id].receive_quota < 0:
raise Exception('The server violates quota on '
'channel id %d' % channel_id)
finally:
self._logical_channels_condition.release()
self._logical_channels[channel_id].queue.put(inner_frame)
def _process_control_message(self, opcode, message):
# Ping/Pong are not supported.
if opcode == client_for_testing.OPCODE_CLOSE:
self._physical_connection_close_message = message
if self._is_active:
self._stream.send_close(
code=client_for_testing.STATUS_NORMAL_CLOSURE, reason='')
self._read_thread.request_stop()
if self._physical_connection_close_event:
self._physical_connection_close_event.set()
def _notify_reader_done(self):
self._logger.debug('Read thread terminated.')
self.close_socket()
def _assert_channel_slot_available(self):
try:
self._control_blocks_condition.acquire()
if len(self._channel_slots) == 0:
# Wait once
self._control_blocks_condition.wait(timeout=self._timeout)
finally:
self._control_blocks_condition.release()
if len(self._channel_slots) == 0:
raise Exception('Failed to receive NewChannelSlot')
def _assert_send_quota_available(self, channel_id):
try:
self._logical_channels_condition.acquire()
if self._logical_channels[channel_id].send_quota == 0:
# Wait once
self._logical_channels_condition.wait(timeout=self._timeout)
finally:
self._logical_channels_condition.release()
if self._logical_channels[channel_id].send_quota == 0:
raise Exception('Failed to receive FlowControl for channel id %d' %
channel_id)
def connect(self):
self._socket = client_for_testing.connect_socket_with_retry(
self._options.server_host,
self._options.server_port,
self._options.socket_timeout,
self._options.use_tls)
self._handshake.handshake(self._socket)
self._stream = client_for_testing.WebSocketStream(
self._socket, self._handshake)
self._logical_channels[_DEFAULT_CHANNEL_ID] = _LogicalChannelData()
self._read_thread = _MuxReaderThread(self)
self._read_thread.start()
self._assert_channel_slot_available()
self._assert_send_quota_available(_DEFAULT_CHANNEL_ID)
self._is_active = True
self._logger.info('Connection established')
def add_channel(self, channel_id, options):
if not self._is_active:
raise Exception('Mux client is not active')
if channel_id in self._logical_channels:
raise Exception('Channel id %d already exists' % channel_id)
try:
send_quota = self._channel_slots.popleft()
except IndexError, e:
raise Exception('No channel slots: %r' % e)
# Create AddChannel request
request_line = 'GET %s HTTP/1.1\r\n' % options.resource
fields = []
if options.server_port == client_for_testing.DEFAULT_PORT:
fields.append('Host: %s\r\n' % options.server_host.lower())
else:
fields.append('Host: %s:%d\r\n' % (options.server_host.lower(),
options.server_port))
fields.append('Origin: %s\r\n' % options.origin.lower())
fields.append('Connection: Upgrade\r\n')
if len(options.extensions) > 0:
fields.append('Sec-WebSocket-Extensions: %s\r\n' %
', '.join(options.extensions))
handshake = request_line + ''.join(fields) + '\r\n'
add_channel_request = _create_add_channel_request(
channel_id, handshake)
payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + add_channel_request
self._stream.send_binary(payload)
# Wait AddChannelResponse
self._logger.debug('Waiting AddChannelResponse for the request...')
response = None
try:
self._control_blocks_condition.acquire()
while True:
for block in self._control_blocks:
if block.opcode != _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
continue
if block.channel_id == channel_id:
response = block
self._control_blocks.remove(response)
break
if response:
break
self._control_blocks_condition.wait(self._timeout)
if not self._is_active:
raise Exception('AddChannelRequest timed out')
finally:
self._control_blocks_condition.release()
# Validate AddChannelResponse
if response.rejected:
raise Exception('The server rejected AddChannelRequest')
fields = _parse_handshake_response(response.encoded_handshake)
# Should we reject when Upgrade, Connection, or Sec-WebSocket-Accept
# headers exist?
self._logical_channels_condition.acquire()
self._logical_channels[channel_id] = _LogicalChannelData()
self._logical_channels[channel_id].send_quota = send_quota
self._logical_channels_condition.release()
self._logger.debug('Logical channel %d established' % channel_id)
def _check_logical_channel_is_opened(self, channel_id):
if not self._is_active:
raise Exception('Mux client is not active')
if not channel_id in self._logical_channels:
raise Exception('Logical channel %d is not established.')
def drop_channel(self, channel_id):
# TODO(bashi): Implement
pass
def send_flow_control(self, channel_id, replenished_quota):
self._check_logical_channel_is_opened(channel_id)
flow_control = _create_flow_control(channel_id, replenished_quota)
payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + flow_control
# Replenish receive quota
try:
self._logical_channels_condition.acquire()
self._logical_channels[channel_id].receive_quota += (
replenished_quota)
finally:
self._logical_channels_condition.release()
self._stream.send_binary(payload)
def send_message(self, channel_id, message, end=True, binary=False):
self._check_logical_channel_is_opened(channel_id)
if binary:
first_byte = (end << 7) | client_for_testing.OPCODE_BINARY
else:
first_byte = (end << 7) | client_for_testing.OPCODE_TEXT
message = message.encode('utf-8')
try:
self._logical_channels_condition.acquire()
if self._logical_channels[channel_id].send_quota < len(message):
raise Exception('Send quota violation: %d < %d' % (
self._logical_channels[channel_id].send_quota,
len(message)))
self._logical_channels[channel_id].send_quota -= len(message)
finally:
self._logical_channels_condition.release()
payload = _encode_channel_id(channel_id) + chr(first_byte) + message
self._stream.send_binary(payload)
def assert_receive(self, channel_id, payload, binary=False):
self._check_logical_channel_is_opened(channel_id)
try:
inner_frame = self._logical_channels[channel_id].queue.get(
timeout=self._timeout)
except Queue.Empty, e:
raise Exception('Cannot receive message from channel id %d' %
channel_id)
if binary:
opcode = client_for_testing.OPCODE_BINARY
else:
opcode = client_for_testing.OPCODE_TEXT
if inner_frame.opcode != opcode:
raise Exception('Unexpected opcode received (%r != %r)' %
(expected_opcode, inner_frame.opcode))
if inner_frame.payload != payload:
raise Exception('Unexpected payload received')
def send_close(self, channel_id, code=None, reason=''):
self._check_logical_channel_is_opened(channel_id)
if code is not None:
body = struct.pack('!H', code) + reason.encode('utf-8')
else:
body = ''
first_byte = (1 << 7) | client_for_testing.OPCODE_CLOSE
payload = _encode_channel_id(channel_id) + chr(first_byte) + body
self._stream.send_binary(payload)
def assert_receive_close(self, channel_id):
self._check_logical_channel_is_opened(channel_id)
try:
inner_frame = self._logical_channels[channel_id].queue.get(
timeout=self._timeout)
except Queue.Empty, e:
raise Exception('Cannot receive message from channel id %d' %
channel_id)
if inner_frame.opcode != client_for_testing.OPCODE_CLOSE:
raise Exception('Didn\'t receive close frame')
def send_physical_connection_close(self, code=None, reason=''):
self._physical_connection_close_event = threading.Event()
self._stream.send_close(code, reason)
# This method can be used only after calling
# send_physical_connection_close().
def assert_physical_connection_receive_close(
self, code=client_for_testing.STATUS_NORMAL_CLOSURE, reason=''):
self._physical_connection_close_event.wait(timeout=self._timeout)
if (not self._physical_connection_close_event.isSet() or
not self._physical_connection_close_message):
raise Exception('Didn\'t receive closing handshake')
def close_socket(self):
self._is_active = False
self._socket.close()
| mpl-2.0 |
vivianli32/TravelConnect | flask/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
    """Group prober bundling all single-byte charset probers.

    Builds one SingleByteCharSetProber per language model (order matters
    for tie-breaking) plus the coupled Hebrew logical/visual probers.
    """
    def __init__(self):
        CharSetGroupProber.__init__(self)
        simple_models = (
            Win1251CyrillicModel,
            Koi8rModel,
            Latin5CyrillicModel,
            MacCyrillicModel,
            Ibm866Model,
            Ibm855Model,
            Latin7GreekModel,
            Win1253GreekModel,
            Latin5BulgarianModel,
            Win1251BulgarianModel,
            Latin2HungarianModel,
            Win1250HungarianModel,
            TIS620ThaiModel,
        )
        self._mProbers = [SingleByteCharSetProber(model)
                          for model in simple_models]
        # Hebrew needs a dedicated prober that arbitrates between the
        # logical (Windows-1255) and visual orderings of the same model.
        hebrew_prober = HebrewProber()
        logical_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                 False, hebrew_prober)
        visual_hebrew = SingleByteCharSetProber(Win1255HebrewModel, True,
                                                hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew, visual_hebrew)
        self._mProbers.extend([hebrew_prober, logical_hebrew, visual_hebrew])
        self.reset()
| mit |
cnamejj/PyProc | regentest/root_devices.py | 1 | 1041 | #!/usr/bin/env python
"""Handle records from /proc/devices data files"""
import regentest as RG
import ProcHandlers as PH
PFC = PH.ProcFieldConstants
# ---
def re_root_devices(inprecs):
    """Iterate through parsed records and re-generate data file"""
    # NOTE: Python 2 module (print statements). Re-creates a
    # /proc/devices-style listing from the parsed record stream.
    __dtype = "unknown"
    __break = ""
    # Maps the parser's device-type token to the heading text of the file.
    __type_pref = dict()
    __type_pref["unknown"] = "Unknown"
    __type_pref["character"] = "Character"
    __type_pref["block"] = "Block"
    __head = "{lb:s}{dt:s} devices:"
    __template = "{major:3d} {name:s}"
    for __hilit in inprecs:
        __ff = inprecs.field
        # Emit a new section heading whenever the device type changes;
        # every section after the first is preceded by a blank line.
        if __dtype != __ff[PFC.F_DEVICE_TYPE]:
            __dtype = __ff[PFC.F_DEVICE_TYPE]
            print __head.format(dt=__type_pref[__dtype], lb=__break)
            __break = "\n"
#...+....1....+....2....+....3....+....4....+....5....+....6....+....7....+....8
        print __template.format(major=__ff[PFC.F_MAJOR_DEV],
                                name=__ff[PFC.F_DEVICE_NAME]
                                )
# Register this re-creator for the /proc/devices handler.
RG.RECREATOR[PH.GET_HANDLER("/proc/devices")] = re_root_devices
| gpl-2.0 |
lixiangning888/whole_project | modules/signatures/antisandbox_cuckoo.py | 3 | 1505 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Accuvant, Inc. (bspengler@accuvant.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class AntiCuckoo(Signature):
    """Flags samples that use Cuckoo-sandbox detection tricks, based on
    "anticuckoo" anomaly notifications raised by the monitor."""
    name = "antisandbox_cuckoo"
    description = "使用反Cuckoo系统检测技术"
    severity = 3
    weight = 3
    categories = ["anti-sandbox"]
    authors = ["Accuvant"]
    minimum = "1.3"
    evented = True
    filter_categories = set(["__notification__"])

    def __init__(self, *args, **kwargs):
        Signature.__init__(self, *args, **kwargs)

    def on_call(self, call, process):
        # The monitor reports anti-cuckoo behaviour as an "__anomaly__"
        # pseudo-API call whose Subcategory argument is "anticuckoo".
        match = self.check_argument_call(
            call,
            api="__anomaly__",
            name="Subcategory",
            pattern="anticuckoo",
        )
        if match:
            return True
| lgpl-3.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Dynamic_Shear_Behaviour/Frictional_SDOF_With_Damping/c_t_10/NonLinHardShear/Displacement_Response.py | 12 | 2048 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Displacement.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][24,:]
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(times,disp,'-r',label='Analytical Solution', Linewidth=4)
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Frictional_SDOF_freeVibration.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][24,:]
# Plot the figure. Add labels and titles.
plt.plot(times,disp,'-k',label='Numerical Solution', Linewidth=4)
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])
outfigname = "Displacement_Response.pdf";
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
jeremiahyan/odoo | addons/product/models/product_template.py | 1 | 61070 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import itertools
import logging
from collections import defaultdict
from odoo import api, fields, models, tools, _, SUPERUSER_ID
from odoo.exceptions import ValidationError, RedirectWarning, UserError
from odoo.osv import expression
_logger = logging.getLogger(__name__)
class ProductTemplate(models.Model):
_name = "product.template"
_inherit = ['mail.thread', 'mail.activity.mixin', 'image.mixin']
_description = "Product Template"
_order = "priority desc, name"
@tools.ormcache()
def _get_default_category_id(self):
    """Return the default product category record (cached at registry level)."""
    # Deletion forbidden (at least through unlink)
    return self.env.ref('product.product_category_all')
@tools.ormcache()
def _get_default_uom_id(self):
    """Return the default unit of measure ("Units", cached at registry level)."""
    # Deletion forbidden (at least through unlink)
    return self.env.ref('uom.product_uom_unit')
def _read_group_categ_id(self, categories, domain, order):
    """group_expand handler for ``categ_id``: return the categories to show
    as groups, honoring a ``default_categ_id`` context override."""
    category_ids = self.env.context.get('default_categ_id')
    if not category_ids and self.env.context.get('group_expand'):
        category_ids = categories._search([], order=order, access_rights_uid=SUPERUSER_ID)
    return categories.browse(category_ids)
# --- Description & classification fields ---------------------------------
name = fields.Char('Name', index=True, required=True, translate=True)
sequence = fields.Integer('Sequence', default=1, help='Gives the sequence order when displaying a product list')
description = fields.Html(
    'Description', translate=True)
description_purchase = fields.Text(
    'Purchase Description', translate=True)
description_sale = fields.Text(
    'Sales Description', translate=True,
    help="A description of the Product that you want to communicate to your customers. "
    "This description will be copied to every Sales Order, Delivery Order and Customer Invoice/Credit Note")
type = fields.Selection([
    ('consu', 'Consumable'),
    ('service', 'Service')], string='Product Type', default='consu', required=True,
    help='A storable product is a product for which you manage stock. The Inventory app has to be installed.\n'
    'A consumable product is a product for which stock is not managed.\n'
    'A service is a non-material product you provide.')
categ_id = fields.Many2one(
    'product.category', 'Product Category',
    change_default=True, default=_get_default_category_id, group_expand='_read_group_categ_id',
    required=True, help="Select category for the current product")
currency_id = fields.Many2one(
    'res.currency', 'Currency', compute='_compute_currency_id')
cost_currency_id = fields.Many2one(
    'res.currency', 'Cost Currency', compute='_compute_cost_currency_id')
# price fields
# price: total template price, context dependent (partner, pricelist, quantity)
price = fields.Float(
    'Price', compute='_compute_template_price', inverse='_set_template_price',
    digits='Product Price')
# list_price: catalog price, user defined
list_price = fields.Float(
    'Sales Price', default=1.0,
    digits='Product Price',
    help="Price at which the product is sold to customers.")
# lst_price: catalog price for template, but including extra for variants
lst_price = fields.Float(
    'Public Price', related='list_price', readonly=False,
    digits='Product Price')
standard_price = fields.Float(
    'Cost', compute='_compute_standard_price',
    inverse='_set_standard_price', search='_search_standard_price',
    digits='Product Price', groups="base.group_user",
    help="""In Standard Price & AVCO: value of the product (automatically computed in AVCO).
In FIFO: value of the last unit that left the stock (automatically computed).
Used to value the product when the purchase cost is not known (e.g. inventory adjustment).
Used to compute margins on sale orders.""")
# --- Physical properties (mirrored from the unique variant) --------------
volume = fields.Float(
    'Volume', compute='_compute_volume', inverse='_set_volume', digits='Volume', store=True)
volume_uom_name = fields.Char(string='Volume unit of measure label', compute='_compute_volume_uom_name')
weight = fields.Float(
    'Weight', compute='_compute_weight', digits='Stock Weight',
    inverse='_set_weight', store=True)
weight_uom_name = fields.Char(string='Weight unit of measure label', compute='_compute_weight_uom_name')
sale_ok = fields.Boolean('Can be Sold', default=True)
purchase_ok = fields.Boolean('Can be Purchased', default=True)
pricelist_id = fields.Many2one(
    'product.pricelist', 'Pricelist', store=False,
    help='Technical field. Used for searching on pricelists, not stored in database.')
uom_id = fields.Many2one(
    'uom.uom', 'Unit of Measure',
    default=_get_default_uom_id, required=True,
    help="Default unit of measure used for all stock operations.")
uom_name = fields.Char(string='Unit of Measure Name', related='uom_id.name', readonly=True)
uom_po_id = fields.Many2one(
    'uom.uom', 'Purchase Unit of Measure',
    default=_get_default_uom_id, required=True,
    help="Default unit of measure used for purchase orders. It must be in the same category as the default unit of measure.")
company_id = fields.Many2one(
    'res.company', 'Company', index=1)
packaging_ids = fields.One2many(
    'product.packaging', string="Product Packages", compute="_compute_packaging_ids", inverse="_set_packaging_ids",
    help="Gives the different ways to package the same product.")
seller_ids = fields.One2many('product.supplierinfo', 'product_tmpl_id', 'Vendors', depends_context=('company',), help="Define vendor pricelists.")
variant_seller_ids = fields.One2many('product.supplierinfo', 'product_tmpl_id')
active = fields.Boolean('Active', default=True, help="If unchecked, it will allow you to hide the product without removing it.")
color = fields.Integer('Color Index')
# --- Variant handling -----------------------------------------------------
is_product_variant = fields.Boolean(string='Is a product variant', compute='_compute_is_product_variant')
attribute_line_ids = fields.One2many('product.template.attribute.line', 'product_tmpl_id', 'Product Attributes', copy=True)
valid_product_template_attribute_line_ids = fields.Many2many('product.template.attribute.line',
    compute="_compute_valid_product_template_attribute_line_ids", string='Valid Product Attribute Lines', help="Technical compute")
product_variant_ids = fields.One2many('product.product', 'product_tmpl_id', 'Products', required=True)
# performance: product_variant_id provides prefetching on the first product variant only
product_variant_id = fields.Many2one('product.product', 'Product', compute='_compute_product_variant_id')
product_variant_count = fields.Integer(
    '# Product Variants', compute='_compute_product_variant_count')
# related to display product product information if is_product_variant
barcode = fields.Char('Barcode', compute='_compute_barcode', inverse='_set_barcode', search='_search_barcode')
default_code = fields.Char(
    'Internal Reference', compute='_compute_default_code',
    inverse='_set_default_code', store=True)
pricelist_item_count = fields.Integer("Number of price rules", compute="_compute_item_count")
can_image_1024_be_zoomed = fields.Boolean("Can Image 1024 be zoomed", compute='_compute_can_image_1024_be_zoomed', store=True)
has_configurable_attributes = fields.Boolean("Is a configurable product", compute='_compute_has_configurable_attributes', store=True)
priority = fields.Selection([
    ('0', 'Normal'),
    ('1', 'Favorite'),
], default='0', string="Favorite")
def _compute_item_count(self):
    """Count the pricelist rules targeting this template or any of its variants."""
    for template in self:
        # Pricelist item count counts the rules applicable on current template or on its variants.
        template.pricelist_item_count = template.env['product.pricelist.item'].search_count([
            '|', ('product_tmpl_id', '=', template.id), ('product_id', 'in', template.product_variant_ids.ids)])
@api.depends('image_1920', 'image_1024')
def _compute_can_image_1024_be_zoomed(self):
    """True when the full-size image is larger than its 1024px thumbnail."""
    for template in self:
        template.can_image_1024_be_zoomed = template.image_1920 and tools.is_image_size_above(template.image_1920, template.image_1024)
@api.depends('attribute_line_ids', 'attribute_line_ids.value_ids', 'attribute_line_ids.attribute_id.create_variant')
def _compute_has_configurable_attributes(self):
    """A product is considered configurable if:
    - It has dynamic attributes
    - It has any attribute line with at least 2 attribute values configured
    """
    for product in self:
        # Either a dynamic attribute, or a real choice (>= 2 values) on a line.
        product.has_configurable_attributes = product.has_dynamic_attributes() or any(len(ptal.value_ids) >= 2 for ptal in product.attribute_line_ids)
@api.depends('product_variant_ids')
def _compute_product_variant_id(self):
    """Expose the first variant (empty recordset slice gives False when none)."""
    for p in self:
        p.product_variant_id = p.product_variant_ids[:1].id
@api.depends('company_id')
def _compute_currency_id(self):
    """Currency of the template's company, falling back on the main company's."""
    main_company = self.env['res.company']._get_main_company()
    for template in self:
        template.currency_id = template.company_id.sudo().currency_id.id or main_company.currency_id.id
@api.depends_context('company')
def _compute_cost_currency_id(self):
    """Costs are always expressed in the current company's currency."""
    self.cost_currency_id = self.env.company.currency_id.id
def _compute_template_price(self):
    """Context-dependent price (pricelist/partner/quantity from context)."""
    prices = self._compute_template_price_no_inverse()
    for template in self:
        template.price = prices.get(template.id, 0.0)
def _compute_template_price_no_inverse(self):
    """The _compute_template_price writes the 'list_price' field with an inverse method
    This method allows computing the price without writing the 'list_price'
    """
    prices = {}
    pricelist_id_or_name = self._context.get('pricelist')
    if pricelist_id_or_name:
        pricelist = None
        partner = self.env.context.get('partner')
        quantity = self.env.context.get('quantity', 1.0)
        # Support context pricelists specified as list, display_name or ID for compatibility
        if isinstance(pricelist_id_or_name, list):
            pricelist_id_or_name = pricelist_id_or_name[0]
        if isinstance(pricelist_id_or_name, str):
            # Resolve the pricelist by exact name; keep the first match only.
            pricelist_data = self.env['product.pricelist'].name_search(pricelist_id_or_name, operator='=', limit=1)
            if pricelist_data:
                pricelist = self.env['product.pricelist'].browse(pricelist_data[0][0])
        elif isinstance(pricelist_id_or_name, int):
            pricelist = self.env['product.pricelist'].browse(pricelist_id_or_name)
        if pricelist:
            # Same quantity/partner for every template in the batch.
            quantities = [quantity] * len(self)
            partners = [partner] * len(self)
            prices = pricelist.get_products_price(self, quantities, partners)
    return prices
def _set_template_price(self):
    """Inverse of ``price``: write list_price, converting from the context UoM if any."""
    if self._context.get('uom'):
        for template in self:
            value = self.env['uom.uom'].browse(self._context['uom'])._compute_price(template.price, template.uom_id)
            template.write({'list_price': value})
    else:
        self.write({'list_price': self.price})
@api.depends_context('company')
@api.depends('product_variant_ids', 'product_variant_ids.standard_price')
def _compute_standard_price(self):
    """Mirror the variant's cost for single-variant templates; 0.0 otherwise."""
    # Depends on force_company context because standard_price is company_dependent
    # on the product_product
    unique_variants = self.filtered(lambda template: len(template.product_variant_ids) == 1)
    for template in unique_variants:
        template.standard_price = template.product_variant_ids.standard_price
    for template in (self - unique_variants):
        template.standard_price = 0.0
def _set_standard_price(self):
    """Inverse: push the cost to the variant when there is exactly one."""
    for template in self:
        if len(template.product_variant_ids) == 1:
            template.product_variant_ids.standard_price = template.standard_price
def _search_standard_price(self, operator, value):
    """Search templates through their variants' (company-dependent) cost."""
    products = self.env['product.product'].search([('standard_price', operator, value)], limit=None)
    return [('id', 'in', products.mapped('product_tmpl_id').ids)]
@api.depends('product_variant_ids', 'product_variant_ids.volume')
def _compute_volume(self):
    """Mirror the variant's volume for single-variant templates; 0.0 otherwise."""
    unique_variants = self.filtered(lambda template: len(template.product_variant_ids) == 1)
    for template in unique_variants:
        template.volume = template.product_variant_ids.volume
    for template in (self - unique_variants):
        template.volume = 0.0
def _set_volume(self):
    """Inverse: push the volume to the variant when there is exactly one."""
    for template in self:
        if len(template.product_variant_ids) == 1:
            template.product_variant_ids.volume = template.volume
@api.depends('product_variant_ids', 'product_variant_ids.weight')
def _compute_weight(self):
    """Mirror the variant's weight for single-variant templates; 0.0 otherwise."""
    unique_variants = self.filtered(lambda template: len(template.product_variant_ids) == 1)
    for template in unique_variants:
        template.weight = template.product_variant_ids.weight
    for template in (self - unique_variants):
        template.weight = 0.0
def _compute_is_product_variant(self):
    """A product.template record is never a variant itself."""
    self.is_product_variant = False
@api.depends('product_variant_ids.barcode')
def _compute_barcode(self):
    """Mirror the variant's barcode for single-variant templates; False otherwise."""
    self.barcode = False
    for template in self:
        if len(template.product_variant_ids) == 1:
            template.barcode = template.product_variant_ids.barcode
def _search_barcode(self, operator, value):
    """Search templates by variant barcode (archived records included)."""
    templates = self.with_context(active_test=False).search([('product_variant_ids.barcode', operator, value)])
    return [('id', 'in', templates.ids)]
def _set_barcode(self):
    """Inverse: write the barcode on the unique variant, if there is exactly one."""
    if len(self.product_variant_ids) == 1:
        self.product_variant_ids.barcode = self.barcode
@api.model
def _get_weight_uom_id_from_ir_config_parameter(self):
    """Unit of measure used to interpret the `weight` field.

    Kilograms by default; pounds when the "product.weight_in_lbs"
    ir.config_parameter is set to "1".
    """
    get_param = self.env['ir.config_parameter'].sudo().get_param
    if get_param('product.weight_in_lbs') == '1':
        xmlid = 'uom.product_uom_lb'
    else:
        xmlid = 'uom.product_uom_kgm'
    return self.env.ref(xmlid)
@api.model
def _get_length_uom_id_from_ir_config_parameter(self):
    """Unit of measure used to interpret the `length`, `width` and `height` fields.

    Millimeters by default; feet when the "product.volume_in_cubic_feet"
    ir.config_parameter is set to "1".
    """
    get_param = self.env['ir.config_parameter'].sudo().get_param
    if get_param('product.volume_in_cubic_feet') == '1':
        xmlid = 'uom.product_uom_foot'
    else:
        xmlid = 'uom.product_uom_millimeter'
    return self.env.ref(xmlid)
@api.model
def _get_volume_uom_id_from_ir_config_parameter(self):
    """Unit of measure used to interpret the `volume` field.

    Cubic meters by default; cubic feet when the
    "product.volume_in_cubic_feet" ir.config_parameter is set to "1".
    """
    get_param = self.env['ir.config_parameter'].sudo().get_param
    if get_param('product.volume_in_cubic_feet') == '1':
        xmlid = 'uom.product_uom_cubic_foot'
    else:
        xmlid = 'uom.product_uom_cubic_meter'
    return self.env.ref(xmlid)
@api.model
def _get_weight_uom_name_from_ir_config_parameter(self):
    """Display name of the configured weight unit of measure."""
    return self._get_weight_uom_id_from_ir_config_parameter().display_name
@api.model
def _get_length_uom_name_from_ir_config_parameter(self):
    """Display name of the configured length unit of measure."""
    return self._get_length_uom_id_from_ir_config_parameter().display_name
@api.model
def _get_volume_uom_name_from_ir_config_parameter(self):
    """Display name of the configured volume unit of measure."""
    return self._get_volume_uom_id_from_ir_config_parameter().display_name
def _compute_weight_uom_name(self):
    """Label shown next to the weight field (per system configuration)."""
    self.weight_uom_name = self._get_weight_uom_name_from_ir_config_parameter()
def _compute_volume_uom_name(self):
    """Label shown next to the volume field (per system configuration)."""
    self.volume_uom_name = self._get_volume_uom_name_from_ir_config_parameter()
def _set_weight(self):
    """Inverse: push the weight to the variant when there is exactly one."""
    for template in self:
        if len(template.product_variant_ids) == 1:
            template.product_variant_ids.weight = template.weight
@api.depends('product_variant_ids.product_tmpl_id')
def _compute_product_variant_count(self):
    """Number of variants linked to each template."""
    for template in self:
        template.product_variant_count = len(template.product_variant_ids)
@api.depends('product_variant_ids', 'product_variant_ids.default_code')
def _compute_default_code(self):
    """Mirror the variant's internal reference for single-variant templates."""
    unique_variants = self.filtered(lambda template: len(template.product_variant_ids) == 1)
    for template in unique_variants:
        template.default_code = template.product_variant_ids.default_code
    for template in (self - unique_variants):
        template.default_code = False
def _set_default_code(self):
    """Inverse: push the internal reference to the unique variant."""
    for template in self:
        if len(template.product_variant_ids) == 1:
            template.product_variant_ids.default_code = template.default_code
@api.depends('product_variant_ids', 'product_variant_ids.packaging_ids')
def _compute_packaging_ids(self):
    """Mirror the variant's packagings for single-variant templates."""
    for p in self:
        if len(p.product_variant_ids) == 1:
            p.packaging_ids = p.product_variant_ids.packaging_ids
        else:
            p.packaging_ids = False
def _set_packaging_ids(self):
    """Inverse: push the packagings to the unique variant."""
    for p in self:
        if len(p.product_variant_ids) == 1:
            p.product_variant_ids.packaging_ids = p.packaging_ids
@api.constrains('uom_id', 'uom_po_id')
def _check_uom(self):
    """Default and purchase UoMs must share the same UoM category."""
    if any(template.uom_id and template.uom_po_id and template.uom_id.category_id != template.uom_po_id.category_id for template in self):
        raise ValidationError(_('The default Unit of Measure and the purchase Unit of Measure must be in the same category.'))
@api.onchange('uom_id')
def _onchange_uom_id(self):
    """Default the purchase UoM to the newly selected default UoM."""
    if self.uom_id:
        self.uom_po_id = self.uom_id.id
@api.onchange('uom_po_id')
def _onchange_uom(self):
    """Reset the purchase UoM when it leaves the default UoM's category."""
    if self.uom_id and self.uom_po_id and self.uom_id.category_id != self.uom_po_id.category_id:
        self.uom_po_id = self.uom_id
@api.onchange('type')
def _onchange_type(self):
    """Extension hook: no-op here, overridden by other modules."""
    # Do nothing but needed for inheritance
    return {}
@api.model_create_multi
def create(self, vals_list):
    '''Create the templates, generate their variants (unless disabled via the
    "create_product_product" context key) and push variant-level values
    (barcode, code, cost, volume, weight, packaging) down to the freshly
    created single variant.'''
    templates = super(ProductTemplate, self).create(vals_list)
    if "create_product_product" not in self._context:
        templates._create_variant_ids()
    # This is needed to set given values to first variant after creation
    for template, vals in zip(templates, vals_list):
        related_vals = {}
        if vals.get('barcode'):
            related_vals['barcode'] = vals['barcode']
        if vals.get('default_code'):
            related_vals['default_code'] = vals['default_code']
        if vals.get('standard_price'):
            related_vals['standard_price'] = vals['standard_price']
        if vals.get('volume'):
            related_vals['volume'] = vals['volume']
        if vals.get('weight'):
            related_vals['weight'] = vals['weight']
        # Please do forward port
        if vals.get('packaging_ids'):
            related_vals['packaging_ids'] = vals['packaging_ids']
        if related_vals:
            template.write(related_vals)
    return templates
def write(self, vals):
    """Extend write to keep the purchase UoM consistent, (re)generate
    variants when attributes change, and propagate archiving and image
    invalidation to the variants."""
    if 'uom_id' in vals or 'uom_po_id' in vals:
        uom_id = self.env['uom.uom'].browse(vals.get('uom_id')) or self.uom_id
        uom_po_id = self.env['uom.uom'].browse(vals.get('uom_po_id')) or self.uom_po_id
        # Keep the purchase UoM in the same category as the default UoM.
        if uom_id and uom_po_id and uom_id.category_id != uom_po_id.category_id:
            vals['uom_po_id'] = uom_id.id
    res = super(ProductTemplate, self).write(vals)
    if 'attribute_line_ids' in vals or (vals.get('active') and len(self.product_variant_ids) == 0):
        self._create_variant_ids()
    if 'active' in vals and not vals.get('active'):
        # Archiving a template archives all of its variants.
        self.with_context(active_test=False).mapped('product_variant_ids').write({'active': vals.get('active')})
    if 'image_1920' in vals:
        self.env['product.product'].invalidate_cache(fnames=[
            'image_1920',
            'image_1024',
            'image_512',
            'image_256',
            'image_128',
            'can_image_1024_be_zoomed',
        ])
        # Touch all products that will fall back on the template field
        # This is done because __last_update is used to compute the 'unique' SHA in image URLs
        # for making sure that images are not retrieved from the browser cache after a change
        # Performance discussion outcome:
        # Actually touch all variants to avoid using filtered on the image_variant_1920 field
        self.product_variant_ids.write({})
    return res
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
    """Duplicate the template, suffixing the name with "(copy)" unless a
    name is given in ``default``."""
    # TDE FIXME: should probably be copy_data
    self.ensure_one()
    if default is None:
        default = {}
    if 'name' not in default:
        default['name'] = _("%s (copy)", self.name)
    return super(ProductTemplate, self).copy(default=default)
def name_get(self):
    """Return ``(id, display name)`` pairs, prefixing the name with the
    internal reference (``[CODE] Name``) when one is set."""
    # Prefetch only the fields used here, so `browse` doesn't fetch others.
    self.browse(self.ids).read(['name', 'default_code'])
    result = []
    for template in self:
        prefix = '[%s] ' % template.default_code if template.default_code else ''
        result.append((template.id, '%s%s' % (prefix, template.name)))
    return result
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
    """Search templates through their variants first (so variant-level data
    matches too), then complete with the plain template search for
    templates without variants, and finally re-run a search on the
    collected ids to restore the template ordering."""
    # Only use the product.product heuristics if there is a search term and the domain
    # does not specify a match on `product.template` IDs.
    if not name or any(term[0] == 'id' for term in (args or [])):
        return super(ProductTemplate, self)._name_search(name=name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)
    Product = self.env['product.product']
    templates = self.browse([])
    domain_no_variant = [('product_variant_ids', '=', False)]
    while True:
        # Exclude templates already found in previous rounds.
        domain = templates and [('product_tmpl_id', 'not in', templates.ids)] or []
        args = args if args is not None else []
        products_ids = Product._name_search(name, args + domain, operator=operator, name_get_uid=name_get_uid)
        products = Product.browse(products_ids)
        new_templates = products.mapped('product_tmpl_id')
        if new_templates & templates:
            """Product._name_search can bypass the domain we passed (search on supplier info).
            If this happens, an infinite loop will occur."""
            break
        templates |= new_templates
        current_round_templates = self.browse([])
        if not products:
            domain_template = args + domain_no_variant + (templates and [('id', 'not in', templates.ids)] or [])
            template_ids = super(ProductTemplate, self)._name_search(name=name, args=domain_template, operator=operator, limit=limit, name_get_uid=name_get_uid)
            current_round_templates |= self.browse(template_ids)
            templates |= current_round_templates
        if (not products and not current_round_templates) or (limit and (len(templates) > limit)):
            break
    searched_ids = set(templates.ids)
    # some product.templates do not have product.products yet (dynamic variants configuration),
    # we need to add the base _name_search to the results
    # FIXME awa: this is really not performant at all but after discussing with the team
    # we don't see another way to do it
    if not limit or len(searched_ids) < limit:
        searched_ids |= set(super(ProductTemplate, self)._name_search(
            name,
            args=args,
            operator=operator,
            limit=limit,
            name_get_uid=name_get_uid))
    # re-apply product.template order + name_get
    return super(ProductTemplate, self)._name_search(
        '', args=[('id', 'in', list(searched_ids))],
        operator='ilike', limit=limit, name_get_uid=name_get_uid)
def open_pricelist_rules(self):
    """Return an act_window action showing the pricelist rules (tree + form)
    that apply to this template or to any of its variants."""
    self.ensure_one()
    tree_view = self.env.ref('product.product_pricelist_item_tree_view_from_product')
    return {
        'name': _('Price Rules'),
        'view_mode': 'tree,form',
        'views': [(tree_view.id, 'tree'), (False, 'form')],
        'res_model': 'product.pricelist.item',
        'type': 'ir.actions.act_window',
        'target': 'current',
        # Rules may target either the template or one of its variants.
        'domain': ['|',
                   ('product_tmpl_id', '=', self.id),
                   ('product_id', 'in', self.product_variant_ids.ids)],
        'context': {
            'default_product_tmpl_id': self.id,
            'default_applied_on': '1_product',
            'product_without_variants': self.product_variant_count == 1,
        },
    }
def price_compute(self, price_type, uom=False, currency=False, company=None):
    """Return ``{template_id: price}`` for the requested ``price_type``
    field (e.g. 'list_price', 'standard_price'), optionally converted to
    the given ``uom`` and ``currency``."""
    # TDE FIXME: delegate to template or not ? fields are reencoded here ...
    # compatibility about context keys used a bit everywhere in the code
    if not uom and self._context.get('uom'):
        uom = self.env['uom.uom'].browse(self._context['uom'])
    if not currency and self._context.get('currency'):
        currency = self.env['res.currency'].browse(self._context['currency'])
    templates = self
    if price_type == 'standard_price':
        # standard_price field can only be seen by users in base.group_user
        # Thus, in order to compute the sale price from the cost for users not in this group
        # We fetch the standard price as the superuser
        templates = self.with_company(company).sudo()
    if not company:
        company = self.env.company
    date = self.env.context.get('date') or fields.Date.today()
    prices = dict.fromkeys(self.ids, 0.0)
    for template in templates:
        prices[template.id] = template[price_type] or 0.0
        # yes, there can be attribute values for product template if it's not a variant YET
        # (see field product.attribute create_variant)
        if price_type == 'list_price' and self._context.get('current_attributes_price_extra'):
            # we have a list of price_extra that comes from the attribute values, we need to sum all that
            prices[template.id] += sum(self._context.get('current_attributes_price_extra'))
        if uom:
            prices[template.id] = template.uom_id._compute_price(prices[template.id], uom)
        # Convert from current user company currency to asked one
        # This is right cause a field cannot be in more than one currency
        if currency:
            prices[template.id] = template.currency_id._convert(prices[template.id], currency, company, date)
    return prices
def _create_variant_ids(self):
    """(Re)generate the variants of the templates in ``self``: reactivate
    existing matching variants, create missing combinations (unless the
    attributes generate variants dynamically) and archive or unlink the
    obsolete ones."""
    self.flush()
    Product = self.env["product.product"]
    variants_to_create = []
    variants_to_activate = Product
    variants_to_unlink = Product
    for tmpl_id in self:
        lines_without_no_variants = tmpl_id.valid_product_template_attribute_line_ids._without_no_variant_attributes()
        all_variants = tmpl_id.with_context(active_test=False).product_variant_ids.sorted(lambda p: (p.active, -p.id))
        current_variants_to_create = []
        current_variants_to_activate = Product
        # adding an attribute with only one value should not recreate product
        # write this attribute on every product to make sure we don't lose them
        single_value_lines = lines_without_no_variants.filtered(lambda ptal: len(ptal.product_template_value_ids._only_active()) == 1)
        if single_value_lines:
            for variant in all_variants:
                combination = variant.product_template_attribute_value_ids | single_value_lines.product_template_value_ids._only_active()
                # Do not add single value if the resulting combination would
                # be invalid anyway.
                if (
                    len(combination) == len(lines_without_no_variants) and
                    combination.attribute_line_id == lines_without_no_variants
                ):
                    variant.product_template_attribute_value_ids = combination
        # Set containing existing `product.template.attribute.value` combination
        existing_variants = {
            variant.product_template_attribute_value_ids: variant for variant in all_variants
        }
        # Determine which product variants need to be created based on the attribute
        # configuration. If any attribute is set to generate variants dynamically, skip the
        # process.
        # Technical note: if there is no attribute, a variant is still created because
        # 'not any([])' and 'set([]) not in set([])' are True.
        if not tmpl_id.has_dynamic_attributes():
            # Iterator containing all possible `product.template.attribute.value` combination
            # The iterator is used to avoid MemoryError in case of a huge number of combination.
            all_combinations = itertools.product(*[
                ptal.product_template_value_ids._only_active() for ptal in lines_without_no_variants
            ])
            # For each possible variant, create if it doesn't exist yet.
            for combination_tuple in all_combinations:
                combination = self.env['product.template.attribute.value'].concat(*combination_tuple)
                if combination in existing_variants:
                    current_variants_to_activate += existing_variants[combination]
                else:
                    current_variants_to_create.append({
                        'product_tmpl_id': tmpl_id.id,
                        'product_template_attribute_value_ids': [(6, 0, combination.ids)],
                        'active': tmpl_id.active,
                    })
                    if len(current_variants_to_create) > 1000:
                        raise UserError(_(
                            'The number of variants to generate is too high. '
                            'You should either not generate variants for each combination or generate them on demand from the sales order. '
                            'To do so, open the form view of attributes and change the mode of *Create Variants*.'))
            variants_to_create += current_variants_to_create
            variants_to_activate += current_variants_to_activate
        else:
            # Dynamic attributes: never create, only keep existing variants
            # whose combination is still possible.
            for variant in existing_variants.values():
                is_combination_possible = self._is_combination_possible_by_config(
                    combination=variant.product_template_attribute_value_ids,
                    ignore_no_variant=True,
                )
                if is_combination_possible:
                    current_variants_to_activate += variant
            variants_to_activate += current_variants_to_activate
        variants_to_unlink += all_variants - current_variants_to_activate
    if variants_to_activate:
        variants_to_activate.write({'active': True})
    if variants_to_create:
        Product.create(variants_to_create)
    if variants_to_unlink:
        variants_to_unlink._unlink_or_archive()
    # prefetched o2m have to be reloaded (because of active_test)
    # (eg. product.template: product_variant_ids)
    # We can't rely on existing invalidate_cache because of the savepoint
    # in _unlink_or_archive.
    self.flush()
    self.invalidate_cache()
    return True
def has_dynamic_attributes(self):
    """Tell whether this `product.template` has at least one dynamic attribute.

    :return: True if any attribute on a valid line has
        `create_variant == 'dynamic'`, False otherwise
    :rtype: bool
    """
    self.ensure_one()
    for attribute in self.valid_product_template_attribute_line_ids.attribute_id:
        if attribute.create_variant == 'dynamic':
            return True
    return False
@api.depends('attribute_line_ids.value_ids')
def _compute_valid_product_template_attribute_line_ids(self):
    """Compute the attribute lines usable on each template.

    A line is considered valid as soon as it has at least one possible
    value. Single-value lines are kept too: they do not appear in the
    configurator (unless they have an is_custom value to input), but they
    can still be used to filter products by attribute/value.
    """
    for template in self:
        valid_lines = template.attribute_line_ids.filtered(lambda line: line.value_ids)
        template.valid_product_template_attribute_line_ids = valid_lines
def _get_possible_variants(self, parent_combination=None):
    """Return the existing variants of this template that are possible.

    For dynamic attributes, only variants that have already been created
    can be returned.

    If there are a lot of variants this may be slow; even when there are
    few, avoid calling this method inside a loop over product templates —
    its reasonable use cases are very restricted.

    :param parent_combination: combination from which `self` is an
        optional or accessory product.
    :type parent_combination: recordset `product.template.attribute.value`
    :return: the existing variants that are possible.
    :rtype: recordset of `product.product`
    """
    self.ensure_one()
    possible_variants = self.product_variant_ids.filtered(
        lambda variant: variant._is_variant_possible(parent_combination))
    return possible_variants
def _get_attribute_exclusions(self, parent_combination=None, parent_name=None):
    """Return the attribute exclusions of this product.

    :param parent_combination: the combination from which `self` is an
        optional or accessory product; exclusion rules on one product can
        concern another product.
    :type parent_combination: recordset `product.template.attribute.value`
    :param parent_name: the name of the parent product combination, used in
        the interface to explain why some combinations are not available
        (e.g: Not available with Customizable Desk (Legs: Steel))
    :type parent_name: str
    :return: dict of exclusions
        - exclusions: from this product itself (completed with inverses)
        - parent_combination: ids of the given parent_combination
        - parent_exclusions: from the parent_combination
        - parent_product_name: the name of the parent product if any
        - mapped_attribute_names: display name of every attribute value by
          id, used to explain why a combination is not available
          (e.g: Not available with Color: Black)
    """
    self.ensure_one()
    if not parent_combination:
        parent_combination = self.env['product.template.attribute.value']
    own_exclusions = self._get_own_attribute_exclusions()
    return {
        'exclusions': self._complete_inverse_exclusions(own_exclusions),
        'parent_exclusions': self._get_parent_attribute_exclusions(parent_combination),
        'parent_combination': parent_combination.ids,
        'parent_product_name': parent_name,
        'mapped_attribute_names': self._get_mapped_attribute_names(parent_combination),
    }
@api.model
def _complete_inverse_exclusions(self, exclusions):
    """Complete the dictionary of exclusions with their respective inverses.

    e.g: Black excludes XL and L
        -> XL excludes Black
        -> L excludes Black

    :param exclusions: mapping of value id to the list of value ids it
        excludes
    :type exclusions: dict
    :return: a new dict also containing the inverse exclusions; the given
        `exclusions` dict and its lists are left unmodified
    :rtype: dict
    """
    # Copy the lists, not just the dict: `dict(exclusions)` shares the list
    # objects with the input, so appending inverse keys below would mutate
    # the caller's data as a side effect.
    result = {key: list(value) for key, value in exclusions.items()}
    for key, value in exclusions.items():
        for exclusion in value:
            if exclusion in result:
                if key not in result[exclusion]:
                    result[exclusion].append(key)
            else:
                result[exclusion] = [key]
    return result
def _get_own_attribute_exclusions(self):
    """Get exclusions coming from the current template.

    :return: dict where each key is the id of a product template attribute
        value (ptav) of this template, and the value is the list of ids of
        the ptav it excludes (empty list if no exclusion)
    :rtype: dict
    """
    self.ensure_one()
    # Every ptav of the valid attribute lines (note: no _only_active()
    # filter here, unlike _is_combination_possible_by_config).
    product_template_attribute_values = self.valid_product_template_attribute_line_ids.product_template_value_ids
    return {
        ptav.id: [
            value_id
            # Only keep exclusion rules that target this template: rules on
            # one product can concern another (optional/accessory) product.
            # NOTE: the lambda parameter deliberately shadows the outer
            # comprehension variable of the same name.
            for filter_line in ptav.exclude_for.filtered(
                lambda filter_line: filter_line.product_tmpl_id == self
            ) for value_id in filter_line.value_ids.ids
        ]
        for ptav in product_template_attribute_values
    }
def _get_parent_attribute_exclusions(self, parent_combination):
    """Get exclusions coming from the parent combination.

    :param parent_combination: combination from which `self` is an optional
        or accessory product
    :type parent_combination: recordset `product.template.attribute.value`
    :return: dict where each key is the id of a ptav of the parent, and the
        value is the list of ids of the ptav of `self` excluded because of
        the parent
    :rtype: dict
    """
    self.ensure_one()
    # No parent combination: nothing can be excluded by it.
    if not parent_combination:
        return {}
    result = {}
    for product_attribute_value in parent_combination:
        # Only consider exclusion rules that target this template.
        for filter_line in product_attribute_value.exclude_for.filtered(
            lambda filter_line: filter_line.product_tmpl_id == self
        ):
            # Some exclusions don't have attribute value. This means that the template is not
            # compatible with the parent combination. If such an exclusion is found, it means that all
            # attribute values are excluded.
            # NOTE(review): when several rules match the same parent value,
            # each assignment overwrites the previous one — confirm this is
            # intended rather than merging the value lists.
            if filter_line.value_ids:
                result[product_attribute_value.id] = filter_line.value_ids.ids
            else:
                # Rule with no value: exclude every ptav of the template.
                result[product_attribute_value.id] = filter_line.product_tmpl_id.mapped('attribute_line_ids.product_template_value_ids').ids
    return result
def _get_mapped_attribute_names(self, parent_combination=None):
    """Map attribute value ids to their display names.

    Used to explain in the interface why a combination is not available
    (e.g: Not available with Color: Black). Includes the attribute values
    of this product and, when given, those of the parent combination.

    :param parent_combination: combination from which `self` is an optional
        or accessory product
    :type parent_combination: recordset `product.template.attribute.value`
    :return: display name of every relevant attribute value, by id
    :rtype: dict
    """
    self.ensure_one()
    attribute_values = self.valid_product_template_attribute_line_ids.product_template_value_ids
    if parent_combination:
        attribute_values |= parent_combination
    return {value.id: value.display_name for value in attribute_values}
def _is_combination_possible_by_config(self, combination, ignore_no_variant=False):
    """Check the combination against the attribute configuration of the template.

    :param combination: the combination to check for possibility
    :type combination: recordset `product.template.attribute.value`
    :param ignore_no_variant: whether no_variant attributes should be ignored
    :type ignore_no_variant: bool
    :return: whether the combination matches the attributes configured on
        the template
    :rtype: bool
    """
    self.ensure_one()
    attribute_lines = self.valid_product_template_attribute_line_ids
    if ignore_no_variant:
        attribute_lines = attribute_lines._without_no_variant_attributes()
    if len(combination) != len(attribute_lines):
        # The combination doesn't have one value per configured attribute.
        return False
    if combination.attribute_line_id != attribute_lines:
        # The combination uses attributes not configured on the template.
        return False
    active_values = attribute_lines.product_template_value_ids._only_active()
    if not (active_values >= combination):
        # The combination uses values not configured (or inactive) on the template.
        return False
    return True
def _is_combination_possible(self, combination, parent_combination=None, ignore_no_variant=False):
    """Return whether the given combination is possible.

    The combination is possible if it is not excluded by any rule
    coming from the current template, not excluded by any rule from the
    parent_combination (if given), and there should not be any archived
    variant with the exact same combination.

    If the template does not have any dynamic attribute, the combination
    is also not possible if the matching variant has been deleted.

    Moreover the attributes of the combination must exactly match the
    attributes allowed on the template.

    :param combination: the combination to check for possibility
    :type combination: recordset `product.template.attribute.value`
    :param ignore_no_variant: whether no_variant attributes should be ignored
    :type ignore_no_variant: bool
    :param parent_combination: combination from which `self` is an
        optional or accessory product.
    :type parent_combination: recordset `product.template.attribute.value`
    :return: whether the combination is possible
    :rtype: bool
    """
    self.ensure_one()
    if not self._is_combination_possible_by_config(combination, ignore_no_variant):
        return False
    variant = self._get_variant_for_combination(combination)
    if self.has_dynamic_attributes():
        if variant and not variant.active:
            # dynamic and the variant has been archived
            return False
    else:
        if not variant or not variant.active:
            # not dynamic, the variant has been archived or deleted
            return False
    exclusions = self._get_own_attribute_exclusions()
    if exclusions:
        # exclude if the current value is in an exclusion,
        # and the value excluding it is also in the combination
        for ptav in combination:
            # Default to an empty list so a value without an entry in the
            # exclusions dict cannot make the iteration raise a TypeError.
            for exclusion in exclusions.get(ptav.id, []):
                if exclusion in combination.ids:
                    return False
    parent_exclusions = self._get_parent_attribute_exclusions(parent_combination)
    if parent_exclusions:
        # parent_exclusion are mapped by ptav but here we don't need to know
        # where the exclusion comes from so we loop directly on the dict values
        for exclusions_values in parent_exclusions.values():
            for exclusion in exclusions_values:
                if exclusion in combination.ids:
                    return False
    return True
def _get_variant_for_combination(self, combination):
    """Get the variant matching the combination.

    All of the values in the combination must be present on the variant,
    and the variant must not have more attributes. Attributes that are not
    supposed to create variants are ignored.

    :param combination: recordset of `product.template.attribute.value`
    :return: the variant if found, else empty
    :rtype: recordset `product.product`
    """
    self.ensure_one()
    combination_without_no_variant = combination._without_no_variant_attributes()
    variant_id = self._get_variant_id_for_combination(combination_without_no_variant)
    return self.env['product.product'].browse(variant_id)
def _create_product_variant(self, combination, log_warning=False):
    """Create if necessary and possible and return the product variant
    matching the given combination for this template.

    It is possible to create only if the template has dynamic attributes
    and the combination itself is possible.

    If we are in this case and the variant already exists but it is
    archived, it is activated instead of being created again.

    :param combination: the combination for which to get or create variant.
        The combination must contain all necessary attributes, including
        those of type no_variant. Indeed even though those attributes won't
        be included in the variant if newly created, they are needed when
        checking if the combination is possible.
    :type combination: recordset of `product.template.attribute.value`
    :param log_warning: whether a warning should be logged on fail
    :type log_warning: bool
    :return: the product variant matching the combination or none
    :rtype: recordset of `product.product`
    """
    self.ensure_one()
    Product = self.env['product.product']
    product_variant = self._get_variant_for_combination(combination)
    if product_variant:
        # Reactivate an archived variant instead of creating it again, but
        # only when dynamic creation would have been allowed anyway.
        if not product_variant.active and self.has_dynamic_attributes() and self._is_combination_possible(combination):
            product_variant.active = True
        return product_variant
    if not self.has_dynamic_attributes():
        if log_warning:
            # Lazy %-style arguments: the logging module only interpolates
            # when the record is actually emitted.
            _logger.warning('The user #%s tried to create a variant for the non-dynamic product %s.', self.env.user.id, self.id)
        return Product
    if not self._is_combination_possible(combination):
        if log_warning:
            _logger.warning('The user #%s tried to create an invalid variant for the product %s.', self.env.user.id, self.id)
        return Product
    # no_variant values were needed for the possibility check above but are
    # not stored on the variant itself.
    return Product.sudo().create({
        'product_tmpl_id': self.id,
        'product_template_attribute_value_ids': [(6, 0, combination._without_no_variant_attributes().ids)]
    })
@tools.ormcache('self.id', 'frozenset(filtered_combination.ids)')
def _get_variant_id_for_combination(self, filtered_combination):
    """See `_get_variant_for_combination`. This method returns an ID
    so it can be cached.

    Use sudo because the same result should be cached for all users.

    :param filtered_combination: combination already stripped of no_variant
        attribute values (the ormcache key is built from its ids)
    :return: id of the matching variant; falsy when no variant matches
    """
    self.ensure_one()
    domain = [('product_tmpl_id', '=', self.id)]
    combination_indices_ids = filtered_combination._ids2str()
    if combination_indices_ids:
        domain = expression.AND([domain, [('combination_indices', '=', combination_indices_ids)]])
    else:
        # Empty combination: match variants with no combination indices.
        domain = expression.AND([domain, [('combination_indices', 'in', ['', False])]])
    # active_test=False so archived variants are found too; 'active DESC'
    # prefers the active one when both an active and an archived match exist.
    return self.env['product.product'].sudo().with_context(active_test=False).search(domain, order='active DESC', limit=1).id
@tools.ormcache('self.id')
def _get_first_possible_variant_id(self):
    """See `_create_first_product_variant`. This method returns an ID so it
    can be cached."""
    self.ensure_one()
    first_variant = self._create_first_product_variant()
    return first_variant.id
def _get_first_possible_combination(self, parent_combination=None, necessary_values=None):
    """See `_get_possible_combinations` (one iteration).

    This method returns the same result (an empty recordset) both when no
    combination is possible at all (a negative result) and when the
    template has no attribute lines (in which case the "empty combination"
    really is possible). If that distinction matters, test the empty result
    with `_is_combination_possible`.
    """
    combinations = self._get_possible_combinations(parent_combination, necessary_values)
    return next(combinations, self.env['product.template.attribute.value'])
def _cartesian_product(self, product_template_attribute_values_per_line, parent_combination):
    """
    Generate all possible combinations of attribute values (aka cartesian product).

    It is equivalent to itertools.product except it skips invalid partial
    combinations before they are complete.

    Imagine the cartesian product of 'A', 'CD' and range(1_000_000) and let's
    say that 'A' and 'C' are incompatible. With itertools.product or any normal
    cartesian product, you would need to filter out of the final result the
    1_000_000 combinations that start with 'A' and 'C'. Instead, this
    implementation tests whether 'A' and 'C' are compatible before even
    considering range(1_000_000), skips it, and continues with combinations
    that start with 'A' and 'D'.

    This is necessary for performance: filtering invalid combinations out of a
    standard cartesian product can be extremely slow.

    :param product_template_attribute_values_per_line: the values we want all
        the possible combinations of. One list of values per attribute line.
    :param parent_combination: combination from which `self` is an optional or
        accessory product; its exclusions pre-reject values of this template
    :return: a generator of product template attribute value recordsets
    """
    if not product_template_attribute_values_per_line:
        return
    # Exclusion rules of this template, as recordsets keyed by recordset.
    all_exclusions = {self.env['product.template.attribute.value'].browse(k):
                      self.env['product.template.attribute.value'].browse(v) for k, v in
                      self._get_own_attribute_exclusions().items()}
    # The following dict uses product template attribute values as keys
    # 0 means the value is acceptable, greater than 0 means it's rejected, it cannot be negative
    # Bear in mind that several values can reject the same value and the latter can only be included in the
    # considered combination if no value rejects it.
    # This dictionary counts how many times each value is rejected.
    # Each time a value is included in the considered combination, the values it rejects are incremented
    # When a value is discarded from the considered combination, the values it rejects are decremented
    current_exclusions = defaultdict(int)
    # Parent exclusions apply to the whole walk, so seed the counters once.
    for exclusion in self._get_parent_attribute_exclusions(parent_combination):
        current_exclusions[self.env['product.template.attribute.value'].browse(exclusion)] += 1
    partial_combination = self.env['product.template.attribute.value']
    # The following list reflects product_template_attribute_values_per_line
    # For each line, instead of a list of values, it contains the index of the selected value
    # -1 means no value has been picked for the line in the current (partial) combination
    value_index_per_line = [-1] * len(product_template_attribute_values_per_line)
    # determines which line line we're working on
    line_index = 0
    while True:
        current_line_values = product_template_attribute_values_per_line[line_index]
        current_ptav_index = value_index_per_line[line_index]
        current_ptav = current_line_values[current_ptav_index]
        # removing exclusions from current_ptav as we're removing it from partial_combination
        if current_ptav_index >= 0:
            for ptav_to_include_back in all_exclusions[current_ptav]:
                current_exclusions[ptav_to_include_back] -= 1
            partial_combination -= current_ptav
        if current_ptav_index < len(current_line_values) - 1:
            # go to next value of current line
            value_index_per_line[line_index] += 1
            current_line_values = product_template_attribute_values_per_line[line_index]
            current_ptav_index = value_index_per_line[line_index]
            current_ptav = current_line_values[current_ptav_index]
        elif line_index != 0:
            # reset current line, and then go to previous line
            value_index_per_line[line_index] = - 1
            line_index -= 1
            continue
        else:
            # we're done if we must reset first line
            break
        # adding exclusions from current_ptav as we're incorporating it in partial_combination
        for ptav_to_exclude in all_exclusions[current_ptav]:
            current_exclusions[ptav_to_exclude] += 1
        partial_combination += current_ptav
        # test if included values excludes current value or if current value exclude included values
        if current_exclusions[current_ptav] or \
                any(intersection in partial_combination for intersection in all_exclusions[current_ptav]):
            continue
        if line_index == len(product_template_attribute_values_per_line) - 1:
            # submit combination if we're on the last line
            yield partial_combination
        else:
            # else we go to the next line
            line_index += 1
def _get_possible_combinations(self, parent_combination=None, necessary_values=None):
    """Generator returning combinations that are possible, following the
    sequence of attributes and values.

    See `_is_combination_possible` for what is a possible combination.

    When encountering an impossible combination, try to change the value
    of attributes by starting with the further regarding their sequences.

    Ignore attributes that have no values.

    :param parent_combination: combination from which `self` is an
        optional or accessory product.
    :type parent_combination: recordset `product.template.attribute.value`
    :param necessary_values: values that must be in the returned combination
    :type necessary_values: recordset of `product.template.attribute.value`
    :return: the possible combinations
    :rtype: generator of recordset of `product.template.attribute.value`
    """
    self.ensure_one()
    if not self.active:
        # `return <value>` in a generator stops iteration and puts the
        # message on the resulting StopIteration (diagnostic only).
        return _("The product template is archived so no combination is possible.")
    necessary_values = necessary_values or self.env['product.template.attribute.value']
    necessary_attribute_lines = necessary_values.mapped('attribute_line_id')
    # Only iterate over the lines not already fixed by necessary_values.
    attribute_lines = self.valid_product_template_attribute_line_ids.filtered(lambda ptal: ptal not in necessary_attribute_lines)
    if not attribute_lines and self._is_combination_possible(necessary_values, parent_combination):
        # Nothing left to vary: the necessary values are the combination.
        yield necessary_values
    product_template_attribute_values_per_line = [
        ptal.product_template_value_ids
        for ptal in attribute_lines
    ]
    for partial_combination in self._cartesian_product(product_template_attribute_values_per_line, parent_combination):
        combination = partial_combination + necessary_values
        if self._is_combination_possible(combination, parent_combination):
            yield combination
    return _("There are no remaining possible combination.")
def _get_closest_possible_combination(self, combination):
    """See `_get_closest_possible_combinations` (one iteration).

    This method returns the same result (an empty recordset) both when no
    combination is possible at all (a negative result) and when the
    template has no attribute lines (in which case the "empty combination"
    really is possible). If that distinction matters, test the empty result
    with `_is_combination_possible`.
    """
    closest_combinations = self._get_closest_possible_combinations(combination)
    return next(closest_combinations, self.env['product.template.attribute.value'])
def _get_closest_possible_combinations(self, combination):
    """Generator returning the possible combinations that are the closest to
    the given combination.

    If the given combination is incomplete, try to complete it. If it is
    invalid, progressively remove values from it before completing it.

    :param combination: the values to include if they are possible
    :type combination: recordset `product.template.attribute.value`
    :return: the possible combinations including as many elements as
        possible from the given combination.
    :rtype: generator of recordset of product.template.attribute.value
    """
    while True:
        possible_combinations = self._get_possible_combinations(necessary_values=combination)
        try:
            first_combination = next(possible_combinations)
        except StopIteration:
            # No result for the given combination: progressively remove
            # values from it and retry, until nothing is left.
            if not combination:
                return _("There are no possible combination.")
            combination = combination[:-1]
        else:
            # At least one result for the given combination: consider that
            # combination set and yield all possible completions of it.
            yield first_combination
            for possible_combination in possible_combinations:
                yield possible_combination
            return _("There are no remaining closest combination.")
def _get_current_company(self, **kwargs):
    """Get the most appropriate company for this product.

    If the company is set on the product, directly return it. Otherwise,
    fall back to a contextual company.

    :param kwargs: kwargs forwarded to the fallback method.
    :return: the most appropriate company for this product
    :rtype: recordset of one `res.company`
    """
    self.ensure_one()
    if self.company_id:
        return self.company_id
    return self._get_current_company_fallback(**kwargs)
def _get_current_company_fallback(self, **kwargs):
    """Fallback to get the most appropriate company for this product.

    This should only be called from `_get_current_company` but is defined
    separately to allow override.

    The final fallback is the current environment's company.

    :param kwargs: unused here; kept so overrides can take extra context
    :return: the fallback company for this product
    :rtype: recordset of one `res.company`
    """
    self.ensure_one()
    return self.env.company
def get_single_product_variant(self):
    """Method used by the product configurator to check whether the product
    is configurable or not.

    The configurator must be opened when the product:
    - is configurable (see has_configurable_attributes)
    - has optional products (method is extended in sale to return optional
      products info)

    :return: {'product_id': id} when a single non-configurable variant
        exists, otherwise an empty dict
    :rtype: dict
    """
    self.ensure_one()
    if self.product_variant_count != 1 or self.has_configurable_attributes:
        return {}
    return {'product_id': self.product_variant_id.id}
@api.model
def get_empty_list_help(self, help):
    """Inject the document name shown in the default empty-list help message."""
    # Rebind self so the context key is visible to the super() call only.
    self = self.with_context(
        empty_list_help_document_name=_("product"),
    )
    return super(ProductTemplate, self).get_empty_list_help(help)
@api.model
def get_import_templates(self):
    """Expose the downloadable import template for products."""
    product_template_entry = {
        'label': _('Import Template for Products'),
        'template': '/product/static/xls/product_template.xls',
    }
    return [product_template_entry]
| gpl-3.0 |
deployed/django | tests/commands_sql/tests.py | 16 | 3274 | from __future__ import unicode_literals
from django.apps import apps
from django.core.management.color import no_style
from django.core.management.sql import (sql_create, sql_delete, sql_indexes,
sql_destroy_indexes, sql_all)
from django.db import connections, DEFAULT_DB_ALIAS, router
from django.test import TestCase
from django.utils import six
# See also initial_sql_regress for 'custom_sql_for_model' tests
class SQLCommandsTestCase(TestCase):
"""Tests for several functions in django/core/management/sql.py"""
def count_ddl(self, output, cmd):
return len([o for o in output if o.startswith(cmd)])
def test_sql_create(self):
app_config = apps.get_app_config('commands_sql')
output = sql_create(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
create_tables = [o for o in output if o.startswith('CREATE TABLE')]
self.assertEqual(len(create_tables), 3)
# Lower so that Oracle's upper case tbl names wont break
sql = create_tables[-1].lower()
six.assertRegex(self, sql, r'^create table .commands_sql_book.*')
def test_sql_delete(self):
app_config = apps.get_app_config('commands_sql')
output = sql_delete(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
drop_tables = [o for o in output if o.startswith('DROP TABLE')]
self.assertEqual(len(drop_tables), 3)
# Lower so that Oracle's upper case tbl names wont break
sql = drop_tables[-1].lower()
six.assertRegex(self, sql, r'^drop table .commands_sql_comment.*')
def test_sql_indexes(self):
app_config = apps.get_app_config('commands_sql')
output = sql_indexes(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
# PostgreSQL creates one additional index for CharField
self.assertIn(self.count_ddl(output, 'CREATE INDEX'), [3, 4])
def test_sql_destroy_indexes(self):
app_config = apps.get_app_config('commands_sql')
output = sql_destroy_indexes(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
# PostgreSQL creates one additional index for CharField
self.assertIn(self.count_ddl(output, 'DROP INDEX'), [3, 4])
def test_sql_all(self):
app_config = apps.get_app_config('commands_sql')
output = sql_all(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
self.assertEqual(self.count_ddl(output, 'CREATE TABLE'), 3)
# PostgreSQL creates one additional index for CharField
self.assertIn(self.count_ddl(output, 'CREATE INDEX'), [3, 4])
class TestRouter(object):
    """Database router that forbids migrations on every database."""

    def allow_migrate(self, db, model):
        # Refuse every (db, model) pair so the sql_* commands emit nothing.
        return False
class SQLCommandsRouterTestCase(TestCase):
    """Check that the sql_* commands respect database routers."""

    def setUp(self):
        # Install a router that refuses all migrations, restoring the
        # original configuration in tearDown.
        self._old_routers = router.routers
        router.routers = [TestRouter()]

    def tearDown(self):
        router.routers = self._old_routers

    def test_router_honored(self):
        app_config = apps.get_app_config('commands_sql')
        sql_commands = (sql_all, sql_create, sql_delete, sql_indexes, sql_destroy_indexes)
        for sql_command in sql_commands:
            output = sql_command(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
            self.assertEqual(
                len(output), 0,
                "%s command is not honoring routers" % sql_command.__name__)
| bsd-3-clause |
HKUST-SING/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/series.py | 89 | 6745 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Series represents a deferred Tensor computation in a DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
class Series(object):
    """A single output series.

    Represents the deferred construction of a graph that computes the series
    values.

    Note every `Series` should be a `TransformedSeries`, except when mocked.
    """

    # Py2-style metaclass declaration kept for compatibility with the
    # `from __future__` era of this file (ignored on Python 3).
    __metaclass__ = ABCMeta

    @classmethod
    def register_unary_op(cls, series_method_name):
        """A decorator that registers `Transform`s as `Series` member functions.

        For example:

        '''
        @series.Series.register_unary_op("log")
        class Logarithm(Transform):
           ...
        '''

        The registered member function takes `args` and `kwargs`. These values
        will be passed to the `__init__` function for the decorated `Transform`.

        Args:
          series_method_name: the name under which to register the function.

        Returns:
          Decorator function.

        Raises:
          ValueError: another `Transform` is already registered under
            `series_method_name`.
        """

        def register(transform_cls):
            if hasattr(cls, series_method_name):
                # Format the message eagerly: extra positional args passed to
                # ValueError are not interpolated into the message (that only
                # works for logging calls).
                raise ValueError("Series already has a function registered as %s."
                                 % series_method_name)

            def _member_func(slf, *args, **kwargs):
                # Instantiate the transform with the call arguments and apply
                # it to this series; unary ops have a single output.
                return transform_cls(*args, **kwargs)([slf])[0]

            setattr(cls, series_method_name, _member_func)
            return transform_cls

        return register

    @classmethod
    def register_binary_op(cls, series_method_name):
        """A decorator that registers `Transform`s as `Series` member functions.

        For example:

        '''
        @series.Series.register_binary_op("__add___")
        class Sum(Transform):
           ...
        '''

        The registered member function takes `args` and `kwargs`. These values
        will be passed to the `__init__` function for the decorated `Transform`.

        Args:
          series_method_name: the name under which to register the function.

        Returns:
          Decorator function.

        Raises:
          ValueError: another `Transform` is already registered under
            `series_method_name`.
        """

        def register(transform_cls):
            if hasattr(cls, series_method_name):
                # Same eager formatting fix as in register_unary_op.
                raise ValueError("Series already has a function registered as %s."
                                 % series_method_name)

            def _member_func(slf, b, *args, **kwargs):
                # Binary ops take the other operand `b` as second input.
                return transform_cls(*args, **kwargs)([slf, b])[0]

            setattr(cls, series_method_name, _member_func)
            return transform_cls

        return register

    def build(self, cache, **kwargs):
        """Returns a Tensor."""
        raise NotImplementedError()
class PredefinedSeries(Series):
    """A `Series` that requires the cache to already map a given name."""

    def __init__(self, name, feature_spec):
        super(PredefinedSeries, self).__init__()
        self._name = name
        self._feature_spec = feature_spec

    @property
    def name(self):
        """Name under which the feature must appear in the cache."""
        return self._name

    @property
    def feature_spec(self):
        """Parsing spec associated with the feature."""
        return self._feature_spec

    def required_base_features(self):
        return {self.name: self.feature_spec}

    def build(self, cache, **kwargs):
        # The value must have been placed in the cache beforehand; translate
        # the raw KeyError into a more descriptive one.
        try:
            return cache[self._name]
        except KeyError:
            raise KeyError("Expected base feature not found: %s" % self._name)

    def __repr__(self):
        return "Predefined: %s" % self.name

    def __eq__(self, other):
        # Equal iff the other object is the same class with the same state.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class TransformedSeries(Series):
    """A `Series` that results from applying a `Transform` to a list of inputs."""

    def __init__(self, input_series, transform, output_name):
        super(TransformedSeries, self).__init__()
        self._input_series = input_series
        self._transform = transform
        self._output_name = output_name
        if output_name is None:
            raise ValueError("output_name must be provided")
        if len(input_series) != transform.input_valency:
            raise ValueError("Expected %s input Series but received %s." %
                             (transform.input_valency, len(input_series)))
        self._repr = TransformedSeries.make_repr(
            self._input_series, self._transform, self._output_name)

    def required_base_features(self):
        """Return the base features transitively required by this series.

        Only items traceable to a `PredefinedSeries` end up in the result.
        """
        result = {}
        for s in self._input_series:
            # It's OK to overwrite keys since we only want one copy of each
            # anyway. We assume (but don't bother checking) that the spec is
            # the same in each case.
            # required_base_features is a method on every Series subclass
            # in this file, so it must be called; passing the bound method
            # itself to dict.update() raises TypeError.
            result.update(s.required_base_features())
        return result

    def build(self, cache=None, **kwargs):
        """Build the transform's graph and return the selected output Tensor."""
        if cache is None:
            cache = {}
        all_outputs = self._transform.build_transitive(
            self._input_series, cache, **kwargs)
        return getattr(all_outputs, self._output_name)

    def __repr__(self):
        return self._repr

    # Note we need to generate series reprs from Transform, without needing the
    # series themselves. So we just make this public. Alternatively we could
    # create throwaway series just in order to call repr() on them.
    @staticmethod
    def make_repr(input_series, transform, output_name):
        """Generate a key for caching Tensors produced for a TransformedSeries.

        Generally we a need a deterministic unique key representing which
        transform was applied to which inputs, and which output was selected.

        Args:
          input_series: an iterable of input `Series` for the `Transform`
          transform: the `Transform` being applied
          output_name: the name of the specific output from the `Transform`
            that is to be cached

        Returns:
          A string suitable for use as a cache key for Tensors produced via a
          TransformedSeries
        """
        input_series_keys = [repr(series) for series in input_series]
        input_series_keys_joined = ", ".join(input_series_keys)

        return "%s(%s)[%s]" % (
            repr(transform), input_series_keys_joined, output_name)
| apache-2.0 |
394954369/horizon | openstack_dashboard/dashboards/project/stacks/mappings.py | 12 | 5675 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import re
from django.core.urlresolvers import reverse
from django.template.defaultfilters import register # noqa
from django.utils import html
from django.utils import safestring
import six.moves.urllib.parse as urlparse
from openstack_dashboard.api import swift
LOG = logging.getLogger(__name__)
# Maps each supported Heat/AWS resource type to the Horizon detail view used
# when linking a stack resource (see resource_to_url below). The optional
# 'format_pattern' is applied to the physical resource id before the URL is
# reversed -- e.g. Swift containers need a trailing folder delimiter.
resource_urls = {
    "AWS::EC2::Instance": {
        'link': 'horizon:project:instances:detail'},
    "AWS::EC2::NetworkInterface": {
        'link': 'horizon:project:networks:ports:detail'},
    "AWS::EC2::RouteTable": {
        'link': 'horizon:project:routers:detail'},
    "AWS::EC2::Subnet": {
        'link': 'horizon:project:networks:subnets:detail'},
    "AWS::EC2::Volume": {
        'link': 'horizon:project:volumes:volumes:detail'},
    "AWS::EC2::VPC": {
        'link': 'horizon:project:networks:detail'},
    "AWS::S3::Bucket": {
        'link': 'horizon:project:containers:index'},
    "OS::Quantum::Net": {
        'link': 'horizon:project:networks:detail'},
    "OS::Quantum::Port": {
        'link': 'horizon:project:networks:ports:detail'},
    "OS::Quantum::Router": {
        'link': 'horizon:project:routers:detail'},
    "OS::Quantum::Subnet": {
        'link': 'horizon:project:networks:subnets:detail'},
    "OS::Swift::Container": {
        'link': 'horizon:project:containers:index',
        'format_pattern': '%s' + swift.FOLDER_DELIMITER},
}
def resource_to_url(resource):
    """Return the Horizon detail URL for a stack resource, or None.

    None is returned when the resource is missing or has no physical id,
    when its type has no known detail view, or when building the URL fails
    for any reason.
    """
    # Guard clauses: nothing to link without a resource and a physical id.
    if not resource or not resource.physical_resource_id:
        return None
    mapping = resource_urls.get(resource.resource_type, {})
    link = mapping.get('link')
    if link is None:
        return None
    pattern = mapping.get('format_pattern') or '%s'
    try:
        rid = pattern % resource.physical_resource_id
        return reverse(link, args=(rid,))
    except Exception as e:
        # Best effort: a bad mapping or unresolvable URL must not break the
        # stack topology view, so log and fall back to "no link".
        LOG.exception(e)
        return None
@register.filter
def stack_output(output):
    """Render a Heat stack output value for display in a template.

    dicts/lists are pretty-printed as escaped JSON in a <pre> block,
    absolute http(s) URLs become clickable links, and anything else is
    coerced to unicode. Falsy values render as the empty string.
    """
    if not output:
        return u''
    # Single isinstance call with a tuple instead of two or-ed checks.
    if isinstance(output, (dict, list)):
        json_string = json.dumps(output, indent=2)
        safe_output = u'<pre>%s</pre>' % html.escape(json_string)
        return safestring.mark_safe(safe_output)
    if isinstance(output, basestring):
        parts = urlparse.urlsplit(output)
        # Only linkify absolute http(s) URLs; escape before marking the
        # fragment safe to avoid XSS via attacker-controlled outputs.
        if parts.netloc and parts.scheme in ('http', 'https'):
            url = html.escape(output)
            safe_link = u'<a href="%s" target="_blank">%s</a>' % (url, url)
            return safestring.mark_safe(safe_link)
    # NOTE: basestring/unicode keep this module Python 2 only, consistent
    # with the rest of the file.
    return unicode(output)
# Status image for each "<TYPE>_<STATUS>" key built by combining
# get_resource_type() and get_resource_status() (see get_resource_image);
# the .gif variants are animated spinners for the in-progress states.
resource_images = {
    'LB_FAILED': '/static/dashboard/img/lb-red.svg',
    'LB_DELETE': '/static/dashboard/img/lb-red.svg',
    'LB_IN_PROGRESS': '/static/dashboard/img/lb-gray.gif',
    'LB_INIT': '/static/dashboard/img/lb-gray.svg',
    'LB_COMPLETE': '/static/dashboard/img/lb-green.svg',
    'DB_FAILED': '/static/dashboard/img/db-red.svg',
    'DB_DELETE': '/static/dashboard/img/db-red.svg',
    'DB_IN_PROGRESS': '/static/dashboard/img/db-gray.gif',
    'DB_INIT': '/static/dashboard/img/db-gray.svg',
    'DB_COMPLETE': '/static/dashboard/img/db-green.svg',
    'STACK_FAILED': '/static/dashboard/img/stack-red.svg',
    'STACK_DELETE': '/static/dashboard/img/stack-red.svg',
    'STACK_IN_PROGRESS': '/static/dashboard/img/stack-gray.gif',
    'STACK_INIT': '/static/dashboard/img/stack-gray.svg',
    'STACK_COMPLETE': '/static/dashboard/img/stack-green.svg',
    'SERVER_FAILED': '/static/dashboard/img/server-red.svg',
    'SERVER_DELETE': '/static/dashboard/img/server-red.svg',
    'SERVER_IN_PROGRESS': '/static/dashboard/img/server-gray.gif',
    'SERVER_INIT': '/static/dashboard/img/server-gray.svg',
    'SERVER_COMPLETE': '/static/dashboard/img/server-green.svg',
    'UNKNOWN_FAILED': '/static/dashboard/img/unknown-red.svg',
    'UNKNOWN_DELETE': '/static/dashboard/img/unknown-red.svg',
    'UNKNOWN_IN_PROGRESS': '/static/dashboard/img/unknown-gray.gif',
    'UNKNOWN_INIT': '/static/dashboard/img/unknown-gray.svg',
    'UNKNOWN_COMPLETE': '/static/dashboard/img/unknown-green.svg',
}
def get_resource_type(type):
    """Map a Heat resource type string to a coarse image category.

    Returns one of 'LB', 'DB', 'SERVER', 'STACK' or 'UNKNOWN'; the result
    keys into resource_images together with get_resource_status(). The
    parameter name shadows the builtin `type` but is kept for backward
    compatibility with existing keyword callers.
    """
    # The patterns are literal, so plain substring tests replace re.search.
    # Order matters: 'DBInstance' must be classified DB before the generic
    # 'Instance' check can claim it as SERVER.
    if 'LoadBalancer' in type:
        return 'LB'
    elif 'DBInstance' in type or 'Database' in type:
        return 'DB'
    elif 'Instance' in type or 'Server' in type:
        return 'SERVER'
    elif 'stack' in type:
        return 'STACK'
    else:
        return 'UNKNOWN'
def get_resource_status(status):
    """Collapse a Heat status string to one of the five image suffixes.

    Returns 'IN_PROGRESS', 'FAILED', 'DELETE', 'INIT' or 'COMPLETE'.
    """
    # Literal patterns, so substring tests replace re.search. Priority
    # order is significant: 'DELETE_IN_PROGRESS' reports IN_PROGRESS and
    # 'DELETE_FAILED' reports FAILED, not DELETE.
    if 'IN_PROGRESS' in status:
        return 'IN_PROGRESS'
    elif 'FAILED' in status:
        return 'FAILED'
    elif 'DELETE' in status:
        return 'DELETE'
    elif 'INIT' in status:
        return 'INIT'
    else:
        return 'COMPLETE'
def get_resource_image(status, type):
    """Sets the image url and in_progress action sw based on status.

    Combines get_resource_type() and get_resource_status() into a
    "<TYPE>_<STATUS>" key and returns the matching entry from
    resource_images, or None for an unknown state.
    """
    resource_type = get_resource_type(type)
    resource_status = get_resource_status(status)
    resource_state = resource_type + "_" + resource_status
    # Direct dict lookup instead of scanning every key for equality;
    # .get() returns None for a missing key, matching the previous
    # fall-through behaviour.
    return resource_images.get(resource_state)
| apache-2.0 |
h3llrais3r/SickRage | lib/unidecode/x07d.py | 253 | 4678 | data = (
'Ji ', # 0x00
'Cha ', # 0x01
'Zhou ', # 0x02
'Xun ', # 0x03
'Yue ', # 0x04
'Hong ', # 0x05
'Yu ', # 0x06
'He ', # 0x07
'Wan ', # 0x08
'Ren ', # 0x09
'Wen ', # 0x0a
'Wen ', # 0x0b
'Qiu ', # 0x0c
'Na ', # 0x0d
'Zi ', # 0x0e
'Tou ', # 0x0f
'Niu ', # 0x10
'Fou ', # 0x11
'Jie ', # 0x12
'Shu ', # 0x13
'Chun ', # 0x14
'Pi ', # 0x15
'Yin ', # 0x16
'Sha ', # 0x17
'Hong ', # 0x18
'Zhi ', # 0x19
'Ji ', # 0x1a
'Fen ', # 0x1b
'Yun ', # 0x1c
'Ren ', # 0x1d
'Dan ', # 0x1e
'Jin ', # 0x1f
'Su ', # 0x20
'Fang ', # 0x21
'Suo ', # 0x22
'Cui ', # 0x23
'Jiu ', # 0x24
'Zha ', # 0x25
'Kinu ', # 0x26
'Jin ', # 0x27
'Fu ', # 0x28
'Zhi ', # 0x29
'Ci ', # 0x2a
'Zi ', # 0x2b
'Chou ', # 0x2c
'Hong ', # 0x2d
'Zha ', # 0x2e
'Lei ', # 0x2f
'Xi ', # 0x30
'Fu ', # 0x31
'Xie ', # 0x32
'Shen ', # 0x33
'Bei ', # 0x34
'Zhu ', # 0x35
'Qu ', # 0x36
'Ling ', # 0x37
'Zhu ', # 0x38
'Shao ', # 0x39
'Gan ', # 0x3a
'Yang ', # 0x3b
'Fu ', # 0x3c
'Tuo ', # 0x3d
'Zhen ', # 0x3e
'Dai ', # 0x3f
'Zhuo ', # 0x40
'Shi ', # 0x41
'Zhong ', # 0x42
'Xian ', # 0x43
'Zu ', # 0x44
'Jiong ', # 0x45
'Ban ', # 0x46
'Ju ', # 0x47
'Mo ', # 0x48
'Shu ', # 0x49
'Zui ', # 0x4a
'Wata ', # 0x4b
'Jing ', # 0x4c
'Ren ', # 0x4d
'Heng ', # 0x4e
'Xie ', # 0x4f
'Jie ', # 0x50
'Zhu ', # 0x51
'Chou ', # 0x52
'Gua ', # 0x53
'Bai ', # 0x54
'Jue ', # 0x55
'Kuang ', # 0x56
'Hu ', # 0x57
'Ci ', # 0x58
'Geng ', # 0x59
'Geng ', # 0x5a
'Tao ', # 0x5b
'Xie ', # 0x5c
'Ku ', # 0x5d
'Jiao ', # 0x5e
'Quan ', # 0x5f
'Gai ', # 0x60
'Luo ', # 0x61
'Xuan ', # 0x62
'Bing ', # 0x63
'Xian ', # 0x64
'Fu ', # 0x65
'Gei ', # 0x66
'Tong ', # 0x67
'Rong ', # 0x68
'Tiao ', # 0x69
'Yin ', # 0x6a
'Lei ', # 0x6b
'Xie ', # 0x6c
'Quan ', # 0x6d
'Xu ', # 0x6e
'Lun ', # 0x6f
'Die ', # 0x70
'Tong ', # 0x71
'Si ', # 0x72
'Jiang ', # 0x73
'Xiang ', # 0x74
'Hui ', # 0x75
'Jue ', # 0x76
'Zhi ', # 0x77
'Jian ', # 0x78
'Juan ', # 0x79
'Chi ', # 0x7a
'Mian ', # 0x7b
'Zhen ', # 0x7c
'Lu ', # 0x7d
'Cheng ', # 0x7e
'Qiu ', # 0x7f
'Shu ', # 0x80
'Bang ', # 0x81
'Tong ', # 0x82
'Xiao ', # 0x83
'Wan ', # 0x84
'Qin ', # 0x85
'Geng ', # 0x86
'Xiu ', # 0x87
'Ti ', # 0x88
'Xiu ', # 0x89
'Xie ', # 0x8a
'Hong ', # 0x8b
'Xi ', # 0x8c
'Fu ', # 0x8d
'Ting ', # 0x8e
'Sui ', # 0x8f
'Dui ', # 0x90
'Kun ', # 0x91
'Fu ', # 0x92
'Jing ', # 0x93
'Hu ', # 0x94
'Zhi ', # 0x95
'Yan ', # 0x96
'Jiong ', # 0x97
'Feng ', # 0x98
'Ji ', # 0x99
'Sok ', # 0x9a
'Kase ', # 0x9b
'Zong ', # 0x9c
'Lin ', # 0x9d
'Duo ', # 0x9e
'Li ', # 0x9f
'Lu ', # 0xa0
'Liang ', # 0xa1
'Chou ', # 0xa2
'Quan ', # 0xa3
'Shao ', # 0xa4
'Qi ', # 0xa5
'Qi ', # 0xa6
'Zhun ', # 0xa7
'Qi ', # 0xa8
'Wan ', # 0xa9
'Qian ', # 0xaa
'Xian ', # 0xab
'Shou ', # 0xac
'Wei ', # 0xad
'Qi ', # 0xae
'Tao ', # 0xaf
'Wan ', # 0xb0
'Gang ', # 0xb1
'Wang ', # 0xb2
'Beng ', # 0xb3
'Zhui ', # 0xb4
'Cai ', # 0xb5
'Guo ', # 0xb6
'Cui ', # 0xb7
'Lun ', # 0xb8
'Liu ', # 0xb9
'Qi ', # 0xba
'Zhan ', # 0xbb
'Bei ', # 0xbc
'Chuo ', # 0xbd
'Ling ', # 0xbe
'Mian ', # 0xbf
'Qi ', # 0xc0
'Qie ', # 0xc1
'Tan ', # 0xc2
'Zong ', # 0xc3
'Gun ', # 0xc4
'Zou ', # 0xc5
'Yi ', # 0xc6
'Zi ', # 0xc7
'Xing ', # 0xc8
'Liang ', # 0xc9
'Jin ', # 0xca
'Fei ', # 0xcb
'Rui ', # 0xcc
'Min ', # 0xcd
'Yu ', # 0xce
'Zong ', # 0xcf
'Fan ', # 0xd0
'Lu ', # 0xd1
'Xu ', # 0xd2
'Yingl ', # 0xd3
'Zhang ', # 0xd4
'Kasuri ', # 0xd5
'Xu ', # 0xd6
'Xiang ', # 0xd7
'Jian ', # 0xd8
'Ke ', # 0xd9
'Xian ', # 0xda
'Ruan ', # 0xdb
'Mian ', # 0xdc
'Qi ', # 0xdd
'Duan ', # 0xde
'Zhong ', # 0xdf
'Di ', # 0xe0
'Min ', # 0xe1
'Miao ', # 0xe2
'Yuan ', # 0xe3
'Xie ', # 0xe4
'Bao ', # 0xe5
'Si ', # 0xe6
'Qiu ', # 0xe7
'Bian ', # 0xe8
'Huan ', # 0xe9
'Geng ', # 0xea
'Cong ', # 0xeb
'Mian ', # 0xec
'Wei ', # 0xed
'Fu ', # 0xee
'Wei ', # 0xef
'Yu ', # 0xf0
'Gou ', # 0xf1
'Miao ', # 0xf2
'Xie ', # 0xf3
'Lian ', # 0xf4
'Zong ', # 0xf5
'Bian ', # 0xf6
'Yun ', # 0xf7
'Yin ', # 0xf8
'Ti ', # 0xf9
'Gua ', # 0xfa
'Zhi ', # 0xfb
'Yun ', # 0xfc
'Cheng ', # 0xfd
'Chan ', # 0xfe
'Dai ', # 0xff
)
| gpl-3.0 |
allmende/synnefo | snf-common/synnefo/lib/db/psyco_gevent.py | 10 | 2254 | # Copyright (C) 2010 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""A wait callback to allow psycopg2 cooperation with gevent.
Use `make_psycopg_green()` to enable gevent support in Psycopg.
"""
import psycopg2
from psycopg2 import extensions
from gevent.socket import wait_read, wait_write
def make_psycopg_green():
    """Configure Psycopg to be used with gevent in non-blocking way."""
    # set_wait_callback only exists on psycopg2 builds compiled with
    # coroutine support; fail loudly with a clear message otherwise.
    if hasattr(extensions, 'set_wait_callback'):
        extensions.set_wait_callback(gevent_wait_callback)
    else:
        raise ImportError(
            "support for coroutines not available in this Psycopg version (%s)"
            % psycopg2.__version__)
def gevent_wait_callback(conn, timeout=None):
    """A wait callback useful to allow gevent to work with Psycopg."""
    # Keep polling the connection, yielding to the gevent hub whenever the
    # driver wants to read or write, until the operation completes.
    while True:
        state = conn.poll()
        if state == extensions.POLL_OK:
            return
        if state == extensions.POLL_READ:
            wait_read(conn.fileno(), timeout=timeout)
        elif state == extensions.POLL_WRITE:
            wait_write(conn.fileno(), timeout=timeout)
        else:
            raise psycopg2.OperationalError("Bad result from poll: %r" % state)
| gpl-3.0 |
wxgeo/geophar | wxgeometrie/sympy/printing/pretty/stringpict.py | 5 | 18437 | """Prettyprinter by Jurjen Bos.
(I hate spammers: mail me at pietjepuk314 at the reverse of ku.oc.oohay).
All objects have a method that create a "stringPict",
that can be used in the str method for pretty printing.
Updates by Jason Gedge (email <my last name> at cs mun ca)
- terminal_string() method
- minor fixes and changes (mostly to prettyForm)
TODO:
- Allow left/center/right alignment options for above/below and
top/center/bottom alignment options for left/right
"""
from __future__ import print_function, division
from .pretty_symbology import hobj, vobj, xsym, xobj, pretty_use_unicode
from sympy.core.compatibility import string_types, range
class stringPict(object):
"""An ASCII picture.
The pictures are represented as a list of equal length strings.
"""
#special value for stringPict.below
LINE = 'line'
def __init__(self, s, baseline=0):
"""Initialize from string.
Multiline strings are centered.
"""
self.s = s
#picture is a string that just can be printed
self.picture = stringPict.equalLengths(s.splitlines())
#baseline is the line number of the "base line"
self.baseline = baseline
self.binding = None
@staticmethod
def equalLengths(lines):
# empty lines
if not lines:
return ['']
width = max(len(line) for line in lines)
return [line.center(width) for line in lines]
def height(self):
"""The height of the picture in characters."""
return len(self.picture)
def width(self):
"""The width of the picture in characters."""
return len(self.picture[0])
@staticmethod
def next(*args):
"""Put a string of stringPicts next to each other.
Returns string, baseline arguments for stringPict.
"""
#convert everything to stringPicts
objects = []
for arg in args:
if isinstance(arg, string_types):
arg = stringPict(arg)
objects.append(arg)
#make a list of pictures, with equal height and baseline
newBaseline = max(obj.baseline for obj in objects)
newHeightBelowBaseline = max(
obj.height() - obj.baseline
for obj in objects)
newHeight = newBaseline + newHeightBelowBaseline
pictures = []
for obj in objects:
oneEmptyLine = [' '*obj.width()]
basePadding = newBaseline - obj.baseline
totalPadding = newHeight - obj.height()
pictures.append(
oneEmptyLine * basePadding +
obj.picture +
oneEmptyLine * (totalPadding - basePadding))
result = [''.join(lines) for lines in zip(*pictures)]
return '\n'.join(result), newBaseline
def right(self, *args):
r"""Put pictures next to this one.
Returns string, baseline arguments for stringPict.
(Multiline) strings are allowed, and are given a baseline of 0.
Examples
========
>>> from sympy.printing.pretty.stringpict import stringPict
>>> print(stringPict("10").right(" + ",stringPict("1\r-\r2",1))[0])
1
10 + -
2
"""
return stringPict.next(self, *args)
def left(self, *args):
"""Put pictures (left to right) at left.
Returns string, baseline arguments for stringPict.
"""
return stringPict.next(*(args + (self,)))
@staticmethod
def stack(*args):
"""Put pictures on top of each other,
from top to bottom.
Returns string, baseline arguments for stringPict.
The baseline is the baseline of the second picture.
Everything is centered.
Baseline is the baseline of the second picture.
Strings are allowed.
The special value stringPict.LINE is a row of '-' extended to the width.
"""
#convert everything to stringPicts; keep LINE
objects = []
for arg in args:
if arg is not stringPict.LINE and isinstance(arg, string_types):
arg = stringPict(arg)
objects.append(arg)
#compute new width
newWidth = max(
obj.width()
for obj in objects
if obj is not stringPict.LINE)
lineObj = stringPict(hobj('-', newWidth))
#replace LINE with proper lines
for i, obj in enumerate(objects):
if obj is stringPict.LINE:
objects[i] = lineObj
#stack the pictures, and center the result
newPicture = []
for obj in objects:
newPicture.extend(obj.picture)
newPicture = [line.center(newWidth) for line in newPicture]
newBaseline = objects[0].height() + objects[1].baseline
return '\n'.join(newPicture), newBaseline
def below(self, *args):
"""Put pictures under this picture.
Returns string, baseline arguments for stringPict.
Baseline is baseline of top picture
Examples
========
>>> from sympy.printing.pretty.stringpict import stringPict
>>> print(stringPict("x+3").below(
... stringPict.LINE, '3')[0]) #doctest: +NORMALIZE_WHITESPACE
x+3
---
3
"""
s, baseline = stringPict.stack(self, *args)
return s, self.baseline
def above(self, *args):
"""Put pictures above this picture.
Returns string, baseline arguments for stringPict.
Baseline is baseline of bottom picture.
"""
string, baseline = stringPict.stack(*(args + (self,)))
baseline = len(string.splitlines()) - self.height() + self.baseline
return string, baseline
def parens(self, left='(', right=')', ifascii_nougly=False):
"""Put parentheses around self.
Returns string, baseline arguments for stringPict.
left or right can be None or empty string which means 'no paren from
that side'
"""
h = self.height()
b = self.baseline
# XXX this is a hack -- ascii parens are ugly!
if ifascii_nougly and not pretty_use_unicode():
h = 1
b = 0
res = self
if left:
lparen = stringPict(vobj(left, h), baseline=b)
res = stringPict(*lparen.right(self))
if right:
rparen = stringPict(vobj(right, h), baseline=b)
res = stringPict(*res.right(rparen))
return ('\n'.join(res.picture), res.baseline)
def leftslash(self):
"""Precede object by a slash of the proper size.
"""
# XXX not used anywhere ?
height = max(
self.baseline,
self.height() - 1 - self.baseline)*2 + 1
slash = '\n'.join(
' '*(height - i - 1) + xobj('/', 1) + ' '*i
for i in range(height)
)
return self.left(stringPict(slash, height//2))
def root(self, n=None):
"""Produce a nice root symbol.
Produces ugly results for big n inserts.
"""
# XXX not used anywhere
# XXX duplicate of root drawing in pretty.py
#put line over expression
result = self.above('_'*self.width())
#construct right half of root symbol
height = self.height()
slash = '\n'.join(
' ' * (height - i - 1) + '/' + ' ' * i
for i in range(height)
)
slash = stringPict(slash, height - 1)
#left half of root symbol
if height > 2:
downline = stringPict('\\ \n \\', 1)
else:
downline = stringPict('\\')
#put n on top, as low as possible
if n is not None and n.width() > downline.width():
downline = downline.left(' '*(n.width() - downline.width()))
downline = downline.above(n)
#build root symbol
root = downline.right(slash)
#glue it on at the proper height
#normally, the root symbel is as high as self
#which is one less than result
#this moves the root symbol one down
#if the root became higher, the baseline has to grow too
root.baseline = result.baseline - result.height() + root.height()
return result.left(root)
def render(self, * args, **kwargs):
"""Return the string form of self.
Unless the argument line_break is set to False, it will
break the expression in a form that can be printed
on the terminal without being broken up.
"""
if kwargs["wrap_line"] is False:
return "\n".join(self.picture)
if kwargs["num_columns"] is not None:
# Read the argument num_columns if it is not None
ncols = kwargs["num_columns"]
else:
# Attempt to get a terminal width
ncols = self.terminal_width()
ncols -= 2
if ncols <= 0:
ncols = 78
# If smaller than the terminal width, no need to correct
if self.width() <= ncols:
return type(self.picture[0])(self)
# for one-line pictures we don't need v-spacers. on the other hand, for
# multiline-pictures, we need v-spacers between blocks, compare:
#
# 2 2 3 | a*c*e + a*c*f + a*d | a*c*e + a*c*f + a*d | 3.14159265358979323
# 6*x *y + 4*x*y + | | *e + a*d*f + b*c*e | 84626433832795
# | *e + a*d*f + b*c*e | + b*c*f + b*d*e + b |
# 3 4 4 | | *d*f |
# 4*y*x + x + y | + b*c*f + b*d*e + b | |
# | | |
# | *d*f
i = 0
svals = []
do_vspacers = (self.height() > 1)
while i < self.width():
svals.extend([ sval[i:i + ncols] for sval in self.picture ])
if do_vspacers:
svals.append("") # a vertical spacer
i += ncols
if svals[-1] == '':
del svals[-1] # Get rid of the last spacer
return "\n".join(svals)
def terminal_width(self):
"""Return the terminal width if possible, otherwise return 0.
"""
ncols = 0
try:
import curses
import io
try:
curses.setupterm()
ncols = curses.tigetnum('cols')
except AttributeError:
# windows curses doesn't implement setupterm or tigetnum
# code below from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440694
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
ncols = right - left + 1
except curses.error:
pass
except io.UnsupportedOperation:
pass
except (ImportError, TypeError):
pass
return ncols
def __eq__(self, o):
if isinstance(o, str):
return '\n'.join(self.picture) == o
elif isinstance(o, stringPict):
return o.picture == self.picture
return False
def __hash__(self):
return super(stringPict, self).__hash__()
def __str__(self):
return str.join('\n', self.picture)
def __unicode__(self):
return unicode.join(u'\n', self.picture)
def __repr__(self):
return "stringPict(%r,%d)" % ('\n'.join(self.picture), self.baseline)
def __getitem__(self, index):
return self.picture[index]
def __len__(self):
return len(self.s)
class prettyForm(stringPict):
"""
Extension of the stringPict class that knows about basic math applications,
optimizing double minus signs.
"Binding" is interpreted as follows::
ATOM this is an atom: never needs to be parenthesized
FUNC this is a function application: parenthesize if added (?)
DIV this is a division: make wider division if divided
POW this is a power: only parenthesize if exponent
MUL this is a multiplication: parenthesize if powered
ADD this is an addition: parenthesize if multiplied or powered
NEG this is a negative number: optimize if added, parenthesize if
multiplied or powered
OPEN this is an open object: parenthesize if added, multiplied, or
powered (example: Piecewise)
"""
ATOM, FUNC, DIV, POW, MUL, ADD, NEG, OPEN = range(8)
def __init__(self, s, baseline=0, binding=0, unicode=None):
"""Initialize from stringPict and binding power."""
stringPict.__init__(self, s, baseline)
self.binding = binding
self.unicode = unicode or s
# Note: code to handle subtraction is in _print_Add
def __add__(self, *others):
"""Make a pretty addition.
Addition of negative numbers is simplified.
"""
arg = self
if arg.binding > prettyForm.NEG:
arg = stringPict(*arg.parens())
result = [arg]
for arg in others:
#add parentheses for weak binders
if arg.binding > prettyForm.NEG:
arg = stringPict(*arg.parens())
#use existing minus sign if available
if arg.binding != prettyForm.NEG:
result.append(' + ')
result.append(arg)
return prettyForm(binding=prettyForm.ADD, *stringPict.next(*result))
def __div__(self, den, slashed=False):
"""Make a pretty division; stacked or slashed.
"""
if slashed:
raise NotImplementedError("Can't do slashed fraction yet")
num = self
if num.binding == prettyForm.DIV:
num = stringPict(*num.parens())
if den.binding == prettyForm.DIV:
den = stringPict(*den.parens())
if num.binding==prettyForm.NEG:
num = num.right(" ")[0]
return prettyForm(binding=prettyForm.DIV, *stringPict.stack(
num,
stringPict.LINE,
den))
def __truediv__(self, o):
return self.__div__(o)
def __mul__(self, *others):
    """Make a pretty multiplication.
    Parentheses are needed around +, - and neg.
    """
    quantity = {
        'degree': u"\N{DEGREE SIGN}"
    }
    if len(others) == 0:
        return self  # We aren't actually multiplying... So nothing to do here.
    args = self
    if args.binding > prettyForm.MUL:
        # BUG FIX: the parenthesized picture used to be assigned to a dead
        # local (`arg`) and discarded, so the first factor never received
        # the parentheses the docstring promises for weak binders.
        args = stringPict(*args.parens())
    result = [args]
    for arg in others:
        # A degree sign glues to the previous factor without a '*'.
        if arg.picture[0] not in quantity.values():
            result.append(xsym('*'))
        # add parentheses for weak binders
        if arg.binding > prettyForm.MUL:
            arg = stringPict(*arg.parens())
        result.append(arg)
    len_res = len(result)
    for i in range(len_res):
        if i < len_res - 1 and result[i] == '-1' and result[i + 1] == xsym('*'):
            # substitute -1 by -, like in -1*x -> -x
            result.pop(i)
            result.pop(i)
            result.insert(i, '-')
    if result[0][0] == '-':
        # if there is a - sign in front of all
        # This test was failing to catch a prettyForm.__mul__(prettyForm("-1", 0, 6)) being negative
        bin = prettyForm.NEG
        if result[0] == '-':
            right = result[1]
            if right.picture[right.baseline][0] == '-':
                result[0] = '- '
    else:
        bin = prettyForm.MUL
    return prettyForm(binding=bin, *stringPict.next(*result))
def __repr__(self):
return "prettyForm(%r,%d,%d)" % (
'\n'.join(self.picture),
self.baseline,
self.binding)
def __pow__(self, b):
"""Make a pretty power.
"""
a = self
use_inline_func_form = False
if b.binding == prettyForm.POW:
b = stringPict(*b.parens())
if a.binding > prettyForm.FUNC:
a = stringPict(*a.parens())
elif a.binding == prettyForm.FUNC:
# heuristic for when to use inline power
if b.height() > 1:
a = stringPict(*a.parens())
else:
use_inline_func_form = True
if use_inline_func_form:
# 2
# sin + + (x)
b.baseline = a.prettyFunc.baseline + b.height()
func = stringPict(*a.prettyFunc.right(b))
return prettyForm(*func.right(a.prettyArgs))
else:
# 2 <-- top
# (x+y) <-- bot
top = stringPict(*b.left(' '*a.width()))
bot = stringPict(*a.right(' '*b.width()))
return prettyForm(binding=prettyForm.POW, *bot.above(top))
simpleFunctions = ["sin", "cos", "tan"]
@staticmethod
def apply(function, *args):
"""Functions of one or more variables.
"""
if function in prettyForm.simpleFunctions:
#simple function: use only space if possible
assert len(
args) == 1, "Simple function %s must have 1 argument" % function
arg = args[0].__pretty__()
if arg.binding <= prettyForm.DIV:
#optimization: no parentheses necessary
return prettyForm(binding=prettyForm.FUNC, *arg.left(function + ' '))
argumentList = []
for arg in args:
argumentList.append(',')
argumentList.append(arg.__pretty__())
argumentList = stringPict(*stringPict.next(*argumentList[1:]))
argumentList = stringPict(*argumentList.parens())
return prettyForm(binding=prettyForm.ATOM, *argumentList.left(function))
| gpl-2.0 |
linjoahow/w17test_1 | static/Brython3.1.0-20150301-090019/Lib/sre_compile.py | 630 | 16898 | #
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
import sys
import _sre
import sre_parse
from sre_constants import *
from _sre import MAXREPEAT
assert _sre.MAGIC == MAGIC, "SRE module mismatch"
if _sre.CODESIZE == 2:
MAXCODE = 65535
else:
MAXCODE = 0xFFFFFFFF
def _identityfunction(x):
    # Default "fixup" used by the charset compiler where no case-folding of
    # literal codepoints is required (i.e. without SRE_FLAG_IGNORECASE).
    return x
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
def _compile(code, pattern, flags):
# internal: compile a (sub)pattern
emit = code.append
_len = len
LITERAL_CODES = _LITERAL_CODES
REPEATING_CODES = _REPEATING_CODES
SUCCESS_CODES = _SUCCESS_CODES
ASSERT_CODES = _ASSERT_CODES
for op, av in pattern:
#print('sre_compile.py:_compile:42', op, av)
#print('sre_compile.py:_compile:42', code)
if op in LITERAL_CODES:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
emit(_sre.getlower(av, flags))
else:
emit(OPCODES[op])
emit(av)
elif op is IN:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
def fixup(literal, flags=flags):
return _sre.getlower(literal, flags)
else:
emit(OPCODES[op])
fixup = _identityfunction
skip = _len(code); emit(0)
_compile_charset(av, flags, code, fixup)
code[skip] = _len(code) - skip
elif op is ANY:
if flags & SRE_FLAG_DOTALL:
emit(OPCODES[ANY_ALL])
else:
emit(OPCODES[ANY])
elif op in REPEATING_CODES:
if flags & SRE_FLAG_TEMPLATE:
raise error("internal: unsupported template operator")
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif _simple(av) and op is not REPEAT:
if op is MAX_REPEAT:
emit(OPCODES[REPEAT_ONE])
else:
emit(OPCODES[MIN_REPEAT_ONE])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
else:
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
code[skip] = _len(code) - skip
if op is MAX_REPEAT:
emit(OPCODES[MAX_UNTIL])
else:
emit(OPCODES[MIN_UNTIL])
elif op is SUBPATTERN:
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2)
# _compile_info(code, av[1], flags)
_compile(code, av[1], flags)
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2+1)
elif op in SUCCESS_CODES:
emit(OPCODES[op])
elif op in ASSERT_CODES:
emit(OPCODES[op])
skip = _len(code); emit(0)
if av[0] >= 0:
emit(0) # look ahead
else:
lo, hi = av[1].getwidth()
if lo != hi:
raise error("look-behind requires fixed-width pattern")
emit(lo) # look behind
_compile(code, av[1], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is CALL:
emit(OPCODES[op])
skip = _len(code); emit(0)
_compile(code, av, flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is AT:
emit(OPCODES[op])
if flags & SRE_FLAG_MULTILINE:
av = AT_MULTILINE.get(av, av)
if flags & SRE_FLAG_LOCALE:
av = AT_LOCALE.get(av, av)
elif flags & SRE_FLAG_UNICODE:
av = AT_UNICODE.get(av, av)
emit(ATCODES[av])
elif op is BRANCH:
emit(OPCODES[op])
tail = []
tailappend = tail.append
for av in av[1]:
skip = _len(code); emit(0)
# _compile_info(code, av, flags)
_compile(code, av, flags)
emit(OPCODES[JUMP])
tailappend(_len(code)); emit(0)
code[skip] = _len(code) - skip
emit(0) # end of branch
for tail in tail:
code[tail] = _len(code) - tail
elif op is CATEGORY:
emit(OPCODES[op])
if flags & SRE_FLAG_LOCALE:
av = CH_LOCALE[av]
elif flags & SRE_FLAG_UNICODE:
av = CH_UNICODE[av]
emit(CHCODES[av])
elif op is GROUPREF:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
else:
emit(OPCODES[op])
emit(av-1)
elif op is GROUPREF_EXISTS:
emit(OPCODES[op])
emit(av[0]-1)
skipyes = _len(code); emit(0)
_compile(code, av[1], flags)
if av[2]:
emit(OPCODES[JUMP])
skipno = _len(code); emit(0)
code[skipyes] = _len(code) - skipyes + 1
_compile(code, av[2], flags)
code[skipno] = _len(code) - skipno
else:
code[skipyes] = _len(code) - skipyes + 1
else:
raise ValueError("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None):
    # compile charset subprogram
    # Appends the opcode stream for one character set ([...]) to `code`,
    # terminated by FAILURE. `fixup` maps literal codepoints (e.g.
    # lowercasing under IGNORECASE) and defaults to the identity.
    emit = code.append
    if fixup is None:
        fixup = _identityfunction
    # _optimize_charset may collapse the set into literals/ranges, a 256-bit
    # CHARSET bitmap, or a BIGCHARSET table (see comments further below).
    for op, av in _optimize_charset(charset, fixup):
        emit(OPCODES[op])
        if op is NEGATE:
            pass
        elif op is LITERAL:
            emit(fixup(av))
        elif op is RANGE:
            emit(fixup(av[0]))
            emit(fixup(av[1]))
        elif op is CHARSET:
            # av is a pre-built bitmap word list; copy it verbatim.
            code.extend(av)
        elif op is BIGCHARSET:
            code.extend(av)
        elif op is CATEGORY:
            # Pick the locale/unicode variant of the category at compile
            # time so the matcher needs no flag checks.
            if flags & SRE_FLAG_LOCALE:
                emit(CHCODES[CH_LOCALE[av]])
            elif flags & SRE_FLAG_UNICODE:
                emit(CHCODES[CH_UNICODE[av]])
            else:
                emit(CHCODES[av])
        else:
            raise error("internal: unsupported set operator")
    emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup):
# internal: optimize character set
out = []
outappend = out.append
charmap = [0]*256
try:
for op, av in charset:
if op is NEGATE:
outappend((op, av))
elif op is LITERAL:
charmap[fixup(av)] = 1
elif op is RANGE:
for i in range(fixup(av[0]), fixup(av[1])+1):
charmap[i] = 1
elif op is CATEGORY:
# XXX: could append to charmap tail
return charset # cannot compress
except IndexError:
# character set contains unicode characters
return _optimize_unicode(charset, fixup)
# compress character map
i = p = n = 0
runs = []
runsappend = runs.append
for c in charmap:
if c:
if n == 0:
p = i
n = n + 1
elif n:
runsappend((p, n))
n = 0
i = i + 1
if n:
runsappend((p, n))
if len(runs) <= 2:
# use literal/range
for p, n in runs:
if n == 1:
outappend((LITERAL, p))
else:
outappend((RANGE, (p, p+n-1)))
if len(out) < len(charset):
return out
else:
# use bitmap
data = _mk_bitmap(charmap)
outappend((CHARSET, data))
return out
return charset
def _mk_bitmap(bits):
    """Pack a sequence of 0/1 flags into a list of code-sized words.

    Bits are accumulated least-significant-first; a word is flushed to the
    output whenever the running mask exceeds MAXCODE. Used to emit the
    CHARSET/BIGCHARSET bitmap tables.
    """
    # The original branched on _sre.CODESIZE but both arms produced the
    # identical tuple (1, 0) -- a Python 2 leftover where one arm used
    # longs -- so the dead conditional is removed.
    data = []
    dataappend = data.append
    m, v = 1, 0
    for c in bits:
        if c:
            v = v + m
        m = m + m
        if m > MAXCODE:
            # Word full: flush it and restart the accumulator.
            dataappend(v)
            m, v = 1, 0
    return data
# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
# represented by a 16-bit word sequence, consisting of one word for
# the number of different chunks, a sequence of 256 bytes (128 words)
# of chunk numbers indexed by their original chunk position, and a
# sequence of chunks (16 words each).
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only cyrillic
# letters are to be matched), or completely included (e.g. if large
# subranges of Kanji match). These ranges will be represented by
# chunks of all one-bits or all zero-bits.
# Matching can be also done efficiently: the more significant byte of
# the Unicode character is an index into the chunk number, and the
# less significant byte is a bit index in the chunk (just like the
# CHARSET matching).
# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
# of the basic multilingual plane; an efficient representation
# for all of UTF-16 has not yet been developed. This means,
# in particular, that negated charsets cannot be represented as
# bigcharsets.
def _optimize_unicode(charset, fixup):
    """Try to compile a unicode charset into a single BIGCHARSET opcode.

    Builds a 64K-entry membership charmap for the BMP, then compresses it
    by deduplicating 256-bit chunks (see the layout description in the
    block comment above).  Returns *charset* unchanged whenever the set
    cannot be represented this way (categories, non-BMP literals, or a
    negated set on a wide build).
    """
    try:
        import array
    except ImportError:
        return charset
    charmap = [0]*65536
    negate = 0
    try:
        for op, av in charset:
            if op is NEGATE:
                negate = 1
            elif op is LITERAL:
                charmap[fixup(av)] = 1
            elif op is RANGE:
                for i in range(fixup(av[0]), fixup(av[1])+1):
                    charmap[i] = 1
            elif op is CATEGORY:
                # XXX: could expand category
                return charset  # cannot compress
    except IndexError:
        # non-BMP characters; XXX now they should work
        return charset
    if negate:
        if sys.maxunicode != 65535:
            # XXX: negation does not work with big charsets
            # XXX2: now they should work, but removing this will make the
            # charmap 17 times bigger
            return charset
        for i in range(65536):
            charmap[i] = not charmap[i]
    # Deduplicate the 256 chunks of 256 bits each: 'mapping' records, for
    # every original chunk position, the index of its first occurrence.
    comps = {}
    mapping = [0]*256
    block = 0
    data = []
    for i in range(256):
        chunk = tuple(charmap[i*256:(i+1)*256])
        new = comps.setdefault(chunk, block)
        mapping[i] = new
        if new == block:
            # First time we see this chunk: append its bitmap words.
            block = block + 1
            data = data + _mk_bitmap(chunk)
    header = [block]
    if _sre.CODESIZE == 2:
        code = 'H'
    else:
        code = 'I'
    # Convert block indices to byte array of 256 bytes
    # NOTE(review): 'b' is a *signed* byte, so more than 127 distinct
    # chunks would overflow here — presumably never reached in practice;
    # confirm before relying on it.
    mapping = array.array('b', mapping).tobytes()
    # Convert byte array to word array
    mapping = array.array(code, mapping)
    assert mapping.itemsize == _sre.CODESIZE
    assert len(mapping) * mapping.itemsize == 256
    header = header + mapping.tolist()
    data[0:0] = header
    return [(BIGCHARSET, data)]
def _simple(av):
    """Return True when the repeat target in *av* is a "simple" operator.

    The target (``av[2]``) is simple when it matches exactly one character
    and is not a subpattern.  An unbounded repeat of a possibly-empty
    match is rejected outright.
    """
    minwidth, maxwidth = av[2].getwidth()
    if minwidth == 0 and maxwidth == MAXREPEAT:
        raise error("nothing to repeat")
    if minwidth != 1 or maxwidth != 1:
        return False
    return av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
    # internal: compile an info block. in the current version,
    # this contains min/max pattern width, and an optional literal
    # prefix or a character map.  Appends opcodes to *code* in place;
    # emits nothing when the pattern can match the empty string.
    lo, hi = pattern.getwidth()
    #print('sre_compile.py:_compile_info:370', lo, hi)
    if lo == 0:
        return  # not worth it
    # look for a literal prefix
    prefix = []
    prefixappend = prefix.append
    prefix_skip = 0
    charset = []  # not used
    charsetappend = charset.append
    if not (flags & SRE_FLAG_IGNORECASE):
        # look for literal prefix: a run of LITERALs, possibly reaching
        # into single-item subpatterns (those stop the "skip" count).
        for op, av in pattern.data:
            #print('sre_compile.py:_code:381',op,av)
            if op is LITERAL:
                if len(prefix) == prefix_skip:
                    prefix_skip = prefix_skip + 1
                prefixappend(av)
            elif op is SUBPATTERN and len(av[1]) == 1:
                op, av = av[1][0]
                if op is LITERAL:
                    prefixappend(av)
                else:
                    break
            else:
                break
        # if no prefix, look for charset prefix: the set of literals the
        # pattern can possibly start with.
        if not prefix and pattern.data:
            op, av = pattern.data[0]
            if op is SUBPATTERN and av[1]:
                op, av = av[1][0]
                if op is LITERAL:
                    charsetappend((op, av))
                elif op is BRANCH:
                    c = []
                    cappend = c.append
                    for p in av[1]:
                        if not p:
                            break
                        op, av = p[0]
                        if op is LITERAL:
                            cappend((op, av))
                        else:
                            break
                    else:
                        # every branch starts with a literal: usable charset
                        charset = c
            elif op is BRANCH:
                c = []
                cappend = c.append
                for p in av[1]:
                    if not p:
                        break
                    op, av = p[0]
                    if op is LITERAL:
                        cappend((op, av))
                    else:
                        break
                else:
                    charset = c
            elif op is IN:
                charset = av
    #print('sre_compile.py:_code:430', code)
    ## if prefix:
    ##     print "*** PREFIX", prefix, prefix_skip
    ## if charset:
    ##     print "*** CHARSET", charset
    # add an info block
    emit = code.append
    emit(OPCODES[INFO])
    skip = len(code); emit(0)  # placeholder, patched at the end
    # literal flag
    mask = 0
    if prefix:
        mask = SRE_INFO_PREFIX
        if len(prefix) == prefix_skip == len(pattern.data):
            # the whole pattern is a literal string
            mask = mask + SRE_INFO_LITERAL
    elif charset:
        mask = mask + SRE_INFO_CHARSET
    emit(mask)
    # pattern length (clamped to MAXCODE; 0 means "unbounded" for hi)
    if lo < MAXCODE:
        emit(lo)
    else:
        emit(MAXCODE)
        prefix = prefix[:MAXCODE]
    if hi < MAXCODE:
        emit(hi)
    else:
        emit(0)
    # add literal prefix
    #print('sre_compile.py:_code:457', code)
    if prefix:
        emit(len(prefix))  # length
        emit(prefix_skip)  # skip
        code.extend(prefix)
        # generate overlap table (KMP-style failure function for the prefix)
        table = [-1] + ([0]*len(prefix))
        for i in range(len(prefix)):
            table[i+1] = table[i]+1
            while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
                table[i+1] = table[table[i+1]-1]+1
        code.extend(table[1:])  # don't store first entry
    elif charset:
        _compile_charset(charset, flags, code)
    # back-patch the skip placeholder with the info block's length
    code[skip] = len(code) - skip
def isstring(obj):
    """Return True if *obj* is a text (str) or byte (bytes) string."""
    return isinstance(obj, str) or isinstance(obj, bytes)
def _code(p, flags):
    """Lower a parsed pattern *p* into a flat opcode list.

    *flags* is OR-ed with the flags recorded on the pattern itself.
    """
    combined = p.pattern.flags | flags
    out = []
    # The optional info block (width hints, literal prefix) must precede
    # the pattern body.
    _compile_info(out, p, combined)
    # Pattern body, terminated by a SUCCESS opcode.
    _compile(out, p.data, combined)
    out.append(OPCODES[SUCCESS])
    return out
def compile(p, flags=0):
    """Internal: turn a pattern string (or parsed pattern) into a _sre object."""
    if isstring(p):
        # Keep the original source string for the compiled object.
        source, p = p, sre_parse.parse(p, flags)
    else:
        source = None
    code = _code(p, flags)
    # XXX: <fl> get rid of this limitation!
    if p.pattern.groups > 100:
        raise AssertionError(
            "sorry, but this version only supports 100 named groups"
        )
    # Build the group name <-> group index maps in both directions.
    groupindex = p.pattern.groupdict
    indexgroup = [None] * p.pattern.groups
    for name, index in groupindex.items():
        indexgroup[index] = name
    return _sre.compile(
        source, flags | p.pattern.flags, code,
        p.pattern.groups - 1,
        groupindex, indexgroup,
    )
| gpl-3.0 |
fearlessspider/python-social-auth | social/tests/backends/test_edmodo.py | 7 | 1339 | import json
from social.tests.backends.oauth import OAuth2Test
class EdmodoOAuth2Test(OAuth2Test):
    """Exercise the Edmodo OAuth2 backend against canned API responses.

    The base OAuth2Test harness mocks the HTTP layer, serving
    ``access_token_body`` from the token endpoint and ``user_data_body``
    from ``user_data_url``, then checks the pipeline yields
    ``expected_username``.
    """
    backend_path = 'social.backends.edmodo.EdmodoOAuth2'
    user_data_url = 'https://api.edmodo.com/users/me'
    expected_username = 'foobar12345'
    # Canned token-endpoint response.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    # Canned /users/me response; 'username' must match expected_username.
    user_data_body = json.dumps({
        'username': 'foobar12345',
        'coppa_verified': False,
        'first_name': 'Foo',
        'last_name': 'Bar',
        'premium': False,
        'verified_institution_member': False,
        'url': 'https://api.edmodo.com/users/12345',
        'type': 'teacher',
        'time_zone': None,
        'end_level': None,
        'start_level': None,
        'locale': 'en',
        'subjects': None,
        'utc_offset': None,
        'email': 'foo.bar@example.com',
        'gender': None,
        'about': None,
        'user_title': None,
        'id': 12345,
        'avatars': {
            'small': 'https://api.edmodo.com/users/12345/avatar?type=small&u=5a15xug93m53mi4ey3ck4fvkq',
            'large': 'https://api.edmodo.com/users/12345/avatar?type=large&u=5a15xug93m53mi4ey3ck4fvkq'
        }
    })

    def test_login(self):
        # Full login flow against the mocked endpoints.
        self.do_login()

    def test_partial_pipeline(self):
        # Login flow interrupted and resumed mid-pipeline.
        self.do_partial_pipeline()
| bsd-3-clause |
robhudson/zamboni | mkt/site/tests/test_views.py | 1 | 7116 | import json
from urlparse import urljoin
from django.conf import settings
from django.core.cache import cache
from django.test.utils import override_settings
import mock
from lxml import etree
from nose import SkipTest
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from mkt.webapps.models import Webapp
from mkt.site.fixtures import fixture
class Test403(amo.tests.TestCase):
    """Forbidden views render the marketplace-styled 403 template."""
    fixtures = ['base/users'] + fixture('webapp_337141')

    def setUp(self):
        # Log in as the app's developer; tests may switch to other users.
        assert self.client.login(username='steamcube@mozilla.com',
                                 password='password')

    def _test_403(self, url):
        # Fetch *url* (following redirects) and assert the custom 403 page.
        res = self.client.get(url, follow=True)
        eq_(res.status_code, 403)
        self.assertTemplateUsed(res, 'site/403.html')

    def test_403_admin(self):
        self._test_403('/admin')

    def test_403_devhub(self):
        # A regular (non-developer) user may not edit another dev's app.
        assert self.client.login(username='regular@mozilla.com',
                                 password='password')
        app = Webapp.objects.get(pk=337141)
        self._test_403(app.get_dev_url('edit'))

    def test_403_reviewer(self):
        self._test_403('/reviewers')
class Test404(amo.tests.TestCase):
    """Unknown URLs render the marketplace-styled 404 template."""
    fixtures = fixture('webapp_337141')

    def _test_404(self, url):
        # Fetch *url* (following redirects) and assert the custom 404 page.
        r = self.client.get(url, follow=True)
        eq_(r.status_code, 404)
        self.assertTemplateUsed(r, 'site/404.html')
        return r

    def test_404(self):
        self._test_404('/xxx')

    def test_404_devhub(self):
        self._test_404('/developers/xxx')

    def test_404_consumer_legacy(self):
        self._test_404('/xxx')

    def test_404_consumer(self):
        self._test_404('/xxx')

    def test_404_api(self):
        # API 404s return an empty body, not the HTML template.
        res = self.client.get('/api/this-should-never-work/')
        eq_(res.status_code, 404)
        eq_(res.content, '')
class TestManifest(amo.tests.TestCase):
    """The marketplace webapp manifest: content, name, orientation, caching."""

    def setUp(self):
        self.url = reverse('manifest.webapp')

    @mock.patch('mkt.carriers.carriers.CARRIERS', {'boop': 'boop'})
    @mock.patch.object(settings, 'WEBAPP_MANIFEST_NAME', 'Firefox Marketplace')
    @mock.patch('mkt.site.views.get_carrier')
    def test_manifest(self, mock_get_carrier):
        # With a carrier configured, launch_path carries it as a query arg.
        mock_get_carrier.return_value = 'boop'
        response = self.client.get(reverse('manifest.webapp'))
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], 'application/x-web-app-manifest+json')
        content = json.loads(response.content)
        eq_(content['name'], 'Firefox Marketplace')
        # The manifest URL must not be locale- or app-prefixed.
        url = reverse('manifest.webapp')
        assert 'en-US' not in url and 'firefox' not in url
        eq_(content['launch_path'], '/?carrier=boop')

    @mock.patch('mkt.carriers.carriers.CARRIERS', [])
    def test_manifest_no_carrier(self):
        # No carriers configured: launch_path is omitted entirely.
        response = self.client.get(self.url)
        eq_(response.status_code, 200)
        content = json.loads(response.content)
        assert 'launch_path' not in content

    @mock.patch.object(settings, 'WEBAPP_MANIFEST_NAME', 'Mozilla Fruitstand')
    def test_manifest_name(self):
        # The manifest name comes from the WEBAPP_MANIFEST_NAME setting.
        response = self.client.get(self.url)
        eq_(response.status_code, 200)
        content = json.loads(response.content)
        eq_(content['name'], 'Mozilla Fruitstand')

    def test_manifest_orientation(self):
        response = self.client.get(self.url)
        eq_(response.status_code, 200)
        content = json.loads(response.content)
        eq_(content['orientation'], ['portrait-primary'])

    def test_manifest_etag(self):
        resp = self.client.get(self.url)
        etag = resp.get('Etag')
        assert etag, 'Missing ETag'
        # Trigger a change to the manifest by changing the name.
        with self.settings(WEBAPP_MANIFEST_NAME='Mozilla Fruitstand'):
            resp = self.client.get(self.url)
            assert resp.get('Etag'), 'Missing ETag'
            # A different manifest body must produce a different ETag.
            self.assertNotEqual(etag, resp.get('Etag'))

    def test_conditional_get_manifest(self):
        # A matching If-None-Match yields 304 with an empty body.
        resp = self.client.get(self.url)
        etag = resp.get('Etag')
        resp = self.client.get(self.url, HTTP_IF_NONE_MATCH=str(etag))
        eq_(resp.content, '')
        eq_(resp.status_code, 304)
class TestMozmarketJS(amo.tests.TestCase):
    """The served mozmarket.js bundle: server URL, minification, vendoring."""

    def setUp(self):
        # The rendered JS is cached; clear between tests.
        cache.clear()

    def render(self):
        return self.client.get(reverse('site.mozmarket_js'))

    @mock.patch.object(settings, 'SITE_URL', 'https://secure-mkt.com/')
    @mock.patch.object(settings, 'MINIFY_MOZMARKET', False)
    def test_render(self):
        resp = self.render()
        self.assertContains(resp, "var server = 'https://secure-mkt.com/'")
        eq_(resp['Content-Type'], 'text/javascript')

    @mock.patch.object(settings, 'SITE_URL', 'https://secure-mkt.com/')
    @mock.patch.object(settings, 'MINIFY_MOZMARKET', True)
    def test_minify(self):
        resp = self.render()
        # Check for no space after equal sign.
        self.assertContains(resp, '="https://secure-mkt.com/"')

    @mock.patch.object(settings, 'MINIFY_MOZMARKET', True)
    @mock.patch.object(settings, 'UGLIFY_BIN', None)
    def test_minify_with_yui(self):
        # Falls back to the YUI minifier when uglify is unavailable.
        self.render()  # no errors

    @mock.patch.object(settings, 'MINIFY_MOZMARKET', False)
    def test_receiptverifier(self):
        resp = self.render()
        self.assertContains(resp, 'exports.receipts.Verifier')

    @mock.patch.object(settings, 'MOZMARKET_VENDOR_EXCLUDE',
                       ['receiptverifier'])
    @mock.patch.object(settings, 'MINIFY_MOZMARKET', False)
    def test_exclude(self):
        # Excluded vendor modules must not appear in the bundle.
        resp = self.render()
        self.assertNotContains(resp, 'exports.receipts.Verifier')
class TestRobots(amo.tests.TestCase):
    """robots.txt allows or blocks crawlers per the ENGAGE_ROBOTS setting."""

    @override_settings(CARRIER_URLS=['seavanworld'])
    @override_settings(ENGAGE_ROBOTS=True)
    def test_engage_robots(self):
        # Crawling allowed globally, but carrier URL prefixes stay hidden.
        rs = self.client.get('/robots.txt')
        self.assertContains(rs, 'Allow: /')
        self.assertContains(rs, 'Disallow: /seavanworld/')

    @override_settings(ENGAGE_ROBOTS=False)
    def test_do_not_engage_robots(self):
        rs = self.client.get('/robots.txt')
        self.assertContains(rs, 'Disallow: /')
class TestOpensearch(amo.tests.TestCase):
    """The OpenSearch description document and its declaration in pages."""

    def test_opensearch_declaration(self):
        """Look for opensearch declaration in templates."""
        response = self.client.get(reverse('commonplace.fireplace'))
        elm = pq(response.content)(
            'link[rel=search][type="application/opensearchdescription+xml"]')
        eq_(elm.attr('href'), reverse('opensearch'))
        eq_(elm.attr('title'), 'Firefox Marketplace')

    def test_opensearch(self):
        # Validate the XML document: short name and search URL template.
        response = self.client.get(reverse('opensearch'))
        eq_(response['Content-Type'], 'text/xml')
        eq_(response.status_code, 200)
        doc = etree.fromstring(response.content)
        e = doc.find('{http://a9.com/-/spec/opensearch/1.1/}ShortName')
        eq_(e.text, 'Firefox Marketplace')
        e = doc.find('{http://a9.com/-/spec/opensearch/1.1/}Url')
        wanted = '%s?q={searchTerms}' % urljoin(settings.SITE_URL, '/search')
        eq_(e.attrib['template'], wanted)
| bsd-3-clause |
lexyan/SickBeard | lib/hachoir_parser/misc/bplist.py | 90 | 11349 | """
Apple/NeXT Binary Property List (BPLIST) parser.
Also includes a .createXML() function which produces an XML representation of the object.
Note that it will discard unknown objects, nulls and fill values, but should work for most files.
Documents:
- CFBinaryPList.c
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Parsing.subproj/CFBinaryPList.c
- ForFoundationOnly.h (for structure formats)
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Base.subproj/ForFoundationOnly.h
- XML <-> BPList converter
http://scw.us/iPhone/plutil/plutil.pl
Author: Robert Xiao
Created: 2008-09-21
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.field import (RootSeekableFieldSet, FieldSet, Enum,
Bits, GenericInteger, Float32, Float64, UInt8, UInt64, Bytes, NullBytes, RawBytes, String)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import displayHandler
from lib.hachoir_core.tools import humanDatetime
from datetime import datetime, timedelta
class BPListTrailer(FieldSet):
    """Fixed 32-byte trailer at the end of a bplist file.

    Describes how to read the rest of the file: offset/ref widths, object
    count, the root object, and where the offset table lives.
    """

    def createFields(self):
        yield NullBytes(self, "unused", 6)
        yield UInt8(self, "offsetIntSize", "Size (in bytes) of offsets in the offset table")
        yield UInt8(self, "objectRefSize", "Size (in bytes) of object numbers in object references")
        yield UInt64(self, "numObjects", "Number of objects in this file")
        yield UInt64(self, "topObject", "Top-level object reference")
        yield UInt64(self, "offsetTableOffset", "File offset to the offset table")

    def createDescription(self):
        return "Binary PList trailer"
class BPListOffsetTable(FieldSet):
    """Table of per-object file offsets, sized as declared by the trailer."""

    def createFields(self):
        # Each offset is offsetIntSize bytes wide; one entry per object.
        offset_bits = self["../trailer/offsetIntSize"].value * 8
        entry_count = self["../trailer/numObjects"].value
        for _ in range(entry_count):
            yield Bits(self, "offset[]", offset_bits)
class BPListSize(FieldSet):
    """A 4-bit length field; the value 0xF means a full object follows.

    Variable-length bplist items store small sizes in the marker nibble
    and escape to a full integer object for larger ones.
    """

    def createFields(self):
        yield Bits(self, "size", 4)
        if self['size'].value == 0xF:
            # Escape: the real size is encoded as a following Int object.
            yield BPListObject(self, "fullsize")

    def createValue(self):
        if 'fullsize' in self:
            return self['fullsize'].value
        else:
            return self['size'].value
class BPListObjectRef(GenericInteger):
    """An object-number reference, sized per the trailer's objectRefSize.

    Display and XML generation are delegated to the referenced object.
    """

    def __init__(self, parent, name, description=None):
        # Reference width (bits) comes from the file trailer.
        size = parent['/trailer/objectRefSize'].value * 8
        GenericInteger.__init__(self, parent, name, False, size, description)

    def getRef(self):
        # Resolve the reference to the actual object field by index.
        return self.parent['/object[' + str(self.value) + ']']

    def createDisplay(self):
        return self.getRef().display

    def createXML(self, prefix=''):
        return self.getRef().createXML(prefix)
class BPListArray(FieldSet):
    """A bplist array: *size* object references, rendered as <array> in XML."""

    def __init__(self, parent, name, size, description=None):
        FieldSet.__init__(self, parent, name, description=description)
        # Number of elements, already decoded by the caller.
        self.numels = size

    def createFields(self):
        for i in range(self.numels):
            yield BPListObjectRef(self, "ref[]")

    def createValue(self):
        # Value is the list of reference fields (not the resolved objects).
        return self.array('ref')

    def createDisplay(self):
        return '[' + ', '.join([x.display for x in self.value]) + ']'

    def createXML(self,prefix=''):
        return prefix + '<array>\n' + ''.join([x.createXML(prefix + '\t' ) + '\n' for x in self.value]) + prefix + '</array>'
class BPListDict(FieldSet):
    """A bplist dictionary: *size* key refs followed by *size* value refs."""

    def __init__(self, parent, name, size, description=None):
        FieldSet.__init__(self, parent, name, description=description)
        # Number of key/value pairs, already decoded by the caller.
        self.numels = size

    def createFields(self):
        # Layout: all keys first, then all values, in matching order.
        for i in range(self.numels):
            yield BPListObjectRef(self, "keyref[]")
        for i in range(self.numels):
            yield BPListObjectRef(self, "valref[]")

    def createValue(self):
        # Pairs of (key-ref, value-ref) fields.
        return zip(self.array('keyref'),self.array('valref'))

    def createDisplay(self):
        return '{' + ', '.join(['%s: %s'%(k.display,v.display) for k,v in self.value]) + '}'

    def createXML(self, prefix=''):
        return prefix + '<dict>\n' + ''.join(['%s\t<key>%s</key>\n%s\n'%(prefix,k.getRef().value.encode('utf-8'),v.createXML(prefix + '\t')) for k,v in self.value]) + prefix + '</dict>'
class BPListObject(FieldSet):
    """A single bplist object: a 4-bit type marker followed by type-specific data.

    Each branch also installs ``self.xml``, a callable producing the
    object's XML-plist representation (used by BPList.createXML).  Unknown
    markers, nulls and fill bytes render as empty strings.
    """

    def createFields(self):
        yield Enum(Bits(self, "marker_type", 4),
                   {0: "Simple",
                    1: "Int",
                    2: "Real",
                    3: "Date",
                    4: "Data",
                    5: "ASCII String",
                    6: "UTF-16-BE String",
                    8: "UID",
                    10: "Array",
                    13: "Dict",})
        markertype = self['marker_type'].value
        if markertype == 0:
            # Simple (Null)
            yield Enum(Bits(self, "value", 4),
                       {0: "Null",
                        8: "False",
                        9: "True",
                        15: "Fill Byte",})
            if self['value'].display == "False":
                self.xml=lambda prefix:prefix + "<false/>"
            elif self['value'].display == "True":
                self.xml=lambda prefix:prefix + "<true/>"
            else:
                # Null and fill bytes have no XML equivalent.
                self.xml=lambda prefix:prefix + ""
        elif markertype == 1:
            # Int
            yield Bits(self, "size", 4, "log2 of number of bytes")
            size=self['size'].value
            # 8-bit (size=0), 16-bit (size=1) and 32-bit (size=2) numbers are unsigned
            # 64-bit (size=3) numbers are signed
            yield GenericInteger(self, "value", (size>=3), (2**size)*8)
            self.xml=lambda prefix:prefix + "<integer>%s</integer>"%self['value'].value
        elif markertype == 2:
            # Real
            yield Bits(self, "size", 4, "log2 of number of bytes")
            if self['size'].value == 2:  # 2**2 = 4 byte float
                yield Float32(self, "value")
            elif self['size'].value == 3:  # 2**3 = 8 byte float
                yield Float64(self, "value")
            else:
                # FIXME: What is the format of the real?
                yield Bits(self, "value", (2**self['size'].value)*8)
            self.xml=lambda prefix:prefix + "<real>%s</real>"%self['value'].value
        elif markertype == 3:
            # Date: float64 seconds since the Apple epoch (2001-01-01).
            yield Bits(self, "extra", 4, "Extra value, should be 3")
            cvt_time=lambda v:datetime(2001,1,1) + timedelta(seconds=v)
            yield displayHandler(Float64(self, "value"),lambda x:humanDatetime(cvt_time(x)))
            self.xml=lambda prefix:prefix + "<date>%s</date>"%(cvt_time(self['value'].value).isoformat())
        elif markertype == 4:
            # Data: raw bytes, base64-encoded in XML.
            yield BPListSize(self, "size")
            if self['size'].value:
                yield Bytes(self, "value", self['size'].value)
                self.xml=lambda prefix:prefix + "<data>\n%s\n%s</data>"%(self['value'].value.encode('base64').strip(),prefix)
            else:
                self.xml=lambda prefix:prefix + '<data></data>'
        elif markertype == 5:
            # ASCII String
            yield BPListSize(self, "size")
            if self['size'].value:
                yield String(self, "value", self['size'].value, charset="ASCII")
                self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.encode('iso-8859-1'))
            else:
                self.xml=lambda prefix:prefix + '<string></string>'
        elif markertype == 6:
            # UTF-16-BE String: size counts characters, two bytes each.
            yield BPListSize(self, "size")
            if self['size'].value:
                yield String(self, "value", self['size'].value*2, charset="UTF-16-BE")
                self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.encode('utf-8'))
            else:
                self.xml=lambda prefix:prefix + '<string></string>'
        elif markertype == 8:
            # UID (used by NSKeyedArchiver archives)
            yield Bits(self, "size", 4, "Number of bytes minus 1")
            yield GenericInteger(self, "value", False, (self['size'].value + 1)*8)
            self.xml=lambda prefix:prefix + ""  # no equivalent?
        elif markertype == 10:
            # Array
            yield BPListSize(self, "size")
            size = self['size'].value
            if size:
                yield BPListArray(self, "value", size)
                self.xml=lambda prefix:self['value'].createXML(prefix)
        elif markertype == 13:
            # Dict
            yield BPListSize(self, "size")
            yield BPListDict(self, "value", self['size'].value)
            self.xml=lambda prefix:self['value'].createXML(prefix)
        else:
            # Unknown marker: consume the low nibble and emit nothing.
            yield Bits(self, "value", 4)
            self.xml=lambda prefix:''

    def createValue(self):
        if 'value' in self:
            return self['value'].value
        elif self['marker_type'].value in [4,5,6]:
            # Empty data/string objects have no 'value' field.
            return u''
        else:
            return None

    def createDisplay(self):
        if 'value' in self:
            return unicode(self['value'].display)
        elif self['marker_type'].value in [4,5,6]:
            return u''
        else:
            return None

    def createXML(self, prefix=''):
        if 'value' in self:
            try:
                return self.xml(prefix)
            except AttributeError:
                return ''
        return ''

    def getFieldType(self):
        # Include the marker name in the field-type string for debugging.
        return '%s<%s>'%(FieldSet.getFieldType(self), self['marker_type'].display)
class BPList(HachoirParser, RootSeekableFieldSet):
    """Top-level parser for Apple/NeXT binary property lists (bplist00).

    Parses the magic, the trailer, the offset table and every object, and
    can render the whole document as an XML plist via createXML().
    """
    endian = BIG_ENDIAN
    MAGIC = "bplist00"
    PARSER_TAGS = {
        "id": "bplist",
        "category": "misc",
        "file_ext": ("plist",),
        "magic": ((MAGIC, 0),),
        "min_size": 8 + 32,  # bplist00 + 32-byte trailer
        "description": "Apple/NeXT Binary Property List",
    }

    def __init__(self, stream, **args):
        RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
        HachoirParser.__init__(self, stream, **args)

    def validate(self):
        # Only the magic is checked; the trailer is trusted.
        if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
            return "Invalid magic"
        return True

    def createFields(self):
        yield Bytes(self, "magic", 8, "File magic (bplist00)")
        if self.size:
            # Jump straight to the 32-byte trailer at the end of the file.
            self.seekByte(self.size//8-32, True)
        else:
            # FIXME: UNTESTED
            while True:
                try:
                    self.seekByte(1024)
                except:
                    break
            self.seekByte(self.size//8-32)
        yield BPListTrailer(self, "trailer")
        self.seekByte(self['trailer/offsetTableOffset'].value)
        yield BPListOffsetTable(self, "offset_table")
        # Walk the offset table and parse each object in file order.
        for i in self.array("offset_table/offset"):
            if self.current_size > i.value*8:
                self.seekByte(i.value)
            elif self.current_size < i.value*8:
                # try to detect files with gaps or unparsed content
                yield RawBytes(self, "padding[]", i.value-self.current_size//8)
            yield BPListObject(self, "object[]")

    def createXML(self, prefix=''):
        # Emit the standard plist envelope around the top object's XML.
        return '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
''' + self['/object[' + str(self['/trailer/topObject'].value) + ']'].createXML(prefix) + '''
</plist>'''
| gpl-3.0 |
cdrttn/samba-regedit | lib/dnspython/dns/ttl.py | 100 | 2179 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TTL conversion."""
import dns.exception
class BadTTL(dns.exception.SyntaxError):
    """Raised when a textual TTL is malformed or out of range."""
    pass
def from_text(text):
    """Convert the text form of a TTL to an integer.

    The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.

    @param text: the textual TTL
    @type text: string
    @raises dns.ttl.BadTTL: the TTL is not well-formed
    @rtype: int
    """
    # NOTE: this module targets Python 2 (long / 0L literals).
    if text.isdigit():
        # Plain number of seconds, no unit suffixes.
        total = long(text)
    else:
        if not text[0].isdigit():
            raise BadTTL
        total = 0L
        current = 0L
        for c in text:
            if c.isdigit():
                # Accumulate the numeric part of the current term.
                current *= 10
                current += long(c)
            else:
                # Unit suffix closes the current term.
                c = c.lower()
                if c == 'w':
                    total += current * 604800L
                elif c == 'd':
                    total += current * 86400L
                elif c == 'h':
                    total += current * 3600L
                elif c == 'm':
                    total += current * 60L
                elif c == 's':
                    total += current
                else:
                    raise BadTTL("unknown unit '%s'" % c)
                current = 0
        if not current == 0:
            # Digits with no trailing unit, e.g. '1w30'.
            raise BadTTL("trailing integer")
    if total < 0L or total > 2147483647L:
        # RFC 2181: TTLs are unsigned 31-bit values.
        raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)")
    return total
| gpl-3.0 |
saadatqadri/django-oscar | tests/integration/basket/model_tests.py | 35 | 4877 | from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.basket.models import Basket
from oscar.apps.partner import strategy
from oscar.test import factories
from oscar.apps.catalogue.models import Option
class TestAddingAProductToABasket(TestCase):
    """Adding a product creates a line with prices from the stockrecord."""

    def setUp(self):
        self.basket = Basket()
        self.basket.strategy = strategy.Default()
        self.product = factories.create_product()
        self.record = factories.create_stockrecord(
            currency='GBP',
            product=self.product, price_excl_tax=D('10.00'))
        self.purchase_info = factories.create_purchase_info(self.record)
        self.basket.add(self.product)

    def test_creates_a_line(self):
        self.assertEqual(1, self.basket.num_lines)

    def test_sets_line_prices(self):
        line = self.basket.lines.all()[0]
        self.assertEqual(line.price_incl_tax, self.purchase_info.price.incl_tax)
        self.assertEqual(line.price_excl_tax, self.purchase_info.price.excl_tax)

    def test_means_another_currency_product_cannot_be_added(self):
        # Baskets are single-currency: mixing GBP and USD lines must fail.
        product = factories.create_product()
        factories.create_stockrecord(
            currency='USD', product=product, price_excl_tax=D('20.00'))
        with self.assertRaises(ValueError):
            self.basket.add(product)
class TestANonEmptyBasket(TestCase):
    """Quantity queries and flushing on a basket holding 10 of one product."""

    def setUp(self):
        self.basket = Basket()
        self.basket.strategy = strategy.Default()
        self.product = factories.create_product()
        self.record = factories.create_stockrecord(
            self.product, price_excl_tax=D('10.00'))
        self.purchase_info = factories.create_purchase_info(self.record)
        self.basket.add(self.product, 10)

    def test_can_be_flushed(self):
        self.basket.flush()
        self.assertEqual(self.basket.num_items, 0)

    def test_returns_correct_product_quantity(self):
        self.assertEqual(10, self.basket.product_quantity(
            self.product))

    def test_returns_correct_line_quantity_for_existing_product_and_stockrecord(self):
        self.assertEqual(10, self.basket.line_quantity(
            self.product, self.record))

    def test_returns_zero_line_quantity_for_alternative_stockrecord(self):
        # Same product, different stockrecord: counted as a separate line.
        record = factories.create_stockrecord(
            self.product, price_excl_tax=D('5.00'))
        self.assertEqual(0, self.basket.line_quantity(
            self.product, record))

    def test_returns_zero_line_quantity_for_missing_product_and_stockrecord(self):
        product = factories.create_product()
        record = factories.create_stockrecord(
            product, price_excl_tax=D('5.00'))
        self.assertEqual(0, self.basket.line_quantity(
            product, record))

    def test_returns_correct_quantity_for_existing_product_and_stockrecord_and_options(self):
        # Options are part of a line's identity: the same product with and
        # without options occupies distinct lines.
        product = factories.create_product()
        record = factories.create_stockrecord(
            product, price_excl_tax=D('5.00'))
        option = Option.objects.create(name="Message")
        options = [{"option": option, "value": "2"}]
        self.basket.add(product, options=options)
        self.assertEqual(0, self.basket.line_quantity(
            product, record))
        self.assertEqual(1, self.basket.line_quantity(
            product, record, options))
class TestMergingTwoBaskets(TestCase):
    """Merging keeps the target basket's line and marks the source MERGED."""

    def setUp(self):
        self.product = factories.create_product()
        self.record = factories.create_stockrecord(
            self.product, price_excl_tax=D('10.00'))
        self.purchase_info = factories.create_purchase_info(self.record)
        self.main_basket = Basket()
        self.main_basket.strategy = strategy.Default()
        self.main_basket.add(self.product, quantity=2)
        self.merge_basket = Basket()
        self.merge_basket.strategy = strategy.Default()
        self.merge_basket.add(self.product, quantity=1)
        self.main_basket.merge(self.merge_basket)

    def test_doesnt_sum_quantities(self):
        # Duplicate lines are not summed; the target's line wins.
        self.assertEqual(1, self.main_basket.num_lines)

    def test_changes_status_of_merge_basket(self):
        self.assertEqual(Basket.MERGED, self.merge_basket.status)
class TestASubmittedBasket(TestCase):
    """A submitted basket is flagged as such and becomes read-only."""

    def setUp(self):
        self.basket = Basket()
        self.basket.strategy = strategy.Default()
        self.basket.submit()

    def test_has_correct_status(self):
        self.assertTrue(self.basket.is_submitted)

    def test_can_be_edited(self):
        self.assertFalse(self.basket.can_be_edited)
class TestMergingAVoucherBasket(TestCase):
    """Vouchers attached to the source basket survive a merge."""

    def test_transfers_vouchers_to_new_basket(self):
        baskets = [factories.BasketFactory(), factories.BasketFactory()]
        voucher = factories.VoucherFactory()
        baskets[0].vouchers.add(voucher)
        baskets[1].merge(baskets[0])
        self.assertEqual(1, baskets[1].vouchers.all().count())
| bsd-3-clause |
les69/calvin-base | calvin/runtime/south/plugins/async/twistedimpl/tests/test_server_connection.py | 5 | 7701 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.async import async
from calvin.runtime.south.plugins.async import server_connection
from calvin.runtime.south.plugins.async import threads
from calvin.utilities.calvinlogger import get_logger
import pytest
import socket
_log = get_logger(__name__)
def data_available(conn):
    # Busy-wait (test helper) until *conn* reports buffered data.
    # NOTE(review): spins without sleeping and never times out — acceptable
    # only in tests, where the reactor is expected to deliver data soon.
    first_print = True
    while conn.data_available is False:
        if first_print:
            print "waiting for conn.data_available ... ",
            first_print = False
    print ""
    return True
def connection_made(factory):
    # Busy-wait (test helper) until the factory has at least one connection.
    # NOTE(review): unbounded spin loop — test-only helper.
    first_print = True
    while not factory.connections:
        if first_print:
            print "waiting for connection ... ",
            first_print = False
    print ""
    return True
def hundred_connection_made(factory):
    # Busy-wait (test helper) until exactly 100 connections are registered.
    # NOTE(review): unbounded spin loop — test-only helper.
    first_print = True
    while not len(factory.connections) == 100:
        if first_print:
            print "waiting for 100 connection ... ",
            first_print = False
    print ""
    return True
def no_more_connections(factory):
    # Busy-wait (test helper) until every factory connection has closed.
    # NOTE(review): unbounded spin loop — test-only helper.
    first_print = True
    while factory.connections:
        if first_print:
            print "waiting for connections to close ... ",
            first_print = False
    print ""
    return True
def print_header(string):
    """Log a visually separated section banner for test output."""
    banner = "\n\n### %s ###"
    _log.info(banner, string)
# Stub standing in for the calvin scheduler in these tests.
class Scheduler_stub(object):
    def trigger_loop(self):
        """ Trigger the loop_once """
        # Reschedules itself via a zero-delay call each time it fires —
        # presumably to keep the twisted reactor's loop ticking; confirm
        # against the real scheduler's contract.
        async.DelayedCall(0, self.trigger_loop)
        return
@pytest.mark.essential
class TestServer(object):
@pytest.inlineCallbacks
def test_default_line_mode(self):
print_header("TEST_DEFAULT_LINE_MODE")
print_header("Setup")
scheduler = Scheduler_stub()
self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop)
self.factory.start('localhost', 8123)
self.conn = None
self.client_socket = None
print_header("Test_Connection")
##################################################################
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
assert not self.factory.connections
assert not self.factory.pending_connections
yield threads.defer_to_thread(self.client_socket.connect, ('localhost', 8123))
yield threads.defer_to_thread(connection_made, self.factory)
assert self.factory.pending_connections
_, self.conn = self.factory.accept()
####################################################################
####################################################################
print_header("Test_Line_Received")
####################################################################
assert self.conn.data_available is False
yield threads.defer_to_thread(self.client_socket.send, "sending string \r\n")
yield threads.defer_to_thread(data_available, self.conn)
assert self.conn.data_get() == "sending string "
print_header("Teardown")
self.factory.stop()
yield threads.defer_to_thread(no_more_connections, self.factory)
@pytest.inlineCallbacks
def test_args_in_line_mode(self):
print_header("TEST_ARGS_IN_LINE_MODE")
print_header("Setup")
scheduler = Scheduler_stub()
self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop, delimiter='end', max_length=3)
self.factory.start('localhost', 8123)
self.conn = None
self.client_socket = None
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
yield threads.defer_to_thread(self.client_socket.connect, ('localhost', 8123))
yield threads.defer_to_thread(connection_made, self.factory)
_, self.conn = self.factory.accept()
print_header("Test_Short_Line_Received")
####################################################################
yield threads.defer_to_thread(self.client_socket.send, "123end")
yield threads.defer_to_thread(data_available, self.conn)
assert self.conn.data_get() == "123"
print_header("Test_Long_Line_Received")
####################################################################
yield threads.defer_to_thread(self.client_socket.send, "1234end")
yield threads.defer_to_thread(data_available, self.conn)
assert self.conn.data_get() == "1234"
print_header("Teardown")
self.factory.stop()
yield threads.defer_to_thread(no_more_connections, self.factory)
    @pytest.inlineCallbacks
    def test_raw_mode(self):
        # Raw (non-line) mode: data is chunked into max_length-sized reads
        # with no delimiter handling.
        print_header("TEST_RAW_MODE")
        print_header("Setup")
        scheduler = Scheduler_stub()
        self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop, mode='raw', max_length=10)
        self.factory.start('localhost', 8123)
        self.conn = None
        self.client_socket = None
        print_header("Test_Connection")
        ##################################################################
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        yield threads.defer_to_thread(self.client_socket.connect, ('localhost', 8123))
        yield threads.defer_to_thread(connection_made, self.factory)
        assert self.factory.pending_connections
        _, self.conn = self.factory.accept()
        # accept() must drain the pending-connection queue.
        assert not self.factory.pending_connections
        print_header("Test_Data_Received")
        ####################################################################
        assert self.conn.data_available is False
        # 34 bytes sent at once must come back as three 10-byte chunks plus
        # a final 4-byte remainder (max_length=10).
        yield threads.defer_to_thread(self.client_socket.send, "abcdefghijklmnopqrstuvxyz123456789")
        yield threads.defer_to_thread(data_available, self.conn)
        assert self.conn.data_get() == "abcdefghij"
        assert self.conn.data_get() == "klmnopqrst"
        assert self.conn.data_get() == "uvxyz12345"
        assert self.conn.data_get() == "6789"
        print_header("Teardown")
        self.factory.stop()
        yield threads.defer_to_thread(no_more_connections, self.factory)
    @pytest.inlineCallbacks
    def test_many_clients(self):
        # 100 simultaneous client connections must all be queued and then
        # fully drained by repeated accept() calls.
        print_header("TEST_MANY_CLIENTS")
        print_header("Setup")
        scheduler = Scheduler_stub()
        self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop, mode='raw', max_length=10)
        self.factory.start('localhost', 8123)
        self.conn = None
        self.client_socket = None
        print_header("Test_Connection")
        ##################################################################
        clients = []
        for i in range(100):
            clients.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
        for c in clients:
            yield threads.defer_to_thread(c.connect, ('localhost', 8123))
        # Helper blocks until all 100 connections have registered.
        yield threads.defer_to_thread(hundred_connection_made, self.factory)
        assert len(self.factory.pending_connections) == 100
        for i in range(100):
            _, self.conn = self.factory.accept()
        assert not self.factory.pending_connections
        # NOTE(review): unlike the sibling tests there is no teardown here
        # (no factory.stop()); confirm whether that is intentional.
| apache-2.0 |
tingzhao/branchtest | neurolabi/gui/ext/gtest/test/gtest_xml_output_unittest.py | 1815 | 14580 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# Command-line flags understood by the Google Test binary under test.
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
# File name Google Test uses when no explicit XML output path is given.
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
# Basename of the C++ test program whose XML output these tests inspect.
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"

# Hard-coded off; when enabled, the expected XML below includes a
# stack-trace fragment via STACK_TRACE_TEMPLATE.
SUPPORTS_STACK_TRACES = False

if SUPPORTS_STACK_TRACES:
    STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
    STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
# Absolute path to the test binary, resolved by the gtest test harness.
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)

# Probe the binary's test list to detect whether typed / type-parameterized
# tests were compiled in on this platform.
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
    [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
    """
    Unit test for Google Test's XML output functionality.
    """

    # This test currently breaks on platforms that do not support typed and
    # type-parameterized tests, so we don't run it under them.
    if SUPPORTS_TYPED_TESTS:
        def testNonEmptyXmlOutput(self):
            """
            Runs a test program that generates a non-empty XML output, and
            tests that the XML output is expected.
            """
            self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)

    def testEmptyXmlOutput(self):
        """Verifies XML output for a Google Test binary without actual tests.

        Runs a test program that generates an empty XML output, and
        tests that the XML output is expected.
        """
        self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)

    def testTimestampValue(self):
        """Checks whether the timestamp attribute in the XML output is valid.

        Runs a test program that generates an empty XML output, and checks if
        the timestamp attribute in the testsuites tag is valid.
        """
        actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
        date_time_str = actual.documentElement.getAttributeNode('timestamp').value
        # datetime.strptime() is only available in Python 2.5+ so we have to
        # parse the expected datetime manually.
        match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
        # BUG FIX: this previously asserted on `re.match` (the function
        # object, which is always truthy), so a malformed timestamp was
        # never reported here and instead crashed below on match.group(1).
        self.assertTrue(
            match,
            'XML datettime string %s has incorrect format' % date_time_str)
        date_time_from_xml = datetime.datetime(
            year=int(match.group(1)), month=int(match.group(2)),
            day=int(match.group(3)), hour=int(match.group(4)),
            minute=int(match.group(5)), second=int(match.group(6)))
        time_delta = abs(datetime.datetime.now() - date_time_from_xml)
        # timestamp value should be near the current local time
        self.assertTrue(time_delta < datetime.timedelta(seconds=600),
                        'time_delta is %s' % time_delta)
        actual.unlink()

    def testDefaultOutputFile(self):
        """
        Confirms that Google Test produces an XML output file with the expected
        default name if no name is explicitly specified.
        """
        output_file = os.path.join(gtest_test_utils.GetTempDir(),
                                   GTEST_DEFAULT_OUTPUT_FILE)
        gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
            'gtest_no_test_unittest')
        try:
            os.remove(output_file)
        # `except OSError as e` (instead of the Python-2-only `, e` form)
        # works on Python 2.6+ and Python 3.
        except OSError as e:
            # A missing file is fine; anything else is a real error.
            if e.errno != errno.ENOENT:
                raise
        p = gtest_test_utils.Subprocess(
            [gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
            working_dir=gtest_test_utils.GetTempDir())
        self.assert_(p.exited)
        self.assertEquals(0, p.exit_code)
        self.assert_(os.path.isfile(output_file))

    def testSuppressedXmlOutput(self):
        """
        Tests that no XML file is generated if the default XML listener is
        shut down before RUN_ALL_TESTS is invoked.
        """
        xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                                GTEST_PROGRAM_NAME + 'out.xml')
        if os.path.isfile(xml_path):
            os.remove(xml_path)
        command = [GTEST_PROGRAM_PATH,
                   '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
                   '--shut_down_xml']
        p = gtest_test_utils.Subprocess(command)
        if p.terminated_by_signal:
            # p.signal is avalable only if p.terminated_by_signal is True.
            self.assertFalse(
                p.terminated_by_signal,
                '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
        else:
            self.assert_(p.exited)
            self.assertEquals(1, p.exit_code,
                              "'%s' exited with code %s, which doesn't match "
                              'the expected exit code %s.'
                              % (command, p.exit_code, 1))
        self.assert_(not os.path.isfile(xml_path))

    def testFilteredTestXmlOutput(self):
        """Verifies XML output when a filter is applied.

        Runs a test program that executes only some tests and verifies that
        non-selected tests do not show up in the XML output.
        """
        self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
                            extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])

    def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
        """
        Returns the xml output generated by running the program gtest_prog_name.
        Furthermore, the program's exit code must be expected_exit_code.
        """
        xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                                gtest_prog_name + 'out.xml')
        gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
        command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
                   extra_args)
        p = gtest_test_utils.Subprocess(command)
        if p.terminated_by_signal:
            self.assert_(False,
                         '%s was killed by signal %d' % (gtest_prog_name, p.signal))
        else:
            self.assert_(p.exited)
            self.assertEquals(expected_exit_code, p.exit_code,
                              "'%s' exited with code %s, which doesn't match "
                              'the expected exit code %s.'
                              % (command, p.exit_code, expected_exit_code))
        actual = minidom.parse(xml_path)
        return actual

    def _TestXmlOutput(self, gtest_prog_name, expected_xml,
                       expected_exit_code, extra_args=None):
        """
        Asserts that the XML document generated by running the program
        gtest_prog_name matches expected_xml, a string containing another
        XML document. Furthermore, the program's exit code must be
        expected_exit_code.
        """
        actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
                                    expected_exit_code)
        expected = minidom.parseString(expected_xml)
        self.NormalizeXml(actual.documentElement)
        self.AssertEquivalentNodes(expected.documentElement,
                                   actual.documentElement)
        expected.unlink()
        actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| bsd-3-clause |
orgito/ansible | test/units/modules/network/aruba/aruba_module.py | 52 | 2518 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
# Directory of canned device output used by the tests, plus a module-level
# cache so each fixture file is read and parsed at most once.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return fixture *name*, parsed as JSON when possible.

    The result is cached by absolute path; non-JSON files fall back to
    their raw text contents.
    """
    full_path = os.path.join(fixture_path, name)
    if full_path in fixture_data:
        return fixture_data[full_path]

    with open(full_path) as handle:
        contents = handle.read()

    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON -- keep the raw string.
        pass

    fixture_data[full_path] = contents
    return contents
class TestArubaModule(ModuleTestCase):
    """Shared plumbing for aruba module unit tests.

    Subclasses set ``self.module`` and override :meth:`load_fixtures`;
    :meth:`execute_module` then drives ``module.main()`` and checks the
    reported ``failed``/``changed``/``commands`` results.
    """

    def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
        """Run the module and assert on its outcome.

        Returns the result dict produced by exit_json/fail_json.
        """
        self.load_fixtures(commands)

        result = self.failed() if failed else self.changed(changed)
        if failed:
            self.assertTrue(result['failed'], result)
        else:
            self.assertEqual(result['changed'], changed, result)

        if commands is not None:
            expected, actual = commands, result['commands']
            if sort:
                expected, actual = sorted(expected), sorted(actual)
            self.assertEqual(expected, actual, result['commands'])

        return result

    def failed(self):
        """Invoke main() and require it to exit via fail_json."""
        with self.assertRaises(AnsibleFailJson) as ctx:
            self.module.main()

        outcome = ctx.exception.args[0]
        self.assertTrue(outcome['failed'], outcome)
        return outcome

    def changed(self, changed=False):
        """Invoke main() and require it to exit via exit_json with *changed*."""
        with self.assertRaises(AnsibleExitJson) as ctx:
            self.module.main()

        outcome = ctx.exception.args[0]
        self.assertEqual(outcome['changed'], changed, outcome)
        return outcome

    def load_fixtures(self, commands=None):
        """Hook for subclasses to install mocked device responses."""
        pass
| gpl-3.0 |
henaras/horizon | openstack_dashboard/api/rest/utils.py | 42 | 5432 | # Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import logging
from django.conf import settings
from django import http
from django.utils import decorators
from oslo_serialization import jsonutils
from horizon import exceptions
log = logging.getLogger(__name__)
class AjaxError(Exception):
    """Exception that carries an explicit HTTP status for the AJAX layer.

    The ``ajax`` decorator turns it into a JSON error response with the
    given status code and the message as the body.
    """

    def __init__(self, http_status, msg):
        super(AjaxError, self).__init__(msg)
        self.http_status = http_status
# Exception classes that map onto a well-defined HTTP status: horizon's
# grouped exception tuples plus our own AjaxError.
http_errors = exceptions.UNAUTHORIZED + exceptions.NOT_FOUND + \
    exceptions.RECOVERABLE + (AjaxError, )
class CreatedResponse(http.HttpResponse):
    """HTTP 201 response pointing at the newly created resource.

    ``location`` becomes the ``Location`` header; an optional ``data``
    payload is serialised as JSON.
    """

    def __init__(self, location, data=None):
        content = ''
        content_type = None
        if data is not None:
            # Stable key order while DEBUG is on makes responses diffable.
            content = jsonutils.dumps(data, sort_keys=settings.DEBUG)
            content_type = 'application/json'
        super(CreatedResponse, self).__init__(status=201, content=content,
                                              content_type=content_type)
        self['Location'] = location
class JSONResponse(http.HttpResponse):
    """HTTP response whose body is *data* serialised as JSON.

    A 204 (No Content) status deliberately carries an empty body.
    """

    def __init__(self, data, status=200):
        body = '' if status == 204 else jsonutils.dumps(data,
                                                        sort_keys=settings.DEBUG)
        super(JSONResponse, self).__init__(
            status=status,
            content=body,
            content_type='application/json',
        )
def ajax(authenticated=True, data_required=False):
    '''Provide a decorator to wrap a view method so that it may exist in an
    entirely AJAX environment:

    - data decoded from JSON as input and data coded as JSON as output
    - result status is coded in the HTTP status code; any non-2xx response
      data will be coded as a JSON string, otherwise the response type
      (always JSON) is specific to the method called.

    if authenticated is true then we'll make sure the current user is
    authenticated.

    If data_required is true then we'll assert that there is a JSON body
    present.

    The wrapped view method should return either:

    - JSON serialisable data
    - an object of the django http.HttpResponse subclass (one of JSONResponse
      or CreatedResponse is suggested)
    - nothing

    Methods returning nothing (or None explicitly) will result in a 204 "NO
    CONTENT" being returned to the caller.
    '''
    def decorator(function, authenticated=authenticated,
                  data_required=data_required):
        # authenticated/data_required are re-bound as defaults so the
        # closure captures their values at decoration time.
        @functools.wraps(function,
                         assigned=decorators.available_attrs(function))
        def _wrapped(self, request, *args, **kw):
            # Guard clauses: reject unauthenticated and non-AJAX callers
            # before doing any work.
            if authenticated and not request.user.is_authenticated():
                return JSONResponse('not logged in', 401)
            if not request.is_ajax():
                return JSONResponse('request must be AJAX', 400)

            # decode the JSON body if present
            request.DATA = None
            if request.body:
                try:
                    request.DATA = json.loads(request.body)
                except (TypeError, ValueError) as e:
                    return JSONResponse('malformed JSON request: %s' % e, 400)
            if data_required:
                if not request.DATA:
                    return JSONResponse('request requires JSON body', 400)

            # invoke the wrapped function, handling exceptions sanely
            try:
                data = function(self, request, *args, **kw)
                if isinstance(data, http.HttpResponse):
                    # View built its own response; pass it through untouched.
                    return data
                elif data is None:
                    return JSONResponse('', status=204)
                return JSONResponse(data)
            except http_errors as e:
                # exception was raised with a specific HTTP status;
                # AjaxError carries .http_status, horizon exceptions .code.
                if hasattr(e, 'http_status'):
                    http_status = e.http_status
                elif hasattr(e, 'code'):
                    http_status = e.code
                else:
                    log.exception('HTTP exception with no status/code')
                    return JSONResponse(str(e), 500)
                return JSONResponse(str(e), http_status)
            except Exception as e:
                # Anything unexpected becomes a generic 500.
                log.exception('error invoking apiclient')
                return JSONResponse(str(e), 500)

        return _wrapped
    return decorator
def parse_filters_kwargs(request, client_keywords=None):
    """Extract REST filter parameters from the request GET args.

    Client processes some keywords separately from filters and takes
    them as separate inputs. This will ignore those keys to avoid
    potential conflicts.

    :param request: incoming HTTP request; only ``request.GET`` is read.
    :param client_keywords: container of parameter names routed into
        ``kwargs`` instead of ``filters``; defaults to none.
    :returns: ``(filters, kwargs)`` pair of dicts.
    """
    # A mutable default argument ({}) is a Python anti-pattern; use None
    # as the sentinel instead.  Behavior for all callers is unchanged.
    client_keywords = client_keywords or {}
    filters = {}
    kwargs = {}
    for param in request.GET:
        if param in client_keywords:
            kwargs[param] = request.GET[param]
        else:
            filters[param] = request.GET[param]
    return filters, kwargs
| apache-2.0 |
lihui7115/ChromiumGStreamerBackend | tools/telemetry/telemetry/internal/backends/facebook_credentials_backend.py | 31 | 1144 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.backends import form_based_credentials_backend
class FacebookCredentialsBackend(
    form_based_credentials_backend.FormBasedCredentialsBackend):
  """Form-based login backend for the plain-HTTP Facebook front page."""

  @property
  def credentials_type(self):
    # Key under which credentials are looked up in the credentials store.
    return 'facebook'

  @property
  def url(self):
    return 'http://www.facebook.com/'

  @property
  def logged_in_javascript(self):
    """Evaluates to true iff already logged in."""
    return ('document.getElementById("fbNotificationsList")!== null || '
            'document.getElementById("m_home_notice")!== null')

  @property
  def login_form_id(self):
    return 'login_form'

  @property
  def login_input_id(self):
    return 'email'

  @property
  def password_input_id(self):
    return 'pass'
class FacebookCredentialsBackend2(FacebookCredentialsBackend):
  """ Facebook credential backend for https client. """

  @property
  def url(self):
    # Same flow as the parent, but over TLS.
    return 'https://www.facebook.com/'

  @property
  def credentials_type(self):
    return 'facebook2'
| bsd-3-clause |
TheRealPanPan/rc-rt | rc_rt/board/board.py | 1 | 2290 |
from .plate.plate import Plate
class Board:
    """16x16 game board assembled from four 8x8 quadrant plates.

    ``working_plates`` maps row key -> {column key -> tile dict}; each
    tile dict carries at least a ``"urdl"`` wall bitmask (see __str__).
    """

    def __init__(self, plates):
        """Build the board from a dict keyed "NW", "NE", "SW", "SE".

        Each plate is rotated into place and its coordinates offset so
        the four quadrants tile a single 16x16 grid.  Note the plates
        are mutated in place by rotate()/offset_coord().
        """
        self.working_plates = {}
        # NW anchors the origin and is used as-is.
        self.working_plates.update(plates["NW"].tiles)
        NE = plates["NE"]
        NE.rotate(90)
        NE.offset_coord(x_offset=8)
        self._add_to_working_plates(NE.tiles)
        SW = plates["SW"]
        SW.rotate(270)
        SW.offset_coord(y_offset=8)
        self._add_to_working_plates(SW.tiles)
        SE = plates["SE"]
        SE.rotate(180)
        SE.offset_coord(y_offset=8, x_offset=8)
        self._add_to_working_plates(SE.tiles)

    def _add_to_working_plates(self, plates):
        """Merge a quadrant's {row: {col: tile}} mapping row by row."""
        for row in plates:
            if row not in self.working_plates:
                self.working_plates[row] = {}
            self.working_plates[row].update(plates[row])

    @staticmethod
    def generate_board(plates_file):
        """Load every plate file and assemble a Board from the first four."""
        known_plates = []  # renamed from the original typo 'knwon_plates'
        for pf in plates_file:
            known_plates.append(
                Plate.from_file(pf)
            )
        # TODO Really Select Plates :p (
        #   NW NE    1 2
        #   SW SE    3 4
        # )
        selected_plates = {
            "NW": known_plates[0],
            "NE": known_plates[1],
            "SW": known_plates[2],
            "SE": known_plates[3]
        }
        return Board(plates=selected_plates)

    # TODO: placeholder objective
    def generate_objective(self):
        return {
            "x": 1,
            "y": 2
        }

    # TODO: placeholder.
    # BUG FIX: the original signature was `validate(objective, way)` with no
    # `self`, so any instance call (board.validate(obj, way)) raised
    # TypeError.  Adding `self` matches every sibling method.
    def validate(self, objective, way):
        return True

    # TODO: the four *_authorized placeholders always allow movement.
    def is_up_authorized(self, x, y):
        return True

    def is_right_authorized(self, x, y):
        return True

    def is_left_authorized(self, x, y):
        return True

    def is_down_authorized(self, x, y):
        return True

    # TODO: placeholder destination
    def where_do_i_stop(self, startx, starty, direction):
        return {
            "x": 5,
            "y": 5
        }

    def __str__(self):
        """Render the grid: rows sorted numerically, tiles as %04d urdl masks."""
        data = ""
        for y in sorted(self.working_plates.keys(), key=int):
            data = "%s\n%4s : " % (data, y)
            for x in sorted(self.working_plates[y].keys(), key=int):
                data = "%s %04d" % (data, self.working_plates[y][x]["urdl"])
            data = "%s\n" % data
        return data
| mit |
zqzhang/crosswalk-test-suite | tools/allpairs-plus/allpairs-plus.py | 19 | 10659 | #!/usr/bin/env python
import sys
import os
import itertools
import shutil
import getopt
import re
import conf
import pdb
import traceback
import metacomm.combinatorics.all_pairs2
all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2
def usage():
    # Print command-line help for this script (Python 2 print statements).
    print 'allpairs-plus.py usage:'
    print '-h: print help message.'
    print '-b: use baseline file as part of output, e.g. baseline.txt'
def do_Selfcom(self_combin_file, out_file):
    # Append the lines of *self_combin_file* to *out_file*, dropping the
    # "null" placeholders and the comma artifacts their removal leaves.
    try:
        file = open(self_combin_file)
        allpairs_in = open(out_file, 'a+')
        while True:
            # Strip "null" placeholders, then collapse the ",," and the
            # trailing "," that removing them can leave behind.
            line = file.readline().replace("null", "")
            line = line.replace(",,", ",")
            if (line[-1:] == ","):
                line = line[:-1]
            # NOTE(review): the EOF test runs *after* the comma strip, so a
            # line consisting of a single comma ends the loop early --
            # confirm that no legitimate input line can be just ",".
            if not line:
                break
            allpairs_in.writelines(line + "\n")
        file.close()
        allpairs_in.close()
        return
    except Exception as e:
        # Errors are only printed; the function silently returns None.
        print Exception, ":", e
def fileline_count(fp):
    """Return the number of lines in the file at path *fp*.

    The original one-liner (`len(open(fp).readlines())`) leaked the file
    handle; a context manager guarantees it is closed even on error.
    """
    with open(fp) as f:
        return len(f.readlines())
def del_Seed(in_file):
    # Split the seed file into per-prefix "self" input files, expand each
    # multi-line group with the allpairs algorithm, concatenate the results
    # into conf.selfcomb_file, and finally generate the overall output.
    try:
        caseline = ""
        old_list = []
        format_list = []
        de = ""  # NOTE(review): never used below
        row = 0
        file = open(in_file)
        items = []
        self_file = []
        s_name = p_name = ""
        # Recreate a clean ./self working directory.
        if (os.path.isdir("self")):
            do_Clear(conf.path + "/self")
        os.mkdir(conf.path + "/self")
        while True:
            p_name = s_name
            line = file.readline()
            if not line:
                break
            line = line.strip('\n\r')
            items = line.split(":")
            # Group key is the part of the field name before the first '-'.
            s_name = items[0].split("-")[0]
            # NOTE(review): both branches below are identical -- presumably
            # a leftover from an earlier version; confirm before simplifying.
            if ((p_name != s_name) and (p_name != "")):
                fp = open(conf.path + "/self/" + s_name + "_input.txt", 'a+')
                fp.writelines(line + "\n")
            else:
                fp = open(conf.path + "/self/" + s_name + "_input.txt", 'a+')
                fp.writelines(line + "\n")
            if (s_name != p_name):
                self_file.append(s_name)
            fp.close()
        file.close()
        # Start the combined file from scratch.
        if (os.path.isfile(conf.selfcomb_file)):
            os.remove(conf.selfcomb_file)
        for i in range(0, len(self_file)):
            line_count = fileline_count(
                conf.path +
                "/self/" +
                self_file[i] +
                "_input.txt")
            if (line_count >= 2):
                # Multi-line group: run pairwise combination over its rows.
                lists = [[] for m in range(line_count)]
                open_input_file = open(
                    conf.path +
                    "/self/" +
                    self_file[i] +
                    "_input.txt",
                    'a+')
                while True:
                    line = open_input_file.readline()
                    if not line:
                        break
                    line = line.strip('\n\r')
                    items = line.split(":")
                    get_item = items[1].split(",")
                    get_item1 = get_item  # NOTE(review): unused alias
                    # Remember rows containing "null" placeholders so their
                    # values can be prepended to every generated case.
                    if (str(get_item).find("null")) > 1:
                        for element in range(0, len(get_item)):
                            if get_item[element] == "null":
                                old_list = old_list + get_item
                    for g in get_item:
                        lists[row].append(g)
                    row = row + 1
                input_pair = all_pairs(lists)
                open_input_file.close()
                output_pair = open(
                    conf.path +
                    "/self/" +
                    self_file[i] +
                    "_output.txt",
                    'a+')
                for e, v in enumerate(input_pair):
                    for c in range(0, len(v)):
                        caseline = caseline + v[c]
                        caseline = caseline.replace("null", "") + ","
                    get_output_item = caseline[:-1].split(",")
                    get_output_item = old_list + get_output_item
                    # Deduplicate while keeping order, then re-join.
                    format_list = ','.join(dele_list(get_output_item))
                    # print "get-----",get_output_item
                    output_pair.writelines(self_file[i] + ":" + format_list)
                output_pair.close()
            else:
                # Single-line group: copy it through unchanged.
                open_input_file = open(
                    conf.path +
                    "/self/" +
                    self_file[i] +
                    "_input.txt",
                    'r')
                output_pair = open(
                    conf.path +
                    "/self/" +
                    self_file[i] +
                    "_output.txt",
                    'a+')
                while True:
                    line = open_input_file.readline()
                    if not line:
                        break
                    line = line.strip('\n\r')
                    output_pair.writelines(line)
                output_pair.close()
                open_input_file .close()
            # 1*********input_seed -> selfcomb.txt
            # if more self combination, each self generate itself output
            # file,finally all self_input generate one selfcomb.txt
            do_Selfcom(
                conf.path +
                "/self/" +
                self_file[i] +
                "_output.txt",
                conf.selfcomb_file)
            # Reset per-group accumulators before the next group.
            row = 0
            caseline = ""
            format_list = []
            get_output_item = []
            old_list = []
        # 2*********selfcomb -> output file by allpairs
        gen_selfcomb_File(conf.selfcomb_file, in_file)
    except Exception as e:
        print Exception, ":", e
        print traceback.format_exc()
def dele_list(old_list):
    """Return a copy of *old_list* with duplicates removed, keeping the
    first occurrence of each element (order-preserving de-duplication).

    The original wrapped this pure list manipulation in a broad
    try/except that printed the error and implicitly returned None,
    hiding bugs from callers; plain code fails loudly instead.
    """
    newList = []
    for x in old_list:
        if x not in newList:
            newList.append(x)
    return newList
def gen_selfcomb_File(comb_file, in_file):
    # Turn the combined per-field file (field:val1,val2,...) into the final
    # tab-separated pairwise output in conf.output_file: first a header row
    # of field names, then one row per generated case.
    # NOTE(review): *in_file* is accepted but never used -- confirm intent.
    try:
        open_output_file = open(conf.output_file, 'a+')
        caseline = ""  # NOTE(review): unused below
        get_items = ""
        get_case = ""
        get_out_put = ""
        row = 0
        line_count = fileline_count(comb_file)
        if (line_count >= 1):
            lists = [[] for m in range(line_count)]
            open_input_file = open(comb_file)
            # First pass: collect the field names for the header row.
            while True:
                line = open_input_file.readline()
                if not line:
                    break
                line = line.strip('\n\r')
                items = line.split(":")
                get_items = get_items + items[0].split("-")[0] + "\t"
            open_output_file.writelines(get_items.rstrip("\t") + "\n")
            open_input_file.close()
            # Second pass: collect each field's value list.
            open_input_file = open(comb_file)
            for i in range(0, len(lists)):
                line = open_input_file.readline()
                if not line:
                    break
                line = line.strip('\n\r')
                items = line.split(":")  # items[0]=field;#item[1]=value
                # Slice past "field:" so values containing ':' survive.
                value = line[len(items[0]) + 1:]
                get_item = value.split(",")
                for g in get_item:
                    lists[row].append(g)
                row = row + 1
            if len(lists) > 1:
                input_pair = all_pairs(lists)
                for e, v in enumerate(input_pair):
                    for c in range(0, len(v)):
                        get_case = get_case + v[c] + "\t"
                    # "null" markers disappear; "comma" escapes a literal ','.
                    get_out_put = get_case.replace(
                        "null",
                        "").replace(
                        "comma",
                        ",").strip("\t")
                    open_output_file.writelines(get_out_put + "\n")
                    # print get_case.replace("null","").rstrip("\t")
                    get_case = ""
        open_output_file.close()
        return "Generate selfcombination file ------------------------->O.K"
    except:
        # NOTE(review): bare except swallows everything, including SystemExit.
        print traceback.format_exc()
def do_Clear(sourceDir):
    # Remove *sourceDir*, whether it is a directory tree or a single file.
    # Missing paths are silently ignored via the exists() guard.
    try:
        if (os.path.exists(sourceDir)):
            if (os.path.isdir(sourceDir)):
                shutil.rmtree(sourceDir)
            else:
                os.remove(sourceDir)
    except IOError as e:
        # NOTE(review): on Python 2 shutil/os failures raise OSError, which
        # this IOError handler will NOT catch -- confirm intent.
        print Exception, "Clear :" + sourceDir + " ------------------------->error", e
def integrate_baseline(baseline_file):
    # Merge a previously generated baseline into conf.output_file so that
    # existing rows keep their old values and only new rows/columns change.
    # NOTE(review): the input file handles opened here are never closed.
    output_bl = open(baseline_file)
    output_bl_list = output_bl.readlines()
    output = open(conf.output_file)
    output_list = output.readlines()
    # Column counts (tab-separated) of baseline vs. freshly generated output.
    col_outbl = len(output_bl_list[0].split("\t"))
    col_out = len(output_list[0].split("\t"))
    # Only exist parameter changed
    if col_outbl == col_out:
        print ">>>>>>> Only exist parameters changed"
        # Same schema: keep the baseline rows, append only brand-new rows.
        for var in output_list:
            if var not in output_bl_list:
                output_bl_list.append(var)
        print ">>>>>>> Generate output with baseline ------------>OK"
        output_withbaseline = open(conf.output_file, "w")
        output_withbaseline.writelines(output_bl_list)
        output_withbaseline.close()
        print ">>>>>>> END"
        sys.exit()
    # New parameters added, and maybe exist parameters also changed
    print ">>>>>>> New parameters added, and maybe exist parameters also changed"
    # Map each row's baseline-width prefix to its new extra columns.
    out_dict = {}
    for var in output_list:
        list_row = var.split("\t")
        key = '\t'.join(list_row[0:col_outbl])
        value = '\t'.join(list_row[col_outbl:col_out])
        out_dict[key] = value
    output_list_new = []
    i = 0
    for var in output_bl_list:
        var = var.strip('\n')
        list_row = var.split("\t")
        key = '\t'.join(list_row)
        value = out_dict.get(key)
        # Wrap the fallback index back into range when it overruns.
        while (i >= len(out_dict)):
            i = i / 2
        if value is None:
            # Unmatched baseline row: borrow an arbitrary value column.
            # NOTE(review): dict ordering is not guaranteed here, so the
            # borrowed value is effectively arbitrary -- confirm intent.
            value = out_dict.values()[i]
        output_list_new.append("\t".join((var, value)))
        i = i + 1
    print ">>>>>>> Generate output with baseline ------------>OK"
    # Append freshly generated rows not already present.
    for var in output_list:
        if var not in output_list_new:
            output_list_new.append(var)
    output_withbaseline = open(conf.output_file, "w")
    output_withbaseline.writelines(output_list_new)
    output_withbaseline.close()
    print ">>>>>>> END"
def main():
    """Entry point: clear previous outputs/seeds, then process CLI options.

    Options:
        -b <file>  integrate the given baseline file into the output
        -h         print usage and exit
    """
    try:
        do_Clear("./output/output.txt")
        del_Seed(conf.seed_file)
        do_Clear("./self")
        opts, args = getopt.getopt(sys.argv[1:], "hb:")
        for op, val in opts:
            if op == "-b":
                baseline = val
                print "use", baseline, "as baseline"
                integrate_baseline(baseline)
            elif op == "-h":
                usage()
                sys.exit()
    # NOTE(review): broad except also swallows SystemExit-adjacent errors
    # and getopt.GetoptError without usage output — confirm intended.
    except Exception as e:
        print Exception, ":", e
# Script entry point when executed directly.
if __name__ == "__main__":
    main()
| bsd-3-clause |
creasyw/IMTAphy | framework/library/PyConfig/openwns/rng.py | 1 | 1611 | ###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import time
import unittest
class RNG(object):
    """Configuration holder for the simulator's random number generator."""
    # Restrict instances to exactly these two attributes (no __dict__).
    __slots__ = ["useRandomSeed", "seed"]

    def __init__(self, useRandomSeed):
        """If useRandomSeed is true, derive the seed from the current time;
        otherwise use the fixed default seed 4711."""
        super(RNG, self).__init__()
        self.useRandomSeed = useRandomSeed
        self.seed = 4711 # all but not zero
        if (self.useRandomSeed):
            # Time-based seed, coarsened to ~10-second resolution.
            self.seed = int(round (time.time() / 10))
            print "Using random seed. Seed of RNG is: " + str(self.seed)
| gpl-2.0 |
ahmedaljazzar/edx-platform | lms/djangoapps/discussion_api/serializers.py | 13 | 16487 | """
Discussion API serializers
"""
from urllib import urlencode
from urlparse import urlunparse
from django.contrib.auth.models import User as DjangoUser
from django.core.exceptions import ValidationError
from django.urls import reverse
from rest_framework import serializers
from discussion_api.permissions import NON_UPDATABLE_COMMENT_FIELDS, NON_UPDATABLE_THREAD_FIELDS, get_editable_fields
from discussion_api.render import render_body
from django_comment_client.utils import is_comment_too_deep
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_MODERATOR, Role
from django_comment_common.utils import get_course_discussion_settings
from lms.djangoapps.django_comment_client.utils import course_discussion_division_enabled, get_group_names_by_id
from lms.lib.comment_client.comment import Comment
from lms.lib.comment_client.thread import Thread
from lms.lib.comment_client.user import User as CommentClientUser
from lms.lib.comment_client.utils import CommentClientRequestError
def get_context(course, request, thread=None):
    """
    Returns a context appropriate for use with ThreadSerializer or
    (if thread is provided) CommentSerializer.
    """
    # TODO: cache staff_user_ids and ta_user_ids if we need to improve perf
    staff_user_ids = set()
    privileged_roles = Role.objects.filter(
        name__in=[FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR],
        course_id=course.id
    )
    for privileged_role in privileged_roles:
        for member in privileged_role.users.all():
            staff_user_ids.add(member.id)

    ta_user_ids = set()
    ta_roles = Role.objects.filter(name=FORUM_ROLE_COMMUNITY_TA, course_id=course.id)
    for ta_role in ta_roles:
        for member in ta_role.users.all():
            ta_user_ids.add(member.id)

    requesting_user = request.user
    # The comments-service view of the requester, scoped to this course.
    cc_requester = CommentClientUser.from_django_user(requesting_user).retrieve()
    cc_requester["course_id"] = course.id

    discussion_settings = get_course_discussion_settings(course.id)
    requester_is_privileged = (
        requesting_user.id in staff_user_ids or requesting_user.id in ta_user_ids
    )
    return {
        "course": course,
        "request": request,
        "thread": thread,
        "discussion_division_enabled": course_discussion_division_enabled(discussion_settings),
        "group_ids_to_names": get_group_names_by_id(discussion_settings),
        "is_requester_privileged": requester_is_privileged,
        "staff_user_ids": staff_user_ids,
        "ta_user_ids": ta_user_ids,
        "cc_requester": cc_requester,
    }
def validate_not_blank(value):
    """
    Reject values that are empty or consist solely of whitespace.

    Raises: ValidationError
    """
    stripped = value.strip()
    if stripped:
        return
    raise ValidationError("This field may not be blank.")
class _ContentSerializer(serializers.Serializer):
    """
    A base class for thread and comment serializers.

    Subclasses set ``non_updatable_fields`` to the set of field names that
    may only be supplied on creation, not on update.
    """
    # Read-only identity/metadata fields supplied by the comments service.
    id = serializers.CharField(read_only=True) # pylint: disable=invalid-name
    author = serializers.SerializerMethodField()
    author_label = serializers.SerializerMethodField()
    created_at = serializers.CharField(read_only=True)
    updated_at = serializers.CharField(read_only=True)
    raw_body = serializers.CharField(source="body", validators=[validate_not_blank])
    rendered_body = serializers.SerializerMethodField()
    abuse_flagged = serializers.SerializerMethodField()
    voted = serializers.SerializerMethodField()
    vote_count = serializers.SerializerMethodField()
    editable_fields = serializers.SerializerMethodField()

    # Overridden by subclasses; fields listed here are create-only.
    non_updatable_fields = set()

    def __init__(self, *args, **kwargs):
        super(_ContentSerializer, self).__init__(*args, **kwargs)
        # Dynamically attach DRF per-field validators that reject edits to
        # create-only fields when an instance (i.e. an update) is present.
        for field in self.non_updatable_fields:
            setattr(self, "validate_{}".format(field), self._validate_non_updatable)

    def _validate_non_updatable(self, value):
        """Ensure that a field is not edited in an update operation."""
        if self.instance:
            raise ValidationError("This field is not allowed in an update.")
        return value

    def _is_user_privileged(self, user_id):
        """
        Returns a boolean indicating whether the given user_id identifies a
        privileged user.
        """
        return user_id in self.context["staff_user_ids"] or user_id in self.context["ta_user_ids"]

    def _is_anonymous(self, obj):
        """
        Returns a boolean indicating whether the content should be anonymous to
        the requester.
        """
        # "anonymous_to_peers" content is still visible to privileged users.
        return (
            obj["anonymous"] or
            obj["anonymous_to_peers"] and not self.context["is_requester_privileged"]
        )

    def get_author(self, obj):
        """
        Returns the author's username, or None if the content is anonymous.
        """
        return None if self._is_anonymous(obj) else obj["username"]

    def _get_user_label(self, user_id):
        """
        Returns the role label (i.e. "Staff" or "Community TA") for the user
        with the given id.
        """
        return (
            "Staff" if user_id in self.context["staff_user_ids"] else
            "Community TA" if user_id in self.context["ta_user_ids"] else
            None
        )

    def get_author_label(self, obj):
        """
        Returns the role label for the content author.
        """
        if self._is_anonymous(obj) or obj["user_id"] is None:
            return None
        else:
            # user_id comes back from the comments service as a string.
            user_id = int(obj["user_id"])
            return self._get_user_label(user_id)

    def get_rendered_body(self, obj):
        """
        Returns the rendered body content.
        """
        return render_body(obj["body"])

    def get_abuse_flagged(self, obj):
        """
        Returns a boolean indicating whether the requester has flagged the
        content as abusive.
        """
        return self.context["cc_requester"]["id"] in obj.get("abuse_flaggers", [])

    def get_voted(self, obj):
        """
        Returns a boolean indicating whether the requester has voted for the
        content.
        """
        return obj["id"] in self.context["cc_requester"]["upvoted_ids"]

    def get_vote_count(self, obj):
        """
        Returns the number of votes for the content.
        """
        return obj.get("votes", {}).get("up_count", 0)

    def get_editable_fields(self, obj):
        """
        Return the list of the fields the requester can edit
        """
        return sorted(get_editable_fields(obj, self.context))
class ThreadSerializer(_ContentSerializer):
    """
    A serializer for thread data.

    N.B. This should not be used with a comment_client Thread object that has
    not had retrieve() called, because of the interaction between DRF's attempts
    at introspection and Thread's __getattr__.
    """
    course_id = serializers.CharField()
    topic_id = serializers.CharField(source="commentable_id", validators=[validate_not_blank])
    group_id = serializers.IntegerField(required=False, allow_null=True)
    group_name = serializers.SerializerMethodField()
    type = serializers.ChoiceField(
        source="thread_type",
        choices=[(val, val) for val in ["discussion", "question"]]
    )
    title = serializers.CharField(validators=[validate_not_blank])
    pinned = serializers.SerializerMethodField(read_only=True)
    closed = serializers.BooleanField(read_only=True)
    following = serializers.SerializerMethodField()
    comment_count = serializers.SerializerMethodField(read_only=True)
    unread_comment_count = serializers.SerializerMethodField(read_only=True)
    comment_list_url = serializers.SerializerMethodField()
    endorsed_comment_list_url = serializers.SerializerMethodField()
    non_endorsed_comment_list_url = serializers.SerializerMethodField()
    read = serializers.BooleanField(required=False)
    has_endorsed = serializers.BooleanField(source="endorsed", read_only=True)
    response_count = serializers.IntegerField(source="resp_total", read_only=True, required=False)

    # These fields may only be supplied on thread creation, not update.
    non_updatable_fields = NON_UPDATABLE_THREAD_FIELDS

    def __init__(self, *args, **kwargs):
        super(ThreadSerializer, self).__init__(*args, **kwargs)
        # Compensate for the fact that some threads in the comments service do
        # not have the pinned field set
        if self.instance and self.instance.get("pinned") is None:
            self.instance["pinned"] = False

    def get_pinned(self, obj):
        """
        Compensate for the fact that some threads in the comments service do
        not have the pinned field set.
        """
        return bool(obj["pinned"])

    def get_group_name(self, obj):
        """
        Returns the name of the group identified by the thread's group_id.
        """
        return self.context["group_ids_to_names"].get(obj["group_id"])

    def get_following(self, obj):
        """
        Returns a boolean indicating whether the requester is following the
        thread.
        """
        return obj["id"] in self.context["cc_requester"]["subscribed_thread_ids"]

    def get_comment_list_url(self, obj, endorsed=None):
        """
        Returns the URL to retrieve the thread's comments, optionally including
        the endorsed query parameter.

        Returns None for combinations that do not apply: "question" threads
        require an endorsed filter; "discussion" threads forbid one.
        """
        if (
                (obj["thread_type"] == "question" and endorsed is None) or
                (obj["thread_type"] == "discussion" and endorsed is not None)
        ):
            return None
        path = reverse("comment-list")
        query_dict = {"thread_id": obj["id"]}
        if endorsed is not None:
            query_dict["endorsed"] = endorsed
        return self.context["request"].build_absolute_uri(
            urlunparse(("", "", path, "", urlencode(query_dict), ""))
        )

    def get_endorsed_comment_list_url(self, obj):
        """
        Returns the URL to retrieve the thread's endorsed comments.
        """
        return self.get_comment_list_url(obj, endorsed=True)

    def get_non_endorsed_comment_list_url(self, obj):
        """
        Returns the URL to retrieve the thread's non-endorsed comments.
        """
        return self.get_comment_list_url(obj, endorsed=False)

    def get_comment_count(self, obj):
        """
        Increments comment count to include post and returns total count of
        contributions (i.e. post + responses + comments) for the thread
        """
        return obj["comments_count"] + 1

    def get_unread_comment_count(self, obj):
        """
        Returns the number of unread comments. If the thread has never been read,
        this additionally includes 1 for the post itself, in addition to its responses and
        comments.
        """
        if not obj["read"] and obj["comments_count"] == obj["unread_comments_count"]:
            return obj["unread_comments_count"] + 1
        return obj["unread_comments_count"]

    def create(self, validated_data):
        # The requester is always recorded as the thread author.
        thread = Thread(user_id=self.context["cc_requester"]["id"], **validated_data)
        thread.save()
        return thread

    def update(self, instance, validated_data):
        for key, val in validated_data.items():
            instance[key] = val
        instance.save()
        return instance
class CommentSerializer(_ContentSerializer):
    """
    A serializer for comment data.

    N.B. This should not be used with a comment_client Comment object that has
    not had retrieve() called, because of the interaction between DRF's attempts
    at introspection and Comment's __getattr__.
    """
    thread_id = serializers.CharField()
    parent_id = serializers.CharField(required=False, allow_null=True)
    endorsed = serializers.BooleanField(required=False)
    endorsed_by = serializers.SerializerMethodField()
    endorsed_by_label = serializers.SerializerMethodField()
    endorsed_at = serializers.SerializerMethodField()
    child_count = serializers.IntegerField(read_only=True)
    children = serializers.SerializerMethodField(required=False)

    # These fields may only be supplied on comment creation, not update.
    non_updatable_fields = NON_UPDATABLE_COMMENT_FIELDS

    def __init__(self, *args, **kwargs):
        # Callers may pass remove_fields to strip serializer fields (e.g. to
        # omit "children" when rendering a flat list).
        remove_fields = kwargs.pop('remove_fields', None)
        super(CommentSerializer, self).__init__(*args, **kwargs)
        if remove_fields:
            # for multiple fields in a list
            for field_name in remove_fields:
                self.fields.pop(field_name)

    def get_endorsed_by(self, obj):
        """
        Returns the username of the endorsing user, if the information is
        available and would not identify the author of an anonymous thread.
        """
        endorsement = obj.get("endorsement")
        if endorsement:
            endorser_id = int(endorsement["user_id"])
            # Avoid revealing the identity of an anonymous non-staff question
            # author who has endorsed a comment in the thread
            if not (
                    self._is_anonymous(self.context["thread"]) and
                    not self._is_user_privileged(endorser_id)
            ):
                return DjangoUser.objects.get(id=endorser_id).username
        return None

    def get_endorsed_by_label(self, obj):
        """
        Returns the role label (i.e. "Staff" or "Community TA") for the
        endorsing user
        """
        endorsement = obj.get("endorsement")
        if endorsement:
            return self._get_user_label(int(endorsement["user_id"]))
        else:
            return None

    def get_endorsed_at(self, obj):
        """
        Returns the timestamp for the endorsement, if available.
        """
        endorsement = obj.get("endorsement")
        return endorsement["time"] if endorsement else None

    def get_children(self, obj):
        # Recursively serialize child comments with the same context.
        return [
            CommentSerializer(child, context=self.context).data
            for child in obj.get("children", [])
        ]

    def to_representation(self, data):
        data = super(CommentSerializer, self).to_representation(data)
        # Django Rest Framework v3 no longer includes None values
        # in the representation. To maintain the previous behavior,
        # we do this manually instead.
        if 'parent_id' not in data:
            data["parent_id"] = None
        return data

    def validate(self, attrs):
        """
        Ensure that parent_id identifies a comment that is actually in the
        thread identified by thread_id and does not violate the configured
        maximum depth.
        """
        parent = None
        parent_id = attrs.get("parent_id")
        if parent_id:
            try:
                parent = Comment(id=parent_id).retrieve()
            except CommentClientRequestError:
                # A failed lookup is treated the same as a missing parent.
                pass
            if not (parent and parent["thread_id"] == attrs["thread_id"]):
                raise ValidationError(
                    "parent_id does not identify a comment in the thread identified by thread_id."
                )
        if is_comment_too_deep(parent):
            raise ValidationError({"parent_id": ["Comment level is too deep."]})
        return attrs

    def create(self, validated_data):
        # The requester is always recorded as the comment author; the course
        # is taken from the parent thread in the context.
        comment = Comment(
            course_id=self.context["thread"]["course_id"],
            user_id=self.context["cc_requester"]["id"],
            **validated_data
        )
        comment.save()
        return comment

    def update(self, instance, validated_data):
        for key, val in validated_data.items():
            instance[key] = val
            # TODO: The comments service doesn't populate the endorsement
            # field on comment creation, so we only provide
            # endorsement_user_id on update
            if key == "endorsed":
                instance["endorsement_user_id"] = self.context["cc_requester"]["id"]
        instance.save()
        return instance
class DiscussionTopicSerializer(serializers.Serializer):
    """
    Serializes a discussion topic: its id, name, thread-list URL, and its
    child topics (each rendered recursively with this same serializer).
    """
    id = serializers.CharField(read_only=True)  # pylint: disable=invalid-name
    name = serializers.CharField(read_only=True)
    thread_list_url = serializers.CharField(read_only=True)
    children = serializers.SerializerMethodField()

    def get_children(self, obj):
        """
        Serialize the topic's children; an empty list when there are none.
        """
        child_topics = obj.children
        if not child_topics:
            return []
        return [DiscussionTopicSerializer(child_topic).data for child_topic in child_topics]

    def create(self, validated_data):
        """
        Overriden create abstract method
        """
        pass

    def update(self, instance, validated_data):
        """
        Overriden update abstract method
        """
        pass
| agpl-3.0 |
Ingenico-ePayments/connect-sdk-python2 | ingenico/connect/sdk/domain/payment/definitions/approve_payment_mobile_payment_method_specific_output.py | 2 | 1458 | # -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
class ApprovePaymentMobilePaymentMethodSpecificOutput(DataObject):
    """
    | Mobile payment specific response data
    """

    # Backing storage for the void_response_id property.
    __void_response_id = None

    @property
    def void_response_id(self):
        """
        | Result of the authorization reversal request

        | Possible values are:

        * 00 - Successful reversal
        * 0, 8 or 11 - Reversal request submitted
        * 5 or 55 - Reversal request declined or referred
        * empty or 98 - The provider did not provide a response

        Type: str
        """
        return self.__void_response_id

    @void_response_id.setter
    def void_response_id(self, value):
        # Raw provider value is stored as-is; no validation is performed.
        self.__void_response_id = value

    def to_dictionary(self):
        # Serialize to the API's camelCase wire format; unset fields are omitted.
        dictionary = super(ApprovePaymentMobilePaymentMethodSpecificOutput, self).to_dictionary()
        if self.void_response_id is not None:
            dictionary['voidResponseId'] = self.void_response_id
        return dictionary

    def from_dictionary(self, dictionary):
        # Populate this object from a decoded API response; returns self for chaining.
        super(ApprovePaymentMobilePaymentMethodSpecificOutput, self).from_dictionary(dictionary)
        if 'voidResponseId' in dictionary:
            self.void_response_id = dictionary['voidResponseId']
        return self
| mit |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/botocore/serialize.py | 3 | 26651 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Protocol input serializes.
This module contains classes that implement input serialization
for the various AWS protocol types.
These classes essentially take user input, a model object that
represents what the expected input should look like, and it returns
a dictionary that contains the various parts of a request. A few
high level design decisions:
* Each protocol type maps to a separate class, all inherit from
``Serializer``.
* The return value for ``serialize_to_request`` (the main entry
point) returns a dictionary that represents a request. This
will have keys like ``url_path``, ``query_string``, etc. This
is done so that it's a) easy to test and b) not tied to a
particular HTTP library. See the ``serialize_to_request`` docstring
for more details.
Unicode
-------
The input to the serializers should be text (str/unicode), not bytes,
with the exception of blob types. Those are assumed to be binary,
and if a str/unicode type is passed in, it will be encoded as utf-8.
"""
import re
import base64
from xml.etree import ElementTree
import calendar
from botocore.compat import six
from botocore.compat import json, formatdate
from botocore.utils import parse_to_aware_datetime
from botocore.utils import percent_encode
from botocore import validate
# From the spec, the default timestamp format if not specified is iso8601.
DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
# Same as ISO8601, but with microsecond precision.
ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
def create_serializer(protocol_name, include_validation=True):
    """Return a serializer for the given protocol.

    :param protocol_name: Name of the AWS protocol (e.g. ``query``,
        ``json``, ``rest-json``, ``rest-xml``, ``ec2``), used to look up
        the serializer class in ``SERIALIZERS``.
    :param include_validation: When True (the default), wrap the
        serializer in a decorator that validates user parameters against
        the model before serializing.
    :return: A serializer (optionally wrapped in a validation decorator).
    """
    # TODO: Unknown protocols currently surface as a bare KeyError.
    serializer = SERIALIZERS[protocol_name]()
    if include_validation:
        validator = validate.ParamValidator()
        serializer = validate.ParamValidationDecorator(validator, serializer)
    # BUG FIX: previously the function fell off the end when
    # include_validation was False, returning None instead of the serializer.
    return serializer
class Serializer(object):
    """Abstract base class for protocol serializers.

    Subclasses implement ``serialize_to_request``; the helpers here cover
    the pieces shared by every protocol (timestamps, base64, defaults).
    """
    DEFAULT_METHOD = 'POST'
    # Clients can change this to a different MutableMapping
    # (i.e OrderedDict) if they want. This is used in the
    # compliance test to match the hash ordering used in the
    # tests.
    MAP_TYPE = dict
    DEFAULT_ENCODING = 'utf-8'

    def serialize_to_request(self, parameters, operation_model):
        """Serialize parameters into an HTTP request.

        This method takes user provided parameters and a shape
        model and serializes the parameters to an HTTP request.
        More specifically, this method returns information about
        parts of the HTTP request, it does not enforce a particular
        interface or standard for an HTTP request. It instead returns
        a dictionary of:

            * 'url_path'
            * 'query_string'
            * 'headers'
            * 'body'
            * 'method'

        It is then up to consumers to decide how to map this to a Request
        object of their HTTP library of choice. Below is an example
        return value::

            {'body': {'Action': 'OperationName',
                      'Bar': 'val2',
                      'Foo': 'val1',
                      'Version': '2014-01-01'},
             'headers': {},
             'method': 'POST',
             'query_string': '',
             'url_path': '/'}

        :param parameters: The dictionary input parameters for the
            operation (i.e the user input).
        :param operation_model: The OperationModel object that describes
            the operation.
        """
        raise NotImplementedError("serialize_to_request")

    def _create_default_request(self):
        # Creates a boilerplate default request dict that subclasses
        # can use as a starting point.
        serialized = {
            'url_path': '/',
            'query_string': '',
            'method': self.DEFAULT_METHOD,
            'headers': {},
            # An empty body is represented as an empty byte string.
            'body': b''
        }
        return serialized

    # Some extra utility methods subclasses can use.

    def _timestamp_iso8601(self, value):
        # Emit microseconds only when the datetime actually has them.
        if value.microsecond > 0:
            timestamp_format = ISO8601_MICRO
        else:
            timestamp_format = ISO8601
        return value.strftime(timestamp_format)

    def _timestamp_unixtimestamp(self, value):
        # Seconds since the epoch, computed in UTC.
        return int(calendar.timegm(value.timetuple()))

    def _timestamp_rfc822(self, value):
        # RFC 822 date string with a GMT suffix (HTTP date format).
        return formatdate(value, usegmt=True)

    def _convert_timestamp_to_str(self, value):
        # Accepts datetime objects or anything parse_to_aware_datetime
        # understands, then dispatches on the class's TIMESTAMP_FORMAT.
        datetime_obj = parse_to_aware_datetime(value)
        converter = getattr(
            self, '_timestamp_%s' % self.TIMESTAMP_FORMAT.lower())
        final_value = converter(datetime_obj)
        return final_value

    def _get_serialized_name(self, shape, default_name):
        # Returns the serialized name for the shape if it exists.
        # Otherwise it will return the passed in default_name.
        return shape.serialization.get('name', default_name)

    def _get_base64(self, value):
        # Returns the base64-encoded version of value, handling
        # both strings and bytes. The returned value is a string
        # via the default encoding.
        if isinstance(value, six.text_type):
            value = value.encode(self.DEFAULT_ENCODING)
        return base64.b64encode(value).strip().decode(
            self.DEFAULT_ENCODING)
class QuerySerializer(Serializer):
    """Serializer for the AWS ``query`` protocol.

    All parameters are flattened into key/value body params using dotted
    prefixes (e.g. ``Foo.member.1``), plus ``Action`` and ``Version``.
    """
    TIMESTAMP_FORMAT = 'iso8601'

    def serialize_to_request(self, parameters, operation_model):
        shape = operation_model.input_shape
        serialized = self._create_default_request()
        serialized['method'] = operation_model.http.get('method',
                                                        self.DEFAULT_METHOD)
        # The query serializer only deals with body params so
        # that's what we hand off the _serialize_* methods.
        body_params = self.MAP_TYPE()
        body_params['Action'] = operation_model.name
        body_params['Version'] = operation_model.metadata['apiVersion']
        if shape is not None:
            self._serialize(body_params, parameters, shape)
        serialized['body'] = body_params
        return serialized

    def _serialize(self, serialized, value, shape, prefix=''):
        # serialized: The dict that is incrementally added to with the
        #             final serialized parameters.
        # value: The current user input value.
        # shape: The shape object that describes the structure of the
        #        input.
        # prefix: The incrementally built up prefix for the serialized
        #         key (i.e Foo.bar.members.1).
        method = getattr(self, '_serialize_type_%s' % shape.type_name,
                         self._default_serialize)
        method(serialized, value, shape, prefix=prefix)

    def _serialize_type_structure(self, serialized, value, shape, prefix=''):
        # Recurse into each provided member, extending the dotted prefix.
        members = shape.members
        for key, value in value.items():
            member_shape = members[key]
            member_prefix = self._get_serialized_name(member_shape, key)
            if prefix:
                member_prefix = '%s.%s' % (prefix, member_prefix)
            self._serialize(serialized, value, member_shape, member_prefix)

    def _serialize_type_list(self, serialized, value, shape, prefix=''):
        if not value:
            # The query protocol serializes empty lists.
            serialized[prefix] = ''
            return
        if self._is_shape_flattened(shape):
            list_prefix = prefix
            if shape.member.serialization.get('name'):
                name = self._get_serialized_name(shape.member, default_name='')
                # Replace '.Original' with '.{name}'.
                list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
        else:
            # Non-flattened lists wrap each element under ".member".
            list_prefix = '%s.member' % prefix
        for i, element in enumerate(value, 1):
            element_prefix = '%s.%s' % (list_prefix, i)
            element_shape = shape.member
            self._serialize(serialized, element, element_shape, element_prefix)

    def _serialize_type_map(self, serialized, value, shape, prefix=''):
        # Maps serialize as numbered key/value entry pairs, e.g.
        # Attr.entry.1.key / Attr.entry.1.value (no ".entry" when flattened).
        if self._is_shape_flattened(shape):
            full_prefix = prefix
        else:
            full_prefix = '%s.entry' % prefix
        template = full_prefix + '.{i}.{suffix}'
        key_shape = shape.key
        value_shape = shape.value
        key_suffix = self._get_serialized_name(key_shape, default_name='key')
        value_suffix = self._get_serialized_name(value_shape, 'value')
        for i, key in enumerate(value, 1):
            key_prefix = template.format(i=i, suffix=key_suffix)
            value_prefix = template.format(i=i, suffix=value_suffix)
            self._serialize(serialized, key, key_shape, key_prefix)
            self._serialize(serialized, value[key], value_shape, value_prefix)

    def _serialize_type_blob(self, serialized, value, shape, prefix=''):
        # Blob args must be base64 encoded.
        serialized[prefix] = self._get_base64(value)

    def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
        serialized[prefix] = self._convert_timestamp_to_str(value)

    def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
        # Booleans are the lowercase strings 'true'/'false' on the wire.
        if value:
            serialized[prefix] = 'true'
        else:
            serialized[prefix] = 'false'

    def _default_serialize(self, serialized, value, shape, prefix=''):
        serialized[prefix] = value

    def _is_shape_flattened(self, shape):
        return shape.serialization.get('flattened')
class EC2Serializer(QuerySerializer):
    """EC2 specific customizations to the query protocol serializers.

    The EC2 model is almost, but not exactly, similar to the query protocol
    serializer.  This class encapsulates those differences.  The model
    will have be marked with a ``protocol`` of ``ec2``, so you don't need
    to worry about wiring this class up correctly.
    """

    def _get_serialized_name(self, shape, default_name):
        """Return the wire name for ``shape``, falling back to default_name.

        ``queryName`` takes precedence; a plain ``name`` (locationName) is
        always capitalized on input for the ec2 protocol.
        """
        serialization = shape.serialization
        if 'queryName' in serialization:
            return serialization['queryName']
        if 'name' in serialization:
            location_name = serialization['name']
            return location_name[0].upper() + location_name[1:]
        return default_name

    def _serialize_type_list(self, serialized, value, shape, prefix=''):
        # EC2 lists never use the ".member" wrapper: elements go directly
        # under "<prefix>.<1-based index>".
        member_shape = shape.member
        for index, element in enumerate(value, 1):
            self._serialize(serialized, element, member_shape,
                            '%s.%s' % (prefix, index))
class JSONSerializer(Serializer):
    """Serializer for the AWS ``json`` protocol.

    The whole input is serialized as a JSON document in the request body,
    with the operation identified by the X-Amz-Target header.
    """
    TIMESTAMP_FORMAT = 'unixtimestamp'

    def serialize_to_request(self, parameters, operation_model):
        target = '%s.%s' % (operation_model.metadata['targetPrefix'],
                            operation_model.name)
        json_version = operation_model.metadata['jsonVersion']
        serialized = self._create_default_request()
        serialized['method'] = operation_model.http.get('method',
                                                        self.DEFAULT_METHOD)
        serialized['headers'] = {
            'X-Amz-Target': target,
            'Content-Type': 'application/x-amz-json-%s' % json_version,
        }
        body = {}
        input_shape = operation_model.input_shape
        if input_shape is not None:
            self._serialize(body, parameters, input_shape)
        serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING)
        return serialized

    def _serialize(self, serialized, value, shape, key=None):
        # Dispatch on shape type; scalars fall through to _default_serialize.
        method = getattr(self, '_serialize_type_%s' % shape.type_name,
                         self._default_serialize)
        method(serialized, value, shape, key)

    def _serialize_type_structure(self, serialized, value, shape, key):
        if key is not None:
            # If a key is provided, this is a result of a recursive
            # call so we need to add a new child dict as the value
            # of the passed in serialized dict. We'll then add
            # all the structure members as key/vals in the new serialized
            # dictionary we just created.
            new_serialized = self.MAP_TYPE()
            serialized[key] = new_serialized
            serialized = new_serialized
        members = shape.members
        for member_key, member_value in value.items():
            member_shape = members[member_key]
            self._serialize(serialized, member_value, member_shape, member_key)

    def _serialize_type_map(self, serialized, value, shape, key):
        map_obj = self.MAP_TYPE()
        serialized[key] = map_obj
        for sub_key, sub_value in value.items():
            self._serialize(map_obj, sub_value, shape.value, sub_key)

    def _serialize_type_list(self, serialized, value, shape, key):
        list_obj = []
        serialized[key] = list_obj
        for list_item in value:
            wrapper = {}
            # The JSON list serialization is the only case where we aren't
            # setting a key on a dict. We handle this by using
            # a __current__ key on a wrapper dict to serialize each
            # list item before appending it to the serialized list.
            self._serialize(wrapper, list_item, shape.member, "__current__")
            list_obj.append(wrapper["__current__"])

    def _default_serialize(self, serialized, value, shape, key):
        serialized[key] = value

    def _serialize_type_timestamp(self, serialized, value, shape, key):
        serialized[key] = self._convert_timestamp_to_str(value)

    def _serialize_type_blob(self, serialized, value, shape, key):
        serialized[key] = self._get_base64(value)
class BaseRestSerializer(Serializer):
"""Base class for rest protocols.
The only variance between the various rest protocols is the
way that the body is serialized. All other aspects (headers, uri, etc.)
are the same and logic for serializing those aspects lives here.
Subclasses must implement the ``_serialize_body_params`` method.
"""
# This is a list of known values for the "location" key in the
# serialization dict. The location key tells us where on the request
# to put the serialized value.
KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']
    def serialize_to_request(self, parameters, operation_model):
        """Build the request dict for a rest-* protocol operation.

        Partitions user parameters by their modeled location (uri,
        querystring, header(s), body), renders the requestUri template,
        and delegates body rendering to the subclass's
        ``_serialize_body_params``.
        """
        serialized = self._create_default_request()
        serialized['method'] = operation_model.http.get('method',
                                                        self.DEFAULT_METHOD)
        shape = operation_model.input_shape
        if shape is None:
            # Operation takes no input: the requestUri is used verbatim.
            serialized['url_path'] = operation_model.http['requestUri']
            return serialized
        shape_members = shape.members
        # While the ``serialized`` key holds the final serialized request
        # data, we need interim dicts for the various locations of the
        # request. We need this for the uri_path_kwargs and the
        # query_string_kwargs because they are templated, so we need
        # to gather all the needed data for the string template,
        # then we render the template. The body_kwargs is needed
        # because once we've collected them all, we run them through
        # _serialize_body_params, which for rest-json, creates JSON,
        # and for rest-xml, will create XML. This is what the
        # ``partitioned`` dict below is for.
        partitioned = {
            'uri_path_kwargs': self.MAP_TYPE(),
            'query_string_kwargs': self.MAP_TYPE(),
            'body_kwargs': self.MAP_TYPE(),
            'headers': self.MAP_TYPE(),
        }
        for param_name, param_value in parameters.items():
            if param_value is None:
                # Don't serialize any parameter with a None value.
                continue
            self._partition_parameters(partitioned, param_name, param_value,
                                       shape_members)
        serialized['url_path'] = self._render_uri_template(
            operation_model.http['requestUri'],
            partitioned['uri_path_kwargs'])
        # Note that we lean on the http implementation to handle the case
        # where the requestUri path already has query parameters.
        # The bundled http client, requests, already supports this.
        serialized['query_string'] = partitioned['query_string_kwargs']
        if partitioned['headers']:
            serialized['headers'] = partitioned['headers']
        self._serialize_payload(partitioned, parameters,
                                serialized, shape, shape_members)
        return serialized
def _render_uri_template(self, uri_template, params):
# We need to handle two cases::
#
# /{Bucket}/foo
# /{Key+}/bar
# A label ending with '+' is greedy. There can only
# be one greedy key.
encoded_params = {}
for template_param in re.findall(r'{(.*?)}', uri_template):
if template_param.endswith('+'):
encoded_params[template_param] = percent_encode(
params[template_param[:-1]], safe='/~')
else:
encoded_params[template_param] = percent_encode(
params[template_param])
return uri_template.format(**encoded_params)
def _serialize_payload(self, partitioned, parameters,
                       serialized, shape, shape_members):
    """Fill in ``serialized['body']`` from the user's input params.

    :param partitioned: The user input params partitioned by location.
    :param parameters: The user input params.
    :param serialized: The final serialized request dict (mutated here).
    :param shape: Describes the expected input shape.
    :param shape_members: The members of the input struct shape.
    """
    payload_member = shape.serialization.get('payload')
    if payload_member is not None and \
            shape_members[payload_member].type_name in ['blob', 'string']:
        # If it's streaming, then the body is just the
        # value of the payload.
        body_payload = parameters.get(payload_member, b'')
        body_payload = self._encode_payload(body_payload)
        serialized['body'] = body_payload
    elif payload_member is not None:
        # If there's a payload member, we serialize only that member
        # to the body.
        body_params = parameters.get(payload_member)
        if body_params is not None:
            serialized['body'] = self._serialize_body_params(
                body_params,
                shape_members[payload_member])
    elif partitioned['body_kwargs']:
        # No explicit payload member: everything that was partitioned
        # into body_kwargs becomes the protocol-specific body document.
        serialized['body'] = self._serialize_body_params(
            partitioned['body_kwargs'], shape)
def _encode_payload(self, body):
    # Text bodies are encoded to bytes with the serializer's default
    # encoding; byte payloads pass through untouched.
    is_text = isinstance(body, six.text_type)
    return body.encode(self.DEFAULT_ENCODING) if is_text else body
def _partition_parameters(self, partitioned, param_name,
                          param_value, shape_members):
    """Route one input parameter to its location in the request.

    Some params are HTTP headers, some are used in the URI, some are
    in the request body; the member shape's ``location`` trait decides.
    ``partitioned`` is mutated in place.
    """
    member = shape_members[param_name]
    location = member.serialization.get('location')
    # The wire name may differ from the python-visible parameter name.
    key_name = member.serialization.get('name', param_name)
    if location == 'uri':
        partitioned['uri_path_kwargs'][key_name] = param_value
    elif location == 'querystring':
        partitioned['query_string_kwargs'][key_name] = param_value
    elif location == 'header':
        shape = shape_members[param_name]
        value = self._convert_header_value(shape, param_value)
        partitioned['headers'][key_name] = value
    elif location == 'headers':
        # 'headers' is a bit of an oddball.  The ``key_name``
        # is actually really a prefix for the header names:
        header_prefix = key_name
        # The value provided by the user is a dict so we'll be
        # creating multiple header key/val pairs.  The key
        # name to use for each header is the header_prefix
        # (``key_name``) plus the key provided by the user.
        self._do_serialize_header_map(header_prefix,
                                      partitioned['headers'],
                                      param_value)
    else:
        # No location trait means the param belongs in the body.
        partitioned['body_kwargs'][param_name] = param_value
def _do_serialize_header_map(self, header_prefix, headers, user_input):
for key, val in user_input.items():
full_key = header_prefix + key
headers[full_key] = val
def _serialize_body_params(self, params, shape):
    # Protocol-specific hook: subclasses (rest-json, rest-xml) must
    # render ``params`` (matching ``shape``) into request body bytes.
    raise NotImplementedError('_serialize_body_params')
def _convert_header_value(self, shape, value):
    """Convert a header value to its on-the-wire string form.

    Timestamps become RFC 822 date strings; every other type passes
    through unchanged.
    """
    if shape.type_name == 'timestamp':
        # Normalize whatever the user supplied into an aware datetime,
        # take its UTC epoch seconds, then format as an RFC 822 date.
        datetime_obj = parse_to_aware_datetime(value)
        timestamp = calendar.timegm(datetime_obj.utctimetuple())
        return self._timestamp_rfc822(timestamp)
    else:
        return value
class RestJSONSerializer(BaseRestSerializer, JSONSerializer):
    """Serializer for rest-json services.

    Request placement (uri/query/headers) comes from BaseRestSerializer;
    JSONSerializer supplies the recursive ``_serialize`` used here to
    build the JSON body.
    """

    def _serialize_body_params(self, params, shape):
        # Render the collected body kwargs as a JSON document (bytes).
        serialized_body = self.MAP_TYPE()
        self._serialize(serialized_body, params, shape)
        return json.dumps(serialized_body).encode(self.DEFAULT_ENCODING)
class RestXMLSerializer(BaseRestSerializer):
    """Serializer for rest-xml services: body params become an XML document."""
    TIMESTAMP_FORMAT = 'iso8601'

    def _serialize_body_params(self, params, shape):
        """Serialize ``params`` (matching ``shape``) to an XML byte string."""
        root_name = shape.serialization['name']
        pseudo_root = ElementTree.Element('')
        self._serialize(shape, params, pseudo_root, root_name)
        real_root = list(pseudo_root)[0]
        return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING)

    def _serialize(self, shape, params, xmlnode, name):
        # Dispatch on the shape's type name; scalar types without a
        # dedicated handler fall through to _default_serialize.
        method = getattr(self, '_serialize_type_%s' % shape.type_name,
                         self._default_serialize)
        method(xmlnode, params, shape, name)

    def _serialize_type_structure(self, xmlnode, params, shape, name):
        structure_node = ElementTree.SubElement(xmlnode, name)

        if 'xmlNamespace' in shape.serialization:
            namespace_metadata = shape.serialization['xmlNamespace']
            attribute_name = 'xmlns'
            if namespace_metadata.get('prefix'):
                attribute_name += ':%s' % namespace_metadata['prefix']
            structure_node.attrib[attribute_name] = namespace_metadata['uri']

        for key, value in params.items():
            member_shape = shape.members[key]
            member_name = member_shape.serialization.get('name', key)
            if value is None:
                # Don't serialize any param whose value is None.
                # BUG FIX: this previously ``return``ed, which also dropped
                # every member that happened to iterate *after* the None one
                # (nondeterministic on unordered dicts).  Skip just this
                # member instead, as the comment above intends.
                continue
            # Member shapes marked as an xmlAttribute are serialized as an
            # XML attribute of the *current* node rather than a child node.
            if member_shape.serialization.get('xmlAttribute'):
                # xmlAttributes must have a serialization name.
                xml_attribute_name = member_shape.serialization['name']
                structure_node.attrib[xml_attribute_name] = value
                continue
            self._serialize(member_shape, value, structure_node, member_name)

    def _serialize_type_list(self, xmlnode, params, shape, name):
        member_shape = shape.member
        if shape.serialization.get('flattened'):
            # Flattened lists repeat the member element directly under the
            # parent, using the list's own name.
            element_name = name
            list_node = xmlnode
        else:
            element_name = member_shape.serialization.get('name', 'member')
            list_node = ElementTree.SubElement(xmlnode, name)
        for item in params:
            self._serialize(member_shape, item, list_node, element_name)

    def _serialize_type_map(self, xmlnode, params, shape, name):
        # Given the ``name`` of MyMap, and input of {"key1": "val1"}
        # we serialize this as:
        #   <MyMap>
        #     <entry>
        #       <key>key1</key>
        #       <value>val1</value>
        #     </entry>
        #   </MyMap>
        node = ElementTree.SubElement(xmlnode, name)
        # TODO: handle flattened maps.
        for key, value in params.items():
            entry_node = ElementTree.SubElement(node, 'entry')
            key_name = self._get_serialized_name(shape.key, default_name='key')
            val_name = self._get_serialized_name(shape.value,
                                                 default_name='value')
            self._serialize(shape.key, key, entry_node, key_name)
            self._serialize(shape.value, value, entry_node, val_name)

    def _serialize_type_boolean(self, xmlnode, params, shape, name):
        # For scalar types, the 'params' attr is actually just a scalar
        # value representing the data we need to serialize as a boolean.
        # It will either be 'true' or 'false'
        node = ElementTree.SubElement(xmlnode, name)
        if params:
            str_value = 'true'
        else:
            str_value = 'false'
        node.text = str_value

    def _serialize_type_blob(self, xmlnode, params, shape, name):
        node = ElementTree.SubElement(xmlnode, name)
        node.text = self._get_base64(params)

    def _serialize_type_timestamp(self, xmlnode, params, shape, name):
        node = ElementTree.SubElement(xmlnode, name)
        node.text = self._convert_timestamp_to_str(params)

    def _default_serialize(self, xmlnode, params, shape, name):
        # Fallback for plain scalars: stringify the value as element text.
        node = ElementTree.SubElement(xmlnode, name)
        node.text = str(params)
# Maps a service protocol name (from the service model metadata) to the
# serializer class that implements it.
SERIALIZERS = {
    'ec2': EC2Serializer,
    'query': QuerySerializer,
    'json': JSONSerializer,
    'rest-json': RestJSONSerializer,
    'rest-xml': RestXMLSerializer,
}
| gpl-2.0 |
bcoca/ansible-modules-extras | storage/netapp/netapp_e_amg_role.py | 27 | 8160 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_amg_role
short_description: Update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
- Update a storage array to become the primary or secondary instance in an asynchronous mirror group
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- The ID of the primary storage array for the async mirror action
required: yes
role:
description:
- Whether the array should be the primary or secondary array for the AMG
required: yes
choices: ['primary', 'secondary']
noSync:
description:
- Whether to avoid synchronization prior to role reversal
required: no
default: no
choices: [yes, no]
force:
description:
- Whether to force the role reversal regardless of the online-state of the primary
required: no
default: no
"""
EXAMPLES = """
- name: Update the role of a storage array
netapp_e_amg_role:
name: updating amg role
role: primary
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Failure message
returned: failure
type: string
sample: "No Async Mirror Group with the name."
"""
import json
import time

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
# Default headers sent with every request to the SANtricity REST API.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None,
            force_basic_auth=True, ignore_errors=False):
    """Issue a request to the REST API and decode the JSON response.

    Returns a ``(status_code, data)`` tuple where ``data`` is the decoded
    JSON body, or None when the response body is empty.  Raises Exception
    on HTTP status >= 400 unless ``ignore_errors`` is set.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method,
                     use_proxy=use_proxy, force=force,
                     last_mod_time=last_mod_time, timeout=timeout,
                     validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password,
                     http_agent=http_agent, force_basic_auth=force_basic_auth)
    except HTTPError:
        # Error responses still carry a body worth reading below.
        err = get_exception()
        r = err.fp

    raw_data = None
    try:
        raw_data = r.read()
        # Bug fix: an empty body now yields None instead of leaking the
        # *request* payload (the ``data`` argument) back to the caller.
        data = json.loads(raw_data) if raw_data else None
    except ValueError:
        # Body was not valid JSON.  Narrowed from a bare ``except:`` so
        # unrelated errors (including SystemExit) are no longer swallowed.
        if ignore_errors:
            data = None
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()
    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    return resp_code, data
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
    """Look up the AMG labelled *name* on storage array *ssid*.

    Returns ``(amg_exists, has_desired_role, amg_id, amg_data)`` where
    ``has_desired_role`` is True when the AMG's local role already equals
    ``body['role']``.
    """
    amg_exists = False
    has_desired_role = False
    amg_id = None
    amg_data = None
    get_amgs = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + get_amgs
    try:
        amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
                               headers=HEADERS)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate instead of being reported as an API failure.
        module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))

    for amg in amgs:
        if amg['label'] == name:
            amg_exists = True
            amg_id = amg['id']
            amg_data = amg
            if amg['localRole'] == body.get('role'):
                has_desired_role = True

    return amg_exists, has_desired_role, amg_id, amg_data
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
    """POST a role change for an AMG and block until the reversal finishes.

    Returns the final AMG status document.  Any API failure aborts the
    module via ``module.fail_json``.
    """
    endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        request(url, data=post_data, method='POST', url_username=api_usr,
                url_password=api_pwd, headers=HEADERS)
    except Exception:
        # Narrowed from a bare ``except:``.
        err = get_exception()
        module.fail_json(
            msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))

    status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
    status_url = api_url + status_endpoint

    def _poll_status():
        # One status poll; failures abort the module with a useful message.
        try:
            rc, status = request(status_url, method='GET',
                                 url_username=api_usr, url_password=api_pwd,
                                 headers=HEADERS)
            return status
        except Exception:
            err = get_exception()
            module.fail_json(
                msg="Failed to check status of AMG after role reversal. " +
                    "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))

    status = _poll_status()
    # Here we wait for the role reversal to complete.  Sleep between polls
    # instead of hammering the API in a tight busy-wait loop.
    if 'roleChangeProgress' in status:
        while status['roleChangeProgress'] != "none":
            time.sleep(1)
            status = _poll_status()
    return status
def main():
    """Module entry point: parse params, find the AMG, converge its role."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        role=dict(required=True, choices=['primary', 'secondary']),
        noSync=dict(required=False, type='bool', default=False),
        force=dict(required=False, type='bool', default=False),
        ssid=dict(required=True, type='str'),
        api_url=dict(required=True),
        # NOTE(review): the credentials are consumed by every REST call
        # below yet declared optional here -- confirm this is intended.
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    p = module.params

    ssid = p.pop('ssid')
    api_url = p.pop('api_url')
    user = p.pop('api_username')
    pwd = p.pop('api_password')
    name = p.pop('name')

    if not api_url.endswith('/'):
        api_url += '/'

    # After the pops, ``p`` holds only role/noSync/force -- the POST body.
    agm_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)

    if not agm_exists:
        module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
    elif has_desired_role:
        # Already in the requested role: idempotent no-op.
        module.exit_json(changed=False, **amg_data)
    else:
        amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
        if amg_data:
            module.exit_json(changed=True, **amg_data)
        else:
            module.exit_json(changed=True, msg="AMG role changed.")
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
stephenbm/pastry | tests/utils/test_pastry_rsa.py | 1 | 1081 | import mock
import unittest
from pastry.utils.pastry_rsa import sign
class PastryRsaTestCase(unittest.TestCase):
    """Unit tests for ``pastry.utils.pastry_rsa.sign``."""

    @mock.patch('pastry.utils.pastry_rsa.rsa')
    def test_sign(self, rsa):
        # Every rsa primitive is stubbed out, so this verifies only the
        # call sequence and argument plumbing inside sign(), not crypto.
        private = mock.MagicMock()
        private.n = 'n'
        private.d = 'd'
        rsa.PrivateKey.load_pkcs1.return_value = private
        rsa.common.byte_size.return_value = 'byte_size'
        rsa.pkcs1._pad_for_signing.return_value = 'padded'
        rsa.transform.bytes2int.return_value = 'payload'
        rsa.core.encrypt_int.return_value = 'encrypted'
        rsa.transform.int2bytes.return_value = 'int2bytes'

        self.assertEqual('int2bytes', sign('message', 'key'))

        # Expected pipeline: load key -> size -> pad -> to int ->
        # encrypt -> back to bytes, each fed the previous stub's output.
        rsa.PrivateKey.load_pkcs1.assert_called_with('key')
        rsa.common.byte_size.assert_called_with('n')
        rsa.pkcs1._pad_for_signing.assert_called_with('message', 'byte_size')
        rsa.transform.bytes2int.assert_called_with('padded')
        rsa.core.encrypt_int.assert_called_with('payload', 'd', 'n')
        rsa.transform.int2bytes.assert_called_with('encrypted', 'byte_size')
| mit |
dguerri/ansible-modules-core | cloud/openstack/os_volume.py | 131 | 4754 | #!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_volume
short_description: Create/Delete Cinder Volumes
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Create or Remove cinder block storage volumes
options:
size:
description:
- Size of volume in GB
required: only when state is 'present'
default: None
display_name:
description:
- Name of volume
required: true
display_description:
description:
- String describing the volume
required: false
default: None
volume_type:
description:
- Volume type for volume
required: false
default: None
image:
description:
- Image name or id for boot from volume
required: false
default: None
snapshot_id:
description:
- Volume snapshot id to create from
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Creates a new volume
- name: create a volume
hosts: localhost
tasks:
- name: create 40g test volume
os_volume:
state: present
cloud: mordred
availability_zone: az2
size: 40
display_name: test_volume
'''
def _present_volume(module, cloud):
    """Ensure the volume exists; exits the module in every path."""
    if cloud.volume_exists(module.params['display_name']):
        # Idempotent no-op: report the existing volume unchanged.
        v = cloud.get_volume(module.params['display_name'])
        module.exit_json(changed=False, id=v['id'], volume=v)

    volume_args = dict(
        size=module.params['size'],
        volume_type=module.params['volume_type'],
        display_name=module.params['display_name'],
        display_description=module.params['display_description'],
        snapshot_id=module.params['snapshot_id'],
        availability_zone=module.params['availability_zone'],
    )
    if module.params['image']:
        # Boot-from-volume: resolve the image name/id to a concrete id.
        image_id = cloud.get_image_id(module.params['image'])
        volume_args['imageRef'] = image_id

    volume = cloud.create_volume(
        wait=module.params['wait'], timeout=module.params['timeout'],
        **volume_args)
    module.exit_json(changed=True, id=volume['id'], volume=volume)
def _absent_volume(module, cloud):
    """Delete the volume if present; a delete timeout is reported as
    changed=False rather than a failure."""
    try:
        cloud.delete_volume(
            name_or_id=module.params['display_name'],
            wait=module.params['wait'],
            timeout=module.params['timeout'])
    except shade.OpenStackCloudTimeout:
        module.exit_json(changed=False)
    module.exit_json(changed=True)
def main():
    """Module entry point: validate params and converge the volume state."""
    argument_spec = openstack_full_argument_spec(
        size=dict(default=None),
        volume_type=dict(default=None),
        display_name=dict(required=True, aliases=['name']),
        display_description=dict(default=None, aliases=['description']),
        image=dict(default=None),
        snapshot_id=dict(default=None),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            # A volume may be created from an image or a snapshot, not both.
            ['image', 'snapshot_id'],
        ],
    )
    module = AnsibleModule(argument_spec=argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    state = module.params['state']

    if state == 'present' and not module.params['size']:
        # Size only matters for creation, so it is enforced here rather
        # than in the argument spec.
        module.fail_json(msg="Size is required when state is 'present'")

    try:
        cloud = shade.openstack_cloud(**module.params)
        if state == 'present':
            _present_volume(module, cloud)
        if state == 'absent':
            _absent_volume(module, cloud)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
walterreade/scikit-learn | examples/text/document_clustering.py | 42 | 8335 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
  frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
              dest="n_components", type="int",
              help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
              action="store_false", dest="minibatch", default=True,
              help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
              action="store_false", dest="use_idf", default=True,
              help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
              action="store_true", default=False,
              help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
              help="Maximum number of features (dimensions)"
                   " to extract from text.")
op.add_option("--verbose",
              action="store_true", dest="verbose", default=False,
              help="Print progress reports inside k-means algorithm.")

print(__doc__)
op.print_help()

(opts, args) = op.parse_args()
# This demo is configured entirely through options; positional
# arguments are a usage error.
if len(args) > 0:
    op.error("this script takes no arguments.")
    sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None

print("Loading 20 newsgroups dataset for categories:")
print(categories)

dataset = fetch_20newsgroups(subset='all', categories=categories,
                             shuffle=True, random_state=42)

print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()

# Ground-truth newsgroup labels, used later for the quality metrics.
labels = dataset.target
true_k = np.unique(labels).shape[0]

print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    if opts.use_idf:
        # Perform an IDF normalization on the output of HashingVectorizer
        hasher = HashingVectorizer(n_features=opts.n_features,
                                   stop_words='english', non_negative=True,
                                   norm=None, binary=False)
        vectorizer = make_pipeline(hasher, TfidfTransformer())
    else:
        vectorizer = HashingVectorizer(n_features=opts.n_features,
                                       stop_words='english',
                                       non_negative=False, norm='l2',
                                       binary=False)
else:
    # Stateful in-memory vocabulary with optional IDF reweighting.
    vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
                                 min_df=2, stop_words='english',
                                 use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)

print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
    print("Performing dimensionality reduction using LSA")
    t0 = time()
    # Vectorizer results are normalized, which makes KMeans behave as
    # spherical k-means for better results. Since LSA/SVD results are
    # not normalized, we have to redo the normalization.
    svd = TruncatedSVD(opts.n_components)
    normalizer = Normalizer(copy=False)
    lsa = make_pipeline(svd, normalizer)

    X = lsa.fit_transform(X)

    print("done in %fs" % (time() - t0))

    explained_variance = svd.explained_variance_ratio_.sum()
    print("Explained variance of the SVD step: {}%".format(
        int(explained_variance * 100)))

    print()


###############################################################################
# Do the actual clustering

if opts.minibatch:
    km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
                         init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
                verbose=opts.verbose)

print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()

# Compare cluster assignments against the ground-truth labels.
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
      % metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, km.labels_, sample_size=1000))

print()

if not opts.use_hashing:
    # Hashing destroys the feature-name mapping, so top terms are only
    # available for the vocabulary-based vectorizers.
    print("Top terms per cluster:")

    if opts.n_components:
        # Map the LSA-space centroids back to term space before ranking.
        original_space_centroids = svd.inverse_transform(km.cluster_centers_)
        order_centroids = original_space_centroids.argsort()[:, ::-1]
    else:
        order_centroids = km.cluster_centers_.argsort()[:, ::-1]

    terms = vectorizer.get_feature_names()
    for i in range(true_k):
        print("Cluster %d:" % i, end='')
        for ind in order_centroids[i, :10]:
            print(' %s' % terms[ind], end='')
        print()
| bsd-3-clause |
traveloka/ansible | lib/ansible/executor/action_write_locks.py | 84 | 1782 | # (c) 2016 - Red Hat, Inc. <info@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from multiprocessing import Lock
from ansible.module_utils.facts import Facts
if 'action_write_locks' not in globals():
    # Do not initialize this more than once because it seems to bash
    # the existing one.  multiprocessing must be reloading the module
    # when it forks?
    # Maps module name -> multiprocessing.Lock serializing writes of that
    # module's file to disk.
    action_write_locks = dict()

    # Below is a Lock for use when we weren't expecting a named module.
    # It gets used when an action plugin directly invokes a module instead
    # of going through the strategies.  Slightly less efficient as all
    # processes with unexpected module names will wait on this lock
    action_write_locks[None] = Lock()

    # These plugins are called directly by action plugins (not going through
    # a strategy).  We precreate them here as an optimization
    mods = set(p['name'] for p in Facts.PKG_MGRS)
    mods.update(('copy', 'file', 'setup', 'slurp', 'stat'))
    for mod_name in mods:
        action_write_locks[mod_name] = Lock()
| gpl-3.0 |
jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/hpux.py | 229 | 1464 | from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
compilers = ['HPUXFCompiler']
class HPUXFCompiler(FCompiler):
    """numpy.distutils compiler description for the HP-UX Fortran 90
    compiler (``f90``)."""

    compiler_type = 'hpux'
    description = 'HP Fortran 90 Compiler'
    # Regex applied to ``f90 +version`` output to extract the version.
    version_pattern = r'HP F90 (?P<version>[^\s*,]*)'

    executables = {
        'version_cmd' : ["f90", "+version"],
        'compiler_f77' : ["f90"],
        'compiler_fix' : ["f90"],
        'compiler_f90' : ["f90"],
        'linker_so' : ["ld", "-b"],
        'archiver' : ["ar", "-cr"],
        'ranlib' : ["ranlib"]
        }
    module_dir_switch = None #XXX: fix me
    module_include_switch = None #XXX: fix me
    pic_flags = ['+Z']

    def get_flags(self):
        # PIC plus HP-UX flags; '+DD64' presumably selects the 64-bit
        # data model -- TODO confirm against HP f90 docs.
        return self.pic_flags + ['+ppu', '+DD64']

    def get_flags_opt(self):
        return ['-O3']

    def get_libraries(self):
        return ['m']

    def get_library_dirs(self):
        opt = ['/usr/lib/hpux64']
        return opt

    def get_version(self, force=0, ok_status=[256, 0, 1]):
        # NOTE(review): mutable default (list); harmless here because it
        # is never mutated, but a tuple would be safer.
        # XXX status==256 may indicate 'unrecognized option' or
        # 'no input file'. So, version_cmd needs more work.
        return FCompiler.get_version(self, force, ok_status)
if __name__ == '__main__':
    # Manual smoke test: report the detected HP-UX Fortran version.
    from distutils import log
    log.set_verbosity(10)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='hpux')
    compiler.customize()
    print(compiler.get_version())
| mit |
yifan-gu/.emacs.d | .python-environments/default/lib/python2.7/site-packages/setuptools/command/bdist_egg.py | 155 | 17606 | """setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import marshal
import textwrap
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.compat import basestring
from setuptools.extension import Library
from setuptools import Command
try:
    # Python 2.7 or >=3.2
    from sysconfig import get_path, get_python_version

    def _get_purelib():
        # Platform-independent site-packages directory.
        return get_path("purelib")
except ImportError:
    from distutils.sysconfig import get_python_lib, get_python_version

    def _get_purelib():
        # Fallback for older Pythons without the sysconfig module.
        return get_python_lib(False)
def strip_module(filename):
    """Return *filename* minus any extension and a trailing ``module`` tag.

    e.g. ``'foomodule.so' -> 'foo'``, ``'bar.py' -> 'bar'``,
    ``'baz' -> 'baz'``.
    """
    base = os.path.splitext(filename)[0] if '.' in filename else filename
    return base[:-6] if base.endswith('module') else base
def write_stub(resource, pyfile):
    """Write a loader stub to *pyfile* for the extension in *resource*.

    The stub bootstraps a shared object shipped inside an egg: on first
    import it resolves the real file with pkg_resources and loads it
    dynamically.
    """
    _stub_template = textwrap.dedent("""
        def __bootstrap__():
            global __bootstrap__, __loader__, __file__
            import sys, pkg_resources, imp
            __file__ = pkg_resources.resource_filename(__name__, %r)
            __loader__ = None; del __bootstrap__, __loader__
            imp.load_dynamic(__name__,__file__)
        __bootstrap__()
        """).lstrip()
    stub_source = _stub_template % resource
    with open(pyfile, 'w') as stub_file:
        stub_file.write(stub_source)
class bdist_egg(Command):
    """setuptools command that builds a ``.egg`` distribution."""

    description = "create an \"egg\" distribution"

    # (long option, short option, help) triples understood by distutils.
    user_options = [
        ('bdist-dir=', 'b',
         "temporary directory for creating the distribution"),
        ('plat-name=', 'p', "platform name to embed in generated filenames "
                            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
         "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
         "keep the pseudo-installation tree around after " +
         "creating the distribution archive"),
        ('dist-dir=', 'd',
         "directory to put final built distributions in"),
        ('skip-build', None,
         "skip rebuilding everything (for testing/debugging)"),
    ]

    # Options that are flags (take no argument).
    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]
def initialize_options(self):
    """Set the distutils-mandated defaults for every option."""
    self.bdist_dir = None
    self.plat_name = None
    self.keep_temp = 0
    self.dist_dir = None
    self.skip_build = 0
    self.egg_output = None
    self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files, []
for item in old:
if isinstance(item, tuple) and len(item) == 2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized == site_packages or normalized.startswith(
site_packages + os.sep
):
item = realpath[len(site_packages) + 1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self, cmdname, **kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname, self.bdist_dir)
kw.setdefault('skip_build', self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root
instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p, ext_name) in enumerate(ext_outputs):
filename, ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep, '/')
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root, 'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts', install_dir=script_dir,
no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_egg', get_python_version(), self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base, dirs, files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base, name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution, 'zip_safe', None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation', {}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info, '')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir: ''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base] + filename)
for filename in dirs:
paths[os.path.join(base, filename)] = (paths[base] +
filename + '/')
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext, Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir, filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base, dirs, files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base, dirs, files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag, fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
return flag
if not can_scan():
return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag, fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe) == flag:
f = open(fn, 'wt')
f.write('\n')
f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base, name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
if sys.version_info < (3, 3):
skip = 8 # skip magic & date
else:
skip = 12 # skip magic & date & file size
f = open(filename, 'rb')
f.read(skip)
code = marshal.load(f)
f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
'getsource', 'getabsfile', 'getsourcefile', 'getfile'
'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
if '__name__' in symbols and '__main__' in symbols and '.' not in module:
if sys.version[:3] == "2.4": # -m works w/zipfiles in 2.5
log.warn("%s: top-level module may be 'python -m' script", module)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, basestring):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
mode='w'):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir) + 1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
if compress is None:
# avoid 2.3 zipimport bug when 64 bits
compress = (sys.version >= "2.4")
compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
| gpl-2.0 |
hachard/Cra-Magnet | flask/lib/python3.5/site-packages/flask_wtf/i18n.py | 40 | 1790 | # coding: utf-8
"""
flask_wtf.i18n
~~~~~~~~~~~~~~
Internationalization support for Flask WTF.
:copyright: (c) 2013 by Hsiaoming Yang.
"""
from flask import _request_ctx_stack
from babel import support
try:
from flask_babel import get_locale
except ImportError:
from flask_babelex import get_locale
try:
from wtforms.i18n import messages_path
except ImportError:
from wtforms.ext.i18n.utils import messages_path
__all__ = ('Translations', 'translations')
def _get_translations():
"""Returns the correct gettext translations.
Copy from flask-babel with some modifications.
"""
ctx = _request_ctx_stack.top
if ctx is None:
return None
# babel should be in extensions for get_locale
if 'babel' not in ctx.app.extensions:
return None
translations = getattr(ctx, 'wtforms_translations', None)
if translations is None:
dirname = messages_path()
translations = support.Translations.load(
dirname, [get_locale()], domain='wtforms'
)
ctx.wtforms_translations = translations
return translations
class Translations(object):
def gettext(self, string):
t = _get_translations()
if t is None:
return string
if hasattr(t, 'ugettext'):
return t.ugettext(string)
# Python 3 has no ugettext
return t.gettext(string)
def ngettext(self, singular, plural, n):
t = _get_translations()
if t is None:
if n == 1:
return singular
return plural
if hasattr(t, 'ungettext'):
return t.ungettext(singular, plural, n)
# Python 3 has no ungettext
return t.ngettext(singular, plural, n)
translations = Translations()
| gpl-3.0 |
turbidsoul/isort | isort/hooks.py | 51 | 2808 | """isort.py.
Defines a git hook to allow pre-commit warnings and errors about import order.
usage:
exit_code = git_hook(strict=True)
Copyright (C) 2015 Helen Sherwood-Taylor
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import subprocess
from isort import SortImports
def get_output(command):
"""
Run a command and return raw output
:param str command: the command to run
:returns: the stdout output of the command
"""
return subprocess.check_output(command.split())
def get_lines(command):
"""
Run a command and return lines of output
:param str command: the command to run
:returns: list of whitespace-stripped lines output by command
"""
stdout = get_output(command)
return [line.strip().decode('utf-8') for line in stdout.splitlines()]
def git_hook(strict=False):
"""
Git pre-commit hook to check staged files for isort errors
:param bool strict - if True, return number of errors on exit,
causing the hook to fail. If False, return zero so it will
just act as a warning.
:return number of errors if in strict mode, 0 otherwise.
"""
# Get list of files modified and staged
diff_cmd = "git diff-index --cached --name-only --diff-filter=ACMRTUXB HEAD"
files_modified = get_lines(diff_cmd)
errors = 0
for filename in files_modified:
if filename.endswith('.py'):
# Get the staged contents of the file
staged_cmd = "git show :%s" % filename
staged_contents = get_output(staged_cmd)
sort = SortImports(
file_path=filename,
file_contents=staged_contents.decode(),
check=True
)
if sort.incorrectly_sorted:
errors += 1
return errors if strict else 0
| mit |
fi-ksi/web-backend | endpoint/content.py | 1 | 4510 | import os
import magic
import multipart
import falcon
import util
class Content(object):
# Smaze adresarovou strukturu rekurzivne od nejvic zanoreneho
# dokud jsou adresare prazdne.
def _delete_tree(self, path):
if os.listdir(path) != []:
return
try:
os.rmdir(path)
self._delete_tree(os.path.dirname(path))
except:
return
# GET na content vraci
# a) soubor, pokud je v \path cesta k souboru,
# b) obsah adresare, pokud je v \path cesta k adresari.
def on_get(self, req, resp):
if req.get_param('path'):
shortPath = req.get_param('path').replace('..', '')
else:
shortPath = "."
filePath = 'data/content/' + shortPath
if os.path.isdir(filePath):
req.context['result'] = {
'content': util.content.dir_to_json(shortPath)
}
return
if not os.path.isfile(filePath):
req.context['result'] = {
'content': util.content.empty_content(shortPath)
}
return
resp.content_type = magic.Magic(mime=True).from_file(filePath)
resp.stream_len = os.path.getsize(filePath)
resp.stream = open(filePath, 'rb')
def on_post(self, req, resp):
user = req.context['user']
if (not user.is_logged_in()) or (not user.is_org()):
req.context['result'] = {
'errors': [{
'status': '401',
'title': 'Unauthorized',
'detail': 'Upravovat content může pouze organizátor.'
}]
}
resp.status = falcon.HTTP_400
return
if req.get_param('path'):
shortPath = req.get_param('path').replace('..', '')
else:
shortPath = "."
dirPath = 'data/content/' + shortPath
if not req.content_length:
resp.status = falcon.HTTP_411
return
if req.content_length > util.config.MAX_UPLOAD_FILE_SIZE:
resp.status = falcon.HTTP_413
return
files = multipart.MultiDict()
content_type, options = multipart.parse_options_header(
req.content_type)
boundary = options.get('boundary', '')
if not boundary:
raise multipart.MultipartError(
"No boundary for multipart/form-data.")
try:
if not os.path.isdir(dirPath):
os.makedirs(dirPath)
for part in multipart.MultipartParser(
req.stream, boundary, req.content_length,
2**30, 2**20, 2**18, 2**16, 'utf-8'):
path = '%s/%s' % (dirPath, part.filename)
part.save_as(path)
except:
resp.status = falcon.HTTP_500
raise
req.context['result'] = {}
resp.status = falcon.HTTP_200
def on_delete(self, req, resp):
user = req.context['user']
if (not user.is_logged_in()) or (not user.is_org()):
resp.status = falcon.HTTP_400
return
if req.get_param('path'):
shortPath = req.get_param('path').replace('..', '')
else:
shortPath = "."
filePath = 'data/content/' + shortPath
if not os.path.isfile(filePath):
resp.status = falcon.HTTP_404
return
try:
os.remove(filePath)
self._delete_tree(os.path.dirname(filePath))
except:
resp.status = falcon.HTTP_500
raise
resp.status = falcon.HTTP_200
req.context['result'] = {}
class TaskContent(object):
def on_get(self, req, resp, id, view):
user = req.context['user']
if (view != 'icon' and not view.startswith('reseni')
and not view.startswith('zadani')):
resp.status = falcon.HTTP_400
return
path_param = req.get_param('path')
if path_param is None:
resp.status = falcon.HTTP_400
return
filePath = 'data/task-content/' + id + '/' + view + '/' + \
path_param.replace('..', '')
if not os.path.isfile(filePath):
resp.status = falcon.HTTP_404
return
resp.content_type = magic.Magic(mime=True).from_file(filePath)
resp.stream_len = os.path.getsize(filePath)
resp.stream = open(filePath, 'rb')
| mit |
peterbe/bramble | vendor-local/lib/python/werkzeug/useragents.py | 92 | 5181 | # -*- coding: utf-8 -*-
"""
werkzeug.useragents
~~~~~~~~~~~~~~~~~~~
This module provides a helper to inspect user agent strings. This module
is far from complete but should work for most of the currently available
browsers.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
class UserAgentParser(object):
"""A simple user agent parser. Used by the `UserAgent`."""
platforms = (
('iphone|ios', 'iphone'),
(r'darwin|mac|os\s*x', 'macos'),
('win', 'windows'),
(r'android', 'android'),
(r'x11|lin(\b|ux)?', 'linux'),
('(sun|i86)os', 'solaris'),
(r'nintendo\s+wii', 'wii'),
('irix', 'irix'),
('hp-?ux', 'hpux'),
('aix', 'aix'),
('sco|unix_sv', 'sco'),
('bsd', 'bsd'),
('amiga', 'amiga')
)
browsers = (
('googlebot', 'google'),
('msnbot', 'msn'),
('yahoo', 'yahoo'),
('ask jeeves', 'ask'),
(r'aol|america\s+online\s+browser', 'aol'),
('opera', 'opera'),
('chrome', 'chrome'),
('firefox|firebird|phoenix|iceweasel', 'firefox'),
('galeon', 'galeon'),
('safari', 'safari'),
('webkit', 'webkit'),
('camino', 'camino'),
('konqueror', 'konqueror'),
('k-meleon', 'kmeleon'),
('netscape', 'netscape'),
(r'msie|microsoft\s+internet\s+explorer', 'msie'),
('lynx', 'lynx'),
('links', 'links'),
('seamonkey|mozilla', 'seamonkey')
)
_browser_version_re = r'(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?(?i)'
_language_re = re.compile(
r'(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|'
r'(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)'
)
def __init__(self):
self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
self.browsers = [(b, re.compile(self._browser_version_re % a))
for a, b in self.browsers]
def __call__(self, user_agent):
for platform, regex in self.platforms:
match = regex.search(user_agent)
if match is not None:
break
else:
platform = None
for browser, regex in self.browsers:
match = regex.search(user_agent)
if match is not None:
version = match.group(1)
break
else:
browser = version = None
match = self._language_re.search(user_agent)
if match is not None:
language = match.group(1) or match.group(2)
else:
language = None
return platform, browser, version, language
class UserAgent(object):
"""Represents a user agent. Pass it a WSGI environment or a user agent
string and you can inspect some of the details from the user agent
string via the attributes. The following attributes exist:
.. attribute:: string
the raw user agent string
.. attribute:: platform
the browser platform. The following platforms are currently
recognized:
- `aix`
- `amiga`
- `android`
- `bsd`
- `hpux`
- `iphone`
- `irix`
- `linux`
- `macos`
- `sco`
- `solaris`
- `wii`
- `windows`
.. attribute:: browser
the name of the browser. The following browsers are currently
recognized:
- `aol` *
- `ask` *
- `camino`
- `chrome`
- `firefox`
- `galeon`
- `google` *
- `kmeleon`
- `konqueror`
- `links`
- `lynx`
- `msie`
- `msn`
- `netscape`
- `opera`
- `safari`
- `seamonkey`
- `webkit`
- `yahoo` *
(Browsers maked with a star (``*``) are crawlers.)
.. attribute:: version
the version of the browser
.. attribute:: language
the language of the browser
"""
_parser = UserAgentParser()
def __init__(self, environ_or_string):
if isinstance(environ_or_string, dict):
environ_or_string = environ_or_string.get('HTTP_USER_AGENT', '')
self.string = environ_or_string
self.platform, self.browser, self.version, self.language = \
self._parser(environ_or_string)
def to_header(self):
return self.string
def __str__(self):
return self.string
def __nonzero__(self):
return bool(self.browser)
def __repr__(self):
return '<%s %r/%s>' % (
self.__class__.__name__,
self.browser,
self.version
)
# conceptionally this belongs in this module but because we want to lazily
# load the user agent module (which happens in wrappers.py) we have to import
# it afterwards. The class itself has the module set to this module so
# pickle, inspect and similar modules treat the object as if it was really
# implemented here.
from werkzeug.wrappers import UserAgentMixin
| mpl-2.0 |
raymond91125/tissue_enrichment_tool_hypergeometric_test | pea_paper_docs/src/dictionary_generator.py | 4 | 21443 | """Contains classes solr_query, node and sisters."""
# -*- coding: utf-8 -*-
from urllib.request import urlopen
import simplejson
import json
import numpy as np
import pandas as pd
import contextlib
# import copy
class solr_query():
"""
A solr_query class that stores URLs.
Attbts:
solr_url - the main solr_url
"""
def __init__(self, solr_url, query):
"""Initialize the solr_query object."""
self.solr_url = solr_url
self.query = query
def set_solr_url(self, url):
"""Assign a url to the solr_query object."""
self.solr_url = url
def add_query_url(self, url):
"""Add a query url to the solr_query object."""
self.query = url
def open_query(self, p=0, timeout=10):
"""
Given a query, append it to the main url.
Open URL and use simplejson to load the results
"""
timer = 10 # don't try more than 10x per website
if p:
print(self.solr_url + self.query)
while timer > 0:
try:
with contextlib.closing(urlopen(self.solr_url +
self.query, timeout=timeout)) as conn:
return simplejson.load(conn)
except:
# raise Warning('URL is invalid or may have timed out')
timer -= 1
pass
# raise an error if the timer reached 0
if timer == 0:
print(self.solr_url + self.query)
raise Warning('Url could not be contacted or is invalid')
class node():
"""
A node is intended to be a single ontology term.
Attributes:
name - wbbt id
parents
genes
similarity - no. of genes assoc. with this node divided
by the set of genes of its sister set
drop -- whether to drop or not
good_name -- human readable plus wbbt
QUERIES FOR RELATIONS AND GENES ARE LAMBDA FUNCTIONS
query_relation(x) -- gets families of tissue x
query_genes(x) -- gets genes assoc with x
query_readable
"""
def __init__(self, name):
"""Initialization function."""
self.name = name
self.daughters = []
self.parents = []
self.genes = []
self.similarity = 0
self.drop = False
self.good_name = ''
def get_name(self, human_readable):
"""Generate a good name (human readable + WBid)."""
if human_readable == '':
print('warning, empty human readable name')
self.good_name = human_readable + ' ' + self.name
def add_daughter(self, daughter):
"""Add a daughter to this node."""
self.daughters.append(daughter)
self.daughters = list(set(self.daughters)) # prevent redundancy
def add_parent(self, parent):
"""Add a parent to this node."""
self.parents.append(parent)
self.parents = list(set(self.parents))
def add_annotation(self, gene):
"""Add annotation to this node."""
self.genes.append(gene)
self.genes = list(set(self.genes))
def throw_away(self):
"""Set the `drop` variable to True."""
self.drop = True
def calc_similarity(self, sim):
"""Calculate similarity."""
self.similarity = sim
def find_family(self, solr_url, query_relation, p=0):
"""
Find the family for this node by using solr_url and query_relation.
query_relation(x) --lambda function
"""
# get the json object
rsp_rlshp = solr_query(solr_url, query_relation(self.name))
# extract the array with all the right information
array_of_rlshps = rsp_rlshp.open_query(p=p)['response']['docs'][0]
# go through the array, turning each line into a dictionary
# these mini-dictionaries contain the edges between nodes
for j in json.loads(array_of_rlshps['topology_graph_json'])['edges']:
# if the object isnt the same as the wbbt, object is parent to wbbt
# if object is same as wbbt, wbbt is parent to subject
if self.name != j['obj']:
self.add_parent(j['obj'])
else:
self.add_daughter(j['sub'])
def find_genes(self, solr_url, query_genes):
"""
For a given wbbt, find the genes associated with it.
query_genes(x) -- lambda function!
"""
rsp_genes = solr_query(solr_url, query_genes(self.name))
# extract the array with all the right information
array_of_genes = rsp_genes.open_query()['response']['docs']
# go through the array, turning each line into a dictionary
for entry in array_of_genes:
self.genes.append(entry['id'][3:]) # remove WB: from the string
self.genes = list(set(self.genes))
class sisters(object):
"""
A sister object that contains related terms.
A sister object that is meant to contain a set of terms that are related
Sisters are defined as a set of nodes that share a single parent
If a node is multiparent, it can have as many different sister sets as
parents.
Attributes:
parent -- the parent for this set
sisters -- set of `node` objects that are related by the same parent
geneset -- total set of genes associated with these sisters
threshold -- similarity threshold that specifies above which similarity
sisters must be killed
dropsisters -- boolean
dropped -- an array that keeps track of all sisters ever dropped
"""
def __init__(self, parent, threshold):
"""Initialize function."""
self.parent = parent
self.sisters = []
self.geneset = []
self.threshold = threshold
self.dropsisters = 0
self.dropped = []
def add_sister(self, sister):
"""Add a sister."""
if self.sisters:
self.sisters.append(sister)
else:
self.sisters = [sister]
self.geneset = list(set(self.geneset+(sister.genes)))
def add_sisters(self, sisters):
"""Add multiple sisters."""
self.sisters = list(set(self.sisters+sisters))
for sister in sisters:
self.geneset = self.geneset+sister.genes
self.geneset = list(set(self.geneset))
def add_dropped(self, sister):
"""Add a sister to the `dropped` list."""
if sister not in list:
self.dropped.append(sister)
else:
self.dropped = self.dropped+sister
def calc_similarity(self, method):
"""
Calculate the family wise similarity for this object.
A method to calculate the similarity of a set of sisters to each other
by finding the cardinality of the total gene set and the cardinality of
the gene set for each node
Depending on the method, the sisters.dropsisters value is modified if
the sisters are too similar to each other
"""
if len(self.sisters) == 0:
return 0
if self.geneset == 0:
return 1
if method not in ['avg', 'any']:
raise ValueError('method must be one of \'avg\' or \'any\'')
avg = 0
for sister in self.sisters:
sim = len(sister.genes)/len(self.geneset)
sister.calc_similarity(sim)
if method == 'any':
if sim > self.threshold:
self.dropsisters = 1
avg += sim
avg = avg/len(self.sisters)
if method == 'avg':
if avg > self.threshold:
self.dropsisters = 1
def kill(self):
"""If dropsister variable is 1, set `dropped` = 'sisters'."""
if self.dropsisters == 1:
self.dropped = self.sisters
def trim(self, val):
"""If sister doesn't have `val` genes assoc. with it, drop it."""
if len(self.sisters) == 0:
return
for sister in self.sisters:
if len(sister.genes) < val:
self.dropped.append(sister)
class ontology():
"""An ontological object."""
def __init__(self, name, cutoff, threshold, method, solr_url):
"""Initialization function."""
self.name = name
self.threshold = threshold
self.method = method
self.nodes = {}
self.family = {}
self.solr_url = solr_url
self.query_min_cutoff = 5
self.cutoff = cutoff
self.dropped = {}
self.good = {}
def set_min_cutoff(self, x):
"""Set minimum gene cutoff below which nodes are not fetched."""
self.query_min_cutoff = x
def add_nodes(self, query_terms, query_readable):
"""Add nodes from solr database."""
sq = solr_query(self.solr_url, query_terms(self.query_min_cutoff))
rsp_terms = sq.open_query()
sd = solr_query(self.solr_url, query_readable)
rsp_read = sd.open_query()
i = 0
for k in enumerate(rsp_terms['facet_counts']
['facet_fields']['regulates_closure']):
if i % 2 == 0:
n = node(k[1])
if n.name not in self.nodes:
self.nodes[n.name] = n
self.nodes[n.name].get_name(query_readable)
if n.name not in self.family:
self.family[n.name] = sisters(n.name, self.threshold)
i += 1
for k, val in enumerate(rsp_read['response']['docs']):
if val['id'] not in self.nodes:
continue
self.nodes[val['id']].get_name(val['annotation_class_label'])
def find_node_family(self, lambda_query_rlshp, p=0):
"""Find the nodes that are related to this one."""
for n in iter(self.nodes):
self.nodes[n].find_family(self.solr_url, lambda_query_rlshp, p=p)
def find_node_annotations(self, lambda_query_genes):
"""Fetch the annotations for this node."""
for n in iter(self.nodes):
self.nodes[n].find_genes(self.solr_url, lambda_query_genes)
if len(self.nodes[n].genes) < self.cutoff:
self.dropped[self.nodes[n].name] = self.nodes[n]
def annotate_nodes(self, lambda_query_rlshp, lambda_query_genes):
"""Annotate this node with a family and with annotations."""
self.find_node_family(lambda_query_rlshp)
self.find_node_annotations(lambda_query_genes)
def find_families(self):
"""Figure out the family structure for each node."""
for node in self.nodes:
n = self.nodes[node]
for daughter in n.daughters:
if daughter not in self.nodes:
continue
# if 'WBbt:0002367' == daughter:
# print('hi')
if len(self.nodes[daughter].genes) < self.threshold:
# add sister
self.family[n.name].add_sister(self.nodes[daughter])
# place it in sister.dropped
self.family[n.name].add_dropped(self.nodes[daughter])
# but also in self.dropped
self.dropped[n.name] = n
else:
self.family[n.name].add_sister(self.nodes[daughter])
def calculate_similarities(self):
"""Calculate the family-wise similarity."""
for parent in self.family:
self.family[parent].calc_similarity(self.method)
def kill(self):
"""Remove whatever nodes fulfill the sisters.kill criterion."""
for parent in self.family:
self.family[parent].kill()
for killed in self.family[parent].dropped:
if killed.name in self.nodes:
self.dropped[killed.name] = killed
    def ceiling(self):
        """If a node has all its complement of daughters, kill it."""
        for parent in self.family:
            # Only consider families whose parent is a fetched node.
            if parent not in self.nodes:
                continue
            # Families with no registered sisters carry no information.
            if len(self.family[parent].sisters) == 0:
                continue
            # No daughter was dropped, i.e. every daughter survived, so
            # the parent term is redundant and is dropped instead.
            if len(self.family[parent].dropped) == 0:
                self.dropped[self.nodes[parent].name] = self.nodes[parent]
    def find_good(self):
        """Fetch the surviving nodes."""
        # Every node never placed in self.dropped survives into self.good.
        for node in self.nodes:
            if node not in self.dropped:
                # NOTE(review): keyed by `good_name`, while every other map
                # in this class keys by `.name` -- confirm that the node
                # class defines `good_name` and that this is not a typo.
                self.good[self.nodes[node].good_name] = self.nodes[node]
def build_dictionary(wbbts, tissue_array, genes):
    """Build the gene-by-term membership table as a DataFrame.

    Rows follow ``genes`` in order, with one column per term in ``wbbts``;
    a cell is 1 when the gene annotates that term.  A leading 'wbid'
    column holds the gene IDs.  The root anatomy term is removed because
    it interferes with downstream enrichment testing.
    """
    row_of = {gene: i for i, gene in enumerate(genes)}
    mat = np.zeros(shape=(len(genes), len(wbbts)))
    for col, term in enumerate(wbbts):
        for gene in wbbts[term].genes:
            mat[row_of[gene], col] = 1
    df = pd.DataFrame(mat, columns=tissue_array)
    df.insert(0, 'wbid', genes)
    # drop the root term, for some reason it causes problems with hgt
    root = 'C. elegans Cell and Anatomy WBbt:0000100'
    if root in df.columns:
        df.drop(root, axis=1, inplace=True)
    return df
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# # # # # # # #
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
if __name__ == '__main__':
    # Raymond:
    # I have split up the URLs into 2 different variables to make life easier
    # solr_url contains the first part
    # query_xxx contains the second. However, query_xx can be a lambda function
    # basically, at a point in the string, I have written something like...
    # 'select?qt=standard&indent={0}'.format(x) -- the {0} is replaced by x
    # this allows me to modify the query in predictable ways.
    # hope this is clear.
    import argparse
    import sys
    parser = argparse.ArgumentParser(description='Run Dictionary Maker')
    parser.add_argument("threshold", help='The redundancy threshold',
                        type=float)
    parser.add_argument('cutoff', help='The annotation cutoff for each term',
                        type=int)
    parser.add_argument("fname",
                        help='Filename (complete with path) to save to',
                        type=str)
    parser.add_argument("-m", '--method',
                        help='method - defaults to \'any\' if not specified',
                        type=str)
    parser.add_argument("-mc", '--mincutoff',
                        help='The minimum cutoff to fetch. Defaults to 2.',
                        type=int)
    parser.add_argument("-su", '--solrurl',
                        help='The main body of the solr url.', type=str)
    # NOTE(review): the help text mentions `phenotype`, `tissue` or `gene`,
    # but the actual choices are anatomy/phenotype/go -- the help string
    # looks out of date.
    parser.add_argument("-o", "--ontology",
                        help='One of `phenotype`, `tissue` or `gene`. Only\
                        works if --solrurl has not been specified',
                        type=str, default='anatomy',
                        choices=['anatomy', 'phenotype', 'go'])
    args = parser.parse_args()
    # main solr url
    # An explicit --solrurl wins; otherwise the URL is derived from the
    # selected ontology.
    if args.solrurl:
        solr_url = args.solrurl
    else:
        # solr_url = 'http://wobr.caltech.edu:8082/solr/anatomy/'
        s = 'http://wobr.caltech.edu:8082/solr/{0}/'
        solr_url = s.format(args.ontology)
    def query_terms(x, ontology=args.ontology):
        """Search solr for terms (nodes) in the ontology.

        `x` is substituted into facet.mincount, i.e. the minimum number
        of annotating genes a term must have to be returned.
        """
        if ontology != 'go':
            s = 'select?qt=standard&indent=on&wt=json&version=2.2&fl=' +\
                'id&start=0&rows=0&q=document_category:bioentity' +\
                '&facet=true&facet.field=regulates_closure&' +\
                'facet.limit=-1&facet.mincount={0}&facet.sort' +\
                '=count&fq=source:%22WB%22&fq=-qualifier:%22not%22'
        else:
            # The GO solr core holds several species, so this query is
            # additionally restricted to C. elegans (NCBITaxon:6239).
            s = 'select?qt=standard&indent=on&wt=json&version=2.2&fl=' +\
                'id&start=0&rows=1&q=document_category:bioentity&facet=' +\
                'true&facet.field=regulates_closure&facet.limit=-1&' +\
                'facet.mincount={0}&facet.sort=count&fq=source:%22WB' +\
                '%22&fq=taxon:%22NCBITaxon:6239%22&fq=-qualifier:%22not%22'
        return s.format(x)
def query_relation(x, ontology=args.ontology):
"""
query for relationships between nodes.
given a wbbt ID `x`, find the nodes connected to it.
Links are slightly different for [anatomy, phenotype] and GO, because
in WormBase, the GO solr database includes all other worm species as
well.
"""
if ontology != 'go':
s = "select?qt=standard&fl=topology_graph_json&" +\
"version=2.2&wt=json&indent=on&rows=1&q=id:" +\
"%22{0}%22&fq=document_category:%22ontology_class%22"
else:
s = "select?qt=standard&fl=topology_graph_json&" +\
"version=2.2&wt=json&indent=on&rows=1&q=id:" +\
"%22{0}%22&fq=document_category:%22ontology_class%22"
return s.format(x)
    def query_genes(x, ontology=args.ontology):
        """
        find the genes associated with every node.

        given a wbbt ID `x`, open URL that contains genes assoc. with it.
        """
        # NOTE(review): rows is capped at 10000, so terms with more
        # annotating genes would presumably be truncated -- confirm the cap
        # is large enough for every ontology in use.
        if ontology != 'go':
            s = "select?qt=standard&indent=on&wt=json&version=2.2&" +\
                "fl=id&start=0&rows=10000&q=document_category:bioentity" +\
                "&fq=source:%22WB%22&fq=-qualifier:%22not%22&" +\
                "fq=regulates_closure:%22{0}%22"
        else:
            # GO additionally filters to C. elegans (NCBITaxon:6239).
            s = "select?qt=standard&indent=on&wt=json&version=2.2&" +\
                "fl=id&start=0&rows=10000&q=document_category:bioentity" +\
                "&fq=source:%22WB%22&fq=taxon:%22NCBITaxon:6239%22" +\
                "&fq=-qualifier:%22not%22&" +\
                "fq=regulates_closure:%22{0}%22"
        return s.format(x)
    # query for readable names
    query_readable = "select?qt=standard&fl=id,annotation_class_label" +\
                     "&version=2.2&wt=json&indent=on&rows=100000&q=id:" +\
                     "*&fq=document_category:ontology_class&" +\
                     "fq=-is_obsolete:true"
    # NOTE(review): `queries` is built but never used below.
    queries = [query_terms, query_relation, query_genes, query_readable]
    threshold = args.threshold
    cutoff = args.cutoff
    if args.method:
        method = args.method
    else:
        method = 'any'
    # NOTE(review): truthiness means `--mincutoff 0` is treated as unset
    # and silently replaced by the default of 2.
    if args.mincutoff:
        min_annot = args.mincutoff
    else:
        min_annot = 2
    # 'tissue_ontology' is just a label for this ontology object.
    trial1 = ontology('tissue_ontology', cutoff, threshold, method, solr_url)
    print('Object made')
    print('Min cutoff set at: {0}....'.format(min_annot))
    sys.stdout.flush()
    trial1.set_min_cutoff(min_annot)
    print('Fetching nodes.....')
    sys.stdout.flush()
    trial1.add_nodes(query_terms, query_readable)
    # NOTE(review): annotations are fetched before families here, the
    # opposite order of ontology.annotate_nodes() -- confirm intentional.
    print('Annotating nodes')
    sys.stdout.flush()
    trial1.find_node_annotations(query_genes)
    print('Finding node families...')
    sys.stdout.flush()
    trial1.find_node_family(query_relation)
    print('Generating node family representation...')
    sys.stdout.flush()
    trial1.find_families()
    message = 'Calculating similarities and \
removing nodes with more than {0:.2} similarity...'
    print(message.format(threshold))
    sys.stdout.flush()
    trial1.calculate_similarities()
    # NOTE(review): this message is formatted with `cutoff`, but kill()
    # applies the similarity criterion -- the wording may be swapped with
    # the message above.
    message = 'killing nodes that have less than {0} annotations...'
    print(message.format(cutoff))
    sys.stdout.flush()
    trial1.kill()
    print('Applying ceiling...')
    sys.stdout.flush()
    trial1.ceiling()
    print('Generating final list of terms...')
    trial1.find_good()
    print('No. of terms in dictionary: {0}'.format(len(trial1.good)))
    # extract keys
    print('Generating file at {0}'.format(args.fname))
    tissues = []
    genes = []
    # Collect the surviving term names and the union of their genes.
    for n in trial1.good:
        tissues.append(n)
        # print(n)
        genes = genes+trial1.good[n].genes
    genes = list(set(genes))
    df = build_dictionary(trial1.good, tissues, genes)
    df.to_csv(args.fname, index=False)
| mit |
nwchandler/ansible | lib/ansible/modules/cloud/openstack/os_security_group_rule.py | 29 | 11653 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Metadata consumed by Ansible tooling to classify this module's maturity
# and support level.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_security_group_rule
short_description: Add/Delete rule from an existing security group
author: "Benno Joy (@bennojoy)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove rule from an existing security group
options:
security_group:
description:
- Name or ID of the security group
required: true
protocol:
description:
- IP protocols TCP UDP ICMP 112 (VRRP)
choices: ['tcp', 'udp', 'icmp', '112', None]
default: None
port_range_min:
description:
- Starting port
required: false
default: None
port_range_max:
description:
- Ending port
required: false
default: None
remote_ip_prefix:
description:
- Source IP address(es) in CIDR notation (exclusive with remote_group)
required: false
remote_group:
description:
- Name or ID of the Security group to link (exclusive with
remote_ip_prefix)
required: false
ethertype:
description:
- Must be IPv4 or IPv6, and addresses represented in CIDR must
match the ingress or egress rules. Not all providers support IPv6.
choices: ['IPv4', 'IPv6']
default: IPv4
direction:
description:
- The direction in which the security group rule is applied. Not
all providers support egress.
choices: ['egress', 'ingress']
default: ingress
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a security group rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 80
port_range_max: 80
remote_ip_prefix: 0.0.0.0/0
# Create a security group rule for ping
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
remote_ip_prefix: 0.0.0.0/0
# Another way to create the ping rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
port_range_min: -1
port_range_max: -1
remote_ip_prefix: 0.0.0.0/0
# Create a TCP rule covering all ports
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 1
port_range_max: 65535
remote_ip_prefix: 0.0.0.0/0
# Another way to create the TCP rule above (defaults to all ports)
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
remote_ip_prefix: 0.0.0.0/0
# Create a rule for VRRP with numbered protocol 112
- os_security_group_rule:
security_group: loadbalancer_sg
protocol: 112
remote_group: loadbalancer-node_sg
'''
RETURN = '''
id:
description: Unique rule UUID.
type: string
returned: state == present
direction:
description: The direction in which the security group rule is applied.
type: string
sample: 'egress'
returned: state == present
ethertype:
description: One of IPv4 or IPv6.
type: string
sample: 'IPv4'
returned: state == present
port_range_min:
description: The minimum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
returned: state == present
port_range_max:
description: The maximum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
returned: state == present
protocol:
description: The protocol that is matched by the security group rule.
type: string
sample: 'tcp'
returned: state == present
remote_ip_prefix:
description: The remote IP prefix to be associated with this security group rule.
type: string
sample: '0.0.0.0/0'
returned: state == present
security_group_id:
description: The security group ID to associate with this security group rule.
type: string
returned: state == present
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
"""
Capture the complex port matching logic.
The port values coming in for the module might be -1 (for ICMP),
which will work only for Nova, but this is handled by shade. Likewise,
they might be None, which works for Neutron, but not Nova. This too is
handled by shade. Since shade will consistently return these port
values as None, we need to convert any -1 values input to the module
to None here for comparison.
For TCP and UDP protocols, None values for both min and max are
represented as the range 1-65535 for Nova, but remain None for
Neutron. Shade returns the full range when Nova is the backend (since
that is how Nova stores them), and None values for Neutron. If None
values are input to the module for both values, then we need to adjust
for comparison.
"""
# Check if the user is supplying -1 for ICMP.
if protocol == 'icmp':
if module_min and int(module_min) == -1:
module_min = None
if module_max and int(module_max) == -1:
module_max = None
# Check if user is supplying None values for full TCP/UDP port range.
if protocol in ['tcp', 'udp'] and module_min is None and module_max is None:
if (rule_min and int(rule_min) == 1
and rule_max and int(rule_max) == 65535):
# (None, None) == (1, 65535)
return True
# Sanity check to make sure we don't have type comparison issues.
if module_min:
module_min = int(module_min)
if module_max:
module_max = int(module_max)
if rule_min:
rule_min = int(rule_min)
if rule_max:
rule_max = int(rule_max)
return module_min == rule_min and module_max == rule_max
def _find_matching_rule(module, secgroup, remotegroup):
    """
    Find a rule in the group that matches the module parameters.

    :returns: The matching rule dict, or None if no matches.
    """
    params = module.params
    wanted = (params['protocol'], params['remote_ip_prefix'],
              params['ethertype'], params['direction'], remotegroup['id'])
    for rule in secgroup['security_group_rules']:
        candidate = (rule['protocol'], rule['remote_ip_prefix'],
                     rule['ethertype'], rule['direction'],
                     rule['remote_group_id'])
        if candidate != wanted:
            continue
        # Everything but the ports lines up; the port comparison has its
        # own normalization rules, so delegate to _ports_match.
        if _ports_match(params['protocol'],
                        params['port_range_min'],
                        params['port_range_max'],
                        rule['port_range_min'],
                        rule['port_range_max']):
            return rule
    return None
def _system_state_change(module, secgroup, remotegroup):
    """Return True when applying the module would modify the cloud."""
    if not secgroup:
        # Without a security group there is nothing to add a rule to or
        # remove a rule from.
        return False
    desired = module.params['state']
    rule_exists = _find_matching_rule(module, secgroup, remotegroup)
    if desired == 'present':
        return not rule_exists
    if desired == 'absent':
        return bool(rule_exists)
    return False
def main():
    """Create or delete a single OpenStack security group rule."""
    argument_spec = openstack_full_argument_spec(
        security_group = dict(required=True),
        # NOTE(Shrews): None is an acceptable protocol value for
        # Neutron, but Nova will balk at this.
        protocol = dict(default=None,
                        choices=[None, 'tcp', 'udp', 'icmp', '112']),
        port_range_min = dict(required=False, type='int'),
        port_range_max = dict(required=False, type='int'),
        remote_ip_prefix = dict(required=False, default=None),
        remote_group = dict(required=False, default=None),
        ethertype = dict(default='IPv4',
                         choices=['IPv4', 'IPv6']),
        direction = dict(default='ingress',
                         choices=['egress', 'ingress']),
        state = dict(default='present',
                     choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['remote_ip_prefix', 'remote_group'],
        ]
    )
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    state = module.params['state']
    security_group = module.params['security_group']
    remote_group = module.params['remote_group']
    changed = False
    try:
        cloud = shade.openstack_cloud(**module.params)
        secgroup = cloud.get_security_group(security_group)
        # A missing remote_group is represented by a None id so the rule
        # matching logic can compare it uniformly.
        if remote_group:
            remotegroup = cloud.get_security_group(remote_group)
        else:
            remotegroup = { 'id' : None }
        # In check mode only report whether anything would change.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, secgroup, remotegroup))
        if state == 'present':
            if not secgroup:
                module.fail_json(msg='Could not find security group %s' %
                                 security_group)
            # Only create the rule when an equivalent one does not exist.
            rule = _find_matching_rule(module, secgroup, remotegroup)
            if not rule:
                rule = cloud.create_security_group_rule(
                    secgroup['id'],
                    port_range_min=module.params['port_range_min'],
                    port_range_max=module.params['port_range_max'],
                    protocol=module.params['protocol'],
                    remote_ip_prefix=module.params['remote_ip_prefix'],
                    remote_group_id=remotegroup['id'],
                    direction=module.params['direction'],
                    ethertype=module.params['ethertype']
                )
                changed = True
            module.exit_json(changed=changed, rule=rule, id=rule['id'])
        if state == 'absent' and secgroup:
            rule = _find_matching_rule(module, secgroup, remotegroup)
            if rule:
                cloud.delete_security_group_rule(rule['id'])
                changed = True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
# Entry point when the module is executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/cdn/tests/latest/test_afd_log_analytic_scenarios.py | 2 | 4930 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ResourceGroupPreparer, JMESPathCheck
from azure.cli.testsdk import ScenarioTest, record_only
from .afdx_scenario_mixin import CdnAfdScenarioMixin
import datetime
class CdnAfdLogAnalyticScenarioTest(CdnAfdScenarioMixin, ScenarioTest):
    # Scenario tests for the `az afd log-analytic` / `afd waf-log-analytic`
    # command groups: each test provisions a profile + endpoint, runs the
    # log-analytic listing commands, and tears the endpoint down.
    @ResourceGroupPreparer()
    def test_afd_log_analytic(self, resource_group):
        profile_name = 'profile123'
        self.afd_profile_create_cmd(resource_group, profile_name)
        endpoint_name = self.create_random_name(prefix='endpoint', length=24)
        origin_response_timeout_seconds = 100
        enabled_state = "Enabled"
        checks = [JMESPathCheck('originResponseTimeoutSeconds', 100),
                  JMESPathCheck('enabledState', 'Enabled')]
        self.afd_endpoint_create_cmd(resource_group,
                                     profile_name,
                                     endpoint_name,
                                     origin_response_timeout_seconds,
                                     enabled_state,
                                     checks=checks)
        domain = f'{endpoint_name}.z01.azurefd.net'
        start_time = datetime.datetime.now().astimezone().replace(microsecond=0)
        # In playback mode the timestamps must match the recorded session,
        # so a fixed datetime is used instead of "now".
        if self.is_playback_mode():
            start_time = datetime.datetime(2021, 4, 2, 10, 23, 7, tzinfo=datetime.timezone.utc)
        end_time = start_time + datetime.timedelta(seconds=300)
        location_list_commands = f"afd log-analytic location list -g {resource_group} --profile-name {profile_name}"
        self.cmd(location_list_commands, expect_failure=False)
        resource_list_commands = f"afd log-analytic resource list -g {resource_group} --profile-name {profile_name}"
        self.cmd(resource_list_commands, expect_failure=False)
        metric_list_commands = f"afd log-analytic metric list -g {resource_group} --profile-name {profile_name} --metrics clientRequestCount " + \
            f"--date-time-begin {start_time.isoformat()} --granularity PT5M --date-time-end {end_time.isoformat()} --custom-domains {domain} --protocols http --group-by cacheStatus"
        self.cmd(metric_list_commands, expect_failure=False)
        ranking_list_commands = f"afd log-analytic ranking list -g {resource_group} --profile-name {profile_name} --metrics clientRequestCount " + \
            f"--date-time-begin {start_time.isoformat()} --date-time-end {end_time.isoformat()} --custom-domains {domain} --rankings referrer --max-ranking 10"
        self.cmd(ranking_list_commands, expect_failure=False)
        self.afd_endpoint_delete_cmd(resource_group, endpoint_name, profile_name)
    @ResourceGroupPreparer()
    def test_afd_waf_log_analytic(self, resource_group):
        profile_name = 'profile123'
        # WAF analytics require the premium SKU.
        self.afd_profile_create_cmd(resource_group, profile_name, sku="Premium_AzureFrontDoor")
        endpoint_name = self.create_random_name(prefix='endpoint', length=24)
        origin_response_timeout_seconds = 100
        enabled_state = "Enabled"
        checks = [JMESPathCheck('originResponseTimeoutSeconds', 100),
                  JMESPathCheck('enabledState', 'Enabled')]
        self.afd_endpoint_create_cmd(resource_group,
                                     profile_name,
                                     endpoint_name,
                                     origin_response_timeout_seconds,
                                     enabled_state,
                                     checks=checks)
        start_time = datetime.datetime.now().astimezone().replace(microsecond=0)
        # Fixed timestamp for recorded-session playback (see above).
        if self.is_playback_mode():
            start_time = datetime.datetime(2021, 4, 2, 10, 23, 6, tzinfo=datetime.timezone.utc)
        end_time = start_time + datetime.timedelta(seconds=300)
        metric_list_commands = f"afd waf-log-analytic metric list -g {resource_group} --profile-name {profile_name} --metrics clientRequestCount " + \
            f"--date-time-begin {start_time.isoformat()} --date-time-end {end_time.isoformat()} --granularity PT5M --rule-types managed"
        self.cmd(metric_list_commands, expect_failure=False)
        ranking_list_commands = f"afd waf-log-analytic ranking list -g {resource_group} --profile-name {profile_name} --metrics clientRequestCount " + \
            f"--date-time-begin {start_time.isoformat()} --date-time-end {end_time.isoformat()} --rankings action --max-ranking 10"
        self.cmd(ranking_list_commands, expect_failure=False)
        self.afd_endpoint_delete_cmd(resource_group, endpoint_name, profile_name)
| mit |
soumyajitpaul/soumyajit-github-byte2 | lib/flask/testsuite/__init__.py | 564 | 7022 | # -*- coding: utf-8 -*-
"""
flask.testsuite
~~~~~~~~~~~~~~~
Tests Flask itself. The majority of Flask is already tested
as part of Werkzeug.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import os
import sys
import flask
import warnings
import unittest
from functools import update_wrapper
from contextlib import contextmanager
from werkzeug.utils import import_string, find_modules
from flask._compat import reraise, StringIO
def add_to_path(path):
    """Prepend *path* to sys.path, removing any earlier occurrence.

    The entry is moved to the front (not appended) so that modules under
    it are guaranteed to win the import search.
    """
    if not os.path.isdir(path):
        raise RuntimeError('Tried to add nonexisting path')

    def _is_same(x, y):
        if x == y:
            return True
        try:
            return os.path.samefile(x, y)
        except (IOError, OSError, AttributeError):
            # Windows has no os.path.samefile
            return False

    sys.path[:] = [entry for entry in sys.path if not _is_same(path, entry)]
    sys.path.insert(0, path)
def iter_suites():
    """Yields all testsuites."""
    for modname in find_modules(__name__):
        module = import_string(modname)
        # Only packages that export a suite() factory contribute tests.
        if hasattr(module, 'suite'):
            yield module.suite()
def find_all_tests(suite):
"""Yields all the tests and their names from a given suite."""
suites = [suite]
while suites:
s = suites.pop()
try:
suites.extend(s)
except TypeError:
yield s, '%s.%s.%s' % (
s.__class__.__module__,
s.__class__.__name__,
s._testMethodName
)
@contextmanager
def catch_warnings():
    """Catch warnings in a with block in a list"""
    # make sure deprecation warnings are active in tests
    warnings.simplefilter('default', category=DeprecationWarning)
    saved_filters = warnings.filters
    warnings.filters = saved_filters[:]
    saved_show = warnings.showwarning
    log = []

    def record(message, category, filename, lineno, file=None, line=None):
        log.append(locals())

    try:
        warnings.showwarning = record
        yield log
    finally:
        warnings.filters = saved_filters
        warnings.showwarning = saved_show
@contextmanager
def catch_stderr():
    """Catch stderr in a StringIO"""
    saved = sys.stderr
    sys.stderr = captured = StringIO()
    try:
        yield captured
    finally:
        sys.stderr = saved
def emits_module_deprecation_warning(f):
    """Decorator asserting that *f* triggers a module deprecation warning."""
    def wrapped(self, *args, **kwargs):
        with catch_warnings() as log:
            f(self, *args, **kwargs)
        self.assert_true(log, 'expected deprecation warning')
        for entry in log:
            self.assert_in('Modules are deprecated', str(entry['message']))
    return update_wrapper(wrapped, f)
class FlaskTestCase(unittest.TestCase):
    """Baseclass for all the tests that Flask uses. Use these methods
    for testing instead of the camelcased ones in the baseclass for
    consistency.
    """
    def ensure_clean_request_context(self):
        # make sure we're not leaking a request context since we are
        # testing flask internally in debug mode in a few cases
        leaks = []
        while flask._request_ctx_stack.top is not None:
            leaks.append(flask._request_ctx_stack.pop())
        self.assert_equal(leaks, [])
    # setup/teardown are the snake_case hooks subclasses override; the
    # camelCase unittest hooks below delegate to them.
    def setup(self):
        pass
    def teardown(self):
        pass
    def setUp(self):
        self.setup()
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        # The leak check runs after every test, before subclass teardown.
        self.ensure_clean_request_context()
        self.teardown()
    def assert_equal(self, x, y):
        return self.assertEqual(x, y)
    def assert_raises(self, exc_type, callable=None, *args, **kwargs):
        # Dual-use: with no callable, returns a context manager; with one,
        # invokes it immediately and asserts the exception was raised.
        catcher = _ExceptionCatcher(self, exc_type)
        if callable is None:
            return catcher
        with catcher:
            callable(*args, **kwargs)
    def assert_true(self, x, msg=None):
        self.assertTrue(x, msg)
    def assert_false(self, x, msg=None):
        self.assertFalse(x, msg)
    def assert_in(self, x, y):
        self.assertIn(x, y)
    def assert_not_in(self, x, y):
        self.assertNotIn(x, y)
    # Python 2.6's unittest lacks assertIn/assertNotIn; provide shims.
    if sys.version_info[:2] == (2, 6):
        def assertIn(self, x, y):
            assert x in y, "%r unexpectedly not in %r" % (x, y)
        def assertNotIn(self, x, y):
            assert x not in y, "%r unexpectedly in %r" % (x, y)
class _ExceptionCatcher(object):
def __init__(self, test_case, exc_type):
self.test_case = test_case
self.exc_type = exc_type
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
exception_name = self.exc_type.__name__
if exc_type is None:
self.test_case.fail('Expected exception of type %r' %
exception_name)
elif not issubclass(exc_type, self.exc_type):
reraise(exc_type, exc_value, tb)
return True
class BetterLoader(unittest.TestLoader):
    """A nicer loader that solves two problems. First of all we are setting
    up tests from different sources and we're doing this programmatically
    which breaks the default loading logic so this is required anyways.
    Secondly this loader has a nicer interpolation for test names than the
    default one so you can just do ``run-tests.py ViewTestCase`` and it
    will work.
    """
    def getRootSuite(self):
        # The full suite assembled by suite(); all name lookups below
        # resolve against this tree.
        return suite()
    def loadTestsFromName(self, name, module=None):
        root = self.getRootSuite()
        if name == 'suite':
            return root
        all_tests = []
        # A test matches when the requested name equals the full dotted
        # name, a trailing component, an inner component, or a prefix.
        for testcase, testname in find_all_tests(root):
            if testname == name or \
               testname.endswith('.' + name) or \
               ('.' + name + '.') in testname or \
               testname.startswith(name + '.'):
                all_tests.append(testcase)
        if not all_tests:
            raise LookupError('could not find test case for "%s"' % name)
        # A single match is returned directly; multiple matches are
        # wrapped in a fresh suite.
        if len(all_tests) == 1:
            return all_tests[0]
        rv = unittest.TestSuite()
        for test in all_tests:
            rv.addTest(test)
        return rv
def setup_path():
    """Put the bundled test applications at the front of sys.path."""
    test_apps = os.path.join(os.path.dirname(__file__), 'test_apps')
    add_to_path(os.path.abspath(test_apps))
def suite():
    """A testsuite that has all the Flask tests. You can use this
    function to integrate the Flask tests into your own testsuite
    in case you want to test that monkeypatches to Flask do not
    break it.
    """
    setup_path()
    combined = unittest.TestSuite()
    for sub_suite in iter_suites():
        combined.addTest(sub_suite)
    return combined
def main():
    """Runs the testsuite as command line application."""
    try:
        unittest.main(testLoader=BetterLoader(), defaultTest='suite')
    except Exception as exc:
        # Best-effort CLI entry point: report the error instead of dumping
        # a traceback.
        print('Error: %s' % exc)
| apache-2.0 |
AnimeshSinha1309/WebsiteEdunet | WebsiteEdunet/env/Lib/site-packages/django/db/backends/mysql/operations.py | 65 | 8583 | from __future__ import unicode_literals
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import six, timezone
from django.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
    # MySQL-specific SQL generation helpers for the Django ORM.
    compiler_module = "django.db.backends.mysql.compiler"
    # MySQL stores positive fields as UNSIGNED ints.
    integer_field_ranges = dict(BaseDatabaseOperations.integer_field_ranges,
        PositiveSmallIntegerField=(0, 65535),
        PositiveIntegerField=(0, 4294967295),
    )
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
params = [tzname]
else:
params = []
return field_name, params
def datetime_cast_date_sql(self, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = "DATE(%s)" % field_name
return sql, params
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = self.date_extract_sql(lookup_type, field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql, params
def date_interval_sql(self, timedelta):
return "INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND" % (
timedelta.days, timedelta.seconds, timedelta.microseconds), []
def format_for_duration_arithmetic(self, sql):
if self.connection.features.supports_microsecond_precision:
return 'INTERVAL %s MICROSECOND' % sql
else:
return 'INTERVAL FLOOR(%s / 1000000) SECOND' % sql
    def drop_foreignkey_sql(self):
        # MySQL spells "drop this FK constraint" as DROP FOREIGN KEY
        # rather than the standard DROP CONSTRAINT.
        return "DROP FOREIGN KEY"
    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return [(None, ("NULL", [], False))]
    def fulltext_search_sql(self, field_name):
        # MATCH ... AGAINST in boolean mode; %%s is the placeholder for
        # the search expression supplied as a query parameter.
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
    def last_executed_query(self, cursor, sql, params):
        # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
        # attribute where the exact query sent to the database is saved.
        # See MySQLdb/cursors.py in the source distribution.
        # force_text(errors='replace') guards against undecodable bytes
        # coming back from the driver; sql/params are deliberately unused.
        return force_text(getattr(cursor, '_last_executed', None), errors='replace')
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
    """Backtick-quote an identifier, unless it is already quoted."""
    already_quoted = name.startswith("`") and name.endswith("`")
    if already_quoted:
        return name
    return "`%s`" % name
def random_function_sql(self):
    """Return MySQL's random-value function."""
    return 'RAND()'
def sql_flush(self, style, tables, sequences, allow_cascade=False):
    """Return TRUNCATE statements for every table, bracketed by statements
    that disable and re-enable foreign key checks (MySQL-specific)."""
    if not tables:
        return []
    # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
    # to clear all tables of all data.
    sql = ['SET FOREIGN_KEY_CHECKS = 0;']
    sql.extend(
        '%s %s;' % (style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table)))
        for table in tables
    )
    sql.append('SET FOREIGN_KEY_CHECKS = 1;')
    sql.extend(self.sequence_reset_by_name_sql(style, sequences))
    return sql
def validate_autopk_value(self, value):
    """Reject 0 as an AutoField value.

    MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
    """
    if value != 0:
        return value
    raise ValueError('The database backend does not accept 0 as a '
                     'value for AutoField.')
def adapt_datetimefield_value(self, value):
    """Convert a Python datetime to the text form MySQL expects."""
    if value is None:
        return None
    # MySQL doesn't support tz-aware datetimes
    if timezone.is_aware(value):
        if settings.USE_TZ:
            # Normalise to the connection's timezone before stripping tzinfo.
            value = timezone.make_naive(value, self.connection.timezone)
        else:
            raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
    if not self.connection.features.supports_microsecond_precision:
        # Older servers silently drop microseconds; do it explicitly.
        value = value.replace(microsecond=0)
    return six.text_type(value)
def adapt_timefield_value(self, value):
    """Convert a Python time to the text form MySQL expects."""
    if value is None:
        return None
    # MySQL doesn't support tz-aware times
    if timezone.is_aware(value):
        raise ValueError("MySQL backend does not support timezone-aware times.")
    return six.text_type(value)
def max_name_length(self):
    """MySQL limits identifier names (tables, columns, ...) to 64 characters."""
    return 64
def bulk_insert_sql(self, fields, placeholder_rows):
    """Build the VALUES clause of a multi-row INSERT from placeholder rows."""
    rows = ("(%s)" % ", ".join(row) for row in placeholder_rows)
    return "VALUES " + ", ".join(rows)
def combine_expression(self, connector, sub_expressions):
    """
    MySQL requires special cases for ^ operators in query expressions
    """
    # In MySQL '^' is bitwise XOR, not exponentiation, so emit POW() instead.
    if connector == '^':
        return 'POW(%s)' % ','.join(sub_expressions)
    return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def get_db_converters(self, expression):
    """Return the value converters to apply to this expression's results,
    appending MySQL-specific ones based on the field's internal type."""
    converters = super(DatabaseOperations, self).get_db_converters(expression)
    internal_type = expression.output_field.get_internal_type()
    if internal_type == 'TextField':
        converters.append(self.convert_textfield_value)
    elif internal_type in ['BooleanField', 'NullBooleanField']:
        converters.append(self.convert_booleanfield_value)
    elif internal_type == 'DateTimeField':
        converters.append(self.convert_datetimefield_value)
    elif internal_type == 'UUIDField':
        converters.append(self.convert_uuidfield_value)
    return converters
def convert_textfield_value(self, value, expression, connection, context):
    """Coerce text column values to unicode (drivers may return bytes)."""
    if value is not None:
        value = force_text(value)
    return value
def convert_booleanfield_value(self, value, expression, connection, context):
    """Map MySQL's 0/1 boolean representation to Python bools.

    Any other value (e.g. None for NULL) passes through unchanged.
    """
    return bool(value) if value in (0, 1) else value
def convert_datetimefield_value(self, value, expression, connection, context):
    """Attach the connection's timezone to naive datetimes when USE_TZ is on."""
    if value is not None:
        if settings.USE_TZ:
            value = timezone.make_aware(value, self.connection.timezone)
    return value
def convert_uuidfield_value(self, value, expression, connection, context):
    """Parse the database's string representation into a uuid.UUID.

    None (SQL NULL) passes through unchanged.
    """
    if value is None:
        return None
    return uuid.UUID(value)
| mit |
Infraxion/modis | modis/gui/tabs/database.py | 1 | 10062 | import logging
import json
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import filedialog, messagebox
from modis.tools import config, data
logger = logging.getLogger(__name__)
class Frame(ttk.Frame):
    """A tab containing controls for the data.json"""

    def __init__(self, parent):
        """Create the frame.

        Args:
            parent: A tk or ttk object.
        """
        super(Frame, self).__init__(parent, padding=8)

        # Add elements
        tree = self.DataTree(self)

        # Grid elements
        tree.grid(column=0, row=0, padx=8, pady=8, sticky="W E N S")

        # Configure stretch ratios
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

    class DataTree(ttk.LabelFrame):
        """The database display"""

        def __init__(self, parent):
            """Create the tree.

            Args:
                parent: A tk or ttk object.
            """
            super(Frame.DataTree, self).__init__(parent, padding=8, text="Database tree")

            # Define variables
            self.datapath = tk.StringVar(value=config.DATAFILE)  # path shown in the entry
            self.selected_key_var = tk.StringVar(value="Select a variable to edit")
            self.selected_val_var = tk.StringVar(value="")
            self.all_items = []       # every inserted tree item id (for refresh/expand/collapse)
            self.selected_path = []   # key path root..leaf of the current selection

            # Add elements
            frame_path = ttk.Frame(self)
            datapath_label = ttk.Label(frame_path, text="Data file path:")
            datapath_entry = ttk.Entry(frame_path, textvariable=self.datapath, state="readonly")
            datapath_button = ttk.Button(frame_path, command=self.set_data_location, text="Change")
            frame_control = ttk.Frame(self)
            button_refresh = ttk.Button(frame_control, text="Refresh", command=self.tree_update)
            button_expand = ttk.Button(frame_control, text="Expand all", command=self.tree_expand)
            button_collapse = ttk.Button(frame_control, text="Collapse all", command=self.tree_collapse)
            frame_tree = ttk.Frame(self)
            self.tree = ttk.Treeview(frame_tree, columns="val")
            self.tree.bind("<<TreeviewSelect>>", self.tree_select)
            self.tree.column("#0", width=50)
            self.tree.heading("#0", text="Key")
            self.tree.heading("val", text="Value")
            self.tree_update()
            yscrollbar = ttk.Scrollbar(frame_tree, orient="vertical", command=self.tree.yview)
            xscrollbar = ttk.Scrollbar(frame_tree, orient="horizontal", command=self.tree.xview)
            self.tree['yscrollcommand'] = yscrollbar.set
            self.tree['xscrollcommand'] = xscrollbar.set
            frame_edit = ttk.Frame(self)
            label_key = ttk.Label(frame_edit, text="Selected key:")
            label_val = ttk.Label(frame_edit, text="Change to:")
            selected_key = ttk.Entry(frame_edit, textvariable=self.selected_key_var, state="readonly")
            selected_val = ttk.Entry(frame_edit, textvariable=self.selected_val_var)
            self.selected_enter = ttk.Button(frame_edit, text="Enter", command=self.tree_edit, state="disabled")

            # Grid elements
            # NOTE(review): "stick" is probably a typo for "sticky"; it only works
            # because Tcl accepts unambiguous option-name prefixes — confirm.
            datapath_label.grid(column=0, row=0, padx=4, pady=4, stick="E")
            datapath_entry.grid(column=1, row=0, padx=4, pady=4, sticky="W E")
            datapath_button.grid(column=2, row=0, padx=4, pady=4, sticky="E")
            button_refresh.grid(column=0, row=0, padx=4, pady=4, sticky="W")
            button_expand.grid(column=1, row=0, padx=4, pady=4, sticky="W")
            button_collapse.grid(column=2, row=0, padx=4, pady=4, sticky="W")
            self.tree.grid(column=0, row=0, sticky="W E N S")
            yscrollbar.grid(column=1, row=0, sticky="N S")
            xscrollbar.grid(column=0, row=1, sticky="W E")
            label_key.grid(column=0, row=0, padx=4, pady=4, sticky="E")
            selected_key.grid(column=1, columnspan=2, row=0, padx=4, pady=4, sticky="W E")
            label_val.grid(column=0, row=1, padx=4, pady=4, sticky="E")
            selected_val.grid(column=1, row=1, padx=4, pady=4, sticky="W E")
            self.selected_enter.grid(column=2, row=1, padx=4, pady=4, sticky="E")
            frame_path.grid(column=0, row=0, sticky="W E N S")
            frame_control.grid(column=0, row=1, sticky="W E N S")
            frame_tree.grid(column=0, row=2, sticky="W E N S")
            frame_edit.grid(column=0, row=3, sticky="W E N S")

            # Configure stretch ratios
            self.columnconfigure(0, weight=1)
            self.rowconfigure(0, weight=0)
            self.rowconfigure(1, weight=0)
            self.rowconfigure(2, weight=1)
            self.rowconfigure(3, weight=0)
            frame_path.columnconfigure(0, weight=0)
            frame_path.columnconfigure(1, weight=1)
            frame_path.columnconfigure(2, weight=0)
            frame_path.rowconfigure(0, weight=0)
            frame_control.columnconfigure(0, weight=0)
            frame_control.rowconfigure(0, weight=0)
            frame_control.rowconfigure(1, weight=0)
            frame_control.rowconfigure(2, weight=0)
            frame_tree.columnconfigure(0, weight=1)
            frame_tree.columnconfigure(1, weight=0)
            frame_tree.rowconfigure(0, weight=1)
            frame_tree.rowconfigure(1, weight=0)
            frame_edit.columnconfigure(0, weight=0)
            frame_edit.columnconfigure(1, weight=1)
            frame_edit.columnconfigure(2, weight=0)
            frame_edit.rowconfigure(0, weight=0)
            # NOTE(review): the next line duplicates the one above — it was
            # probably meant to be rowconfigure(1, weight=0); confirm.
            frame_edit.rowconfigure(0, weight=0)

        def tree_update(self):
            """Rebuild the tree view from the current data.cache contents."""
            # Items may already be gone after a data-file change, hence the guard.
            for item in self.all_items:
                try:
                    self.tree.delete(item)
                except tk.TclError:
                    pass
            self.all_items = []

            from modis.tools import data

            def recursive_add(entry, parent=""):
                # Depth-first walk: dicts become parent nodes, leaves get a
                # stringified value in the "val" column.
                for key in entry:
                    if type(entry[key]) is dict:
                        new_item = self.tree.insert(parent, "end", text=key)
                        self.all_items.append(new_item)
                        recursive_add(entry[key], new_item)
                    else:
                        new_item = self.tree.insert(parent, "end", text=key, values=str(entry[key]))
                        self.all_items.append(new_item)
            recursive_add(data.cache)

        def tree_expand(self):
            """Open every node of the tree."""
            for item in self.all_items:
                self.tree.item(item, open=True)

        def tree_collapse(self):
            """Close every node of the tree."""
            for item in self.all_items:
                self.tree.item(item, open=False)

        def tree_select(self, event):
            """Track the focused item; enable editing only for leaf nodes."""
            selected = self.tree.focus()
            self.selected_key_var.set(self.tree.item(selected)["text"])
            if self.tree.item(selected)["values"]:
                # Leaf with a value: editable.
                self.selected_val_var.set(self.tree.item(selected)["values"][0])
                self.selected_enter.config(state="enabled")
            elif not self.tree.get_children(selected):
                # Childless node without a value: editable, empty value.
                self.selected_val_var.set("")
                self.selected_enter.config(state="enabled")
            else:
                # Branch node: not editable.
                self.selected_val_var.set("")
                self.selected_enter.config(state="disabled")
            self.selected_path = []

            def recursive_get(item):
                # Walk up to the root, collecting key names front-to-back.
                parent_item = self.tree.parent(item)
                parent_name = self.tree.item(parent_item)["text"]
                self.selected_path.insert(0, self.tree.item(item)["text"])
                if parent_name:
                    recursive_get(parent_item)
            recursive_get(selected)
            self.selected_key_var.set(".".join(self.selected_path))

        def tree_edit(self):
            """Write the edited value back into data.cache and persist it."""
            if not messagebox.askokcancel(title="Edit value", message="Set {} to:\n{}".format(self.selected_key_var.get(), self.selected_val_var.get())):
                # User cancelled value edit
                messagebox.showinfo(title="Edit value", message="Value not changed.")
                return
            from modis.tools import data
            pathstr = ""
            for item in self.selected_path:
                pathstr += """["{}"]""".format(item)
            logger.warning("Updating {} to {}".format(self.selected_key_var.get(), self.selected_val_var.get()))
            # NOTE(review): exec builds the assignment from GUI-supplied key
            # names; safe only while data-file keys are trusted input.
            exec("data.cache{} = self.selected_val_var.get()".format(pathstr))
            data.push()
            self.tree_update()
            messagebox.showinfo(title="Edit value", message="Edit successful.")

        def set_data_location(self):
            """Let the user pick a new data.json; revert if it isn't valid JSON."""
            newpath = filedialog.askopenfile()
            oldpath = config.DATAFILE
            try:
                newpath = newpath.name
            except AttributeError:
                # Window was closed
                logger.warning("Data file not changed")
                return
            if not messagebox.askokcancel(title="Change data file path", message="Change data file to:\n{}".format(newpath)):
                # User cancelled path change
                messagebox.showinfo(title="Change data file path", message="Data file not changed.")
                return
            # Change the path
            config.DATAFILE = newpath
            try:
                data.pull()
            except json.decoder.JSONDecodeError:
                # Chosen file invalid
                logger.error("Chosen file is not a valid json; reverting changes")
                messagebox.showerror(title="Change data file path", message="Chosen file is not a valid json.")
                # Try again
                config.DATAFILE = oldpath
                data.pull()
                self.set_data_location()
                return
            # Successful change
            self.datapath.set(newpath)
            self.tree_update()
            logger.warning("data file changed to " + config.DATAFILE)
            messagebox.showinfo(title="Change data file path", message="Data file change successful.")
| apache-2.0 |
tonnrueter/pymca_devel | PyMca/tests/StackBaseTest.py | 1 | 8321 | #/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
#############################################################################*/
__author__ = "V.A. Sole - ESRF Data Analysis"
import unittest
import numpy
class DummyArray(object):
    """Minimal array wrapper used to emulate dynamically loaded data.

    This class forces ROI and spectra calculation to be performed as
    it is made for a dynamically loaded array.
    This allows detection of the PyMca bug track issue 3544665
    """

    def __init__(self, data):
        # Keep a (possibly shared) numpy view of the input data.
        self.data = numpy.array(data, copy=False)

    def __getitem__(self, *var):
        # Python passes a single index object (possibly a tuple), so in
        # practice len(var) == 1 and the other branches are defensive only.
        if len(var) == 1:
            return self.data[var[0]]
        elif len(var) == 2:
            return self.data[var[0], var[1]]
        elif len(var) == 3:
            return self.data[var[0], var[1], var[2]]

    def getShape(self):
        """Return the shape tuple of the wrapped array."""
        return self.data.shape

    def getDType(self):
        """Return the dtype of the wrapped array."""
        return self.data.dtype

    def getSize(self):
        """Return the total number of elements.

        Bug fix: the original iterated over the non-existent attribute
        ``self.__shape`` and raised AttributeError whenever ``.size`` was
        accessed; iterate the actual shape instead.
        """
        s = 1
        for item in self.data.shape:
            s *= item
        return s

    shape = property(getShape)
    dtype = property(getDType)
    size = property(getSize)
class testStackBase(unittest.TestCase):
    """Exercises StackBase mca/ROI computations on plain numpy arrays and on
    DummyArray wrappers (the "dynamic" case; see PyMca issue 3544665).

    Fix: ``numpy.float`` (an alias of the builtin ``float`` removed in
    numpy >= 1.20) is replaced with the behaviour-identical ``numpy.float64``.
    """

    def testStackBaseImport(self):
        from PyMca import StackBase

    def testStackBaseStack1DDataHandling(self):
        """Stack with the mca channels on the last axis (mcaindex=2)."""
        from PyMca import StackBase
        nrows = 50
        ncolumns = 100
        nchannels = 500
        a = numpy.ones((nrows, ncolumns), numpy.float64)
        referenceData = numpy.zeros((nrows, ncolumns, nchannels),
                                    numpy.float64)
        for i in range(nchannels):
            referenceData[:, :, i] = a * i
        a = None
        mask = numpy.zeros((nrows, ncolumns), numpy.uint8)
        mask[20:30, 15:50] = 1
        dummyArray = DummyArray(referenceData)
        # Expected results, computed directly with numpy.
        defaultMca = referenceData.sum(axis=0, dtype=numpy.float64).sum(axis=0)
        maskedMca = referenceData[mask > 0, :].sum(axis=0)
        for fileindex in [0, 1]:
            # usually only one file index case is used but
            # we test both to have a better coverage
            j = 0
            for data in [referenceData, dummyArray]:
                if j == 0:
                    dynamic = ""
                    j = 1
                else:
                    dynamic = "dynamic "
                stackBase = StackBase.StackBase()
                stackBase.setStack(data, mcaindex=2, fileindex=fileindex)
                channels, counts = stackBase.getActiveCurve()[0:2]
                self.assertTrue(numpy.allclose(defaultMca, counts),
                                "Incorrect %sdefault mca" % dynamic)
                # set mask
                stackBase.setSelectionMask(mask)
                self.assertTrue(numpy.allclose(stackBase.getSelectionMask(), mask),
                                "Incorrect mask set and get")
                # get mca from mask
                mcaDataObject = stackBase.calculateMcaDataObject()
                self.assertTrue(numpy.allclose(mcaDataObject.y[0], maskedMca),
                                "Incorrect %smca from mask calculation" % dynamic)
                # get image from roi
                i0 = 100
                imiddle = 200
                i1 = 400
                # calculate
                imageDict = stackBase.calculateROIImages(i0, i1, imiddle=imiddle)
                self.assertTrue(numpy.allclose(imageDict['ROI'], data[:, :, i0:i1].sum(axis=-1)),
                                "Incorrect ROI image from %sROI calculation" % dynamic)
                self.assertTrue(numpy.allclose(imageDict['Left'], data[:, :, i0]),
                                "Incorrect Left image from %sROI calculation" % dynamic)
                self.assertTrue(numpy.allclose(imageDict['Right'], data[:, :, i1 - 1]),
                                "Incorrect Right image from %sROI calculation" % dynamic)
                self.assertTrue(numpy.allclose(imageDict['Middle'], data[:, :, imiddle]),
                                "Incorrect Middle image from %sROI calculation" % dynamic)
                stackBase = None
        # Drop references to the large arrays.
        data = None
        dummyArray = None
        referenceData = None

    def testStackBaseStack2DDataHandling(self):
        """Stack with the mca channels on the first axis (mcaindex=0)."""
        from PyMca import StackBase
        nrows = 50
        ncolumns = 100
        nchannels = 500
        a = numpy.ones((nrows, ncolumns), numpy.float64)
        referenceData = numpy.zeros((nchannels, nrows, ncolumns),
                                    numpy.float64)
        for i in range(nchannels):
            referenceData[i] = a * i
        a = None
        mask = numpy.zeros((nrows, ncolumns), numpy.uint8)
        mask[20:30, 15:50] = 1
        dummyArray = DummyArray(referenceData)
        defaultMca = referenceData.sum(axis=2, dtype=numpy.float64).sum(axis=1)
        maskedMca = referenceData[:, mask > 0].sum(axis=1)
        for fileindex in [1, 2]:
            # usually only one file index case is used but
            # we test both to have a better coverage
            j = 0
            for data in [referenceData, dummyArray]:
                if j == 0:
                    dynamic = ""
                    j = 1
                else:
                    dynamic = "dynamic "
                stackBase = StackBase.StackBase()
                stackBase.setStack(data, mcaindex=0, fileindex=fileindex)
                channels, counts = stackBase.getActiveCurve()[0:2]
                self.assertTrue(numpy.allclose(defaultMca, counts),
                                "Incorrect %sdefault mca" % dynamic)
                # set mask
                stackBase.setSelectionMask(mask)
                self.assertTrue(numpy.allclose(stackBase.getSelectionMask(), mask),
                                "Incorrect mask set and get")
                # get mca from mask
                mcaDataObject = stackBase.calculateMcaDataObject()
                self.assertTrue(numpy.allclose(mcaDataObject.y[0], maskedMca),
                                "Incorrect %smca from mask calculation" % dynamic)
                # get image from roi
                i0 = 100
                imiddle = 200
                i1 = 400
                # calculate
                imageDict = stackBase.calculateROIImages(i0, i1, imiddle=imiddle)
                self.assertTrue(numpy.allclose(imageDict['ROI'], data[i0:i1, :, :].sum(axis=0)),
                                "Incorrect ROI image from %sROI calculation" % dynamic)
                self.assertTrue(numpy.allclose(imageDict['Left'], data[i0, :, :]),
                                "Incorrect Left image from %sROI calculation" % dynamic)
                self.assertTrue(numpy.allclose(imageDict['Right'], data[i1 - 1, :, :]),
                                "Incorrect Right image from %sROI calculation" % dynamic)
                self.assertTrue(numpy.allclose(imageDict['Middle'], data[imiddle, :, :]),
                                "Incorrect Middle image from %sROI calculation" % dynamic)
                stackBase = None
        data = None
        dummyArray = None
        referenceData = None
def getSuite(auto=True):
    """Build the test suite; with auto=False the cases run in a fixed order."""
    suite = unittest.TestSuite()
    if auto:
        loader = unittest.TestLoader()
        suite.addTest(loader.loadTestsFromTestCase(testStackBase))
    else:
        # use a predefined order
        for case in ("testStackBaseImport",
                     "testStackBaseStack1DDataHandling",
                     "testStackBaseStack2DDataHandling"):
            suite.addTest(testStackBase(case))
    return suite
def test(auto=False):
    """Run the suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(getSuite(auto=auto))

if __name__ == '__main__':
    test()
| gpl-2.0 |
funkyHat/cloudify-gcp-plugin | system_tests/local/__init__.py | 1 | 3662 | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
from copy import copy
from abc import ABCMeta, abstractmethod, abstractproperty
from cloudify.workflows import local
from cloudify_cli import constants as cli_constants
from cosmo_tester.framework.testenv import (
clear_environment,
initialize_without_bootstrap,
)
def setUp():
initialize_without_bootstrap()
def tearDown():
clear_environment()
class GCPTest(object):
__metaclass__ = ABCMeta
@abstractmethod
def assertions(self):
"""This will be called after the deployment is finished.
Put your test assertions here.
The deployment is uninstalled during test teardown"""
@abstractproperty
def blueprint_name(self):
"""The path to the blueprint file, relative to
`system_tests/resources`
"""
@abstractproperty
def inputs(self):
"""The list of inputs which should be copied from the provider context
inputs to the deployment inputs for `blueprint`"""
def setUp(self):
super(GCPTest, self).setUp()
self.ext_inputs = {
k: self.env.cloudify_config[k]
for k in self.inputs}
blueprints_path = os.path.split(os.path.abspath(__file__))[0]
blueprints_path = os.path.split(blueprints_path)[0]
self.blueprints_path = os.path.join(
blueprints_path,
'resources',
)
def test_blueprint(self):
blueprint = os.path.join(self.blueprints_path, self.blueprint_name)
self.logger.info('Creating a new Network')
self.pre_install_hook()
inputs = copy(self.ext_inputs)
self.test_env = local.init_env(
blueprint,
inputs=inputs,
name=self._testMethodName,
ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)
self.addCleanup(
self.test_env.execute,
'uninstall',
task_retries=10,
task_retry_interval=3,
)
self.test_env.execute(
'install',
task_retries=10,
task_retry_interval=3,
)
self.outputs = self.test_env.outputs()
self.assertions()
print('\n{}:test_blueprint succeded\n'.format(type(self).__name__))
def pre_install_hook(self):
"Override this if your test needs to do something before installing"
def assertIP(self, ip, msg=None, match=None):
"""
Set of assertions to valiate IPv4 addresses.
optional `match` input is a regular expression which can be used to
further constrain the allowed addresses.
"""
parts = [int(n) for n in ip.split('.')]
self.assertEqual(len(parts), 4)
for i, part in enumerate(parts):
part = int(part)
self.assertLess(part, 256, 'part {} too big'.format(i))
self.assertGreater(part, -1, 'part {} too small'.format(i))
if match:
self.assertRegexpMatches(ip, match)
| apache-2.0 |
RenaudParis/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_transfer_sig_wsh.py | 499 | 1854 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrong web_socket_transfer_data() signature.
"""
def web_socket_do_extra_handshake(request):
    # Accept any handshake; this fixture only exercises handler loading.
    pass
def no_web_socket_transfer_data(request):
    # Deliberately misnamed: this fixture verifies that pywebsocket rejects
    # handler files lacking a web_socket_transfer_data() function.
    request.connection.write(
        'sub/wrong_transfer_sig_wsh.py is called for %s, %s' %
        (request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
vmendez/DIRAC | Core/scripts/dirac-executor.py | 1 | 1731 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-executor
# Author : Adria Casajus
########################################################################
__RCSID__ = "$Id$"
""" This is a script to launch DIRAC executors
"""
import sys
import DIRAC
from DIRAC.ConfigurationSystem.Client.LocalConfiguration import LocalConfiguration
from DIRAC import gLogger, gConfig
from DIRAC.Core.Base.ExecutorReactor import ExecutorReactor
from DIRAC.Core.Utilities.DErrno import includeExtensionErrors
# Parse the command line; positional arguments name the executor module(s).
localCfg = LocalConfiguration()
positionalArgs = localCfg.getPositionalArguments()
if len( positionalArgs ) == 0:
  gLogger.fatal( "You must specify which executor to run!" )
  sys.exit( 1 )

# A single "System/Name" argument selects that executor's config section;
# otherwise fall back to the generic multi-executor section.
if len( positionalArgs ) == 1 and positionalArgs[0].find( "/" ) > -1:
  mainName = positionalArgs[0]
else:
  mainName = "Framework/MultiExecutor"

# Load configuration defaults for the selected executor.
localCfg.setConfigurationForExecutor( mainName )
localCfg.addMandatoryEntry( "/DIRAC/Setup" )
localCfg.addDefaultEntry( "/DIRAC/Security/UseServerCertificate", "yes" )
localCfg.addDefaultEntry( "LogLevel", "INFO" )
localCfg.addDefaultEntry( "LogColor", True )
resultDict = localCfg.loadUserData()
if not resultDict[ 'OK' ]:
  gLogger.fatal( "There were errors when loading configuration", resultDict[ 'Message' ] )
  sys.exit( 1 )

# Register extension-specific error codes, then load and run the executors.
includeExtensionErrors()
executorReactor = ExecutorReactor()
result = executorReactor.loadModules( positionalArgs )
if not result[ 'OK' ]:
  gLogger.fatal( "Error while loading executor", result[ 'Message' ] )
  sys.exit( 1 )
result = executorReactor.go()
if not result[ 'OK' ]:
  gLogger.fatal( result[ 'Message' ] )
  sys.exit( 1 )
gLogger.notice( "Graceful exit. Bye!" )
sys.exit(0)
| gpl-3.0 |
fweik/espresso | src/config/gen_featureconfig.py | 4 | 4483 | # Copyright (C) 2013-2019 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script generates the files featureconfig.h and featureconfig.c.
#
import time
import string
import inspect
import sys
import os
# find featuredefs.py
moduledir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(os.path.join(moduledir, '..'))
import featuredefs
# Expect exactly three file arguments: the feature definition file, the
# header to generate and the implementation file to generate.
if len(sys.argv) != 4:
    print("Usage: {} DEFFILE HPPFILE CPPFILE".format(
        sys.argv[0]), file=sys.stderr)
    exit(2)
deffilename, hfilename, cfilename = sys.argv[1:5]

print("Reading definitions from " + deffilename + "...")
defs = featuredefs.defs(deffilename)
print("Done.")

print("Writing " + hfilename + "...")
hfile = open(hfilename, 'w')

# Header preamble with include guard and the user/CMake configuration.
hfile.write("""/*
WARNING: This file was autogenerated by
%s on %s
Do not modify it or your changes will be overwritten!
Modify features.def instead.
*/
#ifndef ESPRESSO_FEATURECONFIG_HPP
#define ESPRESSO_FEATURECONFIG_HPP
#include <cmake_config.hpp>
#include "myconfig-final.hpp"
""" % (sys.argv[0], time.asctime()))

# external features can only be set by the build
# system, so in case the user has defined some of
# them, we undef all external features and include
# the config from the build system again, to make
# sure only the detected ones are set.
hfile.write('/* Guards for externals */')
external_template = string.Template("""
// $feature is external
#if defined($feature)
#undef $feature
#endif
""")
for feature in defs.externals:
    hfile.write(external_template.substitute(feature=feature))

# Include definitions from CMake
hfile.write("""
/* Definitions from CMake */
#include <cmake_config.hpp>
""")

# handle implications: defining a feature force-enables what it implies.
hfile.write('/* Handle implications */')
implication_template = string.Template("""
// $feature implies $implied
#if defined($feature) && !defined($implied)
#define $implied
#endif
""")
for feature, implied in defs.implications:
    hfile.write(implication_template.substitute(
        feature=feature, implied=implied))

# output warnings if internal features are set manually
hfile.write('/* Warn when derived switches are specified manually */')
derivation_template = string.Template("""
// $feature equals $expr
#ifdef $feature
#warning $feature is a derived switch and should not be set manually!
#elif $cppexpr
#define $feature
#endif
""")
for feature, expr, cppexpr in defs.derivations:
    hfile.write(derivation_template.substitute(
        feature=feature, cppexpr=cppexpr, expr=expr))

# write footer
# define external FEATURES and NUM_FEATURES
hfile.write("""
extern const char* FEATURES[];
extern const int NUM_FEATURES;
#endif /* of _FEATURECONFIG_HPP */""")
hfile.close()
print("Done.")

print("Writing " + cfilename + "...")
cfile = open(cfilename, 'w')

# handle requirements: compiling with a feature whose prerequisites are
# missing becomes a hard #error.
cfile.write("""/*
WARNING: This file was autogenerated by
{script}
on
{date}
Do not modify it or your changes will be overwritten!
Modify features.def instead.
*/
/* config.hpp includes config-features.hpp and myconfig.hpp */
#include "config.hpp"
""".format(script=sys.argv[0], date=time.asctime()))
cfile.write('/* Handle requirements */')
requirement_string = """
// {feature} requires {expr}
#if defined({feature}) && !({cppexpr})
#error Feature {feature} requires {expr}
#endif
"""
for feature, expr, cppexpr in defs.requirements:
    cfile.write(
        requirement_string.format(
            feature=feature, cppexpr=cppexpr, expr=expr))

# Emit the runtime-readable list of enabled feature names.
cfile.write("""
/* Feature list */
const char* FEATURES[] = {
""")
feature_string = """
#ifdef {feature}
"{feature}",
#endif
"""
for feature in defs.externals.union(defs.features, defs.derived):
    cfile.write(feature_string.format(feature=feature))
cfile.write("""
};
const int NUM_FEATURES = sizeof(FEATURES)/sizeof(char*);
""")
cfile.close()
print("Done.")
| gpl-3.0 |
goodwillcoding/RIDE | src/robotide/controller/arguments.py | 1 | 1357 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robotide.robotapi import is_var
from robotide.utils import variablematcher
def parse_arguments_to_var_dict(args, name):
    """Map keyword argument specs to a {variable: default} dict.

    When there are no argument specs, fall back to the scalar variables
    embedded in the keyword's *name* (they carry no defaults).
    """
    variables = {}
    for argument in args:
        parsed = parse_argument(argument)
        if parsed:
            var_name, default = parsed
            variables[var_name] = default
    if not args and name:
        for var in variablematcher.find_variable_basenames(name):
            if variablematcher.is_scalar_variable(var):
                variables[var] = None
    return variables
default_val_regexp = re.compile(r'([$@]\{.*\})\s*=\s*(.*)')


def parse_argument(argument):
    """Split one argument spec into (variable, default).

    '${var}=x' -> ('${var}', 'x'); a bare variable -> (variable, None);
    anything else -> None.
    """
    match = default_val_regexp.match(argument)
    if match:
        return match.group(1), match.group(2)
    return (argument, None) if is_var(argument) else None
| apache-2.0 |
ChainsAutomation/chains | lib/chains/services/tellstick2/tellconf.py | 1 | 2738 | import chains.service, copy, re, td, Queue, time
from chains.common import log
import types
def fromServiceConfig(config):
    """Render a chains service config into tellstick.conf text.

    Starts from the raw 'tellstickconf' section, rebuilds its 'device'
    list from the 'devices' section, and serialises the result.
    """
    base = config.get('tellstickconf')
    base['device'] = []
    if config.get('devices'):
        for key in config.get('devices'):
            device = {}
            source = config['devices'][key]
            # Only plain command devices belong in tellstick.conf.
            if source.get('class') and source['class'] != 'command':
                continue
            if source.get('id'):
                device['id'] = source['id']
            else:
                # A device without an id cannot be addressed; skip it.
                log.error("Ignoring device because missing id: %s" % source)
                #raise Exception("Ignoring device because missing id: %s" % source)
                continue
            device['name'] = key
            #if source.get('name'): device['name'] = source['name']
            if source.get('controller'): device['controller'] = source['controller']
            if source.get('protocol'): device['protocol'] = source['protocol']
            if source.get('model'): device['model'] = source['model']
            if source.get('parameters'):
                # tellstick.conf expects all parameter values as strings.
                device['parameters'] = {}
                for param in source['parameters']:
                    device['parameters'][param] = str(source['parameters'][param])
            base['device'].append(device)
    lines = []
    _render(base, lines, -1, '')
    text = '\n'.join(lines)
    return text
def write(text, path=None):
    """Write *text* UTF-8 encoded to *path* (default /etc/tellstick.conf).

    Fix: the original opened the file without a context manager, so the
    handle leaked if the write raised. Opening in binary mode keeps the
    explicit UTF-8 encoding behaviour and works on both Python 2 and 3.
    """
    if not path:
        path = '/etc/tellstick.conf'
    with open(path, 'wb') as fp:
        fp.write(text.encode('utf-8'))
def _render(config, lines, indent, parent):
    """Recursively serialise a nested dict/list structure into tellstick.conf
    lines, appending to *lines*. (Python 2: uses the `types` module.)"""
    indent += 1
    if type(config) == types.DictType:
        for key in config:
            value = config[key]
            if type(value) == types.ListType:
                # Lists emit one "<key> { ... }" section per element.
                _render(value, lines, indent, key)
            elif type(value) == types.DictType:
                _formatLine(indent, lines, '%s {' % key)
                _render(value, lines, indent, '')
                _formatLine(indent, lines, '}')
            else:
                _formatLine(indent, lines, '%s = %s' % (key, _formatValue(config[key])))
    elif type(config) == types.ListType:
        for item in config:
            # Each list element becomes a block named after the parent key.
            _formatLine(indent-1, lines, '%s {' % parent)
            _render(item, lines, indent-1, '')
            _formatLine(indent-1, lines, '}')
def _formatLine(indent, lines, string):
    # Append *string* prefixed by the indent unit repeated *indent* times.
    lines.append( '%s%s' % (' ' * indent, string) )
def _formatValue(value):
if type(value) == types.IntType:
return '%s' % value
elif type(value) == types.FloatType:
return '%s' % value
elif type(value) == types.BooleanType:
if value: return 'true'
else: return 'false'
else:
return '"%s"' % value
| gpl-2.0 |
qedsoftware/commcare-hq | corehq/apps/locations/forms.py | 1 | 14284 | import re
from django import forms
from django.db.models import Q
from django.template import Context
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.commtrack.util import generate_code
from corehq.apps.custom_data_fields import CustomDataEditor
from corehq.apps.custom_data_fields.edit_entity import get_prefixed
from corehq.apps.es import UserES
from corehq.apps.users.forms import MultipleSelectionForm
from corehq.apps.locations.permissions import LOCATION_ACCESS_DENIED
from corehq.apps.users.models import CommCareUser
from corehq.apps.users.util import raw_username, user_display_string
from .models import SQLLocation, LocationType
from .permissions import user_can_access_location_id
from .signals import location_edited
class ParentLocWidget(forms.Widget):
def render(self, name, value, attrs=None):
return get_template(
'locations/manage/partials/parent_loc_widget.html'
).render(Context({
'name': name,
'value': value,
}))
class LocTypeWidget(forms.Widget):
def render(self, name, value, attrs=None):
return get_template(
'locations/manage/partials/loc_type_widget.html'
).render(Context({
'name': name,
'value': value,
}))
class LocationForm(forms.Form):
parent_id = forms.CharField(
label=ugettext_lazy('Parent'),
required=False,
widget=ParentLocWidget(),
)
name = forms.CharField(
label=ugettext_lazy('Name'),
max_length=100,
)
location_type = forms.CharField(
label=ugettext_lazy('Organization Level'),
required=False,
widget=LocTypeWidget(),
)
coordinates = forms.CharField(
label=ugettext_lazy('Coordinates'),
max_length=30,
required=False,
help_text=ugettext_lazy("enter as 'lat lon' or 'lat, lon' "
"(e.g., '42.3652 -71.1029')"),
)
site_code = forms.CharField(
label='Site Code',
required=False,
help_text=ugettext_lazy("A unique system code for this location. "
"Leave this blank to have it auto generated"),
)
external_id = forms.CharField(
label='External ID',
required=False,
help_text=ugettext_lazy("A number referencing this location on an external system")
)
external_id.widget.attrs['readonly'] = True
strict = True # optimization hack: strict or loose validation
def __init__(self, location, bound_data=None, is_new=False, user=None,
*args, **kwargs):
self.location = location
self.domain = location.domain
self.user = user
self.is_new_location = is_new
kwargs['initial'] = {
'parent_id': location.parent_location_id,
'name': location.name,
'site_code': location.site_code,
'external_id': location.external_id,
}
if not self.is_new_location:
kwargs['initial']['location_type'] = self.location.location_type.name
kwargs['initial']['parent_id'] = self.location.parent_location_id
lat, lon = (getattr(self.location, k, None)
for k in ('latitude', 'longitude'))
kwargs['initial']['coordinates'] = ('%s, %s' % (lat, lon)
if lat is not None else '')
self.custom_data = self.get_custom_data(bound_data, is_new)
self.custom_data.form.helper.label_class = 'col-sm-3 col-md-4 col-lg-2'
self.custom_data.form.helper.field_class = 'col-sm-4 col-md-5 col-lg-3'
super(LocationForm, self).__init__(bound_data, *args, **kwargs)
self.fields['parent_id'].widget.domain = self.domain
self.fields['parent_id'].widget.user = user
if not self.location.external_id:
self.fields['external_id'].widget = forms.HiddenInput()
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.label_class = 'col-sm-3 col-md-4 col-lg-2'
self.helper.field_class = 'col-sm-4 col-md-5 col-lg-3'
self.helper.layout = crispy.Layout(
crispy.Fieldset(*self.get_fields(is_new))
)
def get_fields(self, is_new):
if is_new:
return filter(None, [
_("Location Information"),
'name',
'location_type' if len(self._get_allowed_types(self.domain, self.location.parent)) > 1 else None,
])
else:
return [
_("Location Information"),
'name',
'parent_id',
'location_type',
'coordinates',
'site_code',
'external_id',
]
def get_custom_data(self, bound_data, is_new):
from .views import LocationFieldsView
existing = self.location.metadata
# Don't show validation error preemptively on new user creation
if is_new and bound_data is None:
existing = None
return CustomDataEditor(
field_view=LocationFieldsView,
domain=self.domain,
# For new locations, only display required fields
required_only=is_new,
existing_custom_data=existing,
post_dict=bound_data,
)
def is_valid(self):
return all([
super(LocationForm, self).is_valid(),
self.custom_data.is_valid(),
])
@property
def errors(self):
errors = super(LocationForm, self).errors
errors.update(self.custom_data.errors)
return errors
def clean_parent_id(self):
if self.is_new_location:
parent = self.location.parent
parent_id = self.location.parent_location_id
else:
parent_id = self.cleaned_data['parent_id'] or None
parent = SQLLocation.objects.get(location_id=parent_id) if parent_id else None
if self.user and not user_can_access_location_id(self.domain, self.user, parent_id):
raise forms.ValidationError(LOCATION_ACCESS_DENIED)
self.cleaned_data['parent'] = parent
if self.location.location_id is not None and self.location.parent_location_id != parent_id:
# location is being re-parented
if parent and self.location.location_id in parent.path:
raise forms.ValidationError(_("Location's parent is itself or a descendant"))
if self.location.get_descendants().exists():
raise forms.ValidationError(_(
'only locations that have no child locations can be '
'moved to a different parent'
))
self.cleaned_data['orig_parent_id'] = self.location.parent_location_id
return parent_id
def clean_name(self):
def has_siblings_with_name(location, name, parent_location_id):
qs = SQLLocation.objects.filter(domain=location.domain,
name=name)
if parent_location_id:
qs = qs.filter(parent__location_id=parent_location_id)
else: # Top level
qs = qs.filter(parent=None)
return (qs.exclude(location_id=self.location.location_id)
.exists())
name = self.cleaned_data['name']
parent_location_id = self.cleaned_data.get('parent_id', None)
if self.strict:
if has_siblings_with_name(self.location, name, parent_location_id):
raise forms.ValidationError(_(
'name conflicts with another location with this parent'
))
return name
def clean_site_code(self):
site_code = self.cleaned_data['site_code']
if site_code:
site_code = site_code.lower()
if (SQLLocation.objects.filter(domain=self.domain,
site_code__iexact=site_code)
.exclude(location_id=self.location.location_id)
.exists()):
raise forms.ValidationError(_(
'another location already uses this site code'
))
return site_code
def clean(self):
if 'name' in self.cleaned_data and not self.cleaned_data.get('site_code', None):
all_codes = [
code.lower() for code in
(SQLLocation.objects.exclude(location_id=self.location.location_id)
.filter(domain=self.domain)
.values_list('site_code', flat=True))
]
self.cleaned_data['site_code'] = generate_code(self.cleaned_data['name'], all_codes)
@staticmethod
def _get_allowed_types(domain, parent):
parent_type = parent.location_type if parent else None
return list(LocationType.objects
.filter(domain=domain,
parent_type=parent_type)
.all())
def clean_location_type(self):
loc_type = self.cleaned_data['location_type']
allowed_types = self._get_allowed_types(self.domain, self.cleaned_data.get('parent'))
if not allowed_types:
raise forms.ValidationError(_('The selected parent location cannot have child locations!'))
if not loc_type:
if len(allowed_types) == 1:
loc_type_obj = allowed_types[0]
else:
raise forms.ValidationError(_('You must select a location type'))
else:
try:
loc_type_obj = (LocationType.objects
.filter(domain=self.domain)
.get(Q(code=loc_type) | Q(name=loc_type)))
except LocationType.DoesNotExist:
raise forms.ValidationError(_("LocationType '{}' not found").format(loc_type))
else:
if loc_type_obj not in allowed_types:
raise forms.ValidationError(_('Location type not valid for the selected parent.'))
self.cleaned_data['location_type_object'] = loc_type_obj
return loc_type_obj.name
def clean_coordinates(self):
coords = self.cleaned_data['coordinates'].strip()
if not coords:
return None
pieces = re.split('[ ,]+', coords)
if len(pieces) != 2:
raise forms.ValidationError(_('could not understand coordinates'))
try:
lat = float(pieces[0])
lon = float(pieces[1])
except ValueError:
raise forms.ValidationError(_('could not understand coordinates'))
return [lat, lon]
def save(self, instance=None, commit=True):
if self.errors:
raise ValueError('form does not validate')
location = instance or self.location
is_new = location.location_id is None
location.name = self.cleaned_data['name']
location.site_code = self.cleaned_data['site_code']
location.location_type = self.cleaned_data['location_type_object']
location.metadata = self.custom_data.get_data_to_save()
location.parent = self.cleaned_data['parent']
coords = self.cleaned_data['coordinates']
if coords:
location.latitude = coords[0]
location.longitude = coords[1]
location.metadata.update(get_prefixed(self.data))
if commit:
location.save()
if not is_new:
orig_parent_id = self.cleaned_data.get('orig_parent_id')
reparented = orig_parent_id is not None
location_edited.send(sender='loc_mgmt', sql_loc=location,
moved=reparented, previous_parent=orig_parent_id)
return location
class UsersAtLocationForm(MultipleSelectionForm):
def __init__(self, domain_object, location, *args, **kwargs):
self.domain_object = domain_object
self.location = location
super(UsersAtLocationForm, self).__init__(
initial={'selected_ids': self.users_at_location},
*args, **kwargs
)
self.fields['selected_ids'].choices = self.get_all_users()
self.fields['selected_ids'].label = ugettext_lazy("Workers at Location")
def get_all_users(self):
user_query = (UserES()
.domain(self.domain_object.name)
.mobile_users()
.fields(['_id', 'username', 'first_name', 'last_name']))
return [
(u['_id'], user_display_string(u['username'],
u.get('first_name', ''),
u.get('last_name', '')))
for u in user_query.run().hits
]
@property
@memoized
def users_at_location(self):
return (UserES()
.domain(self.domain_object.name)
.mobile_users()
.location(self.location.location_id)
.get_ids())
def unassign_users(self, users):
for doc in iter_docs(CommCareUser.get_db(), users):
# This could probably be sped up by bulk saving, but there's a lot
# of stuff going on - seems tricky.
CommCareUser.wrap(doc).unset_location_by_id(self.location.location_id, fall_back_to_next=True)
def assign_users(self, users):
for doc in iter_docs(CommCareUser.get_db(), users):
CommCareUser.wrap(doc).add_to_assigned_locations(self.location)
def save(self):
selected_users = set(self.cleaned_data['selected_ids'])
previous_users = set(self.users_at_location)
to_remove = previous_users - selected_users
to_add = selected_users - previous_users
self.unassign_users(to_remove)
self.assign_users(to_add)
| bsd-3-clause |
MozillaSecurity/FuzzManager | FTB/tests/test_CoverageHelper.py | 1 | 5836 | # coding: utf-8
'''
Tests
@author: Christian Holler (:decoder)
@license:
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
@contact: choller@mozilla.com
'''
import json
from FTB import CoverageHelper
covdata = r"""
{
"children": {
"topdir1": {
"children": {
"subdir1": {
"children": {
"file1.c": {
"coverage": [
-1,
-1,
12,
12,
10,
2,
0,
0,
-1
],
"coveragePercent": 66.67,
"linesCovered": 4,
"linesMissed": 2,
"linesTotal": 6,
"name": "file1.c"
},
"file2.c": {
"coverage": [
-1,
20,
20,
15,
15,
15,
15,
0,
-1,
-1
],
"coveragePercent": 85.71,
"linesCovered": 6,
"linesMissed": 1,
"linesTotal": 7,
"name": "file2.c"
}
},
"coveragePercent": 76.92,
"linesCovered": 10,
"linesMissed": 3,
"linesTotal": 13,
"name": "subdir1"
},
"subdir2": {
"children": {
"file3.c": {
"coverage": [
-1,
-1,
12,
12,
10,
2,
0,
0,
-1
],
"coveragePercent": 66.67,
"linesCovered": 4,
"linesMissed": 2,
"linesTotal": 6,
"name": "file3.c"
}
},
"coveragePercent": 66.67,
"linesCovered": 4,
"linesMissed": 2,
"linesTotal": 6,
"name": "subdir2"
}
},
"coveragePercent": 73.68,
"linesCovered": 14,
"linesMissed": 5,
"linesTotal": 19,
"name": "topdir1"
},
"topdir2": {
"children": {
"subdir1": {
"children": {
"file1.c": {
"coverage": [
-1,
-1,
0,
0,
0,
0,
0,
0,
-1
],
"coveragePercent": 0.0,
"linesCovered": 0,
"linesMissed": 6,
"linesTotal": 6,
"name": "file1.c"
}
},
"coveragePercent": 0.0,
"linesCovered": 0,
"linesMissed": 6,
"linesTotal": 6,
"name": "subdir1"
}
},
"coveragePercent": 0.0,
"linesCovered": 0,
"linesMissed": 6,
"linesTotal": 6,
"name": "topdir2"
}
},
"coveragePercent": 56.0,
"linesCovered": 14,
"linesMissed": 11,
"linesTotal": 25,
"name": null
}
""" # noqa
def test_CoverageHelperFlattenNames():
node = json.loads(covdata)
result = CoverageHelper.get_flattened_names(node, prefix="")
expected_names = [
'topdir1',
'topdir1/subdir2',
'topdir1/subdir2/file3.c',
'topdir1/subdir1/file2.c',
'topdir1/subdir1',
'topdir1/subdir1/file1.c',
'topdir2',
'topdir2/subdir1',
'topdir2/subdir1/file1.c'
]
assert result == set(expected_names)
def test_CoverageHelperApplyDirectivesMixed():
node = json.loads(covdata)
# Check that mixed directives work properly (exclude multiple paths, include some back)
directives = ["-:topdir1/subdir1/**",
"+:topdir1/subdir?/file1.c",
"+:topdir1/subdir?/file3.c",
"-:topdir1/subdir2/**"]
CoverageHelper.apply_include_exclude_directives(node, directives)
result = CoverageHelper.get_flattened_names(node, prefix="")
expected_names = [
'topdir1',
'topdir1/subdir1/file1.c',
'topdir1/subdir1',
'topdir2',
'topdir2/subdir1',
'topdir2/subdir1/file1.c'
]
assert result == set(expected_names)
def test_CoverageHelperApplyDirectivesPrune():
node = json.loads(covdata)
# Check that any empty childs are pruned (empty childs are not useful)
directives = ["-:topdir1/subdir1/**", "-:topdir1/subdir2/**"]
CoverageHelper.apply_include_exclude_directives(node, directives)
result = CoverageHelper.get_flattened_names(node, prefix="")
expected_names = [
'topdir2',
'topdir2/subdir1',
'topdir2/subdir1/file1.c'
]
assert result == set(expected_names)
def test_CoverageHelperApplyDirectivesExcludeAll():
node = json.loads(covdata)
# Check that excluding all paths works (specialized case)
directives = ["-:**", "+:topdir2/subdir1/**"]
CoverageHelper.apply_include_exclude_directives(node, directives)
result = CoverageHelper.get_flattened_names(node, prefix="")
expected_names = [
'topdir2',
'topdir2/subdir1',
'topdir2/subdir1/file1.c'
]
assert result == set(expected_names)
def test_CoverageHelperApplyDirectivesMakeEmpty():
node = json.loads(covdata)
# Check that making the set entirely empty doesn't crash things (tsmith mode)
directives = ["-:**"]
CoverageHelper.apply_include_exclude_directives(node, directives)
result = CoverageHelper.get_flattened_names(node, prefix="")
expected_names = []
assert result == set(expected_names)
| mpl-2.0 |
xfournet/intellij-community | python/lib/Lib/encodings/iso8859_13.py | 593 | 13527 | """ Python Character Mapping Codec iso8859_13 generated from 'MAPPINGS/ISO8859/8859-13.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-13',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u201d' # 0xA1 -> RIGHT DOUBLE QUOTATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\u201c' # 0xB4 -> LEFT DOUBLE QUOTATION MARK
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xe6' # 0xBF -> LATIN SMALL LETTER AE
u'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
u'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
u'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
u'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
u'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
u'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
u'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
u'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
u'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
u'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
u'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
u'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
u'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
u'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
u'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
u'\u2019' # 0xFF -> RIGHT SINGLE QUOTATION MARK
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
ferabra/edx-platform | common/djangoapps/student/migrations/0041_add_dashboard_config.py | 120 | 13750 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DashboardConfiguration'
db.create_table('student_dashboardconfiguration', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
('recent_enrollment_time_delta', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal('student', ['DashboardConfiguration'])
def backwards(self, orm):
# Deleting model 'DashboardConfiguration'
db.delete_table('student_dashboardconfiguration')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.dashboardconfiguration': {
'Meta': {'object_name': 'DashboardConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recent_enrollment_time_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
hnarayanan/django-rest-framework | tests/test_negotiation.py | 26 | 1648 | from __future__ import unicode_literals
from django.test import TestCase
from rest_framework.negotiation import DefaultContentNegotiation
from rest_framework.request import Request
from rest_framework.renderers import BaseRenderer
from rest_framework.test import APIRequestFactory
factory = APIRequestFactory()
class MockJSONRenderer(BaseRenderer):
media_type = 'application/json'
class MockHTMLRenderer(BaseRenderer):
media_type = 'text/html'
class NoCharsetSpecifiedRenderer(BaseRenderer):
media_type = 'my/media'
class TestAcceptedMediaType(TestCase):
def setUp(self):
self.renderers = [MockJSONRenderer(), MockHTMLRenderer()]
self.negotiator = DefaultContentNegotiation()
def select_renderer(self, request):
return self.negotiator.select_renderer(request, self.renderers)
def test_client_without_accept_use_renderer(self):
request = Request(factory.get('/'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
self.assertEqual(accepted_media_type, 'application/json')
def test_client_underspecifies_accept_use_renderer(self):
request = Request(factory.get('/', HTTP_ACCEPT='*/*'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
self.assertEqual(accepted_media_type, 'application/json')
def test_client_overspecifies_accept_use_client(self):
request = Request(factory.get('/', HTTP_ACCEPT='application/json; indent=8'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
self.assertEqual(accepted_media_type, 'application/json; indent=8')
| bsd-2-clause |
HybridF5/nova | nova/tests/unit/api/openstack/compute/legacy_v2/test_extensions.py | 9 | 24421 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import iso8601
from oslo_config import cfg
from oslo_serialization import jsonutils
import webob
from nova.api.openstack import compute
from nova.api.openstack.compute.legacy_v2 import extensions \
as compute_extensions
from nova.api.openstack import extensions as base_extensions
from nova.api.openstack import wsgi
from nova import exception
import nova.policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
CONF = cfg.CONF
NS = "{http://docs.openstack.org/common/api/v1.0}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
response_body = "Try to say this Mr. Knox, sir..."
extension_body = "I am not a fox!"
class StubController(object):
def __init__(self, body):
self.body = body
def index(self, req):
return self.body
def create(self, req, body):
msg = 'All aboard the fail train!'
raise webob.exc.HTTPBadRequest(explanation=msg)
def show(self, req, id):
raise webob.exc.HTTPNotFound()
class StubActionController(wsgi.Controller):
def __init__(self, body):
self.body = body
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return self.body
class StubControllerExtension(base_extensions.ExtensionDescriptor):
name = 'twaadle'
def __init__(self):
pass
class StubEarlyExtensionController(wsgi.Controller):
def __init__(self, body):
self.body = body
@wsgi.extends
def index(self, req):
yield self.body
@wsgi.extends(action='fooAction')
def _action_foo(self, req, id, body):
yield self.body
class StubLateExtensionController(wsgi.Controller):
def __init__(self, body):
self.body = body
@wsgi.extends
def index(self, req, resp_obj):
return self.body
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp_obj, id, body):
return self.body
class StubExtensionManager(object):
"""Provides access to Tweedle Beetles."""
name = "Tweedle Beetle Extension"
alias = "TWDLBETL"
def __init__(self, resource_ext=None, action_ext=None, request_ext=None,
controller_ext=None):
self.resource_ext = resource_ext
self.action_ext = action_ext
self.request_ext = request_ext
self.controller_ext = controller_ext
self.extra_resource_ext = None
def get_resources(self):
resource_exts = []
if self.resource_ext:
resource_exts.append(self.resource_ext)
if self.extra_resource_ext:
resource_exts.append(self.extra_resource_ext)
return resource_exts
def get_actions(self):
action_exts = []
if self.action_ext:
action_exts.append(self.action_ext)
return action_exts
def get_controller_extensions(self):
controller_extensions = []
if self.controller_ext:
controller_extensions.append(self.controller_ext)
return controller_extensions
class ExtensionTestCase(test.TestCase):
def setUp(self):
super(ExtensionTestCase, self).setUp()
ext_list = CONF.osapi_compute_extension[:]
fox = ('nova.tests.unit.api.openstack.compute.legacy_v2.extensions.'
'foxinsocks.Foxinsocks')
if fox not in ext_list:
ext_list.append(fox)
self.flags(osapi_compute_extension=ext_list)
self.fake_context = nova.context.RequestContext('fake', 'fake')
def _test_extension_authorizer_throws_exception_if_policy_fails(self,
rule,
authorize):
target = {'project_id': '1234',
'user_id': '5678'}
self.mox.StubOutWithMock(nova.policy, 'enforce')
nova.policy.enforce(self.fake_context,
rule,
target).AndRaise(
exception.PolicyNotAuthorized(
action=rule))
self.mox.ReplayAll()
self.assertRaises(exception.PolicyNotAuthorized, authorize,
self.fake_context, target=target)
def test_extension_authorizer_throws_exception_if_policy_fails(self):
authorize = base_extensions.extension_authorizer(
'compute', 'used_limits_for_admin')
self._test_extension_authorizer_throws_exception_if_policy_fails(
"compute_extension:used_limits_for_admin",
authorize)
def test_os_compute_api_authorizer_throws_exception_if_policy_fails(self):
authorize = base_extensions.os_compute_authorizer(
'used_limits_for_admin')
self._test_extension_authorizer_throws_exception_if_policy_fails(
"os_compute_api:used_limits_for_admin",
authorize)
def _test_core_authorizer_throws_exception_if_policy_fails(self,
rule,
authorize):
target = {'project_id': '1234',
'user_id': '5678'}
self.mox.StubOutWithMock(nova.policy, 'enforce')
nova.policy.enforce(self.fake_context,
rule,
target).AndRaise(
exception.PolicyNotAuthorized(
action=rule))
self.mox.ReplayAll()
self.assertRaises(exception.PolicyNotAuthorized, authorize,
self.fake_context, target=target)
def test_core_authorizer_throws_exception_if_policy_fails(self):
authorize = base_extensions.core_authorizer('compute',
'used_limits_for_admin')
self._test_core_authorizer_throws_exception_if_policy_fails(
"compute:used_limits_for_admin", authorize)
class ExtensionControllerTest(ExtensionTestCase):
def setUp(self):
super(ExtensionControllerTest, self).setUp()
self.ext_list = [
"AdminActions",
"Aggregates",
"AssistedVolumeSnapshots",
"AvailabilityZone",
"Agents",
"Certificates",
"Cloudpipe",
"CloudpipeUpdate",
"ConsoleOutput",
"Consoles",
"Createserverext",
"DeferredDelete",
"DiskConfig",
"ExtendedAvailabilityZone",
"ExtendedFloatingIps",
"ExtendedIps",
"ExtendedIpsMac",
"ExtendedVIFNet",
"Evacuate",
"ExtendedStatus",
"ExtendedVolumes",
"ExtendedServerAttributes",
"FixedIPs",
"FlavorAccess",
"FlavorDisabled",
"FlavorExtraSpecs",
"FlavorExtraData",
"FlavorManage",
"FlavorRxtx",
"FlavorSwap",
"FloatingIps",
"FloatingIpDns",
"FloatingIpPools",
"FloatingIpsBulk",
"Fox In Socks",
"Hosts",
"ImageSize",
"InstanceActions",
"Keypairs",
"Multinic",
"MultipleCreate",
"QuotaClasses",
"Quotas",
"ExtendedQuotas",
"Rescue",
"SchedulerHints",
"SecurityGroupDefaultRules",
"SecurityGroups",
"ServerDiagnostics",
"ServerListMultiStatus",
"ServerPassword",
"ServerStartStop",
"Services",
"SimpleTenantUsage",
"UsedLimits",
"UserData",
"VirtualInterfaces",
"VolumeAttachmentUpdate",
"Volumes",
]
self.ext_list.sort()
def test_list_extensions_json(self):
app = compute.APIRouter(init_only=('extensions',))
request = webob.Request.blank("/fake/extensions")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
# Make sure we have all the extensions, extra extensions being OK.
data = jsonutils.loads(response.body)
names = [str(x['name']) for x in data['extensions']
if str(x['name']) in self.ext_list]
names.sort()
self.assertEqual(names, self.ext_list)
# Ensure all the timestamps are valid according to iso8601
for ext in data['extensions']:
iso8601.parse_date(ext['updated'])
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [
x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
self.assertEqual(
{'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
'name': 'Fox In Socks',
'updated': '2011-01-22T13:25:27-06:00',
'description': 'The Fox In Socks Extension.',
'alias': 'FOXNSOX',
'links': []}, fox_ext)
for ext in data['extensions']:
url = '/fake/extensions/%s' % ext['alias']
request = webob.Request.blank(url)
response = request.get_response(app)
output = jsonutils.loads(response.body)
self.assertEqual(output['extension']['alias'], ext['alias'])
def test_get_extension_json(self):
app = compute.APIRouter(init_only=('extensions',))
request = webob.Request.blank("/fake/extensions/FOXNSOX")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
data = jsonutils.loads(response.body)
self.assertEqual({
"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
"name": "Fox In Socks",
"updated": "2011-01-22T13:25:27-06:00",
"description": "The Fox In Socks Extension.",
"alias": "FOXNSOX",
"links": []}, data['extension'])
def test_get_non_existing_extension_json(self):
app = compute.APIRouter(init_only=('extensions',))
request = webob.Request.blank("/fake/extensions/4")
response = request.get_response(app)
self.assertEqual(404, response.status_int)
class ResourceExtensionTest(ExtensionTestCase):
def test_no_extension_present(self):
manager = StubExtensionManager(None)
app = compute.APIRouter(manager)
request = webob.Request.blank("/blah")
response = request.get_response(app)
self.assertEqual(404, response.status_int)
def test_get_resources(self):
res_ext = base_extensions.ResourceExtension('tweedles',
StubController(response_body))
manager = StubExtensionManager(res_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(response_body, response.body)
def test_get_resources_with_controller(self):
res_ext = base_extensions.ResourceExtension('tweedles',
StubController(response_body))
manager = StubExtensionManager(res_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(response_body, response.body)
def test_bad_request(self):
res_ext = base_extensions.ResourceExtension('tweedles',
StubController(response_body))
manager = StubExtensionManager(res_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
request.method = "POST"
response = request.get_response(app)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
body = jsonutils.loads(response.body)
expected = {
"badRequest": {
"message": "All aboard the fail train!",
"code": 400
}
}
self.assertThat(expected, matchers.DictMatches(body))
def test_non_exist_resource(self):
res_ext = base_extensions.ResourceExtension('tweedles',
StubController(response_body))
manager = StubExtensionManager(res_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles/1")
response = request.get_response(app)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
body = jsonutils.loads(response.body)
expected = {
"itemNotFound": {
"message": "The resource could not be found.",
"code": 404
}
}
self.assertThat(expected, matchers.DictMatches(body))
class InvalidExtension(object):
alias = "THIRD"
class ExtensionManagerTest(ExtensionTestCase):
response_body = "Try to say this Mr. Knox, sir..."
def test_get_resources(self):
app = compute.APIRouter()
request = webob.Request.blank("/fake/foxnsocks")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(response_body, response.body)
def test_invalid_extensions(self):
# Don't need the serialization middleware here because we're
# not testing any serialization
compute.APIRouter()
ext_mgr = compute_extensions.ExtensionManager()
ext_mgr.register(InvalidExtension())
self.assertTrue(ext_mgr.is_loaded('FOXNSOX'))
self.assertFalse(ext_mgr.is_loaded('THIRD'))
class ActionExtensionTest(ExtensionTestCase):
def _send_server_action_request(self, url, body):
app = compute.APIRouter(init_only=('servers',))
request = webob.Request.blank(url)
request.method = 'POST'
request.content_type = 'application/json'
request.body = jsonutils.dump_as_bytes(body)
response = request.get_response(app)
return response
def test_extended_action(self):
body = dict(add_tweedle=dict(name="test"))
url = "/fake/servers/abcd/action"
response = self._send_server_action_request(url, body)
self.assertEqual(200, response.status_int)
self.assertEqual("Tweedle Beetle Added.", response.body)
body = dict(delete_tweedle=dict(name="test"))
response = self._send_server_action_request(url, body)
self.assertEqual(200, response.status_int)
self.assertEqual("Tweedle Beetle Deleted.", response.body)
def test_invalid_action(self):
body = dict(blah=dict(name="test")) # Doesn't exist
url = "/fake/servers/abcd/action"
response = self._send_server_action_request(url, body)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
body = jsonutils.loads(response.body)
expected = {
"badRequest": {
"message": "There is no such action: blah",
"code": 400
}
}
self.assertThat(expected, matchers.DictMatches(body))
def test_non_exist_action(self):
body = dict(blah=dict(name="test"))
url = "/fake/fdsa/1/action"
response = self._send_server_action_request(url, body)
self.assertEqual(404, response.status_int)
def test_failed_action(self):
body = dict(fail=dict(name="test"))
url = "/fake/servers/abcd/action"
response = self._send_server_action_request(url, body)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
body = jsonutils.loads(response.body)
expected = {
"badRequest": {
"message": "Tweedle fail",
"code": 400
}
}
self.assertThat(expected, matchers.DictMatches(body))
class RequestExtensionTest(ExtensionTestCase):
def test_get_resources_with_stub_mgr(self):
class GooGoose(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
# only handle JSON responses
resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
req_ext = base_extensions.ControllerExtension(
StubControllerExtension(), 'flavors', GooGoose())
manager = StubExtensionManager(None, None, None, req_ext)
app = fakes.wsgi_app(ext_mgr=manager)
request = webob.Request.blank("/v2/fake/flavors/1?chewing=bluegoo")
request.environ['api.version'] = '2'
response = request.get_response(app)
self.assertEqual(200, response.status_int)
response_data = jsonutils.loads(response.body)
self.assertEqual('bluegoo', response_data['flavor']['googoose'])
def test_get_resources_with_mgr(self):
app = fakes.wsgi_app(init_only=('flavors',))
request = webob.Request.blank("/v2/fake/flavors/1?chewing=newblue")
request.environ['api.version'] = '2'
response = request.get_response(app)
self.assertEqual(200, response.status_int)
response_data = jsonutils.loads(response.body)
self.assertEqual('newblue', response_data['flavor']['googoose'])
self.assertEqual("Pig Bands!", response_data['big_bands'])
class ControllerExtensionTest(ExtensionTestCase):
def test_controller_extension_early(self):
controller = StubController(response_body)
res_ext = base_extensions.ResourceExtension('tweedles', controller)
ext_controller = StubEarlyExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=res_ext,
controller_ext=cont_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
def test_controller_extension_late(self):
# Need a dict for the body to convert to a ResponseObject
controller = StubController(dict(foo=response_body))
res_ext = base_extensions.ResourceExtension('tweedles', controller)
ext_controller = StubLateExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=res_ext,
controller_ext=cont_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
def test_controller_extension_late_inherited_resource(self):
# Need a dict for the body to convert to a ResponseObject
controller = StubController(dict(foo=response_body))
parent_ext = base_extensions.ResourceExtension('tweedles', controller)
ext_controller = StubLateExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=parent_ext,
controller_ext=cont_ext)
child_ext = base_extensions.ResourceExtension('beetles', controller,
inherits='tweedles')
manager.extra_resource_ext = child_ext
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/beetles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
def test_controller_action_extension_early(self):
controller = StubActionController(response_body)
actions = dict(action='POST')
res_ext = base_extensions.ResourceExtension('tweedles', controller,
member_actions=actions)
ext_controller = StubEarlyExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=res_ext,
controller_ext=cont_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles/foo/action")
request.method = 'POST'
request.headers['Content-Type'] = 'application/json'
request.body = jsonutils.dump_as_bytes(dict(fooAction=True))
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
def test_controller_action_extension_late(self):
# Need a dict for the body to convert to a ResponseObject
controller = StubActionController(dict(foo=response_body))
actions = dict(action='POST')
res_ext = base_extensions.ResourceExtension('tweedles', controller,
member_actions=actions)
ext_controller = StubLateExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=res_ext,
controller_ext=cont_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles/foo/action")
request.method = 'POST'
request.headers['Content-Type'] = 'application/json'
request.body = jsonutils.dump_as_bytes(dict(fooAction=True))
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
class ExtensionControllerIdFormatTest(test.NoDBTestCase):
    """Check how resource ids carrying a format suffix reach the controller."""

    def _bounce_id(self, test_id):
        # Minimal controller that simply echoes back the id it received.
        class BounceController(object):
            def show(self, req, id):
                return id
        bounce_ext = base_extensions.ResourceExtension('bounce',
                                                       BounceController())
        router = compute.APIRouter(StubExtensionManager(bounce_ext))
        req = webob.Request.blank("/fake/bounce/%s" % test_id)
        return req.get_response(router).body

    def test_id_with_xml_format(self):
        # A recognized format suffix is stripped from the id.
        self.assertEqual('foo', self._bounce_id('foo.xml'))

    def test_id_with_json_format(self):
        self.assertEqual('foo', self._bounce_id('foo.json'))

    def test_id_with_bad_format(self):
        # An unrecognized suffix is treated as part of the id itself.
        self.assertEqual('foo.bad', self._bounce_id('foo.bad'))
| apache-2.0 |
jasonstack/cassandra | pylib/cqlshlib/util.py | 28 | 4494 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cProfile
import codecs
import pstats
from datetime import timedelta, tzinfo
from six import StringIO
try:
from line_profiler import LineProfiler
HAS_LINE_PROFILER = True
except ImportError:
HAS_LINE_PROFILER = False
ZERO = timedelta(0)


class UTC(tzinfo):
    """Concrete tzinfo representing Coordinated Universal Time."""

    def utcoffset(self, dt):
        # UTC is, by definition, at zero offset from itself.
        return timedelta(0)

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        # UTC has no daylight-saving transitions.
        return timedelta(0)
def split_list(items, pred):
    """
    Partition an iterable into sublists, breaking after every element that
    satisfies the predicate 'pred'.

    Every element is kept: an element for which 'pred' is true closes the
    current sublist, and subsequent elements accumulate in a fresh one.
    A trailing empty list appears when the final element satisfied 'pred'.

    >>> split_list([0, 1, 2, 5, 99, 8], lambda n: (n % 2) == 0)
    [[0], [1, 2], [5, 99, 8], []]
    """
    chunks = [[]]
    for item in items:
        chunks[-1].append(item)
        if pred(item):
            chunks.append([])
    return chunks
def find_common_prefix(strs):
    """
    Return the longest prefix shared by every string in 'strs'.

    >>> find_common_prefix(['abracadabra', 'abracadero', 'abranch'])
    'abra'
    >>> find_common_prefix(['abracadabra', 'abracadero', 'mt. fuji'])
    ''
    """
    prefix_chars = []
    # zip stops at the shortest string, so only candidate positions are seen.
    for chars in zip(*strs):
        first = chars[0]
        if any(c != first for c in chars[1:]):
            break
        prefix_chars.append(first)
    return ''.join(prefix_chars)
def list_bifilter(pred, iterable):
    """
    Filter an iterable into two output lists: the first containing all
    elements of the iterable for which 'pred' returns true, and the second
    containing all others. Order of the elements is otherwise retained.

    >>> list_bifilter(lambda x: isinstance(x, int), (4, 'bingo', 1.2, 6, True))
    ([4, 6, True], ['bingo', 1.2])
    """
    # BUG FIX (doc only): the old doctest claimed True lands in the second
    # list, but bool is a subclass of int, so isinstance(True, int) is true
    # and True belongs in the first list.
    yes_s = []
    no_s = []
    for i in iterable:
        (yes_s if pred(i) else no_s).append(i)
    return yes_s, no_s
def identity(x):
    """Return the argument unchanged (a no-op transform)."""
    return x
def trim_if_present(s, prefix):
    """Return 's' with 'prefix' removed from the front when it is there."""
    return s[len(prefix):] if s.startswith(prefix) else s
def get_file_encoding_bomsize(filename):
    """
    Checks the beginning of a file for a Unicode BOM. Based on this check,
    the encoding that should be used to open the file and the number of
    bytes that should be skipped (to skip the BOM) are returned.

    Returns ("utf-8", 0) when no known BOM is present.
    """
    # BUG FIX: BOM_UTF32_LE previously mapped to 'utf-32be'.
    # The UTF-32 BOMs must also be tested before the UTF-16 ones, because
    # BOM_UTF16_LE (ff fe) is a prefix of BOM_UTF32_LE (ff fe 00 00) and the
    # shorter pattern would otherwise always win.
    bom_encodings = ((codecs.BOM_UTF8, 'utf-8-sig'),
                     (codecs.BOM_UTF32_LE, 'utf-32le'),
                     (codecs.BOM_UTF32_BE, 'utf-32be'),
                     (codecs.BOM_UTF16_LE, 'utf-16le'),
                     (codecs.BOM_UTF16_BE, 'utf-16be'))

    # Close the handle promptly instead of leaking it until GC.
    with open(filename, 'rb') as f:
        firstbytes = f.read(4)

    for bom, encoding in bom_encodings:
        if firstbytes.startswith(bom):
            return encoding, len(bom)
    return "utf-8", 0
def profile_on(fcn_names=None):
    """Start profiling and return the active profiler object.

    When 'fcn_names' is given and line_profiler is installed, a
    LineProfiler restricted to those functions is used; otherwise a
    whole-program cProfile.Profile is started.
    """
    if fcn_names and HAS_LINE_PROFILER:
        profiler = LineProfiler()
        for name in fcn_names:
            profiler.add_function(name)
    else:
        profiler = cProfile.Profile()
    profiler.enable()
    return profiler
def profile_off(pr, file_name):
    """Stop the profiler 'pr' and return its formatted statistics.

    The report is also written to 'file_name' when one is given.
    """
    pr.disable()
    out = StringIO()
    if HAS_LINE_PROFILER and isinstance(pr, LineProfiler):
        pr.print_stats(out)
    else:
        # Stats.sort_stats returns self, so the calls can be chained.
        pstats.Stats(pr, stream=out).sort_stats('cumulative').print_stats()
    report = out.getvalue()
    if file_name:
        with open(file_name, 'w') as f:
            print("Writing to %s\n" % (f.name, ))
            f.write(report)
    return report
| apache-2.0 |
helfertool/helfertool | src/toollog/migrations/0001_initial.py | 1 | 1664 | # Generated by Django 3.1.1 on 2020-09-05 18:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration of the toollog app: creates the LogEntry model.

    initial = True

    dependencies = [
        # LogEntry holds foreign keys to registration.Event,
        # registration.Helper and the configured user model, so those
        # tables must already exist.
        ('registration', '0043_auto_20200409_1711'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='LogEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(verbose_name='Timestamp')),
                ('level', models.CharField(max_length=16, verbose_name='Log level')),
                ('message', models.CharField(max_length=512, verbose_name='Message')),
                ('extras', models.JSONField(blank=True, null=True, verbose_name='Extra data')),
                ('module', models.CharField(max_length=128, verbose_name='Helfertool module')),
                # Deleting an event cascades to its log entries; deleting a
                # helper or user merely nulls the reference.
                ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='registration.event', verbose_name='Event')),
                ('helper', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='registration.helper', verbose_name='Helper')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='helfertoollogentry', to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
            options={
                # Newest entries first by default.
                'ordering': ['-timestamp'],
            },
        ),
    ]
| agpl-3.0 |
quarckster/cfme_tests | fixtures/soft_assert.py | 5 | 8591 | """Soft assert context manager and assert function
A "soft assert" is an assertion that, if it fails, does not fail the entire test.
Soft assertions can be mixed with normal assertions as needed, and will be automatically
collected/reported after a test runs.
Functionality Overview
----------------------
1. If :py:func:`soft_assert` is used by a test, that test's call phase is wrapped in
a context manager. Entering that context sets up a thread-local store for failed assertions.
2. Inside the test, :py:func:`soft_assert` is a function with access to the thread-local store
of failed assertions, allowing it to store failed assertions during a test run.
3. After a test runs, the context manager wrapping the test's call phase exits, which inspects the
thread-local store of failed assertions, raising a
:py:class:`custom AssertionError <SoftAssertionError>` if any are found.
No effort is made to clear the thread-local store; rather it's explicitly overwritten with an empty
list by the context manager. Because the store is a :py:func:`list <python:list>`, failed assertions
will be reported in the order that they failed.
"""
from contextlib import contextmanager
from threading import local
from functools import partial
import fauxfactory
import pytest
from fixtures.artifactor_plugin import fire_art_test_hook
from cfme.utils.log import nth_frame_info
from cfme.utils.path import get_rel_path
import sys
import traceback
import cfme.utils
from cfme.utils.appliance import DummyAppliance, get_or_create_current_appliance
# Use a thread-local store for failed soft asserts, making it thread-safe
# in parallel testing and shared among the functions in this module.
_thread_locals = local()
@pytest.mark.hookwrapper(tryfirst=True)
def pytest_runtest_protocol(item, nextitem):
    # Before a test that uses the fixture runs, reset the thread-local
    # store so soft asserts cannot leak between tests.
    if 'soft_assert' in item.fixturenames:
        _thread_locals.caught_asserts = []
    yield
@pytest.mark.hookwrapper
def pytest_runtest_call(item):
    """pytest hook to handle :py:func:`soft_assert` fixture usage"""
    yield
    # After the call phase, fail the test if any soft asserts were caught.
    if 'soft_assert' in item.fixturenames and _thread_locals.caught_asserts:
        raise SoftAssertionError(_thread_locals.caught_asserts)
class SoftAssertionError(AssertionError):
    """AssertionError aggregating multiple failed soft assertions.

    Behaves like a normal :py:class:`AssertionError <python:exceptions.AssertionError>`,
    but additionally keeps the individual failure messages on the
    ``failed_assertions`` attribute so they can be inspected, and renders
    them one per line when cast to :py:func:`str <python:str>`.

    Args:
        failed_assertions: List of collected assertion failure messages

    Attributes:
        failed_assertions: the ``failed_assertions`` handed to the
            initializer, useful when inspecting the failures is desired.
    """
    def __init__(self, failed_assertions):
        self.failed_assertions = failed_assertions
        super(SoftAssertionError, self).__init__(str(self))

    def __str__(self):
        # Leading '' yields a newline before the first message.
        return '\n'.join([''] + list(self.failed_assertions))
@contextmanager
def _soft_assert_cm():
    """soft assert context manager

    * clears the thread-local caught asserts before a test run
    * inspects the thread-local caught asserts after a test run, raising an error if needed
    """
    # Explicitly overwrite (rather than clear) so a fresh list is yielded.
    _thread_locals.caught_asserts = []
    yield _thread_locals.caught_asserts
    if _thread_locals.caught_asserts:
        raise SoftAssertionError(_thread_locals.caught_asserts)
def handle_assert_artifacts(request, fail_message=None):
    """Collect failure artifacts (tracebacks, screenshot) for a soft assert.

    Sends each artifact to the artifactor through 'filedump' hooks, tagged
    with a shared group id. Does nothing against a DummyAppliance.
    """
    appliance = get_or_create_current_appliance()
    if isinstance(appliance, DummyAppliance):
        return
    if not fail_message:
        # No explicit message: capture the short and full traceback of the
        # exception currently being handled (Python 2 base64 codec).
        short_tb = '{}'.format(sys.exc_info()[1])
        short_tb = short_tb.encode('base64')
        var_tb = traceback.format_tb(sys.exc_info()[2])
        full_tb = "".join(var_tb)
        full_tb = full_tb.encode('base64')
    else:
        short_tb = full_tb = fail_message.encode('base64')
    # Screenshot capture is best-effort; on failure record why instead.
    try:
        ss = cfme.utils.browser.browser().get_screenshot_as_base64()
        ss_error = None
    except Exception as b_ex:
        ss = None
        if str(b_ex):
            ss_error = '{}: {}'.format(type(b_ex).__name__, str(b_ex))
        else:
            ss_error = type(b_ex).__name__
    if ss_error:
        ss_error = ss_error.encode('base64')
    # A simple id to match the artifacts together
    sa_id = "softassert-{}".format(fauxfactory.gen_alpha(length=3).upper())
    # NOTE(review): imported locally, presumably to avoid a circular import
    # at module load time — confirm before hoisting to the top of the file.
    from fixtures.pytest_store import store
    node = request.node
    fire_art_test_hook(
        node, 'filedump',
        description="Soft Assert Traceback", contents=full_tb,
        file_type="soft_traceback", display_type="danger", display_glyph="align-justify",
        contents_base64=True, group_id=sa_id, slaveid=store.slaveid)
    fire_art_test_hook(
        node, 'filedump',
        description="Soft Assert Short Traceback", contents=short_tb,
        file_type="soft_short_tb", display_type="danger", display_glyph="align-justify",
        contents_base64=True, group_id=sa_id, slaveid=store.slaveid)
    if ss is not None:
        fire_art_test_hook(
            node, 'filedump',
            description="Soft Assert Exception screenshot",
            file_type="screenshot", mode="wb", contents_base64=True, contents=ss,
            display_glyph="camera", group_id=sa_id, slaveid=store.slaveid)
    if ss_error is not None:
        fire_art_test_hook(
            node, 'filedump',
            description="Soft Assert Screenshot error", mode="w",
            contents_base64=True, contents=ss_error, display_type="danger", group_id=sa_id,
            slaveid=store.slaveid)
@contextmanager
def _catch_assert_cm(request):
    """assert catching context manager

    * Catches a single AssertionError, and turns it into a soft assert
    """
    try:
        yield
    except AssertionError as ex:
        # Record artifacts first, then store the annotated message instead
        # of letting the AssertionError propagate and fail the test here.
        handle_assert_artifacts(request)
        caught_assert = _annotate_failure(str(ex))
        _thread_locals.caught_asserts.append(caught_assert)
# Some helper functions for creating or interacting with the caught asserts
def _get_caught_asserts():
    # Returns the live (directly mutable) list of collected failure messages.
    return _thread_locals.caught_asserts
def _clear_caught_asserts():
    # delete all items of the caught_asserts list
    # (in-place, so references handed out earlier remain valid)
    del _thread_locals.caught_asserts[:]
def _annotate_failure(fail_message=''):
    """Return 'fail_message' suffixed with the failing file:line location.

    When no message is given, the source line of the failed assertion is
    used as the message instead.
    """
    # Stack depth 3 is the assertion that failed:
    # 0: nth_frame_info, 1: this function, 2: our caller (the soft assert
    # function or the catch_assert context manager), 3: the assertion.
    frameinfo = nth_frame_info(3)
    if not fail_message:
        fail_message = str(frameinfo.code_context[0]).strip()
    location = '{}:{!r}'.format(get_rel_path(frameinfo.filename),
                                frameinfo.lineno)
    return '{} ({})'.format(fail_message, location)
@pytest.fixture
def soft_assert(request):
    """soft assert fixture, used to defer AssertionError to the end of a test run

    Usage:

        # contents of test_soft_assert.py, for example
        def test_uses_soft_assert(soft_assert):
            soft_assert(True)
            soft_assert(False, 'failure message')

            # soft_assert.catch_assert will intercept AssertionError
            # and turn it into a soft assert
            with soft_assert.catch_assert():
                assert None

        # Soft asserts can be cleared at any point within a test:
        soft_assert.clear_asserts()

        # For more in-depth interaction, the directly mutable list of
        # failure messages can be retrieved:
        caught_asserts = soft_assert.caught_asserts()

    The test above reports two soft assertion failures with this message::

        SoftAssertionError:
        failure message (test_soft_assert.py:3)
        soft_assert(None) (test_soft_assert.py:8)

    """
    def _soft_assert(expr, fail_message=''):
        # On a falsy expression, record the annotated failure (plus
        # artifacts) instead of raising immediately.
        if not expr:
            handle_assert_artifacts(request, fail_message=fail_message)
            _thread_locals.caught_asserts.append(_annotate_failure(fail_message))
        return bool(expr)

    # Expose the helper API on the fixture callable itself.
    _soft_assert.catch_assert = partial(_catch_assert_cm, request)
    _soft_assert.caught_asserts = _get_caught_asserts
    _soft_assert.clear_asserts = _clear_caught_asserts
    return _soft_assert
| gpl-2.0 |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/django/contrib/gis/forms/fields.py | 97 | 4697 | from __future__ import unicode_literals
import warnings
from django import forms
from django.utils import six
from django.utils.translation import ugettext_lazy as _
# While this couples the geographic forms to the GEOS library,
# it decouples from database (by not importing SpatialBackend).
from django.contrib.gis.geos import GEOSException, GEOSGeometry, fromstr
from .widgets import OpenLayersWidget
class GeometryField(forms.Field):
    """
    This is the basic form field for a Geometry. Any textual input that is
    accepted by GEOSGeometry is accepted by this form. By default,
    this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON.
    """
    widget = OpenLayersWidget
    geom_type = 'GEOMETRY'

    default_error_messages = {
        'required' : _('No geometry value provided.'),
        'invalid_geom' : _('Invalid geometry value.'),
        'invalid_geom_type' : _('Invalid geometry type.'),
        'transform_error' : _('An error occurred when transforming the geometry '
                              'to the SRID of the geometry form field.'),
    }

    def __init__(self, **kwargs):
        # Pop out attributes from the database field, or use sensible
        # defaults (e.g., allow None).
        self.srid = kwargs.pop('srid', None)
        self.geom_type = kwargs.pop('geom_type', self.geom_type)
        if 'null' in kwargs:
            kwargs.pop('null', True)
            warnings.warn("Passing 'null' keyword argument to GeometryField is deprecated.",
                          DeprecationWarning, stacklevel=2)
        super(GeometryField, self).__init__(**kwargs)
        self.widget.attrs['geom_type'] = self.geom_type

    def to_python(self, value):
        """
        Transforms the value to a Geometry object.

        Returns None for empty values; raises forms.ValidationError when
        the input cannot be parsed as a geometry.
        """
        if value in self.empty_values:
            return None

        if not isinstance(value, GEOSGeometry):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError, TypeError):
                raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom')

        # Try to set the srid
        if not value.srid:
            try:
                value.srid = self.widget.map_srid
            except AttributeError:
                if self.srid:
                    value.srid = self.srid
        return value

    def clean(self, value):
        """
        Validates that the input value can be converted to a Geometry
        object (which is returned). A ValidationError is raised if
        the value cannot be instantiated as a Geometry.
        """
        geom = super(GeometryField, self).clean(value)
        if geom is None:
            return geom

        # Ensuring that the geometry is of the correct type (indicated
        # using the OGC string label).
        if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY':
            raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type')

        # Transforming the geometry if the SRID was set.
        if self.srid and self.srid != -1 and self.srid != geom.srid:
            try:
                geom.transform(self.srid)
            except Exception:
                # Was a bare ``except:``, which also swallowed SystemExit
                # and KeyboardInterrupt.
                raise forms.ValidationError(
                    self.error_messages['transform_error'], code='transform_error')
        return geom

    def _has_changed(self, initial, data):
        """ Compare geographic value of data with its initial value. """
        try:
            data = self.to_python(data)
            initial = self.to_python(initial)
        except forms.ValidationError:
            # BUG FIX: this previously caught the unqualified name
            # ``ValidationError``, which is never imported in this module,
            # so a failed conversion raised NameError instead of reporting
            # the value as changed. to_python raises forms.ValidationError.
            return True

        # Only do a geographic comparison if both values are available
        if initial and data:
            data.transform(initial.srid)
            # If the initial value was not added by the browser, the geometry
            # provided may be slightly different, the first time it is saved.
            # The comparison is done with a very low tolerance.
            return not initial.equals_exact(data, tolerance=0.000001)
        else:
            # Check for change of state of existence
            return bool(initial) != bool(data)
# Concrete field classes: each subclass only narrows ``geom_type`` to the
# OGC type label that ``GeometryField.clean`` validates against.
class GeometryCollectionField(GeometryField):
    geom_type = 'GEOMETRYCOLLECTION'


class PointField(GeometryField):
    geom_type = 'POINT'


class MultiPointField(GeometryField):
    geom_type = 'MULTIPOINT'


class LineStringField(GeometryField):
    geom_type = 'LINESTRING'


class MultiLineStringField(GeometryField):
    geom_type = 'MULTILINESTRING'


class PolygonField(GeometryField):
    geom_type = 'POLYGON'


class MultiPolygonField(GeometryField):
    geom_type = 'MULTIPOLYGON'
| mit |
wanderine/nipype | nipype/algorithms/tests/test_modelgen.py | 15 | 5582 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from copy import deepcopy
import os
from shutil import rmtree
from tempfile import mkdtemp
from nibabel import Nifti1Image
import numpy as np
from nipype.testing import (assert_equal,
assert_raises, assert_almost_equal)
from nipype.interfaces.base import Bunch, TraitError
from nipype.algorithms.modelgen import (SpecifyModel, SpecifySparseModel,
SpecifySPMModel)
def test_modelgen1():
    """SpecifyModel: scan-based onsets are converted to seconds (x TR=6)."""
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    # Two dummy 200-volume functional runs; only their shapes matter.
    Nifti1Image(np.random.rand(10, 10, 10, 200), np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 200), np.eye(4)).to_filename(filename2)
    s = SpecifyModel()
    s.inputs.input_units = 'scans'
    # SpecifyModel must reject the 'output_units' trait entirely.
    set_output_units = lambda: setattr(s.inputs, 'output_units', 'scans')
    yield assert_raises, TraitError, set_output_units
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]], amplitudes=None,
                  pmod=None, regressors=None, regressor_names=None, tmod=None),
            Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]], amplitudes=None,
                  pmod=None, regressors=None, regressor_names=None, tmod=None)]
    s.inputs.subject_info = info
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 2
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 0
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    # Scan onsets [2, 50, 100, 180] x TR(6) = [12, 300, 600, 1080] seconds.
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([12, 300, 600, 1080])
    rmtree(tempdir)
def test_modelgen_spm_concat():
    """SpecifySPMModel: run concatenation and output-unit conversion."""
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    # Two dummy 30-volume runs (30 vols x TR 6s = 180s per run).
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename2)
    s = SpecifySPMModel()
    s.inputs.input_units = 'secs'
    s.inputs.concatenate_runs = True
    # Unlike SpecifyModel, the SPM variant accepts 'output_units'.
    setattr(s.inputs, 'output_units', 'secs')
    yield assert_equal, s.inputs.output_units, 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 170]], durations=[[1]]),
            Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    # Concatenation collapses the two runs into one session.
    yield assert_equal, len(res.outputs.session_info), 1
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, np.sum(res.outputs.session_info[0]['regress'][0]['val']), 30
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    # Second-run onsets are shifted by the first run's length (180s).
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])
    setattr(s.inputs, 'output_units', 'scans')
    yield assert_equal, s.inputs.output_units, 'scans'
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    # Same onsets expressed in scans, i.e. divided by TR=6.
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])/6
    s.inputs.concatenate_runs = False
    s.inputs.subject_info = deepcopy(info)
    s.inputs.output_units = 'secs'
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0])
    rmtree(tempdir)
def test_modelgen_sparse():
    """SpecifySparseModel: sparse designs, impulse/HRF/derivative options."""
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    # Two dummy 50-volume functional runs.
    Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename2)
    s = SpecifySparseModel()
    s.inputs.input_units = 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    info = [Bunch(conditions=['cond1'], onsets=[[0, 50, 100, 180]], durations=[[2]]),
            Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
    s.inputs.subject_info = info
    s.inputs.volumes_in_cluster = 1
    s.inputs.time_acquisition = 2
    s.inputs.high_pass_filter_cutoff = np.inf
    res = s.run()
    # Sparse designs model conditions as regressors, not as 'cond' entries.
    yield assert_equal, len(res.outputs.session_info), 2
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 0
    s.inputs.stimuli_as_impulses = False
    res = s.run()
    yield assert_equal, res.outputs.session_info[0]['regress'][0]['val'][0], 1.0
    s.inputs.model_hrf = True
    res = s.run()
    yield assert_almost_equal, res.outputs.session_info[0]['regress'][0]['val'][0], 0.016675298129743384
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    # Temporal derivative adds a second regressor per condition.
    s.inputs.use_temporal_deriv = True
    res = s.run()
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 2
    yield assert_almost_equal, res.outputs.session_info[0]['regress'][0]['val'][0], 0.016675298129743384
    yield assert_almost_equal, res.outputs.session_info[1]['regress'][1]['val'][5], 0.007671459162258378
    rmtree(tempdir)
| bsd-3-clause |
timrae/drivepy | anritsu/spectrumanalyzer.py | 1 | 3835 | from __future__ import division
from numpy import *
from drivepy import visaconnection
# Mapping of index to resolution bandwidth in Hz
RBW_DICT={0:30, 1:100, 2:300, 3:1e3, 4:3e3, 5:10e3, 6:30e3, 7:100e3, 8:300e3, 9:1e6, 13:10, 14:3e6}
# Mapping of index to video bandwidth in Hz.
# NOTE(review): index 6 maps to inf — presumably "VBW filter off"; confirm
# against the instrument manual.
VBW_DICT={0:1, 1:10, 2:100, 3:1e3, 4:10e3, 5:100e3, 6:float("inf"), 7:1e6, 8:3, 9:30, 10:300, 11:3e3, 12:30e3, 13:300e3, 14:3e6}
class SpectrumAnalyzer(object):
    """ Class for the Anritsu spectrum analyzer which provides high level commands for reading spectrums"""
    def __init__(self,addr="GPIB::2",timeout=60):
        self._sa=VisaConnection(addr,timeout)
        self._sa.write("*RST")  # reset the instrument to a known state
        self._numPoints = 501
    def setSpan(self,span):
        """ Sets the measurement span for SA in GHz"""
        self._sa.write("SP "+str(span)+"GHZ")
    def setCenter(self,center):
        """ Sets the center frequency for the SA in GHz"""
        self._sa.write("CF "+str(center)+"GHz")
    def setNumPoints(self,numPoints):
        """ Sets the number of sampling points read by obtainSpectrum()"""
        self._numPoints = numPoints
    def setAttenuator(self, autoMode = False, attn = 10):
        """ Set autoMode (True/False) and attn value in steps of 10dB"""
        # NOTE(review): the manual attenuation is written even when autoMode
        # is enabled; confirm this matches the intended behaviour.
        if autoMode:
            self._sa.write("AAT 1")
        else:
            self._sa.write("AAT 0")
        self._sa.write("AT "+str(attn))
    def setSweepTime(self, autoMode = False, time = 20):
        """ Set autoMode (true/false) and sweep time in ms"""
        if autoMode:
            self._sa.write("AST 1")
        else:
            self._sa.write("AST 0")
        self._sa.write("ST "+str(time)+"MS")
    def setRbw(self,index):
        """ Set resolution bandwidth of spectrum analyzer -- see RBW_DICT for mapping"""
        self._sa.write("RBW "+str(index))
    def setVbw(self,index):
        """ Set video bandwidth of spectrum analyzer -- see VBW_DICT for mapping"""
        self._sa.write("VBW "+str(index))
    def getRbw(self):
        """ Return resolution bandwidth of specan in Hz"""
        rbwIndex=int(self._sa.readQuery("RBW?")[4:])
        return RBW_DICT[rbwIndex]
    def getNoiseBandwidth(self):
        """ Return the noise bandwidth of specan (taken as 1.2 x RBW)"""
        return self.getRbw()*1.2
    def obtainSpectrum(self):
        """ Obtain a spectrum from the OSA. Based on Example VB-5 in the user manual for the Q8384 """
        self._sa.write("TS") # Start a sweep
        self._sa.write("BIN 0") # Set format to ASCII
        # BUG FIX: the number of requested points now follows setNumPoints()
        # instead of a hard-coded 501, so x and y always have equal length.
        # (Also removed a dead pre-allocated zeros() array that was
        # immediately overwritten.)
        raw = self._sa.readQuery("XMA? 0,%d" % self._numPoints)
        # Readings arrive as integer hundredths of a dBm; convert to watts.
        y = array([self.dbmToWatts(float(m)/100) for m in raw.split(",")])
        x = linspace(self.getStartFreq(), self.getStopFreq(), self._numPoints)
        return (x,y)
    def dbmToWatts(self,dbm):
        """ Convert a power level in dBm to watts """
        return 10**(dbm/10)/1000
    def getStartFreq(self):
        """ Return the sweep start frequency (STF? query, 4-char header stripped)"""
        return float(self._sa.readQuery("STF?")[4:])
    def getStopFreq(self):
        """ Return the sweep stop frequency (SOF? query, 4-char header stripped)"""
        return float(self._sa.readQuery("SOF?")[4:])
class VisaConnection(visaconnection.VisaConnection):
    """ Abstraction of the VISA connection for consistency between implementation of instrument classes """
    def __init__(self,addr,t):
        # 't' is the requested timeout.
        # NOTE(review): the timeout is currently ignored (see TODO below).
        super(VisaConnection,self).__init__(addr)
        # TODO: set timeout
    def getFloatArray(self,request):
        """ retrieve an array of floats as specified by request string ("OSD0"|"OSD1") """
        # The instrument first reports how many data points to expect.
        n=int(self.lib.ask("ODN?"))
        self.write(request) # tell OSA to send wavelength or intensity depending on request string
        data=self.lib.read_values()
        # A length mismatch indicates a communication problem.
        if len(data)!=n:
            raise CommError, "received different number of data points than expected from spectrum analyzer"
        return array(data)
class CommError(Exception):
    """Raised when data read back from the instrument is inconsistent."""
| gpl-3.0 |
ruddra/django-oscar | sites/demo/urls.py | 3 | 1030 | from django.conf.urls import patterns, include
from django.conf import settings
from django.contrib import admin
from django.conf.urls.static import static
from stores.app import application as stores_app
from stores.dashboard.app import application as dashboard_app
from apps.app import application
from datacash.dashboard.app import application as datacash_app
# These need to be imported into this namespace
from oscar.views import handler500, handler404, handler403
admin.autodiscover()

urlpatterns = patterns('',
    (r'^admin/', include(admin.site.urls)),

    # Stores extension
    (r'^stores/', include(stores_app.urls)),
    (r'^dashboard/stores/', include(dashboard_app.urls)),

    # PayPal extension
    (r'^checkout/paypal/', include('paypal.express.urls')),

    # Datacash extension
    (r'^dashboard/datacash/', include(datacash_app.urls)),

    # Catch-all: the main Oscar shop application.
    (r'', include(application.urls)),
)

if settings.DEBUG:
    # Serve uploaded media locally during development; in production the
    # web server is expected to serve MEDIA_ROOT itself.
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| bsd-3-clause |
andrei-karalionak/ggrc-core | src/ggrc_workflows/migrations/versions/20140722203407_4b3316aa1acf_move_existing_workflows_to_new_contexts.py | 6 | 11677 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Move existing Workflows to new contexts
Create Date: 2014-07-22 20:34:07.052212
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
import sqlalchemy as sa
from datetime import datetime
from sqlalchemy.sql import table, column, select, and_
# revision identifiers, used by Alembic.
revision = '4b3316aa1acf'
down_revision = '292222c25829'

# Lightweight SQLAlchemy table stubs declaring only the columns this data
# migration reads or writes, so it does not depend on the full ORM models.
contexts_table = table(
    'contexts',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('description', sa.Text),
    column('related_object_id', sa.Integer),
    column('related_object_type', sa.String),
    column('modified_by_id', sa.Integer),
    column('created_at', sa.DateTime),
    column('updated_at', sa.DateTime),
    )

roles_table = table(
    'roles',
    column('id', sa.Integer),
    column('name', sa.String),
    )

user_roles_table = table(
    'user_roles',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('role_id', sa.Integer),
    column('person_id', sa.Integer),
    column('modified_by_id', sa.Integer),
    column('created_at', sa.DateTime),
    column('updated_at', sa.DateTime),
    )

context_implications_table = table(
    'context_implications',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('source_context_id', sa.Integer),
    column('context_scope', sa.String),
    column('source_context_scope', sa.String),
    column('modified_by_id', sa.Integer),
    column('created_at', sa.DateTime),
    column('updated_at', sa.DateTime),
    )

# Workflow tables: only id/context_id (plus owning ids) are needed to move
# each row into its workflow's new context.
workflows_table = table(
    'workflows',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    )

workflow_objects_table = table(
    'workflow_objects',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('workflow_id', sa.Integer),
    )

workflow_people_table = table(
    'workflow_people',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('workflow_id', sa.Integer),
    column('person_id', sa.Integer),
    )

workflow_tasks_table = table(
    'workflow_tasks',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('workflow_id', sa.Integer),
    )

task_groups_table = table(
    'task_groups',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('workflow_id', sa.Integer),
    )

task_group_objects_table = table(
    'task_group_objects',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('task_group_id', sa.Integer),
    )

task_group_tasks_table = table(
    'task_group_tasks',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('task_group_id', sa.Integer),
    )

cycles_table = table(
    'cycles',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('workflow_id', sa.Integer),
    )

cycle_task_groups_table = table(
    'cycle_task_groups',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('cycle_id', sa.Integer),
    )

cycle_task_entries_table = table(
    'cycle_task_entries',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('cycle_id', sa.Integer),
    )

cycle_task_group_objects_table = table(
    'cycle_task_group_objects',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('cycle_id', sa.Integer),
    )

cycle_task_group_object_tasks_table = table(
    'cycle_task_group_object_tasks',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('cycle_id', sa.Integer),
    )

# Polymorphic attachment/ownership tables.
object_files_table = table(
    'object_files',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('fileable_id', sa.Integer),
    column('fileable_type', sa.String),
    )

object_folders_table = table(
    'object_folders',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('folderable_id', sa.Integer),
    column('folderable_type', sa.String),
    )

object_owners_table = table(
    'object_owners',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('person_id', sa.Integer),
    column('ownable_id', sa.Integer),
    column('ownable_type', sa.String),
    )
def get_role(name):
    """Fetch the (id,) row of the role named `name`, or None if absent."""
    query = select([roles_table.c.id]).where(roles_table.c.name == name)
    return op.get_bind().execute(query).fetchone()
def upgrade():
    """Give every Workflow its own security context and propagate it.

    Steps:
      1. For each workflow without a context, create a Context row and point
         the workflow at it.
      2. For *every* workflow: create context implications, convert
         object_owners rows into WorkflowOwner user_roles (deleting the
         originals), convert WorkflowPerson rows into WorkflowMember
         user_roles, and stamp the workflow's context_id onto all
         directly-, polymorphically- and cycle-connected rows.

    Statement order matters: the context must exist before anything
    references it, and owner rows are deleted only after the matching
    user_role insert.
    """
    current_datetime = datetime.now()
    # Get the roles we'll need later
    workflow_owner_role = get_role('WorkflowOwner')
    workflow_member_role = get_role('WorkflowMember')
    # Get all current workflows
    connection = op.get_bind()
    workflows = connection.execute(
        select([workflows_table.c.id])
        .where(workflows_table.c.context_id == None))  # noqa
    for workflow in workflows:
        workflow_id = workflow.id
        # Create the Workflow context
        connection.execute(
            contexts_table.insert().values(
                context_id=None,
                description='',
                related_object_id=workflow_id,
                related_object_type='Workflow',
                # NOTE(review): modified_by_id=1 hard-codes an admin user;
                # confirm user 1 exists in all target databases.
                modified_by_id=1,
                created_at=current_datetime,
                updated_at=current_datetime,
            ))
        # Get the context id (re-queried because insert() does not return it)
        context = connection.execute(
            select([contexts_table.c.id]).where(
                and_(
                    contexts_table.c.related_object_id == workflow_id,
                    contexts_table.c.related_object_type == 'Workflow')
            )).fetchone()
        context_id = context.id
        # Move the Workflow into the new context
        op.execute(workflows_table.update().values(context_id=context_id)
                   .where(workflows_table.c.id == workflow_id))
    # Now, select *all* workflows, since the rest applies to all equally
    workflows = connection.execute(
        select([workflows_table.c.id, workflows_table.c.context_id]))
    for workflow in workflows:
        workflow_id = workflow.id
        context_id = workflow.context_id
        # Create the Context Implications to/from the Workflow context
        op.execute(context_implications_table.insert().values(
            source_context_id=context_id,
            source_context_scope='Workflow',
            context_id=None,
            context_scope=None,
            modified_by_id=1,
            created_at=current_datetime,
            updated_at=current_datetime
        ))
        op.execute(context_implications_table.insert().values(
            source_context_id=None,
            source_context_scope=None,
            context_id=context_id,
            context_scope='Workflow',
            modified_by_id=1,
            created_at=current_datetime,
            updated_at=current_datetime
        ))
        # Add role assignments for owners and delete the object_owner relationships
        owners = connection.execute(
            select([object_owners_table.c.id, object_owners_table.c.person_id])
            .where(
                and_(
                    object_owners_table.c.ownable_id == workflow_id,
                    object_owners_table.c.ownable_type == 'Workflow')
            )).fetchall()
        for owner in owners:
            connection.execute(
                user_roles_table.insert().values(
                    context_id=context_id,
                    role_id=workflow_owner_role.id,
                    person_id=owner.person_id,
                    modified_by_id=1,
                    created_at=current_datetime,
                    updated_at=current_datetime,
                ))
            # Ownership is now expressed via user_roles, so drop the old row.
            connection.execute(
                object_owners_table.delete().where(
                    object_owners_table.c.id == owner.id))
        # Add role assignments for WorkflowPerson objects
        members = connection.execute(
            select([workflow_people_table.c.person_id])
            .where(workflow_people_table.c.workflow_id == workflow_id)
        ).fetchall()
        for member in members:
            connection.execute(
                user_roles_table.insert().values(
                    context_id=context_id,
                    role_id=workflow_member_role.id,
                    person_id=member.person_id,
                    modified_by_id=1,
                    created_at=current_datetime,
                    updated_at=current_datetime,
                ))
        # NOTE(review): the triple-quoted string below is dead documentation
        # left in the original migration; kept verbatim.
        '''
    directly_connected_tables = [
        workflow_objects_table,
        workflow_people_table,
        workflow_tasks_table,
        task_groups_table,
        cycles_table,
        ]
    polymorphically_connected_tables = [
        object_files_table,
        object_folders_table,
        object_owners_table,
        ]
    cycle_connected_tables = [
        cycle_task_groups_table,
        cycle_task_entries_table,
        cycle_task_group_objects_table,
        cycle_task_group_object_tasks_table,
        ]
    '''
        # Update rows for directly-connected tables
        op.execute(workflow_objects_table.update().values(context_id=context_id)
                   .where(workflow_objects_table.c.workflow_id == workflow_id))
        op.execute(workflow_people_table.update().values(context_id=context_id)
                   .where(workflow_people_table.c.workflow_id == workflow_id))
        op.execute(workflow_tasks_table.update().values(context_id=context_id)
                   .where(workflow_tasks_table.c.workflow_id == workflow_id))
        op.execute(task_groups_table.update().values(context_id=context_id)
                   .where(task_groups_table.c.workflow_id == workflow_id))
        op.execute(
            task_group_objects_table.update()
            .values(context_id=context_id)
            .where(task_group_objects_table.c.task_group_id.in_(
                select([task_groups_table.c.id])
                .where(task_groups_table.c.workflow_id == workflow_id))))
        op.execute(
            task_group_tasks_table.update()
            .values(context_id=context_id)
            .where(task_group_tasks_table.c.task_group_id.in_(
                select([task_groups_table.c.id])
                .where(task_groups_table.c.workflow_id == workflow_id))))
        op.execute(cycles_table.update().values(context_id=context_id)
                   .where(cycles_table.c.workflow_id == workflow_id))
        # Update rows for polymorphically-connected tables
        op.execute(object_files_table.update().values(context_id=context_id)
                   .where(
                       and_(
                           object_files_table.c.fileable_id == workflow_id,
                           object_files_table.c.fileable_type == 'Workflow')))
        op.execute(object_folders_table.update().values(context_id=context_id)
                   .where(
                       and_(
                           object_folders_table.c.folderable_id == workflow_id,
                           object_folders_table.c.folderable_type == 'Workflow')))
        # Update rows for cycle-connected tables
        op.execute(
            cycle_task_entries_table.update()
            .values(context_id=context_id)
            .where(cycle_task_entries_table.c.cycle_id.in_(
                select([cycles_table.c.id])
                .where(cycles_table.c.workflow_id == workflow_id))))
        op.execute(
            cycle_task_groups_table.update()
            .values(context_id=context_id)
            .where(cycle_task_groups_table.c.cycle_id.in_(
                select([cycles_table.c.id])
                .where(cycles_table.c.workflow_id == workflow_id))))
        op.execute(
            cycle_task_group_objects_table.update()
            .values(context_id=context_id)
            .where(cycle_task_group_objects_table.c.cycle_id.in_(
                select([cycles_table.c.id])
                .where(cycles_table.c.workflow_id == workflow_id))))
        op.execute(
            cycle_task_group_object_tasks_table.update()
            .values(context_id=context_id)
            .where(cycle_task_group_object_tasks_table.c.cycle_id.in_(
                select([cycles_table.c.id])
                .where(cycles_table.c.workflow_id == workflow_id))))
def downgrade():
    """No-op: this data migration is not reversible."""
| apache-2.0 |
awduda/awduda.github.io | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 353 | 1139 | from __future__ import absolute_import, division, unicode_literals
import re
from . import base
from ..constants import rcdataElements, spaceCharacters
# Collapse the frozenset of HTML space characters into a single string,
# then build a regex matching one or more of them.
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(base.Filter):
    """Token-stream filter that collapses runs of whitespace to single spaces,
    except inside whitespace-preserving elements (pre, textarea, RCDATA)."""

    # Elements whose text content must keep its whitespace verbatim.
    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        # Depth counter of currently-open whitespace-preserving elements;
        # note `type` shadows the builtin (kept for byte-compatibility).
        preserve = 0
        for token in base.Filter.__iter__(self):
            type = token["type"]
            if type == "StartTag" \
                    and (preserve or token["name"] in self.spacePreserveElements):
                preserve += 1
            elif type == "EndTag" and preserve:
                preserve -= 1
            elif not preserve and type == "SpaceCharacters" and token["data"]:
                # Test on token["data"] above to not introduce spaces where there were not
                token["data"] = " "
            elif not preserve and type == "Characters":
                token["data"] = collapse_spaces(token["data"])
            yield token
def collapse_spaces(text):
    """Replace each run of whitespace characters in *text* with one space."""
    collapsed = SPACES_REGEX.sub(" ", text)
    return collapsed
| mit |
alesdaniel/pydns | main.py | 1 | 6266 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Script de actualizacion automatica para Hurricane Electric DNS
# Testeado en Python 3.4 windows, Debian
#BSD
#Copyright (c) 2016, Ales Daniel alesdaniel77@gmail.com
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#* Neither the name of pydns nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# 28/03/2016 - Version Inicial - Daniel.
# 07/04/2016 - Agrega log - Daniel.
import http.client
import logging
import re
import socket
import ssl
import sys
import urllib.request, urllib.parse, urllib.error
pagina = ''  # last HTML page fetched by descarga()
ips = ''  # IPv4 addresses extracted from pagina by busca_ip()
def actualiza_ip():
    """Push the current public IP (global *ips*) to the Hurricane Electric
    dynamic-DNS update API.

    Network side effects only; failures are printed and logged, never raised.
    """
    global ips
    logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='py_dns.log',
                        level=logging.ERROR)
    datos = {}
    # Datos de pagina a actualizar IPs
    datos['hostname'] = 'rsync.petro-tandil.com.ar'
    datos['password'] = 'pass'
    datos['myip'] = ips[0]
    pag = urllib.parse.urlencode(datos)
    print(pag)
    url = 'https://dyn.dns.he.net/nic/update?'
    urlc = url + pag
    # NOTE(review): certificate verification is deliberately disabled here;
    # confirm the endpoint actually requires it before keeping this.
    context = ssl._create_unverified_context()
    print(urlc)
    try:
        datos = urllib.request.urlopen(urlc, context=context)
    except (urllib.error.HTTPError, urllib.error.URLError, socket.timeout,
            socket.error, UnicodeEncodeError, http.client.BadStatusLine,
            http.client.IncompleteRead) as e:
        # Bug fixes vs. the original handlers:
        #  * `http.client` was referenced but never imported (NameError);
        #  * `"..." + e` concatenated str with an exception (TypeError).
        #    Lazy %-formatting avoids both.
        logging.error("actualiza_ip() %s", e)
        print(e)
# https://dyn.dns.he.net/nic/update?hostname=dyn.example.com&password=password&myip=192.168.0.1
#Compara que la ultima ip sea igual a la ultima grabada
def consulta(ips):
    """Return True when *ips* equals the last persisted IP in ip.txt.

    Otherwise overwrite ip.txt with *ips*, log the change, and return False.

    Bug fixes vs. the original: the file handle was "closed" via the
    ``a.closed`` attribute (a no-op read) and the builtin ``str`` was
    shadowed; ``with`` blocks now guarantee the handles are released.
    """
    logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='py_dns.log',
                        level=logging.ERROR)
    try:
        with open('ip.txt', 'r') as f:
            stored = f.read()
    except IOError:
        stored = None  # first run: no previous IP recorded
    if stored == ips:
        return True
    with open('ip.txt', 'w+') as f:
        f.write(ips)
    logging.error("Actualizacion IP: " + ips)
    return False
# Busca dentro del html o texto devuelto la direccion ip
def busca_ip():
    """Extract the first IPv4 address from the global *pagina* and act on it.

    Exits the process when no valid address is present; otherwise delegates
    to consulta()/actualiza_ip() to persist and publish a changed address.
    """
    global ips
    logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='py_dns.log',
                        level=logging.ERROR)
    ips = re.findall(r'[0-9]+(?:\.[0-9]+){3}', pagina)
    if not ips:
        # Bug fix: the original indexed ips[0] unconditionally and crashed
        # with an uncaught IndexError when the page contained no address.
        logging.error("busca_ip() no IP found")
        exit(1)
    print(ips[0])
    try:
        # Validate that the captured text really is a dotted-quad address.
        socket.inet_aton(ips[0])
    except TypeError:
        print("type")
        logging.error("busca_ip() type " + ips[0])
        exit(1)
    except socket.error:
        print("sock")
        logging.error("busca_ip() sock " + ips[0])
        exit(1)
    if not consulta(ips[0]):
        actualiza_ip()
def descarga():
    """Download the IP-reporting page into the global *pagina*.

    Exits the process when nothing could be downloaded.
    """
    global pagina
    logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='py_dns.log',
                        level=logging.ERROR)
    try:
        # html = urllib.request.urlopen("http://www.see-my-ip.com/")
        html = urllib.request.urlopen("http://checkip.dyndns.org/")
        pagina = html.read().decode("latin1", 'ignore')
    except (urllib.error.HTTPError, urllib.error.URLError, socket.timeout,
            socket.error, UnicodeEncodeError, http.client.BadStatusLine,
            http.client.IncompleteRead) as e:
        # Bug fixes vs. the original handlers:
        #  * `http.client` was referenced but never imported (NameError);
        #  * `"..." + e` concatenated str with an exception (TypeError).
        print(e)
        logging.error("descarga() %s", e)
        pagina = ''
    if len(pagina) > 0:
        print(pagina)
    else:
        logging.error("descarga() len(pagina) = 0")
        exit(1)
if __name__ == "__main__":
    # The script relies on Python 3 urllib/ssl APIs; refuse to run on 2.x.
    if sys.version_info < (3, 0, 0):
        sys.stderr.write("You need python 3.0 or later to run this script\n")
        exit(1)
    # Fetch the page, then extract/compare/publish the IP.
    descarga()
    busca_ip()
| bsd-3-clause |
skycucumber/Messaging-Gateway | src/Command/ModifySuperbox.py | 1 | 2300 | '''
Created on 2013-8-12
@author: E525649
'''
from BaseCommand import CBaseCommand
from sqlalchemy.exc import SQLAlchemyError
from DB import SBDB,SBDB_ORM
from Command import BaseCommand
from sqlalchemy import and_
import logging
class CModifySuperbox(CBaseCommand):
    '''
    Command handler (id 0x00040007) that renames a superbox and/or changes
    its unit code within an apartment, bumping the apartment version so
    clients can detect the change.  Python 2 syntax (except ..., e).
    '''
    command_id=0x00040007

    def __init__(self,data=None,protocol=None):
        '''
        Constructor
        '''
        CBaseCommand.__init__(self, data, protocol)

    def Run(self):
        # Serialize command handling per connection via the protocol lock.
        with self.protocol.lockCmd:
            if not self.Authorized():
                self.SendUnauthorizedResp()
                return
            CBaseCommand.Run(self)
            with SBDB.session_scope() as session :
                apartment_id=self.body[BaseCommand.PN_APARTMENTID]
                sb_id=self.body[BaseCommand.PN_SUPERBOXID]
                sb_code=self.body.get(BaseCommand.PN_SB_CODE,None)  # optional
                name=self.body.get(BaseCommand.PN_NAME,None)  # optional
                respond=self.GetResp()
                if sb_id is None:
                    respond.SetErrorCode(BaseCommand.CS_PARAMLACK)
                else:
                    try:
                        apartment=None
                        # Changing the code affects other apartments too, so
                        # bump versions more broadly in that case.
                        if sb_code is None:
                            apartment=SBDB.IncreaseVersion(session, apartment_id)
                        else:
                            apartment=SBDB.IncreaseVersions(session, 0,apartment_id)
                        superbox,apartment_superbox=session.query(SBDB_ORM.Superbox,SBDB_ORM.Apartment_Superbox).join(SBDB_ORM.Apartment_Superbox).filter(and_(SBDB_ORM.Superbox.id==sb_id,SBDB_ORM.Apartment_Superbox.apartment_id==apartment_id)).first()
                        if name is not None: apartment_superbox.name=name
                        if sb_code is not None: superbox.uni_code=sb_code
                        respond.body[BaseCommand.PN_VERSION]=apartment.version
                        session.commit()
                    except SQLAlchemyError,e:
                        respond.SetErrorCode(BaseCommand.CS_DBEXCEPTION)
                        logging.error("transport %d:%s",id(self.protocol.transport),e)
                        session.rollback()
                respond.Send()
| gpl-2.0 |
Danisan/odoo-1 | addons/l10n_br/__openerp__.py | 430 | 3125 | # -*- encoding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2009 Renato Lima - Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Brazilian - Accounting',
'category': 'Localization/Account Charts',
'description': """
Base module for the Brazilian localization
==========================================
This module consists in:
- Generic Brazilian chart of accounts
- Brazilian taxes such as:
- IPI
- ICMS
- PIS
- COFINS
- ISS
- IR
- IRPJ
- CSLL
The field tax_discount has also been added in the account.tax.template and
account.tax objects to allow the proper computation of some Brazilian VATs
such as ICMS. The chart of account creation wizard has been extended to
propagate those new data properly.
It's important to note however that this module lack many implementations to
use OpenERP properly in Brazil. Those implementations (such as the electronic
fiscal Invoicing which is already operational) are brought by more than 15
additional modules of the Brazilian Launchpad localization project
https://launchpad.net/openerp.pt-br-localiz and their dependencies in the
extra addons branch. Those modules aim at not breaking with the remarkable
OpenERP modularity, this is why they are numerous but small. One of the
reasons for maintaining those modules apart is that Brazilian Localization
leaders need commit rights agility to complete the localization as companies
fund the remaining legal requirements (such as soon fiscal ledgers,
accounting SPED, fiscal SPED and PAF ECF that are still missing as September
2011). Those modules are also strictly licensed under AGPL V3 and today don't
come with any additional paid permission for online use of 'private modules'.
""",
'license': 'AGPL-3',
'author': 'Akretion, OpenERP Brasil',
'website': 'http://openerpbrasil.org',
'version': '0.6',
'depends': ['account','account_chart'],
'data': [
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account_tax_code_template.xml',
'data/account_chart_template.xml',
'data/account_tax_template.xml',
'account_view.xml',
'l10n_br_wizard.xml',
],
'installable': True,
}
| agpl-3.0 |
40223136/w17test2 | static/Brython3.1.1-20150328-091302/Lib/locale.py | 624 | 1918 | def getdefaultlocale():
return __BRYTHON__.language,None
def localeconv():
    """ localeconv() -> dict.
        Returns numeric and monetary locale-specific parameters.
    """
    # 'C' locale default values.
    # Fix: the original dict literal repeated four keys ('decimal_point',
    # 'negative_sign', 'positive_sign', 'p_sep_by_space'); later duplicates
    # silently overwrote the earlier identical entries.  Dropping them leaves
    # the resulting mapping unchanged.
    return {'grouping': [127],
            'currency_symbol': '',
            'n_sign_posn': 127,
            'p_cs_precedes': 127,
            'n_cs_precedes': 127,
            'mon_grouping': [],
            'n_sep_by_space': 127,
            'decimal_point': '.',
            'negative_sign': '',
            'positive_sign': '',
            'p_sep_by_space': 127,
            'int_curr_symbol': '',
            'p_sign_posn': 127,
            'thousands_sep': '',
            'mon_thousands_sep': '',
            'frac_digits': 127,
            'mon_decimal_point': '',
            'int_frac_digits': 127}
def setlocale(category, value=None):
    """ setlocale(integer,string=None) -> string.
        Activates/queries locale processing.
    """
    # Only the 'C' locale (or a query) is supported by this emulation.
    if value in (None, '', 'C'):
        return 'C'
    raise Error('_locale emulation only supports "C" locale')
CHAR_MAX = 127

# Locale category constants mirroring the C `_locale` module.
LC_CTYPE = 0
LC_NUMERIC = 1
LC_TIME = 2
LC_COLLATE = 3
LC_MONETARY = 4
LC_MESSAGES = 5
LC_ALL = 6

# This emulation reports failures with a plain ValueError.
Error = ValueError


def getlocale(category=LC_CTYPE):
    """Return the (language code, encoding) pair for *category*.

    *category* may be any LC_* constant except LC_ALL and defaults to
    LC_CTYPE.  In this browser-side emulation neither value can be
    determined, so the result is always ``(None, None)``.
    """
    return (None, None)
| gpl-3.0 |
tensorflow/tpu | models/official/detection/utils/object_detection/shape_utils.py | 2 | 3708 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
import tensorflow.compat.v1 as tf
def assert_shape_equal(shape_a, shape_b):
  """Asserts that shape_a and shape_b are equal.

  If both shapes are fully static (all ints), the comparison happens at
  graph-construction time and a ValueError is raised on mismatch.  If either
  shape contains dynamic dimensions, a runtime tf.assert_equal op is returned
  instead.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
    when the shapes are dynamic.

  Raises:
    ValueError: When shapes are both static and unequal.
  """
  a_is_static = all(isinstance(dim, int) for dim in shape_a)
  b_is_static = all(isinstance(dim, int) for dim in shape_b)
  if a_is_static and b_is_static:
    if shape_a != shape_b:
      raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
    return tf.no_op()
  return tf.assert_equal(shape_a, shape_b)
def combined_static_and_dynamic_shape(tensor):
  """Returns a list containing static and dynamic values for the dimensions.

  Each entry is the static (Python int) size when known, otherwise the
  corresponding element of tf.shape(tensor).  Useful to preserve static
  shapes when available in reshape operations.

  Args:
    tensor: A tensor of any type.

  Returns:
    A list of size tensor.shape.ndims containing integers or a scalar tensor.
  """
  dynamic_shape = tf.shape(tensor)
  return [
      static_dim if static_dim is not None else dynamic_shape[index]
      for index, static_dim in enumerate(tensor.shape.as_list())
  ]
def pad_or_clip_nd(tensor, output_shape):
  """Pad or Clip given tensor to the output shape.

  Each dimension is first clipped down to the requested size (when the input
  is larger) and then zero-padded at the trailing end (when the input is
  smaller).  Dimensions whose requested size is None/dynamic are left as-is.

  Args:
    tensor: Input tensor to pad or clip.
    output_shape: A list of integers / scalar tensors (or None for dynamic dim)
      representing the size to pad or clip each dimension of the input tensor.

  Returns:
    Input tensor padded and clipped to the output shape.
  """
  tensor_shape = tf.shape(tensor)
  # Per-dim slice size: the target size when the input is larger, else -1
  # (tf.slice's "take everything remaining" sentinel).
  clip_size = [
      tf.where(tensor_shape[i] - shape > 0, shape, -1)
      if shape is not None else -1 for i, shape in enumerate(output_shape)
  ]
  clipped_tensor = tf.slice(
      tensor,
      begin=tf.zeros(len(clip_size), dtype=tf.int32),
      size=clip_size)

  # Pad tensor if the shape of clipped tensor is smaller than the expected
  # shape.
  clipped_tensor_shape = tf.shape(clipped_tensor)
  # Trailing (end-side) padding only; leading padding is always zero.
  trailing_paddings = [
      shape - clipped_tensor_shape[i] if shape is not None else 0
      for i, shape in enumerate(output_shape)
  ]
  paddings = tf.stack(
      [
          tf.zeros(len(trailing_paddings), dtype=tf.int32),
          trailing_paddings
      ],
      axis=1)
  padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
  # Restore whatever static shape information the request carried.
  output_static_shape = [
      dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
  ]
  padded_tensor.set_shape(output_static_shape)
  return padded_tensor
| apache-2.0 |
newrocknj/horizon | tools/install_venv_common.py | 166 | 5958 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
    """Drives creation of a development virtualenv for an OpenStack project.

    Must stay Python 2.6 compatible (see module docstring)."""

    def __init__(self, root, venv, requirements,
                 test_requirements, py_version,
                 project):
        # root: project checkout dir (also the cwd for all subcommands);
        # venv: target virtualenv path; requirements/test_requirements:
        # pip requirement file paths; project: display name for messages.
        self.root = root
        self.venv = venv
        self.requirements = requirements
        self.test_requirements = test_requirements
        self.py_version = py_version
        self.project = project

    def die(self, message, *args):
        """Print a %-formatted message to stderr and exit(1)."""
        print(message % args, file=sys.stderr)
        sys.exit(1)

    def check_python_version(self):
        if sys.version_info < (2, 6):
            self.die("Need Python Version >= 2.6")

    def run_command_with_code(self, cmd, redirect_output=True,
                              check_exit_code=True):
        """Runs a command in an out-of-process shell.

        Returns the output of that command. Working directory is self.root.
        """
        if redirect_output:
            stdout = subprocess.PIPE
        else:
            stdout = None

        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
        output = proc.communicate()[0]
        if check_exit_code and proc.returncode != 0:
            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
        # Returns (captured stdout or None, exit code).
        return (output, proc.returncode)

    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
        """Like run_command_with_code but returns only the output."""
        return self.run_command_with_code(cmd, redirect_output,
                                          check_exit_code)[0]

    def get_distro(self):
        # Fedora/RHEL/CentOS are detected via their release files; everything
        # else falls back to the generic Distro behaviour.
        if (os.path.exists('/etc/fedora-release') or
                os.path.exists('/etc/redhat-release')):
            return Fedora(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)
        else:
            return Distro(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)

    def check_dependencies(self):
        self.get_distro().install_virtualenv()

    def create_virtualenv(self, no_site_packages=True):
        """Creates the virtual environment and installs PIP.

        Creates the virtual environment and installs PIP only into the
        virtual environment.
        """
        if not os.path.isdir(self.venv):
            print('Creating venv...', end=' ')
            if no_site_packages:
                self.run_command(['virtualenv', '-q', '--no-site-packages',
                                  self.venv])
            else:
                self.run_command(['virtualenv', '-q', self.venv])
            print('done.')
        else:
            print("venv already exists...")
            pass

    def pip_install(self, *args):
        # with_venv.sh re-executes the command inside the virtualenv.
        self.run_command(['tools/with_venv.sh',
                          'pip', 'install', '--upgrade'] + list(args),
                         redirect_output=False)

    def install_dependencies(self):
        print('Installing dependencies with pip (this can take a while)...')

        # First things first, make sure our venv has the latest pip and
        # setuptools and pbr
        self.pip_install('pip>=1.4')
        self.pip_install('setuptools')
        self.pip_install('pbr')

        self.pip_install('-r', self.requirements, '-r', self.test_requirements)

    def parse_args(self, argv):
        """Parses command-line arguments."""
        parser = optparse.OptionParser()
        parser.add_option('-n', '--no-site-packages',
                          action='store_true',
                          help="Do not inherit packages from global Python "
                               "install")
        return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
    """Generic distribution: installs virtualenv via easy_install if needed."""

    def check_cmd(self, cmd):
        # True when `which` finds the executable on PATH.
        return bool(self.run_command(['which', cmd],
                                     check_exit_code=False).strip())

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        if self.check_cmd('easy_install'):
            print('Installing virtualenv via easy_install...', end=' ')
            if self.run_command(['easy_install', 'virtualenv']):
                print('Succeeded')
                return
            else:
                print('Failed')

        self.die('ERROR: virtualenv not found.\n\n%s development'
                 ' requires virtualenv, please install it using your'
                 ' favorite package management tool' % self.project)
class Fedora(Distro):
    """This covers all Fedora-based distributions.

    Includes: Fedora, RHEL, CentOS, Scientific Linux
    """

    def check_pkg(self, pkg):
        # True when the RPM database reports the package as installed.
        return self.run_command_with_code(['rpm', '-q', pkg],
                                          check_exit_code=False)[1] == 0

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        # Unlike the generic Distro, require the system package rather than
        # falling back to easy_install.
        if not self.check_pkg('python-virtualenv'):
            self.die("Please install 'python-virtualenv'.")

        super(Fedora, self).install_virtualenv()
| apache-2.0 |
RealVNC/Android-kernel-mako-NCM | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Python 2 perf-script: tallies syscalls per comm/pid.  An optional argv[1]
# filters by pid (if it parses as an int) or by command name otherwise.
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# syscalls[comm][pid][syscall_id] -> count (autodict auto-creates levels).
syscalls = autodict()

def trace_begin():
    print "Press control+C to stop and show the summary"

def trace_end():
    print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # Skip events that don't match the requested comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    # First hit on a leaf raises TypeError from autodict; initialize then.
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        syscalls[common_comm][common_pid][id] = 1

def print_syscall_totals():
    # Dump counts per comm/pid, most frequent syscall first.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print "  %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
mwcraig/conda-build | conda_build/scripts.py | 2 | 1842 | '''
Module for creating entry points and scripts for PyPI packages.
'''
from __future__ import absolute_import, division, print_function
import re
import os
import sys
import shutil
from os.path import dirname, isdir, join
import conda.config as cc
from conda_build.config import config
PY_TMPL = """\
if __name__ == '__main__':
import sys
from %s import %s
sys.exit(%s())
"""
bin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'
# Matches "name = module.path:func" entry-point specs (raw string avoids
# relying on Python passing unknown escapes through unchanged).
entry_pat = re.compile(r'\s*([\w\-\.]+)\s*=\s*([\w.]+):(\w+)\s*$')


def iter_entry_points(items):
    """Yield (command, module, function) triples parsed from *items*.

    Exits the process with an error message when a spec does not match.
    """
    for item in items:
        m = entry_pat.match(item)
        if m is None:
            # Fix: original message had a typo ("cound not").
            sys.exit("Error could not match entry point: %r" % item)
        yield m.groups()
def create_entry_point(path, module, func):
    """Write an executable launcher at *path* that calls module:func.

    On Windows this writes ``<path>-script.py`` plus a copied ``.exe``
    shim; elsewhere it writes a shebang script and marks it 0755.
    """
    pyscript = PY_TMPL % (module, func, func)
    if sys.platform == 'win32':
        with open(path + '-script.py', 'w') as fo:
            fo.write(pyscript)
        # cli-32.exe / cli-64.exe shipped alongside this module is the shim
        # that executes the adjacent -script.py.
        shutil.copyfile(join(dirname(__file__), 'cli-%d.exe' % cc.bits),
                        path + '.exe')
    else:
        with open(path, 'w') as fo:
            # Shebang points at the build-environment Python.
            fo.write('#!%s\n' % config.build_python)
            fo.write(pyscript)
        os.chmod(path, int('755', 8))
def create_entry_points(items):
    """Create launcher scripts in the build prefix for each entry-point spec.

    *items* is a list of "name = module:func" strings; an empty/None list
    is a no-op.  The bin directory is created on demand.
    """
    if not items:
        return
    bin_dir = join(config.build_prefix, bin_dirname)
    if not isdir(bin_dir):
        os.mkdir(bin_dir)
    for cmd, module, func in iter_entry_points(items):
        create_entry_point(join(bin_dir, cmd), module, func)
def prepend_bin_path(env, prefix, prepend_prefix=False):
    """Prepend *prefix*'s script directories to env['PATH'] (in place).

    Resulting order: [prefix if prepend_prefix] [prefix/Library/bin on
    win32] prefix/<bin_dirname> <old PATH>.  Returns the mutated *env*.
    """
    path_parts = [join(prefix, bin_dirname), env['PATH']]
    if sys.platform == "win32":
        path_parts.insert(0, join(prefix, "Library", "bin"))
    if prepend_prefix:
        path_parts.insert(0, prefix)
    env['PATH'] = os.pathsep.join(path_parts)
    return env
| bsd-3-clause |
Cojacfar/Maker | comm/lib/python2.7/site-packages/pip/_vendor/distlib/util.py | 163 | 49824 | #
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#

COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)

IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'

RELOP = '([<>=!~]=)|[<>]'

#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
                    RELOP + r')\s*(' + VERSPEC + '))*')

DIRECT_REF = '(from\s+(?P<diref>.*))'

#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')

EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
# Full requirement: name, optional [extras], optional (constraints)/version.
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
               CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)

#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
    """Parse a requirement string such as 'foo >= 1.2, < 2.0 [bar, baz]'.

    Returns a Container with attributes name, constraints (list of
    (op, version) pairs or None), extras, requirement (a normalized
    'name (op version, ...)' string), source (the input) and url, or
    None when the string doesn't match REQUIREMENT_RE.
    """
    m = REQUIREMENT_RE.match(s)
    if not m:
        return None
    d = m.groupdict()
    name = d['dn']
    cons = d['c1'] or d['c2']
    if d['diref']:
        # direct reference ('from <url>') - constraints don't apply
        cons = None
        url = d['diref'].strip()
    else:
        url = None
    if not cons:
        cons = None
        rs = d['dn']
    else:
        if cons[0] not in '<>!=':
            # no explicit operator: treated as a compatible release
            cons = '~=' + cons
        cons = [(cm.group('op'), cm.group('vn'))
                for cm in RELOP_IDENT_RE.finditer(cons)]
        rs = '%s (%s)' % (name, ', '.join('%s %s' % con for con in cons))
    extras = COMMA_RE.split(d['ex']) if d['ex'] else None
    return Container(name=name, constraints=cons, extras=extras,
                     requirement=rs, source=s, url=url)
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files.

    ``rules`` is an iterable of (base, suffix, dest) glob rules relative
    to ``resources_root``; a rule whose ``dest`` is None removes entries
    matched earlier.  Returns a dict mapping each resource file (relative
    to ``resources_root``) to its destination path.
    """
    def relative_to(base, path):
        # Normalize both to '/'-separated form and strip the base prefix.
        base = base.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(base)
        return path[len(base):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        for abs_base in iglob(os.path.join(resources_root, base)):
            for abs_path in iglob(os.path.join(abs_base, suffix)):
                resource_file = relative_to(resources_root, abs_path)
                if dest is None:
                    # a None destination removes any earlier match
                    destinations.pop(resource_file, None)
                else:
                    rel_path = relative_to(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations
def in_venv():
    """Return True when running inside a virtualenv or a PEP 405 venv."""
    if hasattr(sys, 'real_prefix'):
        # classic virtualenv marker
        return True
    # PEP 405 venvs expose a base_prefix differing from prefix
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the path of the Python executable to use.

    On OS X, a venv launcher records the underlying interpreter in the
    __VENV_LAUNCHER__ environment variable; prefer that when present.
    """
    if sys.platform == 'darwin' and '__VENV_LAUNCHER__' in os.environ:
        return os.environ['__VENV_LAUNCHER__']
    return sys.executable
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt the user until the first character of the reply (lowercased)
    is in ``allowed_chars``; return that character.

    An empty reply is substituted with ``default`` when one is given.
    When ``error_prompt`` is set, an invalid reply re-prompts with an
    explanation prepended.
    """
    p = prompt
    while True:
        answer = raw_input(p)
        p = prompt
        if not answer and default:
            answer = default
        if answer:
            c = answer[0].lower()
            if c in allowed_chars:
                return c
            if error_prompt:
                p = '%c: %s\n%s' % (c, error_prompt, prompt)
def extract_by_key(d, keys):
    """Return a new dict containing only ``keys`` copied from ``d``.

    ``keys`` may be an iterable of keys or a single whitespace-separated
    string; keys absent from ``d`` are silently skipped.
    """
    if isinstance(keys, string_types):
        keys = keys.split()
    return dict((k, d[k]) for k in keys if k in d)
def read_exports(stream):
    """Read an exports map (group -> {name: ExportEntry}) from *stream*.

    The payload is preferred as JSON with a top-level 'exports' key; if
    JSON parsing fails for any reason, the same data is re-parsed as
    legacy ini-style text where each section is a group and each option
    an export specification.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    # Buffer the whole payload so we can rewind for the fallback parse.
    stream = StringIO(data)
    try:
        data = json.load(stream)
        result = data['exports']
        for group, entries in result.items():
            for k, v in entries.items():
                # Convert each 'name = value' pair into an ExportEntry,
                # replacing the raw value in place.
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # Not JSON (or malformed JSON): rewind and parse as ini text.
        stream.seek(0, 0)
        cp = configparser.ConfigParser()
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            # Python 2 spelling of read_file
            cp.readfp(stream)
        result = {}
        for key in cp.sections():
            result[key] = entries = {}
            for name, value in cp.items(key):
                s = '%s = %s' % (name, value)
                entry = get_export_entry(s)
                assert entry is not None
                #entry.dist = self
                entries[name] = entry
        return result
def write_exports(exports, stream):
    """Write an exports map (group -> {name: ExportEntry}) to *stream*
    as ini-style text, one section per group."""
    if sys.version_info[0] >= 3:
        # configparser wants a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for group, entries in exports.items():
        # TODO check group, entries for valid values
        cp.add_section(group)
        for entry in entries.values():
            if entry.suffix is None:
                spec = entry.prefix
            else:
                spec = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                spec = '%s [%s]' % (spec, ', '.join(entry.flags))
            cp.set(group, entry.name, spec)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temporary directory which is
    removed, with all its contents, on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Context manager changing the working directory to *d*, restoring
    the previous directory on exit."""
    saved = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(saved)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Context manager setting the global default socket timeout to
    *seconds*, restoring the previous value on exit."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
class cached_property(object):
    """Non-data descriptor computing a value on first access and storing
    it on the instance, so later lookups bypass the descriptor."""

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        if obj is None:
            # accessed on the class: return the descriptor itself
            return self
        result = self.func(obj)
        # Store on the instance under the function's name; subsequent
        # attribute access finds the cached value, not this descriptor.
        object.__setattr__(obj, self.func.__name__, result)
        return result
def convert_path(pathname):
    """Return *pathname* converted to the native filesystem convention.

    Filenames in setup scripts are always written Unix-style
    ('/'-separated), so the path is split on '/' and rejoined with the
    local separator.  Raises ValueError on non-Unix-ish systems when
    *pathname* starts or ends with a slash.
    """
    if os.sep == '/' or not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # Drop any '.' components before rejoining.
    parts = [p for p in pathname.split('/') if p != os.curdir]
    if not parts:
        return os.curdir
    return os.path.join(*parts)
class FileOperator(object):
    """Perform file-system operations (copy, write, byte-compile, remove)
    with dry-run support and optional recording of the files written and
    directories created, enabling a later commit() or rollback().
    """
    def __init__(self, dry_run=False):
        self.dry_run = dry_run
        # Directories already verified/created by ensure_dir.
        self.ensured = set()
        self._init_record()
    def _init_record(self):
        # Reset change tracking; recording stays off until enabled
        # externally by setting self.record = True.
        self.record = False
        self.files_written = set()
        self.dirs_created = set()
    def record_as_written(self, path):
        """Track *path* as a written file when recording is enabled."""
        if self.record:
            self.files_written.add(path)
    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True
        return os.stat(source).st_mtime > os.stat(target).st_mtime
    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # Refuse to clobber symlinks or non-regular files.
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)
    def copy_stream(self, instream, outfile, encoding=None):
        """Copy *instream* to the file *outfile*; with *encoding*, the
        stream is treated as text and encoded while writing."""
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)
    def write_binary_file(self, path, data):
        """Write bytes *data* to *path*, creating parent dirs as needed."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)
    def write_text_file(self, path, data, encoding):
        """Write text *data* to *path*, encoded with *encoding*."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data.encode(encoding))
        self.record_as_written(path)
    def set_mode(self, bits, mask, files):
        """OR *bits* into each file's mode, then AND with *mask*
        (POSIX only; a no-op elsewhere)."""
        if os.name == 'posix':
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)
    # Convenience wrapper: make the given files executable (at most r-xr-xr-x).
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
    def ensure_dir(self, path):
        """Ensure directory *path* exists, creating parents recursively."""
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)
    def byte_compile(self, path, optimize=False, force=False, prefix=None):
        """Byte-compile *path* and return the cached bytecode path.

        When *prefix* is given, error diagnostics report the path
        relative to that prefix.
        """
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            # NOTE(review): if neither force nor newer() holds, diagpath is
            # unbound here (latent UnboundLocalError) - verify callers always
            # compile stale targets or pass force=True.
            py_compile.compile(path, dpath, diagpath, True)     # raise error
        self.record_as_written(dpath)
        return dpath
    def ensure_removed(self, path):
        """Remove a file, link or directory tree at *path* if present,
        keeping the recorded-change sets consistent."""
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)
    def is_writable(self, path):
        """Return True if *path*, or its nearest existing ancestor, is
        writable by the current process."""
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            # Stop once dirname no longer shortens the path (filesystem root).
            if parent == path:
                break
            path = parent
        return result
    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result
    def rollback(self):
        """Undo recorded changes: remove written files, then created
        directories (deepest first), and reset recording state."""
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """Resolve *dotted_path* (e.g. 'a.b.attr') relative to *module_name*.

    The module is imported if it isn't already in sys.modules.  When
    *dotted_path* is None the module object itself is returned; otherwise
    each attribute along the path is looked up in turn.

    :raises ImportError: if the module can't be imported.
    :raises AttributeError: if an attribute in the path doesn't exist.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        # Bug fix: __import__('a.b') returns the TOP-LEVEL package 'a',
        # so attribute resolution failed for dotted module names that
        # weren't already imported.  importlib.import_module returns the
        # leaf module itself.
        mod = importlib.import_module(module_name)
    if dotted_path is None:
        result = mod
    else:
        parts = dotted_path.split('.')
        result = getattr(mod, parts.pop(0))
        for p in parts:
            result = getattr(result, p)
    return result
class ExportEntry(object):
    """A single export specification: ``name = prefix:suffix [flags]``.

    The ``value`` attribute lazily resolves (and caches) the object the
    entry points at.
    """
    def __init__(self, name, prefix, suffix, flags):
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags

    @cached_property
    def value(self):
        # Resolved on first access, then cached on the instance.
        return resolve(self.prefix, self.suffix)

    def __repr__(self):
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        if not isinstance(other, ExportEntry):
            return False
        return (self.name == other.name and
                self.prefix == other.prefix and
                self.suffix == other.suffix and
                self.flags == other.flags)

    # Equality is value-based but hashing stays identity-based, matching
    # the original behavior.
    __hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
    """Parse *specification* ('name = callable [flags]') into an
    ExportEntry, or return None when it doesn't look like an export.

    :raises DistlibException: for malformed specifications (stray
        brackets, or more than one ':' in the callable path).
    """
    m = ENTRY_RE.search(specification)
    if not m:
        # Stray brackets without a parseable entry are an error,
        # anything else just isn't an export specification.
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
        return None
    d = m.groupdict()
    name = d['name']
    path = d['callable']
    colons = path.count(':')
    if colons == 0:
        prefix, suffix = path, None
    elif colons != 1:
        raise DistlibException('Invalid specification '
                               '%r' % specification)
    else:
        prefix, suffix = path.split(':')
    flags = d['flags']
    if flags is None:
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
        flags = []
    else:
        flags = [f.strip() for f in flags.split(',')]
    return ExportEntry(name, prefix, suffix, flags)
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        parent = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        parent = os.path.expanduser('~')
    result = os.path.join(parent, suffix)
    usable = False
    # 'isdir' rather than 'exists': a plain file with that name is unusable.
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
    if not usable:
        # Fall back to a throwaway temporary directory.
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return result
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, rest = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    return drive + rest.replace(os.sep, '--') + '.cache'
def ensure_slash(s):
    """Return *s*, appending '/' unless it already ends with one."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """Split 'user:password@host' into (username, password, host).

    Missing parts come back as None; the password is None when no ':'
    appears in the credentials part.
    """
    username = password = None
    if '@' in netloc:
        creds, netloc = netloc.split('@', 1)
        if ':' in creds:
            username, password = creds.split(':', 1)
        else:
            username = creds
    return username, password, netloc
def get_process_umask():
    """Return the current process umask without changing it.

    os.umask can only be read by writing, so a scratch value is set and
    the original immediately restored.
    """
    current = os.umask(0o22)
    os.umask(current)
    return current
def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    Asserts that *seq* is non-empty, matching the original contract.
    """
    count = 0
    verdict = True
    for item in seq:
        count += 1
        if not isinstance(item, string_types):
            verdict = False
            break
    assert count  # an empty sequence is a caller error
    return verdict
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')


def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    pyver = None
    m = PYTHON_VERSION.search(filename)
    if m:
        # Strip a trailing '-pyX.Y' marker, remembering the version.
        pyver = m.group(1)
        filename = filename[:m.start()]
    if project_name and len(filename) > len(project_name) + 1:
        # Try an exact project-name prefix first.
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            return filename[:n], filename[n + 1:], pyver
    m = PROJECT_NAME_AND_VERSION.match(filename)
    if m:
        return m.group(1), m.group(3), pyver
    return None
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')


def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.
    From e.g. a Provides-Dist value.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    """
    m = NAME_VERSION_RE.match(p)
    if not m:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = m.groupdict()
    return groups['name'].strip().lower(), groups['ver']
def get_extras(requested, available):
    """Resolve a requested set of extras against those available.

    '*' selects everything available; '-name' deselects a previously
    selected extra; a lone '-' is kept literally.  Unknown names are
    retained but a warning is logged.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.discard('*')
        result |= available
    for r in requested:
        if r == '-':
            result.add(r)
        elif r.startswith('-'):
            unwanted = r[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            result.discard(unwanted)
        else:
            if r not in available:
                logger.warning('undeclared extra: %s' % r)
            result.add(r)
    return result
#
# Extended metadata functionality
#
def _get_external_data(url):
    """Fetch and decode JSON metadata from *url*.

    Returns the decoded object, or {} for any failure (non-JSON response,
    network error, malformed payload); failures are logged, not raised.
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        ct = headers.get('Content-Type')
        # Bug fix: the header may be absent or carry parameters (e.g.
        # 'application/json; charset=utf-8'); compare only the media type
        # instead of the whole header value, and don't crash on None.
        if not ct or ct.split(';')[0].strip() != 'application/json':
            logger.debug('Unexpected response for JSON request')
        else:
            reader = codecs.getreader('utf-8')(resp)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
def get_project_data(name):
    """Fetch the distlib project metadata for *name* from red-dove.com."""
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/project.json' % (name[0].upper(), name))
    return _get_external_data(url)
def get_package_data(name, version):
    """Fetch metadata for one *version* of project *name* from red-dove.com."""
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/package-%s.json' % (name[0].upper(), name, version))
    return _get_external_data(url)
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """
    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        subscribers = self._subscribers
        if event in subscribers:
            if append:
                subscribers[event].append(subscriber)
            else:
                subscribers[event].appendleft(subscriber)
        else:
            # A deque supports cheap prepends for append=False.
            subscribers[event] = deque([subscriber])

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        """
        subscribers = self._subscribers
        if event not in subscribers:
            raise ValueError('No subscribers: %r' % event)
        subscribers[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.
        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        results = []
        for subscriber in self.get_subscribers(event):
            try:
                value = subscriber(event, *args, **kwargs)
            except Exception:
                # A failing subscriber contributes None; others still run.
                logger.exception('Exception during event publication')
                value = None
            results.append(value)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, results)
        return results
#
# Simple sequencing
#
class Sequencer(object):
    """Dependency sequencing helper.

    Tracks predecessor/successor edges between steps plus isolated nodes,
    and can produce an ordered list of steps leading to a goal, the
    strongly connected components, or a Graphviz 'dot' rendering.
    """
    def __init__(self):
        self._preds = {}
        self._succs = {}
        self._nodes = set()     # nodes with no preds/succs
    def add_node(self, node):
        """Register an isolated node (one with no edges)."""
        self._nodes.add(node)
    def remove_node(self, node, edges=False):
        """Remove *node*; with edges=True, also remove all its edges."""
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]
    def add(self, pred, succ):
        """Record an ordering edge: *pred* must come before *succ*."""
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)
    def remove(self, pred, succ):
        """Remove the edge pred -> succ; ValueError if it doesn't exist."""
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:
            raise ValueError('%r not a successor of %r' % (succ, pred))
    def is_step(self, step):
        """Return True if *step* is known (as an edge endpoint or node)."""
        return (step in self._preds or step in self._succs or
                step in self._nodes)
    def get_steps(self, final):
        """Return an ordered iterable of the steps needed to reach *final*,
        dependencies first; ValueError if *final* is unknown."""
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        # Breadth-first walk back through predecessors; duplicates are
        # pushed later in 'result' so they come out earlier after reversal.
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)
    @property
    def strong_connections(self):
        """Return the strongly connected components as a list of tuples,
        computed with Tarjan's algorithm over the successor graph."""
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []

        graph = self._succs

        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)

            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])

            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []

                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)

        for node in graph:
            if node not in lowlinks:
                strongconnect(node)

        return result
    @property
    def dot(self):
        """Return a Graphviz 'dot' source rendering of the graph."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append('  %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append('  %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
                      '.tgz', '.tbz', '.whl')


def unarchive(archive_filename, dest_dir, format=None, check=True):
    """Unpack *archive_filename* into *dest_dir*.

    *format* is one of 'zip', 'tgz', 'tbz' or 'tar', or None to infer it
    from the filename extension.  When *check* is true, every member path
    is validated so extraction cannot escape *dest_dir* (path traversal).

    :raises ValueError: for an unknown format or an unsafe member path.
    """
    def check_path(path):
        # Reject member names that would resolve outside dest_dir.
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        if not p.startswith(dest_dir) or p[plen] != os.sep:
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    plen = len(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:
            raise ValueError('Unknown format for %r' % archive_filename)
    # Bug fix: the tarfile mode was only assigned while inferring the
    # format from the filename, so an explicit format= argument left it
    # unbound and raised UnboundLocalError.  Derive it from the format.
    tar_modes = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        elif format in tar_modes:
            archive = tarfile.open(archive_filename, tar_modes[format])
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        else:
            raise ValueError('Unknown format for %r' % archive_filename)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """zip a directory tree into a BytesIO object"""
    result = io.BytesIO()
    prefix_len = len(directory)
    with ZipFile(result, "w") as zf:
        for root, dirs, files in os.walk(directory):
            # Archive names are relative to the directory being zipped.
            arc_root = root[prefix_len:]
            for name in files:
                zf.write(os.path.join(root, name),
                         os.path.join(arc_root, name))
    return result
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G', 'T', 'P')


class Progress(object):
    """Track progress of a value between a minimum and a maximum.

    ``maxval`` may be None, meaning the endpoint is unknown; percentage
    and ETA then report placeholders until :meth:`stop` is called.
    """
    unknown = 'UNKNOWN'

    def __init__(self, minval=0, maxval=100):
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None
        self.elapsed = 0
        self.done = False

    def update(self, curval):
        """Record the current value and refresh the elapsed time."""
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started

    def increment(self, incr):
        """Advance the current value by a non-negative amount."""
        assert incr >= 0
        self.update(self.cur + incr)

    def start(self):
        """Initialize to the minimum value; returns self for chaining."""
        self.update(self.min)
        return self

    def stop(self):
        """Mark progress as complete (jumping to max when it is known)."""
        if self.max is not None:
            self.update(self.max)
        self.done = True

    @property
    def maximum(self):
        return self.unknown if self.max is None else self.max

    @property
    def percentage(self):
        """Progress as a fixed-width percentage string."""
        if self.done:
            return '100 %'
        if self.max is None:
            return ' ?? %'
        fraction = 100.0 * (self.cur - self.min) / (self.max - self.min)
        return '%3d %%' % fraction

    def format_duration(self, duration):
        """Render *duration* (seconds) as HH:MM:SS, or placeholders when
        it can't be determined yet."""
        if (duration <= 0) and self.max is None or self.cur == self.min:
            return '??:??:??'
        return time.strftime('%H:%M:%S', time.gmtime(duration))

    @property
    def ETA(self):
        """Estimated time remaining (or total time once done)."""
        if self.done:
            prefix = 'Done'
            t = self.elapsed
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                # Linear extrapolation from the progress so far.
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))

    @property
    def speed(self):
        """Current rate as a human-readable '<n> <unit>B/s' string."""
        if self.elapsed == 0:
            rate = 0.0
        else:
            rate = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if rate < 1000:
                break
            rate /= 1000.0
        return '%d %sB/s' % (rate, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')


def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}.

    :raises ValueError: when '**' is not used alone as a path component,
        or when a '{'/'}' set marker is unbalanced.
    """
    if _CHECK_RECURSIVE_GLOB.search(path_glob):
        msg = """invalid glob %r: recursive glob "**" must be used alone"""
        raise ValueError(msg % path_glob)
    if _CHECK_MISMATCH_SET.search(path_glob):
        msg = """invalid glob %r: mismatching set marker '{' or '}'"""
        raise ValueError(msg % path_glob)
    return _iglob(path_glob)


def _iglob(path_glob):
    """Generator behind iglob: expand one {a,b,c} alternation (if any),
    then delegate to the standard glob, special-casing '**'."""
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        assert len(rich_path_glob) == 3, rich_path_glob
        # Fix: renamed from 'set' to avoid shadowing the builtin.
        prefix, alternates, suffix = rich_path_glob
        for item in alternates.split(','):
            for path in _iglob(''.join((prefix, item, suffix))):
                yield path
    else:
        if '**' not in path_glob:
            for item in std_iglob(path_glob):
                yield item
        else:
            prefix, radical = path_glob.split('**', 1)
            if prefix == '':
                prefix = '.'
            if radical == '':
                radical = '*'
            else:
                # we support both '**/suffix' and '**\\suffix' spellings
                radical = radical.lstrip('/')
                radical = radical.lstrip('\\')
            # Fix: renamed 'dir' (shadowed builtin) to 'dirnames'.
            for path, dirnames, files in os.walk(prefix):
                path = os.path.normpath(path)
                for fn in _iglob(os.path.join(path, radical)):
                    yield fn
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
    """HTTPS connection which can verify server certificates and match
    the certificate against the requested host name.

    Set ``ca_certs`` to a PEM bundle path to enable verification;
    ``check_domain`` additionally enforces hostname matching.
    """
    ca_certs = None # set this to the path to the certs file (.pem)
    check_domain = True # only used if ca_certs is not None

    # noinspection PyPropertyAccess
    def connect(self):
        """Open the TCP connection and wrap it in SSL, verifying the
        certificate and hostname when configured to do so."""
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if getattr(self, '_tunnel_host', False):
            # CONNECT through a proxy before the TLS handshake.
            self.sock = sock
            self._tunnel()

        if not hasattr(ssl, 'SSLContext'):
            # For 2.x
            if self.ca_certs:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        cert_reqs=cert_reqs,
                                        ssl_version=ssl.PROTOCOL_SSLv23,
                                        ca_certs=self.ca_certs)
        else:
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            # Disable the insecure SSLv2 protocol.
            context.options |= ssl.OP_NO_SSLv2
            if self.cert_file:
                context.load_cert_chain(self.cert_file, self.key_file)
            kwargs = {}
            if self.ca_certs:
                context.verify_mode = ssl.CERT_REQUIRED
                context.load_verify_locations(cafile=self.ca_certs)
                if getattr(ssl, 'HAS_SNI', False):
                    # Pass server_hostname for SNI where supported.
                    kwargs['server_hostname'] = self.host
            self.sock = context.wrap_socket(sock, **kwargs)
        if self.ca_certs and self.check_domain:
            try:
                match_hostname(self.sock.getpeercert(), self.host)
                logger.debug('Host verified: %s', self.host)
            except CertificateError:
                # Fail closed: tear the socket down before propagating.
                self.sock.shutdown(socket.SHUT_RDWR)
                self.sock.close()
                raise
class HTTPSHandler(BaseHTTPSHandler):
    """HTTPS handler which verifies server certificates against
    ``ca_certs`` and optionally checks the certificate's domain."""

    def __init__(self, ca_certs, check_domain=True):
        BaseHTTPSHandler.__init__(self)
        self.ca_certs = ca_certs
        self.check_domain = check_domain

    def _conn_maker(self, *args, **kwargs):
        """
        This is called to create a connection instance. Normally you'd
        pass a connection class to do_open, but it doesn't actually check for
        a class, and just expects a callable. As long as we behave just as a
        constructor would have, we should be OK. If it ever changes so that
        we *must* pass a class, we'll create an UnsafeHTTPSConnection class
        which just sets check_domain to False in the class definition, and
        choose which one to pass to do_open.
        """
        conn = HTTPSConnection(*args, **kwargs)
        if self.ca_certs:
            conn.ca_certs = self.ca_certs
            conn.check_domain = self.check_domain
        return conn

    def https_open(self, req):
        try:
            return self.do_open(self._conn_maker, req)
        except URLError as e:
            # Translate verification failures into CertificateError.
            if 'certificate verify failed' in str(e.reason):
                raise CertificateError('Unable to verify server certificate '
                                       'for %s' % req.host)
            raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
    """Handler which refuses plain-HTTP requests, so an opener built with
    it can only make HTTPS connections (inheriting from HTTPHandler stops
    build_opener from adding a default HTTP handler)."""
    def http_open(self, req):
        raise URLError('Unexpected HTTP request on what should be a secure '
                       'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]

if _ver_info == (2, 6):
    # Python 2.6's xmlrpclib uses the old-style HTTP/HTTPS facade classes,
    # which don't accept a timeout; these shims forward extra keyword
    # arguments (including timeout) through to the connection class.
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))

    class HTTPS(httplib.HTTPS):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
    """xmlrpclib Transport which applies a connection timeout."""

    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            # 2.6 has no per-host connection caching here.
            return HTTP(h, timeout=self.timeout)
        if not self._connection or host != self._connection[0]:
            # Cache a single connection per host, with its extra headers.
            self._extra_headers = eh
            self._connection = host, httplib.HTTPConnection(h)
        return self._connection[1]
class SafeTransport(xmlrpclib.SafeTransport):
    """xmlrpclib SafeTransport (HTTPS) which applies a connection timeout."""

    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.SafeTransport.__init__(self, use_datetime)

    def make_connection(self, host):
        h, eh, kwargs = self.get_host_info(host)
        if not kwargs:
            kwargs = {}
        kwargs['timeout'] = self.timeout
        if _ver_info == (2, 6):
            # NOTE(review): passes 'host' here where Transport passes 'h'
            # (the credential-stripped form) - verify this is intentional.
            return HTTPS(host, None, **kwargs)
        if not self._connection or host != self._connection[0]:
            self._extra_headers = eh
            self._connection = host, httplib.HTTPSConnection(h, None,
                                                             **kwargs)
        return self._connection[1]
class ServerProxy(xmlrpclib.ServerProxy):
    """ServerProxy accepting a ``timeout`` keyword argument, routed
    through the timeout-aware Transport/SafeTransport classes above."""

    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout
        # is specified
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            transport_class = SafeTransport if scheme == 'https' else Transport
            t = transport_class(timeout, use_datetime=use_datetime)
            kwargs['transport'] = t
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
    """Shared plumbing for the CSV reader/writer wrappers below.

    ``defaults`` holds the dialect options handed to the underlying csv
    object; native ``str`` values are used because the 2.x csv module
    rejects unicode dialect parameters.
    """

    defaults = {
        'delimiter': str(','),
        'quotechar': str('"'),
        'lineterminator': str('\n'),
    }

    def __enter__(self):
        # Enable ``with CSVReader(...) as r:`` style usage.
        return self

    def __exit__(self, *exc_info):
        # Close the wrapped stream regardless of how the block exited.
        self.stream.close()
class CSVReader(CSVBase):
    """Read rows of text from a CSV stream or file, decoding on 2.x."""
    def __init__(self, **kwargs):
        # Accept either an already-open byte ``stream`` or a ``path``.
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # needs to be a text stream
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)
    def __iter__(self):
        return self
    def next(self):
        """Return the next row with every value decoded to text."""
        result = next(self.reader)
        if sys.version_info[0] < 3:
            # The 2.x csv module yields byte strings; decode them so
            # callers always see unicode.
            for i, item in enumerate(result):
                if not isinstance(item, text_type):
                    result[i] = item.decode('utf-8')
        return result
    __next__ = next  # Python 3 iterator protocol
class CSVWriter(CSVBase):
    """Write rows of text values to a CSV file, encoding on 2.x."""

    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        if sys.version_info[0] < 3:
            # The 2.x csv module cannot handle unicode; encode any text
            # values to UTF-8 byte strings before writing.
            row = [item.encode('utf-8') if isinstance(item, text_type)
                   else item for item in row]
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """BaseConfigurator extended with an ``inc://`` converter and lazy,
    on-access construction of custom objects described in the config.
    """
    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'
    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # Base directory against which relative inc:// paths resolve.
        self.base = base or os.getcwd()
    def configure_custom(self, config):
        """Instantiate the custom object described by *config*.

        The '()' key names the callable (or a dotted path for resolve()),
        '[]' supplies positional args, '.' a mapping of attributes to set
        on the result, and remaining valid-identifier keys become kwargs.
        """
        def convert(o):
            # Recursively convert containers, constructing nested custom
            # objects wherever a dict carrying a '()' key is found.
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            # Apply post-construction attribute assignments from '.'.
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result
    def __getitem__(self, key):
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            # Build (and cache) custom objects lazily on first access.
            self.config[key] = result = self.configure_custom(result)
        return result
    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            # Relative include paths are resolved against self.base.
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
    """Mixin for running subprocesses and capturing their output."""
    def __init__(self, verbose=False, progress=None):
        # verbose: echo subprocess output to stderr rather than dots.
        # progress: optional callable(line, context) invoked per line.
        self.verbose = verbose
        self.progress = progress
    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        while True:
            s = stream.readline()
            if not s:
                # EOF - the subprocess closed this stream.
                break
            if progress is not None:
                progress(s, context)
            else:
                if not verbose:
                    # Minimal progress indication: one dot per line.
                    sys.stderr.write('.')
                else:
                    sys.stderr.write(s.decode('utf-8'))
                sys.stderr.flush()
        stream.close()
    def run_command(self, cmd, **kwargs):
        """Run *cmd*, draining its stdout/stderr concurrently through
        :meth:`reader` on two worker threads, and return the finished
        Popen object.
        """
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, **kwargs)
        t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
        t1.start()
        t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
        t2.start()
        # Wait for the process to exit and both readers to drain.
        p.wait()
        t1.join()
        t2.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return p
| gpl-2.0 |
laszlocsomor/tensorflow | tensorflow/contrib/data/python/kernel_tests/dataset_constructor_op_test.py | 4 | 26267 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import batching
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class DatasetConstructorTest(test.TestCase):
def testTensorDataset(self):
"""Test an dataset that represents a single tuple of tensors."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
iterator = (dataset_ops.Dataset.from_tensors(components)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
sess.run(init_op)
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testTensorSliceDataset(self):
"""Test an dataset that represents the slices from a tuple of tensors."""
components = (
np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(
np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
)
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
sess.run(init_op)
for i in range(4):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testTensorSliceDatasetWithDict(self):
components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual(dtypes.int32, iterator.output_types["foo"])
self.assertEqual(dtypes.float32, iterator.output_types["bar"])
self.assertEqual((), iterator.output_shapes["foo"])
self.assertEqual((1,), iterator.output_shapes["bar"])
with self.test_session() as sess:
sess.run(init_op)
for i in range(3):
results = sess.run(get_next)
self.assertEqual(components["foo"][i], results["foo"])
self.assertEqual(components["bar"][i], results["bar"])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseTensorSliceDataset(self):
"""Test a dataset based on slices of a `tf.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = (dataset_ops.Dataset.from_sparse_tensor_slices(st)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.test_session() as sess:
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
# Test with sparse tensor in the appropriate order.
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# pylint: disable=g-long-lambda,unnecessary-lambda
def testNestedStructure(self):
components = (np.array([1, 2, 3]), (np.array([4., 5.]), np.array([6., 7.])),
np.array([8, 9, 10]))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.shuffle(10, 10)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.repeat(-1)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.filter(lambda x, y, z: True)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.take(5)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
self.assertEquals(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)), dataset.output_types)
self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)
dataset = dataset.flat_map(
lambda x, y: dataset_ops.Dataset.from_tensors(((x[0], x[1]),
(y[0], y[1])))
)
self.assertEquals(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)), dataset.output_types)
self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)
dataset = dataset.batch(32)
self.assertEquals(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)), dataset.output_types)
self.assertEquals((([None, 3], [None, 3]), ([None, 2], [None, 2])),
nest.pack_sequence_as(dataset.output_shapes, [
s.as_list()
for s in nest.flatten(dataset.output_shapes)
]))
iterator = dataset.make_one_shot_iterator()
(w, x), (y, z) = iterator.get_next()
self.assertEquals(dtypes.int64, w.dtype)
self.assertEquals(dtypes.int64, x.dtype)
self.assertEquals(dtypes.float64, y.dtype)
self.assertEquals(dtypes.float64, z.dtype)
self.assertEquals([None, 3], w.shape.as_list())
self.assertEquals([None, 3], x.shape.as_list())
self.assertEquals([None, 2], y.shape.as_list())
self.assertEquals([None, 2], z.shape.as_list())
iterator = dataset.make_initializable_iterator()
(w, x), (y, z) = iterator.get_next()
self.assertEquals(dtypes.int64, w.dtype)
self.assertEquals(dtypes.int64, x.dtype)
self.assertEquals(dtypes.float64, y.dtype)
self.assertEquals(dtypes.float64, z.dtype)
self.assertEquals([None, 3], w.shape.as_list())
self.assertEquals([None, 3], x.shape.as_list())
self.assertEquals([None, 2], y.shape.as_list())
self.assertEquals([None, 2], z.shape.as_list())
# Define a separate set of components with matching leading
# dimension for the from-slices constructor.
components_for_slices = (np.array([1, 2, 3]), (np.array(
[4., 5., 6.]), np.array([7., 8., 9.])), np.array([10, 11, 12]))
dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([], ([], []), []), dataset.output_shapes)
def testNestedDict(self):
components = {"a": {"aa": 1, "ab": [2.0, 2.0]}, "b": [3, 3, 3]}
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEquals(dtypes.int32, dataset.output_types["a"]["aa"])
self.assertEquals(dtypes.float32, dataset.output_types["a"]["ab"])
self.assertEquals(dtypes.int32, dataset.output_types["b"])
self.assertEquals([], dataset.output_shapes["a"]["aa"])
self.assertEquals([2], dataset.output_shapes["a"]["ab"])
self.assertEquals([3], dataset.output_shapes["b"])
def testNonSequenceNestedStructure(self):
components = np.array([1, 2, 3])
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([3], dataset.output_shapes)
dataset = dataset.filter(
lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([3], dataset.output_shapes)
dataset = dataset.map(lambda x: array_ops.stack([x, x]))
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([2, 3], dataset.output_shapes)
dataset = dataset.flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x))
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([3], dataset.output_shapes)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
self.assertEquals(dtypes.int64, get_next.dtype)
self.assertEquals([3], get_next.shape)
def _testFromGenerator(self, generator, elem_sequence, num_repeats):
iterator = (
dataset_ops.Dataset.from_generator(generator, output_types=dtypes.int64)
.repeat(num_repeats)
.prefetch(5)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
for _ in range(2): # Run twice to test reinitialization.
sess.run(init_op)
for _ in range(num_repeats):
for elem in elem_sequence:
self.assertAllEqual(elem, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def _testFromGeneratorOneShot(self, generator, elem_sequence, num_repeats):
iterator = (
dataset_ops.Dataset.from_generator(generator, output_types=dtypes.int64)
.repeat(num_repeats)
.prefetch(5)
.make_one_shot_iterator())
get_next = iterator.get_next()
with self.test_session() as sess:
for _ in range(num_repeats):
for elem in elem_sequence:
self.assertAllEqual(elem, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testFromGeneratorUsingFunction(self):
def generator():
for i in range(1, 100):
yield [i] * i
elem_sequence = list(generator())
self._testFromGenerator(generator, elem_sequence, 1)
self._testFromGenerator(generator, elem_sequence, 5)
self._testFromGeneratorOneShot(generator, elem_sequence, 1)
self._testFromGeneratorOneShot(generator, elem_sequence, 5)
def testFromGeneratorUsingList(self):
generator = lambda: [[i] * i for i in range(1, 100)]
elem_sequence = list(generator())
self._testFromGenerator(generator, elem_sequence, 1)
self._testFromGenerator(generator, elem_sequence, 5)
def testFromGeneratorUsingNdarray(self):
generator = lambda: np.arange(100, dtype=np.int64)
elem_sequence = list(generator())
self._testFromGenerator(generator, elem_sequence, 1)
self._testFromGenerator(generator, elem_sequence, 5)
def testFromGeneratorUsingGeneratorExpression(self):
# NOTE(mrry): Generator *expressions* are not repeatable (or in
# general reusable), because they eagerly evaluate the `for`
# expression as `iter(range(1, 100))` and discard the means of
# reconstructing `range(1, 100)`. Wrapping the generator
# expression in a `lambda` makes it repeatable.
generator = lambda: ([i] * i for i in range(1, 100))
elem_sequence = list(generator())
self._testFromGenerator(generator, elem_sequence, 1)
self._testFromGenerator(generator, elem_sequence, 5)
def testFromMultipleConcurrentGenerators(self):
num_inner_repeats = 5
num_outer_repeats = 100
def generator():
for i in range(1, 10):
yield ([i] * i, [i, i ** 2, i ** 3])
input_list = list(generator())
# The interleave transformation is essentially a flat map that
# draws from multiple input datasets concurrently (in a cyclic
# fashion). By placing `Datsaet.from_generator()` inside an
# interleave, we test its behavior when multiple iterators are
# active at the same time; by additionally prefetching inside the
# interleave, we create the possibility of parallel (modulo GIL)
# invocations to several iterators created by the same dataset.
def interleave_fn(_):
return (dataset_ops.Dataset.from_generator(
generator, output_types=(dtypes.int64, dtypes.int64),
output_shapes=([None], [3]))
.repeat(num_inner_repeats).prefetch(5))
iterator = (
dataset_ops.Dataset.range(num_outer_repeats)
.interleave(interleave_fn, cycle_length=10,
block_length=len(input_list))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(num_inner_repeats * num_outer_repeats):
for elem in input_list:
val0, val1 = sess.run(get_next)
self.assertAllEqual(elem[0], val0)
self.assertAllEqual(elem[1], val1)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testFromGeneratorsRunningInParallel(self):
num_parallel_iterators = 3
# Define shared state that multiple iterator instances will access to
# demonstrate their concurrent activity.
lock = threading.Lock()
condition = threading.Condition(lock)
next_ticket = [0] # GUARDED_BY(lock)
def generator():
# NOTE(mrry): We yield one element before the barrier, because
# the current implementation of `Dataset.interleave()` must
# fetch one element from each incoming dataset to start the
# prefetching.
yield 0
# Define a barrier that `num_parallel_iterators` iterators must enter
# before any can proceed. Demonstrates that multiple iterators may be
# active at the same time.
condition.acquire()
ticket = next_ticket[0]
next_ticket[0] += 1
if ticket == num_parallel_iterators - 1:
# The last iterator to join the barrier notifies the others.
condition.notify_all()
else:
# Wait until the last iterator enters the barrier.
while next_ticket[0] < num_parallel_iterators:
condition.wait()
condition.release()
yield 1
# As in `testFromMultipleConcurrentGenerators()`, we use a combination of
# `Dataset.interleave()` and `Dataset.prefetch()` to cause multiple
# iterators to be active concurrently.
def interleave_fn(_):
return dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64, output_shapes=[]).prefetch(2)
iterator = (
dataset_ops.Dataset.range(num_parallel_iterators)
.interleave(
interleave_fn, cycle_length=num_parallel_iterators, block_length=1)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for elem in [0, 1]:
for _ in range(num_parallel_iterators):
self.assertAllEqual(elem, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testFromGeneratorImplicitConversion(self):
def generator():
yield [1]
yield [2]
yield [3]
for dtype in [dtypes.int8, dtypes.int32, dtypes.int64]:
iterator = (dataset_ops.Dataset.from_generator(
generator, output_types=dtype, output_shapes=[1])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual(dtype, get_next.dtype)
with self.test_session() as sess:
sess.run(init_op)
for expected in [[1], [2], [3]]:
next_val = sess.run(get_next)
self.assertEqual(dtype.as_numpy_dtype, next_val.dtype)
self.assertAllEqual(expected, next_val)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testFromGeneratorTypeError(self):
def generator():
yield np.array([1, 2, 3], dtype=np.int64)
yield np.array([4, 5, 6], dtype=np.int64)
yield "ERROR"
yield np.array([7, 8, 9], dtype=np.int64)
iterator = (dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64, output_shapes=[3])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
self.assertAllEqual([4, 5, 6], sess.run(get_next))
with self.assertRaisesOpError(r"invalid literal for long\(\)"):
sess.run(get_next)
self.assertAllEqual([7, 8, 9], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testFromGeneratorShapeError(self):
def generator():
yield np.array([1, 2, 3], dtype=np.int64)
yield np.array([4, 5, 6], dtype=np.int64)
yield np.array([7, 8, 9, 10], dtype=np.int64)
yield np.array([11, 12, 13], dtype=np.int64)
iterator = (dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64, output_shapes=[3])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
self.assertAllEqual([4, 5, 6], sess.run(get_next))
with self.assertRaisesOpError(r"element of shape \(3,\) was expected"):
sess.run(get_next)
self.assertAllEqual([11, 12, 13], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSplitPipelineFailsWithPlacementError(self):
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
dataset = dataset_ops.Dataset.from_tensors(0)
# Define a pipeline that attempts to use variables on two
# different devices.
#
# Initialize the variables before creating to iterator, to avoid the
# placement algorithm overriding the DT_RESOURCE colocation constraints.
with ops.device("/cpu:0"):
var_0 = resource_variable_ops.ResourceVariable(initial_value=0)
dataset = dataset.map(lambda x: x + var_0.read_value())
sess.run(var_0.initializer)
with ops.device("/cpu:1"):
var_1 = resource_variable_ops.ResourceVariable(initial_value=0)
dataset = dataset.map(lambda x: x + var_1.read_value())
sess.run(var_1.initializer)
iterator = dataset.make_initializable_iterator()
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Trying to access resource located in device"):
sess.run(iterator.initializer)
def testRestructureDataset(self):
components = (array_ops.placeholder(dtypes.int32),
(array_ops.placeholder(dtypes.int32, shape=[None]),
array_ops.placeholder(dtypes.int32, shape=[20, 30])))
dataset = dataset_ops.Dataset.from_tensors(components)
i32 = dtypes.int32
test_cases = [((i32, i32, i32), None),
(((i32, i32), i32), None),
((i32, i32, i32), (None, None, None)),
((i32, i32, i32), ([17], [17], [20, 30]))]
for new_types, new_shape_lists in test_cases:
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
self.assertEqual(new_types, new.output_types)
if new_shape_lists is not None:
for expected_shape_list, shape in zip(
nest.flatten(new_shape_lists), nest.flatten(new.output_shapes)):
if expected_shape_list is None:
self.assertIs(None, shape.ndims)
else:
self.assertEqual(expected_shape_list, shape.as_list())
fail_cases = [((i32, dtypes.int64, i32), None),
((i32, i32, i32, i32), None),
((i32, i32, i32), ((None, None), None)),
((i32, i32, i32), (None, None, None, None)),
((i32, i32, i32), (None, [None], [21, 30]))]
for new_types, new_shape_lists in fail_cases:
with self.assertRaises(ValueError):
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
class DatasetConstructorSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Checkpoint/restore (serialization) tests for Dataset constructors."""

  def _build_tensor_dataset(self, variable_array):
    # Single-element dataset built from a tuple of tensors.
    components = (variable_array, np.array([1, 2, 3]), np.array(37.0))
    return dataset_ops.Dataset.from_tensors(components)
  def testFromTensorsCore(self):
    # Equal length components
    arr = np.array(1)
    num_outputs = 1
    diff_arr = np.array(2)
    self.run_core_tests(lambda: self._build_tensor_dataset(arr),
                        lambda: self._build_tensor_dataset(diff_arr),
                        num_outputs)
  def _build_tensor_slices_dataset(self, components):
    # One dataset element per slice along the leading dimension.
    return dataset_ops.Dataset.from_tensor_slices(components)
  def testFromTensorSlicesCore(self):
    # Equal length components
    components = (np.tile(np.array([[1], [2], [3], [4]]), 20),
                  np.tile(np.array([[12], [13], [14], [15]]), 22),
                  np.array([37.0, 38.0, 39.0, 40.0]))
    diff_comp = (np.tile(np.array([[1], [2], [3], [4]]), 20),
                 np.tile(np.array([[5], [6], [7], [8]]), 22),
                 np.array([1.0, 2.0, 3.0, 4.0]))
    dict_components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
    self.run_core_tests(lambda: self._build_tensor_slices_dataset(components),
                        lambda: self._build_tensor_slices_dataset(diff_comp), 4)
    self.run_core_tests(
        lambda: self._build_tensor_slices_dataset(dict_components), None, 3)
  def _build_sparse_tensor_slice_dataset(self, slices):
    # Assemble a SparseTensor from a ragged list-of-lists, then slice it.
    indices = np.array(
        [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
        dtype=np.int64)
    values = np.array([val for s in slices for val in s], dtype=np.float64)
    dense_shape = np.array(
        [len(slices), max(len(s) for s in slices) + 1], dtype=np.int64)
    sparse_components = sparse_tensor.SparseTensor(indices, values, dense_shape)
    return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components)
  def testFromSparseTensorSlicesCore(self):
    slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
    diff_slices = [[1., 2.], [2.], [2., 3., 4.], [], [], []]
    self.run_core_tests(
        lambda: self._build_sparse_tensor_slice_dataset(slices),
        lambda: self._build_sparse_tensor_slice_dataset(diff_slices),
        9,
        sparse_tensors=True)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ee08b397/LeetCode-4 | 218 The Skyline Problem.py | 2 | 4282 | # -*- coding: utf-8 -*-
"""
A city's skyline is the outer contour of the silhouette formed by all the buildings in that city when viewed from a
distance. Now suppose you are given the locations and height of all the buildings as shown on a cityscape photo
(Figure A), write a program to output the skyline formed by these buildings collectively (Figure B).
Buildings Skyline Contour
The geometric information of each building is represented by a triplet of integers [Li, Ri, Hi], where Li and Ri are the
x coordinates of the left and right edge of the ith building, respectively, and Hi is its height. It is guaranteed that
0 ≤ Li, Ri ≤ INT_MAX, 0 < Hi ≤ INT_MAX, and Ri - Li > 0. You may assume all buildings are perfect rectangles grounded on
an absolutely flat surface at height 0.
For instance, the dimensions of all buildings in Figure A are recorded as: [ [2 9 10], [3 7 15], [5 12 12], [15 20 10],
[19 24 8] ] .
The output is a list of "key points" (red dots in Figure B) in the format of [ [x1,y1], [x2, y2], [x3, y3], ... ] that
uniquely defines a skyline. A key point is the left endpoint of a horizontal line segment. Note that the last key point,
where the rightmost building ends, is merely used to mark the termination of the skyline, and always has zero height.
Also, the ground in between any two adjacent buildings should be considered part of the skyline contour.
For instance, the skyline in Figure B should be represented as:[ [2 10], [3 15], [7 12], [12 0], [15 10], [20 8], [24,
0] ].
Notes:
The number of buildings in any input list is guaranteed to be in the range [0, 10000].
The input list is already sorted in ascending order by the left x position Li.
The output list must be sorted by the x position.
There must be no consecutive horizontal lines of equal height in the output skyline. For instance, [...[2 3], [4 5],
[7 5], [11 5], [12 7]...] is not acceptable; the three lines of height 5 should be merged into one in the final output
as such: [...[2 3], [4 5], [12 7], ...]
"""
__author__ = 'Daniel'
from collections import defaultdict, namedtuple
import heapq
class Building(object):
    """Heap entry for a building currently intersecting the sweep line."""
    def __init__(self, h):
        self.h = h  # building height
        self.deleted = False  # lazy deletion flag; set once the building ends

    def __lt__(self, other):
        # Bug fix: Python 3 ignores __cmp__, so heapq operations on
        # Building raised TypeError.  Define __lt__ so that taller
        # buildings sort first, making heapq's min-heap behave as a
        # max-heap by height.
        return self.h > other.h

    def __cmp__(self, other):
        # Python 2 fallback: reverse order by height to get max-heap
        assert isinstance(other, Building)
        return other.h - self.h
# An event bundles, for one x-coordinate, the buildings whose left edge
# starts there and the buildings whose right edge ends there.
Event = namedtuple('Event', ['starts', 'ends'])
class Solution:
    def getSkyline(self, buildings):
        """
        Sweep line over building start/end events with a lazy-deletion
        max-heap of standing buildings.

        Treat a building as entering line and leaving line; a key point
        is emitted whenever the maximum standing height changes.
        :type buildings: list[list[int]]
        :rtype: list[list[int]]
        """
        # Map from x-coordinate to event.
        events = defaultdict(lambda: Event(starts=[], ends=[]))
        for left, right, height in buildings:
            building = Building(height)
            events[left].starts.append(building)  # possible multiple building at the same x-coordinate.
            events[right].ends.append(building)
        cur_heap = []  # Heap of buildings currently standing.
        cur_max_h = 0  # current max height of standing buildings.
        ret = []
        # Process events in order by x-coordinate.
        for x, event in sorted(events.items()):  # sort the dictionary by key
            for building in event.starts:
                heapq.heappush(cur_heap, building)
            for building in event.ends:
                # Only flag here; actual removal happens when the entry
                # reaches the heap top below.
                building.deleted = True
            # Pop any finished buildings from the top of the heap.
            # To avoid using multiset - lazy deletion.
            while cur_heap and cur_heap[0].deleted:
                heapq.heappop(cur_heap)
            # Top of heap (if any) is the highest standing building, so
            # its height is the current height of the skyline.
            new_h = cur_heap[0].h if cur_heap else 0
            if new_h != cur_max_h:
                # Height changed at x: record a skyline key point.
                cur_max_h = new_h
                ret.append([x, cur_max_h])
        return ret
if __name__ == "__main__":
assert Solution().getSkyline([[2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8]]) == \
[[2, 10], [3, 15], [7, 12], [12, 0], [15, 10], [20, 8], [24, 0]] | mit |
dltn/tortellini | server-firmware/Processing/DataPusher/oauthlib/oauth2/rfc6749/grant_types/client_credentials.py | 36 | 4720 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import json
import logging
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
log = logging.getLogger(__name__)
class ClientCredentialsGrant(GrantTypeBase):

    """`Client Credentials Grant`_

    The client can request an access token using only its client
    credentials (or other supported means of authentication) when the
    client is requesting access to the protected resources under its
    control, or those of another resource owner that have been previously
    arranged with the authorization server (the method of which is beyond
    the scope of this specification).

    The client credentials grant type MUST only be used by confidential
    clients::

        +---------+                                  +---------------+
        |         |                                  |               |
        |         |>-- A - Client Authentication --->| Authorization |
        | Client  |                                  |     Server    |
        |         |<-- B ---- Access Token ---------<|               |
        |         |                                  |               |
        +---------+                                  +---------------+

    Figure 6: Client Credentials Flow

    The flow illustrated in Figure 6 includes the following steps:

    (A)  The client authenticates with the authorization server and
         requests an access token from the token endpoint.

    (B)  The authorization server authenticates the client, and if valid,
         issues an access token.

    .. _`Client Credentials Grant`: http://tools.ietf.org/html/rfc6749#section-4.4
    """

    def __init__(self, request_validator=None):
        # The default RequestValidator is a stub; deployments must supply
        # their own validator implementation.
        self.request_validator = request_validator or RequestValidator()

    def create_token_response(self, request, token_handler):
        """Return token or error in JSON format.

        If the access token request is valid and authorized, the
        authorization server issues an access token as described in
        `Section 5.1`_.  A refresh token SHOULD NOT be included.  If the request
        failed client authentication or is invalid, the authorization server
        returns an error response as described in `Section 5.2`_.

        .. _`Section 5.1`: http://tools.ietf.org/html/rfc6749#section-5.1
        .. _`Section 5.2`: http://tools.ietf.org/html/rfc6749#section-5.2
        """
        headers = {
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache',
        }
        try:
            log.debug('Validating access token request, %r.', request)
            self.validate_token_request(request)
        except errors.OAuth2Error as e:
            log.debug('Client error in token request. %s.', e)
            # OAuth2Error instances know how to serialize themselves.
            return headers, e.json, e.status_code

        # refresh_token=False: RFC 6749 section 4.4.3 says a refresh token
        # SHOULD NOT be included for this grant.
        token = token_handler.create_token(request, refresh_token=False)
        log.debug('Issuing token to client id %r (%r), %r.',
                  request.client_id, request.client, token)
        return headers, json.dumps(token), 200

    def validate_token_request(self, request):
        """Validate an access token request per RFC 6749 section 4.4.2.

        Raises an ``errors.OAuth2Error`` subclass when the request is
        malformed, uses the wrong grant type, or fails client
        authentication.
        """
        # Bug fix: getattr() without a default raised AttributeError for a
        # request object lacking the attribute entirely; default to None so
        # the proper InvalidRequestError is raised instead.
        if not getattr(request, 'grant_type', None):
            raise errors.InvalidRequestError('Request is missing grant type.',
                                             request=request)

        if request.grant_type != 'client_credentials':
            raise errors.UnsupportedGrantTypeError(request=request)

        # RFC 6749 section 3.2: parameters MUST NOT be included more than once.
        for param in ('grant_type', 'scope'):
            if param in request.duplicate_params:
                raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
                                                 request=request)

        log.debug('Authenticating client, %r.', request)
        if not self.request_validator.authenticate_client(request):
            log.debug('Client authentication failed, %r.', request)
            raise errors.InvalidClientError(request=request)
        else:
            if not hasattr(request.client, 'client_id'):
                raise NotImplementedError('Authenticate client must set the '
                                          'request.client.client_id attribute '
                                          'in authenticate_client.')

        # Ensure client is authorized use of this grant type
        self.validate_grant_type(request)

        log.debug('Authorizing access to user %r.', request.user)
        request.client_id = request.client_id or request.client.client_id

        self.validate_scopes(request)
| mit |
gibxxi/nzbToMedia | libs/mutagen/ogg.py | 10 | 19096 | # -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from mutagen import FileType
from mutagen._util import cdata, resize_bytes, MutagenError
from ._compat import cBytesIO, reraise, chr_, izip, xrange
class error(IOError, MutagenError):
    """Ogg stream parsing errors.

    Inherits from both IOError and MutagenError, so callers catching
    either base class will also catch this.
    """

    pass
class OggPage(object):
    """A single Ogg page (not necessarily a single encoded packet).

    A page is a header of 26 bytes, followed by the length of the
    data, followed by the data.

    The constructor is given a file-like object pointing to the start
    of an Ogg page. After the constructor is finished it is pointing
    to the start of the next page.

    Attributes:

    * version -- stream structure version (currently always 0)
    * position -- absolute stream position (default -1)
    * serial -- logical stream serial number (default 0)
    * sequence -- page sequence number within logical stream (default 0)
    * offset -- offset this page was read from (default None)
    * complete -- if the last packet on this page is complete (default True)
    * packets -- list of raw packet data (default [])

    Note that if 'complete' is false, the next page's 'continued'
    property must be true (so set both when constructing pages).

    If a file-like object is supplied to the constructor, the above
    attributes will be filled in based on it.
    """

    # Defaults for a page constructed without a fileobj.
    version = 0
    __type_flags = 0
    position = 0
    serial = 0
    sequence = 0
    offset = None
    complete = True

    def __init__(self, fileobj=None):
        """Parse one page from *fileobj*, or create an empty page if None."""
        self.packets = []

        if fileobj is None:
            return

        self.offset = fileobj.tell()

        # 27 = 26-byte fixed header plus the segment-count byte.
        header = fileobj.read(27)
        if len(header) == 0:
            raise EOFError

        try:
            (oggs, self.version, self.__type_flags,
             self.position, self.serial, self.sequence,
             crc, segments) = struct.unpack("<4sBBqIIiB", header)
        except struct.error:
            raise error("unable to read full header; got %r" % header)

        if oggs != b"OggS":
            raise error("read %r, expected %r, at 0x%x" % (
                oggs, b"OggS", fileobj.tell() - 27))

        if self.version != 0:
            raise error("version %r unsupported" % self.version)

        total = 0
        lacings = []
        lacing_bytes = fileobj.read(segments)
        if len(lacing_bytes) != segments:
            raise error("unable to read %r lacing bytes" % segments)
        # A lacing value < 255 terminates a packet; 255 means the packet
        # continues in the next segment.
        for c in bytearray(lacing_bytes):
            total += c
            if c < 255:
                lacings.append(total)
                total = 0
        if total:
            # Segment table ended while a packet was still open, so the
            # final packet spills over onto the next page.
            lacings.append(total)
            self.complete = False

        self.packets = [fileobj.read(l) for l in lacings]
        if [len(p) for p in self.packets] != lacings:
            raise error("unable to read full data")

    def __eq__(self, other):
        """Two Ogg pages are the same if they write the same data."""
        try:
            return (self.write() == other.write())
        except AttributeError:
            return False

    # Identity hash; pages are mutable, so a value-based hash would be unsafe.
    __hash__ = object.__hash__

    def __repr__(self):
        attrs = ['version', 'position', 'serial', 'sequence', 'offset',
                 'complete', 'continued', 'first', 'last']
        values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
        return "<%s %s, %d bytes in %d packets>" % (
            type(self).__name__, " ".join(values), sum(map(len, self.packets)),
            len(self.packets))

    def write(self):
        """Return a string encoding of the page header and data.

        A ValueError is raised if the data is too big to fit in a
        single page.
        """
        # CRC field is written as 0 first and patched in below.
        data = [
            struct.pack("<4sBBqIIi", b"OggS", self.version, self.__type_flags,
                        self.position, self.serial, self.sequence, 0)
        ]

        lacing_data = []
        for datum in self.packets:
            quot, rem = divmod(len(datum), 255)
            lacing_data.append(b"\xff" * quot + chr_(rem))
        lacing_data = b"".join(lacing_data)
        if not self.complete and lacing_data.endswith(b"\x00"):
            # An unterminated final packet must not emit its closing zero
            # lacing value; the packet continues on the next page.
            lacing_data = lacing_data[:-1]
        data.append(chr_(len(lacing_data)))
        data.append(lacing_data)
        data.extend(self.packets)
        data = b"".join(data)

        # Python's CRC is swapped relative to Ogg's needs.
        # crc32 returns uint prior to py2.6 on some platforms, so force uint
        crc = (~zlib.crc32(data.translate(cdata.bitswap), -1)) & 0xffffffff
        # Although we're using to_uint_be, this actually makes the CRC
        # a proper le integer, since Python's CRC is byteswapped.
        crc = cdata.to_uint_be(crc).translate(cdata.bitswap)
        # CRC occupies bytes 22..25 of the header.
        data = data[:22] + crc + data[26:]
        return data

    @property
    def size(self):
        """Total frame size."""
        size = 27  # Initial header size
        for datum in self.packets:
            quot, rem = divmod(len(datum), 255)
            size += quot + 1
        # NOTE(review): `rem` is the remainder from the *last* packet; if
        # self.packets is empty and complete is False this raises NameError
        # -- presumably such pages never occur in practice; confirm.
        if not self.complete and rem == 0:
            # Packet contains a multiple of 255 bytes and is not
            # terminated, so we don't have a \x00 at the end.
            size -= 1
        size += sum(map(len, self.packets))
        return size

    def __set_flag(self, bit, val):
        # Set or clear one bit of the page's header type-flag byte.
        mask = 1 << bit
        if val:
            self.__type_flags |= mask
        else:
            self.__type_flags &= ~mask

    continued = property(
        lambda self: cdata.test_bit(self.__type_flags, 0),
        lambda self, v: self.__set_flag(0, v),
        doc="The first packet is continued from the previous page.")

    first = property(
        lambda self: cdata.test_bit(self.__type_flags, 1),
        lambda self, v: self.__set_flag(1, v),
        doc="This is the first page of a logical bitstream.")

    last = property(
        lambda self: cdata.test_bit(self.__type_flags, 2),
        lambda self, v: self.__set_flag(2, v),
        doc="This is the last page of a logical bitstream.")

    @staticmethod
    def renumber(fileobj, serial, start):
        """Renumber pages belonging to a specified logical stream.

        fileobj must be opened with mode r+b or w+b.

        Starting at page number 'start', renumber all pages belonging
        to logical stream 'serial'. Other pages will be ignored.

        fileobj must point to the start of a valid Ogg page; any
        occurring after it and part of the specified logical stream
        will be numbered. No adjustment will be made to the data in
        the pages nor the granule position; only the page number, and
        so also the CRC.

        If an error occurs (e.g. non-Ogg data is found), fileobj will
        be left pointing to the place in the stream the error occurred,
        but the invalid data will be left intact (since this function
        does not change the total file size).
        """

        number = start
        while True:
            try:
                page = OggPage(fileobj)
            except EOFError:
                break
            else:
                if page.serial != serial:
                    # Wrong stream, skip this page.
                    continue
                # Changing the number can't change the page size,
                # so seeking back based on the current size is safe.
                fileobj.seek(-page.size, 1)
            page.sequence = number
            fileobj.write(page.write())
            # Position at the start of the next page.
            fileobj.seek(page.offset + page.size, 0)
            number += 1

    @staticmethod
    def to_packets(pages, strict=False):
        """Construct a list of packet data from a list of Ogg pages.

        If strict is true, the first page must start a new packet,
        and the last page must end the last packet.
        """

        serial = pages[0].serial
        sequence = pages[0].sequence
        packets = []

        if strict:
            if pages[0].continued:
                raise ValueError("first packet is continued")
            if not pages[-1].complete:
                raise ValueError("last packet does not complete")
        elif pages and pages[0].continued:
            # Start with an empty stub so the continued data has a packet
            # to attach itself to.
            packets.append([b""])

        for page in pages:
            if serial != page.serial:
                raise ValueError("invalid serial number in %r" % page)
            elif sequence != page.sequence:
                raise ValueError("bad sequence number in %r" % page)
            else:
                sequence += 1

            if page.continued:
                packets[-1].append(page.packets[0])
            else:
                packets.append([page.packets[0]])
            packets.extend([p] for p in page.packets[1:])

        return [b"".join(p) for p in packets]

    @classmethod
    def _from_packets_try_preserve(cls, packets, old_pages):
        """Like from_packets but in case the size and number of the packets
        is the same as in the given pages the layout of the pages will
        be copied (the page size and number will match).

        If the packets don't match this behaves like::

            OggPage.from_packets(packets, sequence=old_pages[0].sequence)
        """

        old_packets = cls.to_packets(old_pages)

        if [len(p) for p in packets] != [len(p) for p in old_packets]:
            # doesn't match, fall back
            return cls.from_packets(packets, old_pages[0].sequence)

        # Re-slice the new packet data along the old pages' boundaries.
        new_data = b"".join(packets)
        new_pages = []
        for old in old_pages:
            new = OggPage()
            new.sequence = old.sequence
            new.complete = old.complete
            new.continued = old.continued
            new.position = old.position
            for p in old.packets:
                data, new_data = new_data[:len(p)], new_data[len(p):]
                new.packets.append(data)
            new_pages.append(new)
        assert not new_data

        return new_pages

    @staticmethod
    def from_packets(packets, sequence=0, default_size=4096,
                     wiggle_room=2048):
        """Construct a list of Ogg pages from a list of packet data.

        The algorithm will generate pages of approximately
        default_size in size (rounded down to the nearest multiple of
        255). However, it will also allow pages to increase to
        approximately default_size + wiggle_room if allowing the
        wiggle room would finish a packet (only one packet will be
        finished in this way per page; if the next packet would fit
        into the wiggle room, it still starts on a new page).

        This method reduces packet fragmentation when packet sizes are
        slightly larger than the default page size, while still
        ensuring most pages are of the average size.

        Pages are numbered started at 'sequence'; other information is
        uninitialized.
        """

        # Page data is carved in chunks that are whole multiples of the
        # 255-byte lacing unit.
        chunk_size = (default_size // 255) * 255

        pages = []

        page = OggPage()
        page.sequence = sequence

        for packet in packets:
            page.packets.append(b"")
            while packet:
                data, packet = packet[:chunk_size], packet[chunk_size:]
                if page.size < default_size and len(page.packets) < 255:
                    page.packets[-1] += data
                else:
                    # If we've put any packet data into this page yet,
                    # we need to mark it incomplete. However, we can
                    # also have just started this packet on an already
                    # full page, in which case, just start the new
                    # page with this packet.
                    if page.packets[-1]:
                        page.complete = False
                        if len(page.packets) == 1:
                            page.position = -1
                    else:
                        page.packets.pop(-1)
                    pages.append(page)
                    page = OggPage()
                    page.continued = not pages[-1].complete
                    page.sequence = pages[-1].sequence + 1
                    page.packets.append(data)

                if len(packet) < wiggle_room:
                    # The remainder fits in the wiggle room; finish the
                    # packet on this page instead of fragmenting it.
                    page.packets[-1] += packet
                    packet = b""

        if page.packets:
            pages.append(page)

        return pages

    @classmethod
    def replace(cls, fileobj, old_pages, new_pages):
        """Replace old_pages with new_pages within fileobj.

        old_pages must have come from reading fileobj originally.
        new_pages are assumed to have the 'same' data as old_pages,
        and so the serial and sequence numbers will be copied, as will
        the flags for the first and last pages.

        fileobj will be resized and pages renumbered as necessary. As
        such, it must be opened r+b or w+b.
        """

        if not len(old_pages) or not len(new_pages):
            raise ValueError("empty pages list not allowed")

        # Number the new pages starting from the first old page.
        first = old_pages[0].sequence
        for page, seq in izip(new_pages,
                              xrange(first, first + len(new_pages))):
            page.sequence = seq
            page.serial = old_pages[0].serial
        new_pages[0].first = old_pages[0].first
        new_pages[0].last = old_pages[0].last
        new_pages[0].continued = old_pages[0].continued

        new_pages[-1].first = old_pages[-1].first
        new_pages[-1].last = old_pages[-1].last
        new_pages[-1].complete = old_pages[-1].complete
        if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
            new_pages[-1].position = -1

        new_data = [cls.write(p) for p in new_pages]

        # Add dummy data or merge the remaining data together so multiple
        # new pages replace an old one
        pages_diff = len(old_pages) - len(new_data)
        if pages_diff > 0:
            new_data.extend([b""] * pages_diff)
        elif pages_diff < 0:
            new_data[pages_diff - 1:] = [b"".join(new_data[pages_diff - 1:])]

        # Replace pages one by one. If the sizes match no resize happens.
        offset_adjust = 0
        new_data_end = None
        assert len(old_pages) == len(new_data)
        for old_page, data in izip(old_pages, new_data):
            offset = old_page.offset + offset_adjust
            data_size = len(data)
            resize_bytes(fileobj, old_page.size, data_size, offset)
            fileobj.seek(offset, 0)
            fileobj.write(data)
            new_data_end = offset + data_size
            offset_adjust += (data_size - old_page.size)

        # Finally, if there's any discrepancy in length, we need to
        # renumber the pages for the logical stream.
        if len(old_pages) != len(new_pages):
            fileobj.seek(new_data_end, 0)
            serial = new_pages[-1].serial
            sequence = new_pages[-1].sequence + 1
            cls.renumber(fileobj, serial, sequence)

    @staticmethod
    def find_last(fileobj, serial):
        """Find the last page of the stream 'serial'.

        If the file is not multiplexed this function is fast. If it is,
        it must read the whole the stream.

        This finds the last page in the actual file object, or the last
        page in the stream (with eos set), whichever comes first.
        """

        # For non-muxed streams, look at the last page.
        try:
            fileobj.seek(-256 * 256, 2)
        except IOError:
            # The file is less than 64k in length.
            fileobj.seek(0)

        data = fileobj.read()
        try:
            index = data.rindex(b"OggS")
        except ValueError:
            raise error("unable to find final Ogg header")
        bytesobj = cBytesIO(data[index:])

        best_page = None
        try:
            page = OggPage(bytesobj)
        except error:
            pass
        else:
            if page.serial == serial:
                if page.last:
                    return page
                else:
                    best_page = page
            else:
                best_page = None

        # The stream is muxed, so use the slow way.
        fileobj.seek(0)
        try:
            page = OggPage(fileobj)
            while not page.last:
                page = OggPage(fileobj)
                while page.serial != serial:
                    page = OggPage(fileobj)
                best_page = page
            return page
        except error:
            return best_page
        except EOFError:
            return best_page
class OggFileType(FileType):
    """A generic Ogg file."""

    # Subclasses fill these in with their stream-info class, tags class
    # and error type.
    _Info = None
    _Tags = None
    _Error = None
    _mimes = ["application/ogg", "application/x-ogg"]

    def load(self, filename):
        """Load file information from a filename."""

        self.filename = filename
        with open(filename, "rb") as fileobj:
            try:
                self.info = self._Info(fileobj)
                self.tags = self._Tags(fileobj, self.info)
                self.info._post_tags(fileobj)
            except error as e:
                # Re-raise as the subclass-specific error, keeping traceback.
                reraise(self._Error, e, sys.exc_info()[2])
            except EOFError:
                raise self._Error("no appropriate stream found")

    def delete(self, filename=None):
        """Remove tags from a file.

        If no filename is given, the one most recently loaded is used.
        """

        if filename is None:
            filename = self.filename

        self.tags.clear()

        # TODO: we should delegate the deletion to the subclass and not through
        # _inject.
        with open(filename, "rb+") as fileobj:
            try:
                self.tags._inject(fileobj, lambda x: 0)
            except error as e:
                reraise(self._Error, e, sys.exc_info()[2])
            except EOFError:
                raise self._Error("no appropriate stream found")

    def add_tags(self):
        raise self._Error

    def save(self, filename=None, padding=None):
        """Save a tag to a file.

        If no filename is given, the one most recently loaded is used.
        """

        if filename is None:
            filename = self.filename

        # Consistency fix: use a context manager like load()/delete()
        # instead of the previous manual open/try/finally pair; the file is
        # still closed on every path, including when _inject raises.
        with open(filename, "rb+") as fileobj:
            try:
                self.tags._inject(fileobj, padding)
            except error as e:
                reraise(self._Error, e, sys.exc_info()[2])
            except EOFError:
                raise self._Error("no appropriate stream found")
| gpl-3.0 |
40223125/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/xml/dom/__init__.py | 873 | 4019 | """W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
    """Class giving the NodeType constants."""
    __slots__ = ()

    # DOM implementations may use this as a base class for their own
    # Node implementations. If they don't, the constants defined here
    # should still be used as the canonical definitions as they match
    # the values given in the W3C recommendation. Client code can
    # safely refer to these values in all tests of Node.nodeType
    # values.

    ELEMENT_NODE = 1
    ATTRIBUTE_NODE = 2
    TEXT_NODE = 3
    CDATA_SECTION_NODE = 4
    ENTITY_REFERENCE_NODE = 5
    ENTITY_NODE = 6
    PROCESSING_INSTRUCTION_NODE = 7
    COMMENT_NODE = 8
    DOCUMENT_NODE = 9
    DOCUMENT_TYPE_NODE = 10
    DOCUMENT_FRAGMENT_NODE = 11
    NOTATION_NODE = 12
# ExceptionCode constants: the numeric codes carried by the DOMException
# subclasses below (via their `code` class attribute), matching the values
# in the W3C DOM recommendation.
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
    """Abstract base class for DOM exceptions.

    Only the concrete subclasses (each of which carries a specific
    ``code`` class attribute) are meant to be instantiated; creating a
    DOMException directly raises RuntimeError.
    """

    def __init__(self, *args, **kw):
        # Enforce abstractness: refuse direct instantiation of the base.
        if type(self) is DOMException:
            raise RuntimeError(
                "DOMException should not be instantiated directly")
        super().__init__(*args, **kw)

    def _get_code(self):
        # Accessor for the subclass-provided ExceptionCode value.
        return self.code
# Concrete DOMException specializations, one per ExceptionCode value above.

class IndexSizeErr(DOMException):
    code = INDEX_SIZE_ERR


class DomstringSizeErr(DOMException):
    code = DOMSTRING_SIZE_ERR


class HierarchyRequestErr(DOMException):
    code = HIERARCHY_REQUEST_ERR


class WrongDocumentErr(DOMException):
    code = WRONG_DOCUMENT_ERR


class InvalidCharacterErr(DOMException):
    code = INVALID_CHARACTER_ERR


class NoDataAllowedErr(DOMException):
    code = NO_DATA_ALLOWED_ERR


class NoModificationAllowedErr(DOMException):
    code = NO_MODIFICATION_ALLOWED_ERR


class NotFoundErr(DOMException):
    code = NOT_FOUND_ERR


class NotSupportedErr(DOMException):
    code = NOT_SUPPORTED_ERR


class InuseAttributeErr(DOMException):
    code = INUSE_ATTRIBUTE_ERR


class InvalidStateErr(DOMException):
    code = INVALID_STATE_ERR


class SyntaxErr(DOMException):
    code = SYNTAX_ERR


class InvalidModificationErr(DOMException):
    code = INVALID_MODIFICATION_ERR


class NamespaceErr(DOMException):
    code = NAMESPACE_ERR


class InvalidAccessErr(DOMException):
    code = INVALID_ACCESS_ERR


class ValidationErr(DOMException):
    code = VALIDATION_ERR
class UserDataHandler:
    """Class giving the operation constants for UserDataHandler.handle()."""

    # Based on DOM Level 3 (WD 9 April 2002)

    NODE_CLONED = 1
    NODE_IMPORTED = 2
    NODE_DELETED = 3
    NODE_RENAMED = 4
# Well-known namespace URIs, plus the sentinels this package uses for
# "no namespace" / "no prefix".
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None
from .domreg import getDOMImplementation, registerDOMImplementation
| gpl-3.0 |
cchang738/parquet-mr | dev/merge_parquet_pr.py | 7 | 13553 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# This utility assumes you already have local a Parquet git folder and that you
# have added remotes corresponding to both (i) the github apache Parquet
# mirror and (ii) the apache git repo.
import json
import os
import re
import subprocess
import sys
import tempfile
import urllib2
import getpass
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
# Location of your Parquet git development area
PARQUET_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
PROJECT_NAME = PARQUET_HOME.rsplit("/", 1)[1]
print "PARQUET_HOME = " + PARQUET_HOME
print "PROJECT_NAME = " + PROJECT_NAME

# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD")

GITHUB_BASE = "https://github.com/apache/" + PROJECT_NAME + "/pull"
GITHUB_API_BASE = "https://api.github.com/repos/apache/" + PROJECT_NAME
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"

# All git commands below are run from inside the local checkout.
os.chdir(PARQUET_HOME)
def get_json(url):
    # Fetch *url* and decode the response as JSON; exits the whole script
    # on any HTTP error (no retry, no cleanup needed this early).
    try:
        return json.load(urllib2.urlopen(url))
    except urllib2.HTTPError as e:
        print "Unable to fetch URL, exiting: %s" % url
        sys.exit(-1)
def fail(msg):
    # Print *msg*, restore the original git state, and exit non-zero.
    print msg
    clean_up()
    sys.exit(-1)
def run_cmd(cmd):
    # Run *cmd* (a list, or a string that is split on single spaces) and
    # return its captured stdout.
    # NOTE(review): splitting a string on " " breaks for arguments that
    # contain spaces -- pass a list for anything non-trivial.
    try:
        if isinstance(cmd, list):
            return subprocess.check_output(cmd)
        else:
            return subprocess.check_output(cmd.split(" "))
    except subprocess.CalledProcessError as e:
        # this avoids hiding the stdout / stderr of failed processes
        print 'Command failed: %s' % cmd
        print 'With output:'
        print '--------------'
        print e.output
        print '--------------'
        raise e
def continue_maybe(prompt):
    # Ask a yes/no question; any answer other than "y" aborts the script
    # (via fail(), which also cleans up temporary branches).
    result = raw_input("\n%s (y/n): " % prompt)
    if result.lower() != "y":
        fail("Okay, exiting")
# Commit the script started on, so clean_up() can restore it later.
original_head = run_cmd("git rev-parse HEAD")[:8]


def clean_up():
    # Return to the original HEAD and delete every temporary branch this
    # tool created (all prefixed with BRANCH_PREFIX).
    print "Restoring head pointer to %s" % original_head
    run_cmd("git checkout %s" % original_head)

    branches = run_cmd("git branch").replace(" ", "").split("\n")

    for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
        print "Deleting local branch %s" % branch
        run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
    # Squash-merge pull request *pr_num* into branch *target_ref*, push the
    # result, and return the short merge hash.
    # NOTE(review): relies on the module-level globals `title`, `body` and
    # `pr_repo_desc` being assigned by the main script before this runs.
    pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)

    had_conflicts = False
    try:
        run_cmd(['git', 'merge', pr_branch_name, '--squash'])
    except Exception as e:
        # Give the user a chance to resolve conflicts by hand.
        msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
        continue_maybe(msg)
        had_conflicts = True

    # Credit every distinct commit author, most frequent first.
    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                             '--pretty=format:%an <%ae>']).split("\n")
    distinct_authors = sorted(set(commit_authors),
                              key=lambda x: commit_authors.count(x), reverse=True)
    primary_author = distinct_authors[0]
    commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                      '--pretty=format:%h [%an] %s']).split("\n\n")

    # Build the squash-commit message out of repeated -m flags.
    merge_message_flags = []

    merge_message_flags += ["-m", title]
    if body != None:
        merge_message_flags += ["-m", body]

    authors = "\n".join(["Author: %s" % a for a in distinct_authors])

    merge_message_flags += ["-m", authors]

    if had_conflicts:
        committer_name = run_cmd("git config --get user.name").strip()
        committer_email = run_cmd("git config --get user.email").strip()
        message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
            committer_name, committer_email)
        merge_message_flags += ["-m", message]

    # The string "Closes #%s" string is required for GitHub to correctly close the PR
    merge_message_flags += [
        "-m",
        "Closes #%s from %s and squashes the following commits:" % (pr_num, pr_repo_desc)]

    for c in commits:
        merge_message_flags += ["-m", c]

    run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)

    continue_maybe("Merge complete (local ref %s). Push to %s?" % (
        target_branch_name, PUSH_REMOTE_NAME))

    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)

    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()
    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
    # Interactively cherry-pick *merge_hash* onto a branch (default
    # *default_branch*), push it, and return the branch name picked into.
    pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
    if pick_ref == "":
        pick_ref = default_branch

    pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())

    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
    run_cmd("git checkout %s" % pick_branch_name)
    # -s adds Signed-off-by, -x records the original commit hash.
    run_cmd("git cherry-pick -sx %s" % merge_hash)

    continue_maybe("Pick complete (local ref %s). Push to %s?" % (
        pick_branch_name, PUSH_REMOTE_NAME))

    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)

    pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
    clean_up()

    print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
    print("Pick hash: %s" % pick_hash)
    return pick_ref
def fix_version_from_branch(branch, versions):
    """Pick the fix version that corresponds to *branch*.

    Note: assumes *versions* is a sorted (newest->oldest) list of
    un-released versions, so master maps to the newest one.
    """
    if branch == "master":
        return versions[0]
    branch_ver = branch.replace("branch-", "")
    candidates = filter(lambda v: v.name.startswith(branch_ver), versions)
    return candidates[-1]
def exctract_jira_id(title):
    """Return the PARQUET-NNN jira id that prefixes *title*.

    Aborts via fail() when the title is not prefixed with a jira id.
    (The function name keeps its historical typo so existing callers
    keep working.)
    """
    m = re.search(r'^(PARQUET-[0-9]+)\b.*$', title)
    # Bug fix: the previous check was `m and m.groups > 0`, which compared
    # the bound method object (not its result) to an int -- vacuously truthy
    # on Python 2 and a TypeError on Python 3. A successful match is enough.
    if m:
        return m.group(1)
    fail("PR title should be prefixed by a jira id \"PARQUET-XXX: ...\", found: \"%s\"" % title)
def check_jira(title):
    # Abort early if the jira id in the PR title does not exist in ASF JIRA.
    jira_id = exctract_jira_id(title)
    asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
                                basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
    try:
        issue = asf_jira.issue(jira_id)
    except Exception as e:
        fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
def resolve_jira(title, merge_branches, comment):
    # Resolve the JIRA issue named in *title*, setting fix versions derived
    # from the branches (*merge_branches*) the PR was merged into.
    asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
                                basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))

    default_jira_id = exctract_jira_id(title)

    jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
    if jira_id == "":
        jira_id = default_jira_id

    try:
        issue = asf_jira.issue(jira_id)
    except Exception as e:
        fail("ASF JIRA could not find %s\n%s" % (jira_id, e))

    cur_status = issue.fields.status.name
    cur_summary = issue.fields.summary
    cur_assignee = issue.fields.assignee
    if cur_assignee is None:
        cur_assignee = "NOT ASSIGNED!!!"
    else:
        cur_assignee = cur_assignee.displayName

    if cur_status == "Resolved" or cur_status == "Closed":
        fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
    print ("=== JIRA %s ===" % jira_id)
    print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
        cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))

    # Only un-released versions, newest first, are fix-version candidates.
    versions = asf_jira.project_versions("PARQUET")
    versions = sorted(versions, key=lambda x: x.name, reverse=True)
    versions = filter(lambda x: x.raw['released'] is False, versions)

    default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
    for v in default_fix_versions:
        # Handles the case where we have forked a release branch but not yet made the release.
        # In this case, if the PR is committed to the master branch and the release branch, we
        # only consider the release branch to be the fix version. E.g. it is not valid to have
        # both 1.1.0 and 1.0.0 as fix versions.
        (major, minor, patch) = v.split(".")
        if patch == "0":
            previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
            if previous in default_fix_versions:
                default_fix_versions = filter(lambda x: x != v, default_fix_versions)
    default_fix_versions = ",".join(default_fix_versions)

    fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
    if fix_versions == "":
        fix_versions = default_fix_versions
    fix_versions = fix_versions.replace(" ", "").split(",")

    def get_version_json(version_str):
        # Raw JIRA JSON for the named version (required by transition_issue).
        return filter(lambda v: v.name == version_str, versions)[0].raw

    jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)

    resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
    asf_jira.transition_issue(
        jira_id, resolve["id"], fixVersions=jira_fix_versions, comment=comment)

    print "Succesfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
# --- Interactive entry point: gather credentials and the PR to merge -------
# NOTE(review): Python 2 script (print statements, raw_input).
if not JIRA_USERNAME:
    JIRA_USERNAME = raw_input("Env JIRA_USERNAME not set, please enter your JIRA username:")
if not JIRA_PASSWORD:
    JIRA_PASSWORD = getpass.getpass("Env JIRA_PASSWORD not set, please enter your JIRA password:")

branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
# Julien: I commented this out as we don't have any "branch-*" branch yet
#latest_branch = sorted(branch_names, reverse=True)[0]
# NOTE(review): latest_branch is referenced below (backport and cherry-pick
# paths) but its assignment above is commented out, so those paths will raise
# NameError if reached — confirm before relying on them.

pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))

url = pr["url"]
title = pr["title"]
check_jira(title)
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)

# An already-merged PR is assumed to be a backport request.
if pr["merged"] is True:
    print "Pull request %s has already been merged, assuming you want to backport" % pr_num
    merge_commit_desc = run_cmd([
        'git', 'log', '--merges', '--first-parent',
        '--grep=pull request #%s' % pr_num, '--oneline']).split("\n")[0]
    if merge_commit_desc == "":
        fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
    # --oneline output is "<abbrev-hash> <subject>"; take the 7-char hash.
    merge_hash = merge_commit_desc[:7]
    message = merge_commit_desc[8:]
    print "Found: %s" % message
    maybe_cherry_pick(pr_num, merge_hash, latest_branch)
    sys.exit(0)

if not bool(pr["mergeable"]):
    msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
        "Continue? (experts only!)"
    continue_maybe(msg)

print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
    title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)

merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref)

# Optionally cherry-pick the merge commit into additional branches.
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
    merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]

if JIRA_IMPORTED:
    continue_maybe("Would you like to update the associated JIRA?")
    jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
    resolve_jira(title, merged_refs, jira_comment)
else:
    print "Could not find jira-python library. Run 'sudo pip install jira-python' to install."
    print "Exiting without trying to close the associated JIRA."
| apache-2.0 |
JianyuWang/nova | nova/tests/functional/api_sample_tests/test_remote_consoles.py | 20 | 5351 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import test_servers
CONF = cfg.CONF
# Import the legacy v2 extension-list option so the tests below can append
# the consoles extension to it in _get_flags().
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.legacy_v2.extensions')
class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
    """API samples for the legacy per-protocol remote console actions."""

    extension_name = "os-remote-consoles"

    # URL patterns expected in the sample responses.
    HTTP_URL_RE = "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
    WS_URL_RE = "((ws?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"

    def _get_flags(self):
        """Enable the legacy consoles extension on top of the base flags."""
        flags = super(ConsolesSampleJsonTests, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.consoles.Consoles')
        return flags

    def setUp(self):
        super(ConsolesSampleJsonTests, self).setUp()
        # Every console protocol under test must be explicitly enabled.
        for group in ('vnc', 'spice', 'rdp', 'serial_console'):
            self.flags(enabled=True, group=group)

    def _check_console_action(self, action, req_template, resp_template,
                              url_regex):
        """Run a console action on a fresh server and verify the API sample."""
        server_uuid = self._post_server()
        resp = self._do_post('servers/%s/action' % server_uuid,
                             req_template, {'action': action})
        subs = self._get_regexes()
        subs["url"] = url_regex
        self._verify_response(resp_template, subs, resp, 200)

    def test_get_vnc_console(self):
        self._check_console_action('os-getVNCConsole',
                                   'get-vnc-console-post-req',
                                   'get-vnc-console-post-resp',
                                   self.HTTP_URL_RE)

    def test_get_spice_console(self):
        self._check_console_action('os-getSPICEConsole',
                                   'get-spice-console-post-req',
                                   'get-spice-console-post-resp',
                                   self.HTTP_URL_RE)

    def test_get_rdp_console(self):
        self._check_console_action('os-getRDPConsole',
                                   'get-rdp-console-post-req',
                                   'get-rdp-console-post-resp',
                                   self.HTTP_URL_RE)

    def test_get_serial_console(self):
        self._check_console_action('os-getSerialConsole',
                                   'get-serial-console-post-req',
                                   'get-serial-console-post-resp',
                                   self.WS_URL_RE)
class ConsolesV26SampleJsonTests(test_servers.ServersSampleBase):
    """API samples for the unified remote-consoles API (microversion 2.6)."""

    request_api_version = '2.6'
    extension_name = "os-remote-consoles"
    # NOTE(gmann): microversion tests do not need to run for v2 API
    # so defining scenarios only for v2.6 which will run the original tests
    # by appending '(v2_6)' in test_id.
    scenarios = [('v2_6', {})]

    def setUp(self):
        super(ConsolesV26SampleJsonTests, self).setUp()
        # URL pattern expected in the sample response.
        self.http_regex = "(https?://)([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*"

    def test_create_console(self):
        server_uuid = self._post_server()
        payload = {'protocol': 'vnc', 'type': 'novnc'}
        resp = self._do_post('servers/%s/remote-consoles' % server_uuid,
                             'create-vnc-console-req', payload,
                             api_version='2.6')
        subs = self._get_regexes()
        subs["url"] = self.http_regex
        self._verify_response('create-vnc-console-resp', subs, resp, 200)
class ConsolesV28SampleJsonTests(test_servers.ServersSampleBase):
    """API samples for MKS remote consoles (microversion 2.8)."""

    extension_name = "os-remote-consoles"
    request_api_version = '2.8'
    scenarios = [('v2_8', {})]
    _api_version = 'v2'

    def setUp(self):
        super(ConsolesV28SampleJsonTests, self).setUp()
        # URL pattern expected in the sample response.
        self.http_regex = "(https?://)([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*"
        # MKS consoles are only available when the mks group is enabled.
        self.flags(enabled=True, group='mks')

    def test_create_mks_console(self):
        server_uuid = self._post_server()
        payload = {'protocol': 'mks', 'type': 'webmks'}
        resp = self._do_post('servers/%s/remote-consoles' % server_uuid,
                             'create-mks-console-req', payload,
                             api_version='2.8')
        subs = self._get_regexes()
        subs["url"] = self.http_regex
        self._verify_response('create-mks-console-resp', subs, resp, 200)
| apache-2.0 |
rtrigoso/ghost-somepolymath | node_modules/pygmentize-bundled/vendor/pygments/external/markdown-processor.py | 118 | 1946 | # -*- coding: utf-8 -*-
"""
The Pygments Markdown Preprocessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Markdown_ preprocessor that renders source code
to HTML via Pygments. To use it, invoke Markdown like so::
from markdown import Markdown
md = Markdown()
md.textPreprocessors.insert(0, CodeBlockPreprocessor())
html = md.convert(someText)
markdown is then a callable that can be passed to the context of
a template and used in that template, for example.
This uses CSS classes by default, so use
``pygmentize -S <some style> -f html > pygments.css``
to create a stylesheet to be added to the website.
You can then highlight source code in your markdown markup::
[sourcecode:lexer]
some code
[/sourcecode]
.. _Markdown: http://www.freewisdom.org/projects/python-markdown/
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~

# Set to True if you want inline CSS styles instead of classes
# (inline styles remove the need to ship a separate pygments.css stylesheet).
INLINESTYLES = False
import re
from markdown import TextPreprocessor
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
class CodeBlockPreprocessor(TextPreprocessor):
    """Replace ``[sourcecode:lexer]...[/sourcecode]`` blocks with
    Pygments-highlighted HTML."""

    # Matches a whole tagged block; re.S lets the code span newlines.
    pattern = re.compile(
        r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)

    formatter = HtmlFormatter(noclasses=INLINESTYLES)

    def run(self, lines):
        def render(match):
            lexer_name, source = match.group(1), match.group(2)
            try:
                lexer = get_lexer_by_name(lexer_name)
            except ValueError:
                # Unknown lexer name: fall back to plain text.
                lexer = TextLexer()
            code = highlight(source, lexer, self.formatter)
            # Keep blank lines visible and turn newlines into HTML breaks.
            code = code.replace('\n\n', '\n \n').replace('\n', '<br />')
            return '\n\n<div class="code">%s</div>\n\n' % code

        return self.pattern.sub(render, lines)
| mit |
ywcui1990/nupic | examples/opf/experiments/multistep/first_order_0/description.py | 10 | 1657 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.

import os

from nupic.frameworks.opf.exp_description_helpers import importBaseDescription

# the sub-experiment configuration: overrides merged into the shared base
# experiment description imported below.
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/first_order_0.csv'),

  'modelParams': { 'clParams': { 'verbosity': 0, 'steps': '1,2,3'},
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tmParams': { }},

  'predictionSteps': [1, 2, 3]}

# Load the base description with the overrides applied and re-export its
# variables at this module's top level (the OPF loader reads them from here).
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| agpl-3.0 |
kangfend/django | scripts/manage_translations.py | 277 | 7141 | #!/usr/bin/env python
#
# This python file contains utility scripts to manage Django translations.
# It has to be run inside the django git root directory.
#
# The following commands are available:
#
# * update_catalogs: check for new strings in core and contrib catalogs, and
# output how much strings are new/changed.
#
# * lang_stats: output statistics for each catalog/language combination
#
# * fetch: fetch translations from transifex.com
#
# Each command support the --languages and --resources options to limit their
# operation to the specified language or resource. For example, to get stats
# for Spanish in contrib.admin, run:
#
# $ python scripts/manage_translations.py lang_stats --language=es --resources=admin
import os
from argparse import ArgumentParser
from subprocess import PIPE, Popen, call
from django.core.management import call_command
HAVE_JS = ['admin']
def _get_locale_dirs(resources, include_core=True):
"""
Return a tuple (contrib name, absolute path) for all locale directories,
optionally including the django core catalog.
If resources list is not None, filter directories matching resources content.
"""
contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')
dirs = []
# Collect all locale directories
for contrib_name in os.listdir(contrib_dir):
path = os.path.join(contrib_dir, contrib_name, 'locale')
if os.path.isdir(path):
dirs.append((contrib_name, path))
if contrib_name in HAVE_JS:
dirs.append(("%s-js" % contrib_name, path))
if include_core:
dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))
# Filter by resources, if any
if resources is not None:
res_names = [d[0] for d in dirs]
dirs = [ld for ld in dirs if ld[0] in resources]
if len(resources) > len(dirs):
print("You have specified some unknown resources. "
"Available resource names are: %s" % (', '.join(res_names),))
exit(1)
return dirs
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django.core"
else:
return "django.contrib-%s" % name
def _check_diff(cat_name, base_path):
    """
    Output the approximate number of changed/added strings in the en catalog.

    Counts msgid lines touched in the uncommitted git diff of the catalog.
    """
    extension = 'js' if cat_name.endswith('-js') else ''
    po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {
        'path': base_path, 'ext': extension}
    command = "git diff -U0 %s | egrep '^[-+]msgid' | wc -l" % po_path
    pipe = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
    output, errors = pipe.communicate()
    num_changes = int(output.strip())
    print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
    """
    Regenerate the en/LC_MESSAGES/django.po (main and contrib) catalogs with
    new/updated translatable strings, then report how much each changed.

    Both arguments exist for interface symmetry with the other commands; all
    resources are always processed.
    """
    if resources is not None:
        print("`update_catalogs` will always process all resources.")
    contrib_dirs = _get_locale_dirs(None, include_core=False)

    os.chdir(os.path.join(os.getcwd(), 'django'))
    print("Updating en catalogs for Django and contrib apps...")
    call_command('makemessages', locale=['en'])
    print("Updating en JS catalogs for Django and contrib apps...")
    call_command('makemessages', locale=['en'], domain='djangojs')

    # Output changed stats for the core catalog, then for each contrib app.
    _check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))
    for app_name, locale_dir in contrib_dirs:
        _check_diff(app_name, locale_dir)
def lang_stats(resources=None, languages=None):
    """
    Output language statistics of committed translation files for each
    Django catalog.

    If ``resources`` is provided, it should be a list of translation
    resources to limit the output (e.g. ['core', 'gis']).
    """
    for name, locale_dir in _get_locale_dirs(resources):
        print("\nShowing translations stats for '%s':" % name)
        langs = sorted(d for d in os.listdir(locale_dir) if not d.startswith('_'))
        for lang in langs:
            if languages and lang not in languages:
                continue
            # TODO: merge first with the latest en catalog
            po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
                'path': locale_dir, 'lang': lang,
                'ext': 'js' if name.endswith('-js') else ''}
            pipe = Popen("msgfmt -vc -o /dev/null %s" % po_path,
                         stdout=PIPE, stderr=PIPE, shell=True)
            output, errors = pipe.communicate()
            if pipe.returncode == 0:
                # msgfmt reports its statistics on stderr.
                print("%s: %s" % (lang, errors.strip()))
            else:
                print("Errors happened when checking %s translation for %s:\n%s" % (
                    lang, name, errors))
def fetch(resources=None, languages=None):
    """
    Fetch translations from Transifex, wrap long lines, generate mo files.

    ``resources`` optionally limits the catalogs processed; ``languages``
    optionally limits the languages pulled (all non-en languages otherwise).
    Exits with status 1 if any catalog failed to compile.
    """
    locale_dirs = _get_locale_dirs(resources)
    errors = []
    for name, dir_ in locale_dirs:
        # Transifex pull
        if languages is None:
            # Pull every language at once, skipping nearly-empty catalogs.
            call('tx pull -r %(res)s -a -f --minimum-perc=5' % {'res': _tx_resource_for_name(name)}, shell=True)
            target_langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_') and d != 'en'])
        else:
            for lang in languages:
                call('tx pull -r %(res)s -f -l %(lang)s' % {
                    'res': _tx_resource_for_name(name), 'lang': lang}, shell=True)
            target_langs = languages

        # msgcat to wrap lines and msgfmt for compilation of .mo file
        for lang in target_langs:
            po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
                'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''}
            if not os.path.exists(po_path):
                print("No %(lang)s translation for resource %(name)s" % {
                    'lang': lang, 'name': name})
                continue
            # Rewrap in place, then compile; po_path[:-3] strips the ".po"
            # suffix so the compiled file becomes "<catalog>.mo".
            call('msgcat --no-location -o %s %s' % (po_path, po_path), shell=True)
            res = call('msgfmt -c -o %s.mo %s' % (po_path[:-3], po_path), shell=True)
            if res != 0:
                errors.append((name, lang))
    if errors:
        print("\nWARNING: Errors have occurred in following cases:")
        for resource, lang in errors:
            print("\tResource %s for language %s" % (resource, lang))
        exit(1)
if __name__ == "__main__":
    RUNABLE_SCRIPTS = ('update_catalogs', 'lang_stats', 'fetch')

    parser = ArgumentParser()
    parser.add_argument('cmd', nargs=1)
    parser.add_argument("-r", "--resources", action='append',
                        help="limit operation to the specified resources")
    parser.add_argument("-l", "--languages", action='append',
                        help="limit operation to the specified languages")
    options = parser.parse_args()

    if options.cmd[0] in RUNABLE_SCRIPTS:
        # Dispatch through an explicit name lookup instead of eval(): the
        # command has already been validated against RUNABLE_SCRIPTS, but
        # eval() on user input is an unnecessary hazard and this is clearer.
        command = globals()[options.cmd[0]]
        command(options.resources, options.languages)
    else:
        print("Available commands are: %s" % ", ".join(RUNABLE_SCRIPTS))
| bsd-3-clause |
plotly/python-api | packages/python/plotly/plotly/validators/funnel/hoverlabel/_font.py | 2 | 1858 | import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``funnel.hoverlabel.font`` property."""

    def __init__(self, plotly_name="font", parent_name="funnel.hoverlabel", **kwargs):
        # ``data_docs`` below is user-facing help text consumed at runtime by
        # the plotly graph-object machinery; it is data, not a docstring.
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            colorsrc
                Sets the source reference on Chart Studio Cloud
                for  color .
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for  family .
            size

            sizesrc
                Sets the source reference on Chart Studio Cloud
                for  size .
""",
            ),
            **kwargs
        )
| mit |
masneyb/cavedbmanager | cavedb/urls.py | 1 | 2912 | # SPDX-License-Identifier: Apache-2.0
from django.conf.urls import url, include
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.conf import settings
import cavedb.views
#pylint: disable=unused-argument
def forward_to_admin(request):
    """Redirect any request for the site root to the admin index page."""
    # ``request`` is required by Django's view signature but is unused here.
    return HttpResponseRedirect('/admin/')
admin.site.site_header = settings.ADMIN_SITE_HEADER
admin.autodiscover()

#pylint: disable=line-too-long
urlpatterns = [
    # Site root forwards straight to the admin.
    url(r'^$', forward_to_admin),
    url(r'^admin/', include(admin.site.urls)),

    # Bulletin generation and per-format downloads.
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/generate$',
        cavedb.views.generate_bulletin),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/map/(?P<map_name>[\w\d\._-]+)$',
        cavedb.views.show_all_regions_gis_map),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/region/(?P<region_id>\d+)/map/(?P<map_name>[\w\d\._-]+)$',
        cavedb.views.show_region_gis_map),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/pdf$', cavedb.views.show_pdf),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/color_pdf$', cavedb.views.show_color_pdf),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/draft_pdf$', cavedb.views.show_draft_pdf),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/todo$', cavedb.views.show_todo_txt),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/kml$', cavedb.views.show_kml),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/text$', cavedb.views.show_text),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/gpx$', cavedb.views.show_gpx),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/csv$', cavedb.views.show_csv),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/mxf$', cavedb.views.show_mxf),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/shp$', cavedb.views.show_shp),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/dvd$', cavedb.views.show_dvd),
    url(r'^cavedb/bulletin/(?P<bulletin_id>\d+)/log$', cavedb.views.show_log),

    # Statewide documents.
    url(r'^cavedb/statewide_docs/(?P<doc_type>\d+)/(?P<filename>[\w\d\s\&\._-]+)$',
        cavedb.views.show_statewide_doc),

    # Bulletin-level attachments (cover images, misc attachments, lineplots).
    url(r'^cavedb/bulletin_attachments/(?P<bulletin_id>\d+)/cover/(?P<filename>[\w\d\s\&\._-]+)$',
        cavedb.views.show_bulletin_cover),
    url(r'^cavedb/bulletin_attachments/(?P<bulletin_id>\d+)/attachments/(?P<filename>[\w\d\s\&\._-]+)$',
        cavedb.views.show_bulletin_attachment),
    url(r'^cavedb/bulletin_attachments/(?P<bulletin_id>\d+)/gis_lineplot/(?P<filename>[\w\d\s\&\._-]+)$',
        cavedb.views.show_bulletin_gis_lineplot),

    # Feature-level attachments (photos, misc attachments, lineplots).
    url(r'^cavedb/feature_attachments/(?P<feature_id>\d+)/photos/(?P<filename>[\w\d\s\&\._-]+)$',
        cavedb.views.show_feature_photo),
    url(r'^cavedb/feature_attachments/(?P<feature_id>\d+)/attachments/(?P<filename>[\w\d\s\&\._-]+)$',
        cavedb.views.show_feature_attachment),
    url(r'^cavedb/feature_attachments/(?P<feature_id>\d+)/gis_lineplot/(?P<filename>[\w\d\s\&\._-]+)$',
        cavedb.views.show_feature_gis_lineplot),
]
| apache-2.0 |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py | 303 | 9236 | from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
def current_time():
"""
Retrieve the current time, this function is mocked out in unit testing.
"""
return time.time()
_Default = object()
# The default timeout to use for socket connections. This is the attribute used
# by httplib to define the default timeout
class Timeout(object):
    """ Utility object for storing timeout values.

    Example usage:

    .. code-block:: python

        timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
        pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
        pool.request(...) # Etc, etc

    :param connect:
        The maximum amount of time to wait for a connection attempt to a server
        to succeed. Omitting the parameter will default the connect timeout to
        the system default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout for connection attempts.

    :type connect: integer, float, or None

    :param read:
        The maximum amount of time to wait between consecutive
        read operations for a response from the server. Omitting
        the parameter will default the read timeout to the system
        default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout.

    :type read: integer, float, or None

    :param total:
        This combines the connect and read timeouts into one; the read timeout
        will be set to the time leftover from the connect attempt. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.

        Defaults to None.

    :type total: integer, float, or None

    .. note::

        Many factors can affect the total amount of time for urllib3 to return
        an HTTP response. Specifically, Python's DNS resolver does not obey the
        timeout specified on the socket. Other factors that can affect total
        request time include high CPU load, high swap, the program running at a
        low priority level, or other behaviors. The observed running time for
        urllib3 to return a response may be greater than the value passed to
        `total`.

        In addition, the read and total timeouts only measure the time between
        read operations on the socket connecting the client and the server,
        not the total amount of time for the request to return a complete
        response. For most requests, the timeout is raised because the server
        has not sent the first byte in the specified time. This is not always
        the case; if a server streams one byte every fifteen seconds, a timeout
        of 20 seconds will not ever trigger, even though the request will
        take several minutes to complete.

        If your goal is to cut off any request after a set amount of wall clock
        time, consider having a second "watcher" thread to cut off a slow
        request.
    """

    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT

    def __init__(self, total=None, connect=_Default, read=_Default):
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        # Set by start_connect(); stays None until a connect attempt begins.
        self._start_connect = None

    def __str__(self):
        return '%s(connect=%r, read=%r, total=%r)' % (
            type(self).__name__, self._connect, self._read, self.total)

    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid

        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is used
            for clear error messages
        :return: the value
        :raises ValueError: if the type is not an integer or a float, or if it
            is a numeric value less than zero
        """
        # The sentinel values pass through untouched; only user-supplied
        # numbers are range/type checked below.
        if value is _Default:
            return cls.DEFAULT_TIMEOUT

        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value

        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))

        try:
            if value < 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than 0." % (name, value))
        except TypeError:  # Python 3
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))

        return value

    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.

        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value passed
        to this function.

        :param timeout: The legacy timeout value
        :type timeout: integer, float, sentinel default object, or None
        :return: a Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)

    def clone(self):
        """ Create a copy of the timeout object

        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.

        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)

    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt

        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect

    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.

        :return: the elapsed time
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect

    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.

        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.

        :return: the connect timeout
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect

        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total

        # Both an explicit connect timeout and a total are set: the stricter
        # (smaller) one applies.
        return min(self._connect, self.total)

    @property
    def read_timeout(self):
        """ Get the value for the read timeout.

        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.

        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.

        :return: the value to use for the read timeout
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (self.total is not None and
                self.total is not self.DEFAULT_TIMEOUT and
                self._read is not None and
                self._read is not self.DEFAULT_TIMEOUT):
            # in case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            # Remaining budget = total - time already spent connecting,
            # capped by the explicit read timeout and floored at zero.
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
| lgpl-3.0 |
agrista/odoo-saas | addons/account/partner.py | 5 | 16273 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from operator import itemgetter
import time
from openerp.osv import fields, osv
from openerp import api
class account_fiscal_position(osv.osv):
_name = 'account.fiscal.position'
_description = 'Fiscal Position'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence'),
'name': fields.char('Fiscal Position', required=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a fiscal position without deleting it."),
'company_id': fields.many2one('res.company', 'Company'),
'account_ids': fields.one2many('account.fiscal.position.account', 'position_id', 'Account Mapping', copy=True),
'tax_ids': fields.one2many('account.fiscal.position.tax', 'position_id', 'Tax Mapping', copy=True),
'note': fields.text('Notes'),
'auto_apply': fields.boolean('Automatic', help="Apply automatically this fiscal position if the conditions match."),
'vat_required': fields.boolean('VAT required', help="Apply only if partner has a VAT number."),
'country_id': fields.many2one('res.country', 'Country', help="Apply when the shipping or invoicing country matches. Takes precedence over positions matching on a country group."),
'country_group_id': fields.many2one('res.country.group', 'Country Group', help="Apply when the shipping or invoicing country is in this country group, and no position matches the country directly."),
}
_defaults = {
'active': True,
}
def _check_country(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0], context=context)
if obj.country_id and obj.country_group_id:
return False
return True
_constraints = [
(_check_country, 'You can not select a country and a group of countries', ['country_id', 'country_group_id']),
]
@api.v7
def map_tax(self, cr, uid, fposition_id, taxes, context=None):
if not taxes:
return []
if not fposition_id:
return map(lambda x: x.id, taxes)
result = set()
for t in taxes:
ok = False
for tax in fposition_id.tax_ids:
if tax.tax_src_id.id == t.id:
if tax.tax_dest_id:
result.add(tax.tax_dest_id.id)
ok=True
if not ok:
result.add(t.id)
return list(result)
@api.v8 # noqa
def map_tax(self, taxes):
result = self.env['account.tax'].browse()
for tax in taxes:
tax_count = 0
for t in self.tax_ids:
if t.tax_src_id == tax:
tax_count += 1
if t.tax_dest_id:
result |= t.tax_dest_id
if not tax_count:
result |= tax
return result
@api.v7
def map_account(self, cr, uid, fposition_id, account_id, context=None):
if not fposition_id:
return account_id
for pos in fposition_id.account_ids:
if pos.account_src_id.id == account_id:
account_id = pos.account_dest_id.id
break
return account_id
@api.v8
def map_account(self, account):
for pos in self.account_ids:
if pos.account_src_id == account:
return pos.account_dest_id
return account
    def get_fiscal_position(self, cr, uid, company_id, partner_id, delivery_id=None, context=None):
        """Return the id of the fiscal position to apply for *partner_id*,
        or False when none applies.

        Resolution order:
          1. a fiscal position set manually on the partner;
          2. an auto-apply position matching the delivery (or invoicing)
             country directly;
          3. an auto-apply position matching through a country group;
          4. an auto-apply position with no country restriction at all.
        Auto-apply candidates must also satisfy the VAT requirement
        (``vat_required`` false, or matching the partner's
        ``vat_subjected`` flag).
        NOTE(review): *company_id* is accepted but never used here —
        presumably kept for overrides; confirm before removing.
        """
        if not partner_id:
            return False
        # This can be easily overridden to apply more complex fiscal rules
        part_obj = self.pool['res.partner']
        partner = part_obj.browse(cr, uid, partner_id, context=context)
        # a fiscal position manually set on the partner always wins
        if partner.property_account_position:
            return partner.property_account_position.id
        # with no delivery address, fall back to the invoicing partner
        if delivery_id:
            delivery = part_obj.browse(cr, uid, delivery_id, context=context)
        else:
            delivery = partner
        domain = [
            ('auto_apply', '=', True),
            '|', ('vat_required', '=', False), ('vat_required', '=', partner.vat_subjected),
        ]
        # 1) direct country match takes precedence
        fiscal_position_ids = self.search(cr, uid, domain + [('country_id', '=', delivery.country_id.id)], context=context, limit=1)
        if fiscal_position_ids:
            return fiscal_position_ids[0]
        # 2) then a match through a country group
        fiscal_position_ids = self.search(cr, uid, domain + [('country_group_id.country_ids', '=', delivery.country_id.id)], context=context, limit=1)
        if fiscal_position_ids:
            return fiscal_position_ids[0]
        # 3) finally a position with no country restriction at all
        fiscal_position_ids = self.search(cr, uid, domain + [('country_id', '=', None), ('country_group_id', '=', None)], context=context, limit=1)
        if fiscal_position_ids:
            return fiscal_position_ids[0]
        return False
class account_fiscal_position_tax(osv.osv):
    """One tax-mapping line of a fiscal position.

    When the position is applied (see ``map_tax``), ``tax_src_id`` is
    replaced by ``tax_dest_id``, or simply removed when no replacement
    tax is set.
    """
    _name = 'account.fiscal.position.tax'
    _description = 'Taxes Fiscal Position'
    # records are displayed under their parent position's name
    _rec_name = 'position_id'
    _columns = {
        'position_id': fields.many2one('account.fiscal.position', 'Fiscal Position', required=True, ondelete='cascade'),
        'tax_src_id': fields.many2one('account.tax', 'Tax Source', required=True),
        'tax_dest_id': fields.many2one('account.tax', 'Replacement Tax')
    }
    # forbid duplicate mapping lines within the same position
    _sql_constraints = [
        ('tax_src_dest_uniq',
         'unique (position_id,tax_src_id,tax_dest_id)',
         'A tax fiscal position could be defined only once time on same taxes.')
    ]
class account_fiscal_position_account(osv.osv):
    """One account-mapping line of a fiscal position.

    When the position is applied (see ``map_account``),
    ``account_src_id`` is replaced by ``account_dest_id``.
    """
    _name = 'account.fiscal.position.account'
    _description = 'Accounts Fiscal Position'
    # records are displayed under their parent position's name
    _rec_name = 'position_id'
    _columns = {
        'position_id': fields.many2one('account.fiscal.position', 'Fiscal Position', required=True, ondelete='cascade'),
        'account_src_id': fields.many2one('account.account', 'Account Source', domain=[('type','<>','view')], required=True),
        'account_dest_id': fields.many2one('account.account', 'Account Destination', domain=[('type','<>','view')], required=True)
    }
    # forbid duplicate mapping lines within the same position
    _sql_constraints = [
        ('account_src_dest_uniq',
         'unique (position_id,account_src_id,account_dest_id)',
         'An account fiscal position could be defined only once time on same accounts.')
    ]
class res_partner(osv.osv):
    """Accounting extension of ``res.partner``: receivable/payable
    totals, fiscal position and payment-term properties, and helpers for
    the manual reconciliation process."""
    _name = 'res.partner'
    _inherit = 'res.partner'
    _description = 'Partner'

    def _credit_debit_get(self, cr, uid, ids, field_names, arg, context=None):
        """Function-field getter for ``credit``/``debit``: per partner,
        sum of unreconciled move lines on receivable (credit) and payable
        (debit, sign-inverted) accounts."""
        # Fix: guard against empty ids (SQL "IN ()" is invalid) ...
        if not ids:
            return {}
        # ... and against context=None, which the signature allows but
        # context.copy() used to crash on.
        ctx = (context or {}).copy()
        ctx['all_fiscalyear'] = True
        query = self.pool.get('account.move.line')._query_get(cr, uid, context=ctx)
        cr.execute("""SELECT l.partner_id, a.type, SUM(l.debit-l.credit)
                      FROM account_move_line l
                      LEFT JOIN account_account a ON (l.account_id=a.id)
                      WHERE a.type IN ('receivable','payable')
                      AND l.partner_id IN %s
                      AND l.reconcile_id IS NULL
                      AND """ + query + """
                      GROUP BY l.partner_id, a.type
                      """,
                   (tuple(ids),))
        maps = {'receivable':'credit', 'payable':'debit' }
        res = {}
        for id in ids:
            res[id] = {}.fromkeys(field_names, 0)
        for pid,type,val in cr.fetchall():
            if val is None: val=0
            # receivable balances are reported as-is, payable negated
            res[pid][maps[type]] = (type=='receivable') and val or -val
        return res

    def _asset_difference_search(self, cr, uid, obj, name, type, args, context=None):
        """Search helper shared by ``_credit_search``/``_debit_search``:
        builds a HAVING clause from the search operators in *args* and
        returns an ``('id', 'in', ...)`` domain of matching partners."""
        if not args:
            return []
        having_values = tuple(map(itemgetter(2), args))
        where = ' AND '.join(
            map(lambda x: '(SUM(bal2) %(operator)s %%s)' % {
                'operator':x[1]},args))
        query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
        cr.execute(('SELECT pid AS partner_id, SUM(bal2) FROM ' \
                    '(SELECT CASE WHEN bal IS NOT NULL THEN bal ' \
                    'ELSE 0.0 END AS bal2, p.id as pid FROM ' \
                    '(SELECT (debit-credit) AS bal, partner_id ' \
                    'FROM account_move_line l ' \
                    'WHERE account_id IN ' \
                            '(SELECT id FROM account_account '\
                            'WHERE type=%s AND active) ' \
                    'AND reconcile_id IS NULL ' \
                    'AND '+query+') AS l ' \
                    'RIGHT JOIN res_partner p ' \
                    'ON p.id = partner_id ) AS pl ' \
                    'GROUP BY pid HAVING ' + where),
                   (type,) + having_values)
        res = cr.fetchall()
        if not res:
            # no match: return a domain that can never be satisfied
            return [('id','=','0')]
        return [('id','in',map(itemgetter(0), res))]

    def _credit_search(self, cr, uid, obj, name, args, context=None):
        """fnct_search for the ``credit`` function field."""
        return self._asset_difference_search(cr, uid, obj, name, 'receivable', args, context=context)

    def _debit_search(self, cr, uid, obj, name, args, context=None):
        """fnct_search for the ``debit`` function field."""
        return self._asset_difference_search(cr, uid, obj, name, 'payable', args, context=context)

    def _invoice_total(self, cr, uid, ids, field_name, arg, context=None):
        """Getter for ``total_invoiced``: sum of invoice totals (in the
        user currency) over the partner and its children."""
        result = {}
        account_invoice_report = self.pool.get('account.invoice.report')
        for partner in self.browse(cr, uid, ids, context=context):
            domain = [('partner_id', 'child_of', partner.id)]
            invoice_ids = account_invoice_report.search(cr, uid, domain, context=context)
            invoices = account_invoice_report.browse(cr, uid, invoice_ids, context=context)
            result[partner.id] = sum(inv.user_currency_price_total for inv in invoices)
        return result

    def _journal_item_count(self, cr, uid, ids, field_name, arg, context=None):
        """Multi getter for ``journal_item_count`` and ``contracts_count``
        (simple search_count per partner)."""
        MoveLine = self.pool('account.move.line')
        AnalyticAccount = self.pool('account.analytic.account')
        return {
            partner_id: {
                'journal_item_count': MoveLine.search_count(cr, uid, [('partner_id', '=', partner_id)], context=context),
                'contracts_count': AnalyticAccount.search_count(cr,uid, [('partner_id', '=', partner_id)], context=context)
            }
            for partner_id in ids
        }

    def has_something_to_reconcile(self, cr, uid, partner_id, context=None):
        '''
        at least a debit, a credit and a line older than the last reconciliation date of the partner
        '''
        cr.execute('''
            SELECT l.partner_id, SUM(l.debit) AS debit, SUM(l.credit) AS credit
            FROM account_move_line l
            RIGHT JOIN account_account a ON (a.id = l.account_id)
            RIGHT JOIN res_partner p ON (l.partner_id = p.id)
            WHERE a.reconcile IS TRUE
            AND p.id = %s
            AND l.reconcile_id IS NULL
            AND (p.last_reconciliation_date IS NULL OR l.date > p.last_reconciliation_date)
            AND l.state <> 'draft'
            GROUP BY l.partner_id''', (partner_id,))
        res = cr.dictfetchone()
        if res:
            # reconciliation needs both sides to exist
            return bool(res['debit'] and res['credit'])
        return False

    def mark_as_reconciled(self, cr, uid, ids, context=None):
        """Record 'now' as the partners' last full reconciliation date."""
        return self.write(cr, uid, ids, {'last_reconciliation_date': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)

    _columns = {
        'vat_subjected': fields.boolean('VAT Legal Statement', help="Check this box if the partner is subjected to the VAT. It will be used for the VAT legal statement."),
        'credit': fields.function(_credit_debit_get,
            fnct_search=_credit_search, string='Total Receivable', multi='dc', help="Total amount this customer owes you."),
        'debit': fields.function(_credit_debit_get, fnct_search=_debit_search, string='Total Payable', multi='dc', help="Total amount you have to pay to this supplier."),
        'debit_limit': fields.float('Payable Limit'),
        'total_invoiced': fields.function(_invoice_total, string="Total Invoiced", type='float', groups='account.group_account_invoice'),
        'contracts_count': fields.function(_journal_item_count, string="Contracts", type='integer', multi="invoice_journal"),
        'journal_item_count': fields.function(_journal_item_count, string="Journal Items", type="integer", multi="invoice_journal"),
        'property_account_payable': fields.property(
            type='many2one',
            relation='account.account',
            string="Account Payable",
            domain="[('type', '=', 'payable')]",
            help="This account will be used instead of the default one as the payable account for the current partner",
            required=True),
        'property_account_receivable': fields.property(
            type='many2one',
            relation='account.account',
            string="Account Receivable",
            domain="[('type', '=', 'receivable')]",
            help="This account will be used instead of the default one as the receivable account for the current partner",
            required=True),
        'property_account_position': fields.property(
            type='many2one',
            relation='account.fiscal.position',
            string="Fiscal Position",
            help="The fiscal position will determine taxes and accounts used for the partner.",
        ),
        'property_payment_term': fields.property(
            type='many2one',
            relation='account.payment.term',
            string ='Customer Payment Term',
            help="This payment term will be used instead of the default one for sale orders and customer invoices"),
        'property_supplier_payment_term': fields.property(
            type='many2one',
            relation='account.payment.term',
            string ='Supplier Payment Term',
            help="This payment term will be used instead of the default one for purchase orders and supplier invoices"),
        'ref_companies': fields.one2many('res.company', 'partner_id',
            'Companies that refers to partner'),
        'last_reconciliation_date': fields.datetime(
            'Latest Full Reconciliation Date', copy=False,
            help='Date on which the partner accounting entries were fully reconciled last time. '
                 'It differs from the last date where a reconciliation has been made for this partner, '
                 'as here we depict the fact that nothing more was to be reconciled at this date. '
                 'This can be achieved in 2 different ways: either the last unreconciled debit/credit '
                 'entry of this partner was reconciled, either the user pressed the button '
                 '"Nothing more to reconcile" during the manual reconciliation process.')
    }

    def _commercial_fields(self, cr, uid, context=None):
        """Accounting fields that must stay in sync between a commercial
        partner and its contacts."""
        return super(res_partner, self)._commercial_fields(cr, uid, context=context) + \
            ['debit_limit', 'property_account_payable', 'property_account_receivable', 'property_account_position',
             'property_payment_term', 'property_supplier_payment_term', 'last_reconciliation_date']
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nirenzang/Serpent-Pyethereum-Tutorial | pyethereum/build/lib/tools/mk_ecpairing_tests.py | 2 | 6476 | from ethereum.tools import tester
from ethereum import opcodes
from ethereum.utils import int_to_big_endian, encode_int32, big_endian_to_int
from ethereum.tools import new_statetest_utils
import json
import py_pairing
from ethereum.opcodes import GPAIRINGBASE as GPB
from ethereum.opcodes import GPAIRINGPERPOINT as GPP
c = tester.Chain(env='metropolis')
c.head_state.gas_limit = 10**8
kode = """
h: bytes32
def foo(x: bytes <= 1920) -> bytes <= 32:
o = raw_call(0x0000000000000000000000000000000000000008, x, gas=99999999, outsize=32)
self.h = sha3(o)
return o
"""
x1 = c.contract(kode, language='viper')
# Generate a point on the G2 curve, but not in the correct subgroup
fake_point = None
FQ2_one = py_pairing.FQ2.one()
big_order = py_pairing.curve_order * (py_pairing.field_modulus * 2 - py_pairing.curve_order)
G1_zero = (py_pairing.FQ.one(), py_pairing.FQ.one(), py_pairing.FQ.zero())
G2_zero = (FQ2_one, FQ2_one, py_pairing.FQ2.zero())
for i in range(200):
x = py_pairing.FQ2([8, i])
ysquared = x ** 3 + py_pairing.b2
y = ysquared ** ((py_pairing.field_modulus ** 2 + 15) // 32)
if y ** 2 == ysquared:
assert py_pairing.multiply((x, y, FQ2_one), big_order) == G2_zero
assert py_pairing.multiply((x, y, FQ2_one), py_pairing.curve_order) != G2_zero
fake_point = (x, y, FQ2_one)
break
def mk_ecpairing_data(pts):
    """Serialize (G1, G2) point pairs into ECPAIRING precompile input.

    Each pair contributes six 32-byte big-endian words: the normalized
    G1 coordinates (x, y) followed by the G2 coordinates with each FQ2
    element emitted as (imaginary, real) — i.e. coeffs[1] before
    coeffs[0], matching the precompile's expected encoding.
    """
    words = []
    for g1, g2 in pts:
        n1, n2 = py_pairing.normalize(g1), py_pairing.normalize(g2)
        words.append(encode_int32(n1[0].n))
        words.append(encode_int32(n1[1].n))
        words.append(encode_int32(n2[0].coeffs[1]))
        words.append(encode_int32(n2[0].coeffs[0]))
        words.append(encode_int32(n2[1].coeffs[1]))
        words.append(encode_int32(n2[1].coeffs[0]))
    return b''.join(words)
def perturb(inp, pos, by):
    """Return *inp* with the 32-byte big-endian word starting at byte
    offset *pos* incremented by *by*; all other bytes are unchanged."""
    word = big_endian_to_int(inp[pos: pos + 32]) + by
    return inp[:pos] + encode_int32(word) + inp[pos + 32:]
def intrinsic_gas_of_data(d):
    """Intrinsic gas charged for transaction calldata *d* (bytes):
    zero bytes cost GTXDATAZERO each, non-zero bytes GTXDATANONZERO."""
    zero_bytes = d.count(0)
    return opcodes.GTXDATAZERO * zero_bytes + \
        opcodes.GTXDATANONZERO * (len(d) - zero_bytes)
def mk_test(encoded, execgas, expect):
    """Run *encoded* ECPAIRING input through the proxy contract with
    *execgas* gas and check the outcome against *expect* ('yes' for a
    successful pairing, 'no' for an unsuccessful one, 'error' for an
    out-of-gas/failed call), then return the generated state test in
    normal and filler form.

    Raises when the observed outcome does not match *expect*.
    """
    pre = tester.mk_state_test_prefill(c)
    try:
        # 21000 base tx gas + calldata gas + the gas budget under test
        o = x1.foo(encoded, startgas=21000 + intrinsic_gas_of_data(x1.translator.encode('foo', [encoded])) + execgas)
    except tester.TransactionFailed:
        o = False
    if o is False:
        if expect != 'error':
            raise Exception('OOG')
    elif o == encode_int32(1):
        if expect != 'yes':
            raise Exception('False positive')
    elif o == encode_int32(0):
        if expect != 'no':
            raise Exception('False negative')
    else:
        raise Exception("wtf: %r" % o)
    o = tester.mk_state_test_postfill(c, pre)
    o2 = tester.mk_state_test_postfill(c, pre, filler_mode=True)
    assert new_statetest_utils.verify_state_test(o)
    return o, o2
tests = []
G1, G2 = py_pairing.G1, py_pairing.G2
m = py_pairing.multiply
co = py_pairing.curve_order
fm = py_pairing.field_modulus
tests.append((b'', GPB - 1, 'error', 'empty_data_insufficient_gas'))
tests.append((b'', GPB + 30000, 'yes', 'empty_data'))
tests.append((mk_ecpairing_data([(G1, G2)]), GPB + 30000, 'error', 'one_point_insufficient_gas'))
tests.append((mk_ecpairing_data([(G1, G2)]), GPB + GPP + 30000, 'no', 'one_point_fail'))
tests.append((mk_ecpairing_data([(G1_zero, G2)]), GPB + GPP + 30000, 'yes', 'one_point_with_g1_zero'))
tests.append((mk_ecpairing_data([(G1, G2_zero)]), GPB + GPP + 30000, 'yes', 'one_point_with_g2_zero'))
tests.append((mk_ecpairing_data([(G1, G2)])[:191], GPB + GPP + 30000, 'error', 'bad_length_191'))
tests.append((mk_ecpairing_data([(G1, G2)]) + b'\x00', GPB + GPP + 30000, 'error', 'bad_length_193'))
tests.append((mk_ecpairing_data([(G1, G2), (G1, G2)]), GPB + GPP * 2 + 30000, 'no', 'two_point_fail_1'))
tests.append((mk_ecpairing_data([(G1, G2_zero), (G1, G2)]), GPB + GPP * 2 + 30000, 'no', 'two_points_with_one_g2_zero'))
tests.append((mk_ecpairing_data([(G1, G2), (m(G1, co - 1), G2)]), GPB + GPP * 2 + 30000, 'yes', 'two_point_match_1'))
tests.append((mk_ecpairing_data([(G1, G2), (m(G1, co - 1), G2)]), GPB + GPP + 30000, 'error', 'two_point_oog'))
tests.append((mk_ecpairing_data([(G1, G2), (G1, m(G2, co - 1))]), GPB + GPP * 2 + 30000, 'yes', 'two_point_match_2'))
tests.append((mk_ecpairing_data([(G1, m(G2, 2)), (m(G1, co - 2), G2)]), GPB + GPP * 2 + 30000, 'yes', 'two_point_match_3'))
tests.append((mk_ecpairing_data([(m(G1, 27), m(G2, 37)), (G1, m(G2, co - 999))]), GPB + GPP * 2 + 30000, 'yes', 'two_point_match_4'))
tests.append((mk_ecpairing_data([(m(G1, 27), m(G2, 37)), (G1, m(G2, 998))]), GPB + GPP * 2 + 30000, 'no', 'two_point_fail_2'))
tests.append((mk_ecpairing_data([(G1, G2_zero), (G1_zero, G2)]), GPB + GPP * 2 + 30000, 'yes', 'two_point_match_5'))
tests.append((mk_ecpairing_data([(m(G1, 27), m(G2, 37)), (G1, m(G2, co - 999)), (G1, G2_zero)]),
GPB + GPP * 3 + 30000, 'yes', 'three_point_match_1'))
tests.append((mk_ecpairing_data([(m(G1, 27), m(G2, 37)), (G1, m(G2, 999)), (G1, G2)]),
GPB + GPP * 3 + 30000, 'no','three_point_fail_1'))
tests.append((mk_ecpairing_data([(G1_zero, fake_point)]), GPB + GPP + 30000, 'error', 'one_point_not_in_subgroup'))
tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 0, 1), GPB + GPP + 30000, 'error', 'perturb_zeropoint_by_one'))
tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 0, co), GPB + GPP + 30000, 'error', 'perturb_zeropoint_by_curve_order'))
tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 0, fm), GPB + GPP + 30000, 'error', 'perturb_zeropoint_by_field_modulus'))
tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 64, 1), GPB + GPP + 30000, 'error', 'perturb_g2_by_one'))
tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 96, co), GPB + GPP + 30000, 'error', 'perturb_g2_by_curve_order'))
tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 128, fm), GPB + GPP + 30000, 'error', 'perturb_g2_by_field_modulus'))
tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 160, fm), GPB + GPP + 30000, 'error', 'perturb_g2_by_field_modulus_again'))
testout = {}
testout_filler = {}
for encoded, execgas, expect, desc in tests:
print('testing', encoded, execgas, expect, desc)
o1, o2 = mk_test(encoded, execgas, expect)
testout["ecpairing_" + desc] = o1
o2["explanation"] = "Puts the given data into the ECPAIRING precompile"
testout_filler["ecpairing_" + desc] = o2
open('ecpairing_tests.json', 'w').write(json.dumps(testout, indent=4))
open('ecpairing_tests_filler.json', 'w').write(json.dumps(testout_filler, indent=4))
| gpl-3.0 |
ferabra/edx-platform | common/djangoapps/student/management/commands/assigngroups.py | 170 | 3059 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from student.models import UserTestGroup
import random
import sys
import datetime
from textwrap import dedent
import json
from pytz import UTC
def group_from_value(groups, v):
    """Pick the group whose cumulative-probability band contains *v*.

    ``groups`` is a sequence of ``(name, probability)`` pairs whose
    probabilities sum to 1, and *v* is a random value in [0, 1].  E.g.
    for (('a', 0.3), ('b', 0.4), ('c', 0.3)): 'a' when v < 0.3, 'b'
    when 0.3 <= v < 0.7, 'c' otherwise.
    """
    cumulative = 0
    for name, probability in groups:
        cumulative += probability
        if cumulative > v:
            return name
    # v landed past the last band (floating-point round-off):
    # fall back to the final group.
    return name
class Command(BaseCommand):
help = dedent("""\
Assign users to test groups. Takes a list of groups:
a:0.3,b:0.4,c:0.3 file.txt "Testing something"
Will assign each user to group a, b, or c with
probability 0.3, 0.4, 0.3. Probabilities must
add up to 1.
Will log what happened to file.txt.
""")
def handle(self, *args, **options):
if len(args) != 3:
print "Invalid number of options"
sys.exit(-1)
# Extract groups from string
group_strs = [x.split(':') for x in args[0].split(',')]
groups = [(group, float(value)) for group, value in group_strs]
print "Groups", groups
## Confirm group probabilities add up to 1
total = sum(zip(*groups)[1])
print "Total:", total
if abs(total - 1) > 0.01:
print "Total not 1"
sys.exit(-1)
## Confirm groups don't already exist
for group in dict(groups):
if UserTestGroup.objects.filter(name=group).count() != 0:
print group, "already exists!"
sys.exit(-1)
group_objects = {}
f = open(args[1], "a+")
## Create groups
for group in dict(groups):
utg = UserTestGroup()
utg.name = group
utg.description = json.dumps({"description": args[2]},
{"time": datetime.datetime.now(UTC).isoformat()})
group_objects[group] = utg
group_objects[group].save()
## Assign groups
users = list(User.objects.all())
count = 0
for user in users:
if count % 1000 == 0:
print count
count = count + 1
v = random.uniform(0, 1)
group = group_from_value(groups, v)
group_objects[group].users.add(user)
f.write(u"Assigned user {name} ({id}) to {group}\n".format(
name=user.username,
id=user.id,
group=group
).encode('utf-8'))
## Save groups
for group in group_objects:
group_objects[group].save()
f.close()
# python manage.py assigngroups summary_test:0.3,skip_summary_test:0.7 log.txt "Do previews of future materials help?"
# python manage.py assigngroups skip_capacitor:0.3,capacitor:0.7 log.txt "Do we show capacitor in linearity tutorial?"
| agpl-3.0 |
pavelchristof/gomoku-ai | tensorflow/python/framework/versions.py | 127 | 1607 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
# Re-export the version metadata baked into the native pywrap_tensorflow
# extension module under the conventional dunder names.
__version__ = pywrap_tensorflow.__version__
__git_version__ = pywrap_tensorflow.__git_version__
__compiler_version__ = pywrap_tensorflow.__compiler_version__
# Non-dunder aliases of the same values.
VERSION = __version__
GIT_VERSION = __git_version__
COMPILER_VERSION = __compiler_version__
# GraphDef version and the producer/consumer compatibility bounds,
# taken from the native module.
GRAPH_DEF_VERSION = pywrap_tensorflow.GRAPH_DEF_VERSION
GRAPH_DEF_VERSION_MIN_CONSUMER = (
    pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_CONSUMER)
GRAPH_DEF_VERSION_MIN_PRODUCER = (
    pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_PRODUCER)
# Explicit public API of this module.
__all__ = [
    "__version__",
    "__git_version__",
    "__compiler_version__",
    "COMPILER_VERSION",
    "GIT_VERSION",
    "GRAPH_DEF_VERSION",
    "GRAPH_DEF_VERSION_MIN_CONSUMER",
    "GRAPH_DEF_VERSION_MIN_PRODUCER",
    "VERSION",
]
| apache-2.0 |
RannyeriDev/Solfege | solfege/soundcard/oss_sequencer2.py | 4 | 3288 | # GNU Solfege - free ear training software
# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2011 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
from solfege.soundcard import oss_common
from solfege.soundcard import solfege_c_midi
from solfege.mpd.track import MidiEventStream
class OSSSequencer2Synth(oss_common.AbstractSynth):
    """Synth backend driving an OSS sequencer device through the
    ``solfege_c_midi`` wrapper module."""
    def __init__(self, device, devnum, verbose_init):
        oss_common.AbstractSynth.__init__(self, device, devnum, verbose_init)
        # FIXME-LEARNTHIS: is the value 96 special in any way,
        # or can I use whatever value i want???
        solfege_c_midi.sndctl_tmr_timebase(96)
        solfege_c_midi.sndctl_tmr_tempo(60)
    def set_patch(self):
        """
        No-op placeholder: patches are reset per stream in
        play_midieventstream and set through SET_PATCH events instead.
        """
        pass
    def play_track(self, *tracks):
        """Merge *tracks* into one MidiEventStream and play it."""
        self.play_midieventstream(MidiEventStream(*tracks))
    def play_midieventstream(self, midieventstream):
        """Reset the sequencer and timer, then play the event stream."""
        m = solfege_c_midi
        m.sndctl_seq_reset()
        # reset every channel to patch 0 before starting playback
        for c in range(self.NUM_CHANNELS):
            m.seq_set_patch(self.m_devnum, c, 0)
        m.sndctl_tmr_timebase(96)
        m.sndctl_tmr_tempo(60)
        m.seq_start_timer()
        self.handle_midi_events(midieventstream)
    def handle_midi_events(self, midieventstream):
        """Translate each MidiEventStream event into sequencer calls."""
        m = solfege_c_midi
        for e in midieventstream:
            if e[0] == midieventstream.TEMPO:
                # e[1]/e[2] is the tempo; note this is Python 2 integer
                # division (the file only imports absolute_import)
                t = e[1] * 4 / e[2]
                if t < 256:
                    m.sndctl_tmr_timebase(96)
                    m.sndctl_tmr_tempo(t)
                else:
                    if t > 511:
                        logging.debug("devmusicsynth.py: warning: bpm > 511")
                    # halve the tempo and double the timebase to stay
                    # within the accepted tempo range
                    m.sndctl_tmr_timebase(96*2)
                    m.sndctl_tmr_tempo(int(t/2))
            elif e[0] == midieventstream.NOTE_ON:
                m.seq_start_note(self.m_devnum, e[1], e[2], e[3])
            elif e[0] == midieventstream.NOTE_OFF:
                m.seq_stop_note(self.m_devnum, e[1], e[2], e[3])
            elif e[0] == midieventstream.NOTELEN_TIME:
                # 96 is a const, also used in soundcard.initialize that
                # I don't understand.
                m.seq_delta_time(int(96*4*e[1]))
            elif e[0] == midieventstream.SET_PATCH:
                m.seq_set_patch(self.m_devnum, e[1], e[2])
            elif e[0] == midieventstream.BENDER:
                m.seq_bender(self.m_devnum, e[1], e[2])
            elif e[0] == midieventstream.VOLUME:
                m.seq_set_volume(self.m_devnum, e[1], e[2])
            else:
                raise Exception("oss_sequencer2: Corrupt midieventstream error")
        m.seqbuf_dump()
| gpl-3.0 |
efiop/dvc | dvc/hash_info.py | 1 | 1355 | from collections import OrderedDict
from dataclasses import dataclass, field
from typing import Optional
HASH_DIR_SUFFIX = ".dir"
@dataclass
class HashInfo:
    """Named hash value plus optional size/file-count metadata.

    Equality and hashing consider only (name, value); size and nfiles
    are informational.
    """

    # dict keys used by from_dict()/to_dict() for the metadata fields
    PARAM_SIZE = "size"
    PARAM_NFILES = "nfiles"

    name: Optional[str]
    value: Optional[str]
    size: Optional[int] = field(default=None, compare=False)
    nfiles: Optional[int] = field(default=None, compare=False)

    def __bool__(self):
        # truthy only when an actual hash value is present
        return True if self.value else False

    def __str__(self):
        return "{}: {}".format(self.name, self.value)

    def __hash__(self):
        identity = (self.name, self.value)
        return hash(identity)

    @classmethod
    def from_dict(cls, d):
        """Build a HashInfo from its dict form; ``{}``/None yield an
        empty (falsy) instance.  Exactly one non-metadata key is
        expected: the hash name mapped to its value."""
        data = d.copy() if d else {}
        size = data.pop(cls.PARAM_SIZE, None)
        nfiles = data.pop(cls.PARAM_NFILES, None)
        if not data:
            return cls(None, None)
        [(name, value)] = data.items()
        return cls(name, value, size=size, nfiles=nfiles)

    def to_dict(self):
        """Inverse of :meth:`from_dict`; an empty instance serializes
        to an empty dict."""
        serialized = OrderedDict()
        if self:
            serialized[self.name] = self.value
            if self.size is not None:
                serialized[self.PARAM_SIZE] = self.size
            if self.nfiles is not None:
                serialized[self.PARAM_NFILES] = self.nfiles
        return serialized

    @property
    def isdir(self):
        # directory objects are marked by the ".dir" suffix on the value
        return bool(self) and self.value.endswith(HASH_DIR_SUFFIX)
| apache-2.0 |
creimers/cmsplugin-filer | cmsplugin_filer_image/migrations/0001_initial.py | 11 | 14895 |
from south.db import db
from django.db import models
from cmsplugin_filer_image.models import *
class Migration:
depends_on = (
("filer", "0008_polymorphic__del_field_file__file_type_plugin_name"),
("cms", "0039_auto__del_field_page_moderator_state"),
)
    def forwards(self, orm):
        """Create the ``cmsplugin_filerimage`` table for the FilerImage
        plugin model (South schema migration, forward direction)."""
        # Adding model 'FilerImage'
        db.create_table('cmsplugin_filerimage', (
            ('cmsplugin_ptr', orm['cmsplugin_filer_image.FilerImage:cmsplugin_ptr']),
            ('image', orm['cmsplugin_filer_image.FilerImage:image']),
            ('alt_text', orm['cmsplugin_filer_image.FilerImage:alt_text']),
            ('caption', orm['cmsplugin_filer_image.FilerImage:caption']),
            ('use_autoscale', orm['cmsplugin_filer_image.FilerImage:use_autoscale']),
            ('width', orm['cmsplugin_filer_image.FilerImage:width']),
            ('height', orm['cmsplugin_filer_image.FilerImage:height']),
            ('float', orm['cmsplugin_filer_image.FilerImage:float']),
            ('free_link', orm['cmsplugin_filer_image.FilerImage:free_link']),
            ('page_link', orm['cmsplugin_filer_image.FilerImage:page_link']),
            ('description', orm['cmsplugin_filer_image.FilerImage:description']),
        ))
        db.send_create_signal('cmsplugin_filer_image', ['FilerImage'])
    def backwards(self, orm):
        """Drop the ``cmsplugin_filerimage`` table (reverse of
        :meth:`forwards`)."""
        # Deleting model 'FilerImage'
        db.delete_table('cmsplugin_filerimage')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'menu_login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cmsplugin_filer_image.filerimage': {
'Meta': {'db_table': "'cmsplugin_filerimage'"},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'float': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'free_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']"}),
'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'use_autoscale': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'file_field': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'unique_together': "(('parent', 'name'),)"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cmsplugin_filer_image']
| bsd-3-clause |
pv/scikit-learn | doc/sphinxext/github_link.py | 314 | 2661 | from operator import attrgetter
import inspect
import subprocess
import os
import sys
from functools import partial
REVISION_CMD = 'git rev-parse --short HEAD'


def _get_git_revision():
    """Return the abbreviated hash of the current git HEAD, or ``None``.

    Runs ``git rev-parse --short HEAD`` in the current working directory.
    Returns ``None`` (after printing a diagnostic) when the command fails
    or git is not installed, so callers can degrade gracefully instead of
    crashing the docs build.
    """
    try:
        revision = subprocess.check_output(REVISION_CMD.split()).strip()
    except subprocess.CalledProcessError:
        # Not a git checkout (or some other git failure).
        print('Failed to execute git to get revision')
        return None
    except OSError:
        # git executable not found on PATH; previously this was uncaught
        # and aborted the whole Sphinx build.
        print('Failed to execute git to get revision')
        return None
    return revision.decode('utf-8')
def _linkcode_resolve(domain, info, package, url_fmt, revision):
    """Determine a link to online source for a class/method/function

    This is called by sphinx.ext.linkcode

    An example with a long-untouched module that everyone has
    >>> _linkcode_resolve('py', {'module': 'tty',
    ...                          'fullname': 'setraw'},
    ...                   package='tty',
    ...                   url_fmt='http://hg.python.org/cpython/file/'
    ...                           '{revision}/Lib/{package}/{path}#L{lineno}',
    ...                   revision='xxxx')
    'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'

    Parameters
    ----------
    domain : str
        Sphinx object domain; only 'py' and 'pyx' objects are linked.
    info : dict
        Sphinx-provided mapping; 'module' and 'fullname' keys are read.
    package : str
        Name of the root module — used to compute the repo-relative path.
    url_fmt : str
        Format string with {revision}, {package}, {path}, {lineno} slots.
    revision : str or None
        Git revision to embed; ``None`` disables linking entirely.
    """
    # Bail out early when linking is impossible: no revision, a non-Python
    # domain, or missing module/name information from Sphinx.
    if revision is None:
        return
    if domain not in ('py', 'pyx'):
        return
    if not info.get('module') or not info.get('fullname'):
        return

    class_name = info['fullname'].split('.')[0]
    if type(class_name) != str:
        # Python 2 only
        class_name = class_name.encode('utf-8')
    # Import the documented module and resolve the dotted name to the
    # actual object so inspect can locate its source.
    module = __import__(info['module'], fromlist=[class_name])
    obj = attrgetter(info['fullname'])(module)

    # inspect.getsourcefile can raise for builtins/extension objects; fall
    # back to the source file of the object's defining module, and give up
    # (return None -> no link) when neither lookup yields a file.
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        try:
            fn = inspect.getsourcefile(sys.modules[obj.__module__])
        except Exception:
            fn = None
    if not fn:
        return

    # Express the file path relative to the package root so it matches the
    # repository layout used in url_fmt.
    fn = os.path.relpath(fn,
                         start=os.path.dirname(__import__(package).__file__))
    try:
        lineno = inspect.getsourcelines(obj)[1]
    except Exception:
        # No line number available — format with an empty string so the
        # URL still renders (the #L anchor will simply be empty).
        lineno = ''
    return url_fmt.format(revision=revision, package=package,
                          path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
    """Build a ``linkcode_resolve`` callable for ``sphinx.ext.linkcode``.

    The current git revision is looked up once, at build time, and baked
    into the returned callable.

    Parameters
    ----------
    package : str
        Name of the root module of the package.
    url_fmt : str
        URL template along the lines of
        ``'https://github.com/USER/PROJECT/blob/{revision}/{package}/{path}#L{lineno}'``.
    """
    bound_args = {
        'revision': _get_git_revision(),
        'package': package,
        'url_fmt': url_fmt,
    }
    return partial(_linkcode_resolve, **bound_args)
| bsd-3-clause |
ahmadiga/min_edx | cms/djangoapps/contentstore/views/tests/test_user.py | 123 | 12492 | """
Tests for contentstore/views/user.py.
"""
import json
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from django.contrib.auth.models import User
from student.models import CourseEnrollment
from student.roles import CourseStaffRole, CourseInstructorRole
from student import auth
class UsersTestCase(CourseTestCase):
    """Tests for the ``course_team_handler`` views.

    Covers listing the course team, reading a member's role, adding and
    promoting/demoting staff and instructors, permission checks, and the
    course-enrollment side effects of role changes.
    """

    def setUp(self):
        # ext_user: an active user who is NOT initially on the course team.
        # inactive_user: an account that exists but is deactivated.
        super(UsersTestCase, self).setUp()
        self.ext_user = User.objects.create_user(
            "joe", "joe@comedycentral.com", "haha")
        self.ext_user.is_active = True
        self.ext_user.is_staff = False
        self.ext_user.save()
        self.inactive_user = User.objects.create_user(
            "carl", "carl@comedycentral.com", "haha")
        self.inactive_user.is_active = False
        self.inactive_user.is_staff = False
        self.inactive_user.save()

        self.index_url = self.course_team_url()
        self.detail_url = self.course_team_url(email=self.ext_user.email)
        self.inactive_detail_url = self.course_team_url(email=self.inactive_user.email)
        self.invalid_detail_url = self.course_team_url(email='nonexistent@user.com')

    def course_team_url(self, email=None):
        """Return the course-team URL, optionally scoped to one user's email."""
        return reverse_course_url(
            'course_team_handler', self.course.id,
            kwargs={'email': email} if email else {}
        )

    def test_index(self):
        """A non-member does not appear on the course-team page."""
        resp = self.client.get(self.index_url, HTTP_ACCEPT='text/html')
        # ext_user is not currently a member of the course team, and so should
        # not show up on the page.
        self.assertNotContains(resp, self.ext_user.email)

    def test_index_member(self):
        """A staff member's email appears on the course-team page."""
        auth.add_users(self.user, CourseStaffRole(self.course.id), self.ext_user)

        resp = self.client.get(self.index_url, HTTP_ACCEPT='text/html')
        self.assertContains(resp, self.ext_user.email)

    def test_detail(self):
        """Detail view for an active non-member reports no role, active."""
        resp = self.client.get(self.detail_url)
        self.assertEqual(resp.status_code, 200)
        result = json.loads(resp.content)
        self.assertEqual(result["role"], None)
        self.assertTrue(result["active"])

    def test_detail_inactive(self):
        """Detail view for a deactivated account reports active == False."""
        resp = self.client.get(self.inactive_detail_url)
        self.assertEqual(resp.status_code, 200)
        result = json.loads(resp.content)
        self.assertFalse(result["active"])

    def test_detail_invalid(self):
        """Detail view for an unknown email returns 404 with an error body."""
        resp = self.client.get(self.invalid_detail_url)
        self.assertEqual(resp.status_code, 404)
        result = json.loads(resp.content)
        self.assertIn("error", result)

    def test_detail_post(self):
        """Posting an empty role leaves the user with no roles, unenrolled."""
        resp = self.client.post(
            self.detail_url,
            data={"role": ""},
        )
        self.assertEqual(resp.status_code, 204)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        # no content: should not be in any roles
        self.assertFalse(auth.user_has_role(ext_user, CourseStaffRole(self.course.id)))
        self.assertFalse(auth.user_has_role(ext_user, CourseInstructorRole(self.course.id)))
        self.assert_not_enrolled()

    def test_detail_post_staff(self):
        """Posting role=staff grants staff (not instructor) and enrolls."""
        resp = self.client.post(
            self.detail_url,
            data=json.dumps({"role": "staff"}),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 204)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        self.assertTrue(auth.user_has_role(ext_user, CourseStaffRole(self.course.id)))
        self.assertFalse(auth.user_has_role(ext_user, CourseInstructorRole(self.course.id)))
        self.assert_enrolled()

    def test_detail_post_staff_other_inst(self):
        """Granting staff to one user does not disturb another instructor."""
        auth.add_users(self.user, CourseInstructorRole(self.course.id), self.user)

        resp = self.client.post(
            self.detail_url,
            data=json.dumps({"role": "staff"}),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 204)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        self.assertTrue(auth.user_has_role(ext_user, CourseStaffRole(self.course.id)))
        self.assertFalse(auth.user_has_role(ext_user, CourseInstructorRole(self.course.id)))
        self.assert_enrolled()
        # check that other user is unchanged
        user = User.objects.get(email=self.user.email)
        self.assertTrue(auth.user_has_role(user, CourseInstructorRole(self.course.id)))
        self.assertFalse(CourseStaffRole(self.course.id).has_user(user))

    def test_detail_post_instructor(self):
        """Posting role=instructor grants instructor only and enrolls."""
        resp = self.client.post(
            self.detail_url,
            data=json.dumps({"role": "instructor"}),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 204)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        self.assertTrue(auth.user_has_role(ext_user, CourseInstructorRole(self.course.id)))
        self.assertFalse(CourseStaffRole(self.course.id).has_user(ext_user))
        self.assert_enrolled()

    def test_detail_post_missing_role(self):
        """A JSON body with no 'role' key is rejected with 400 + error."""
        resp = self.client.post(
            self.detail_url,
            data=json.dumps({"toys": "fun"}),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 400)
        result = json.loads(resp.content)
        self.assertIn("error", result)
        self.assert_not_enrolled()

    def test_detail_post_no_json(self):
        """Form-encoded (non-JSON) role posts are accepted too."""
        resp = self.client.post(
            self.detail_url,
            data={"role": "staff"},
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 204)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        self.assertTrue(auth.user_has_role(ext_user, CourseStaffRole(self.course.id)))
        self.assertFalse(auth.user_has_role(ext_user, CourseInstructorRole(self.course.id)))
        self.assert_enrolled()

    def test_detail_delete_staff(self):
        """DELETE removes an existing staff member's staff role."""
        auth.add_users(self.user, CourseStaffRole(self.course.id), self.ext_user)

        resp = self.client.delete(
            self.detail_url,
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 204)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        self.assertFalse(auth.user_has_role(ext_user, CourseStaffRole(self.course.id)))

    def test_detail_delete_instructor(self):
        """DELETE removes an instructor when another instructor remains."""
        auth.add_users(self.user, CourseInstructorRole(self.course.id), self.ext_user, self.user)

        resp = self.client.delete(
            self.detail_url,
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 204)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        self.assertFalse(auth.user_has_role(ext_user, CourseInstructorRole(self.course.id)))

    def test_delete_last_instructor(self):
        """Removing the sole instructor is refused (400) and role is kept."""
        auth.add_users(self.user, CourseInstructorRole(self.course.id), self.ext_user)

        resp = self.client.delete(
            self.detail_url,
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 400)
        result = json.loads(resp.content)
        self.assertIn("error", result)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        self.assertTrue(auth.user_has_role(ext_user, CourseInstructorRole(self.course.id)))

    def test_post_last_instructor(self):
        """Demoting the sole instructor to staff is refused (400)."""
        auth.add_users(self.user, CourseInstructorRole(self.course.id), self.ext_user)

        resp = self.client.post(
            self.detail_url,
            data={"role": "staff"},
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 400)
        result = json.loads(resp.content)
        self.assertIn("error", result)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        self.assertTrue(auth.user_has_role(ext_user, CourseInstructorRole(self.course.id)))

    def test_permission_denied_self(self):
        """A non-global-staff course-staff user cannot promote themselves."""
        auth.add_users(self.user, CourseStaffRole(self.course.id), self.user)
        self.user.is_staff = False
        self.user.save()
        self_url = self.course_team_url(email=self.user.email)

        resp = self.client.post(
            self_url,
            data={"role": "instructor"},
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 403)
        result = json.loads(resp.content)
        self.assertIn("error", result)

    def test_permission_denied_other(self):
        """A non-global-staff course-staff user cannot promote others."""
        auth.add_users(self.user, CourseStaffRole(self.course.id), self.user)
        self.user.is_staff = False
        self.user.save()

        resp = self.client.post(
            self.detail_url,
            data={"role": "instructor"},
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 403)
        result = json.loads(resp.content)
        self.assertIn("error", result)

    def test_staff_can_delete_self(self):
        """Course staff may remove their own staff role."""
        auth.add_users(self.user, CourseStaffRole(self.course.id), self.user)
        self.user.is_staff = False
        self.user.save()
        self_url = self.course_team_url(email=self.user.email)

        resp = self.client.delete(self_url)
        self.assertEqual(resp.status_code, 204)
        # reload user from DB
        user = User.objects.get(email=self.user.email)
        self.assertFalse(auth.user_has_role(user, CourseStaffRole(self.course.id)))

    def test_staff_cannot_delete_other(self):
        """Course staff may not remove another staff member (403)."""
        auth.add_users(self.user, CourseStaffRole(self.course.id), self.user, self.ext_user)
        self.user.is_staff = False
        self.user.save()

        resp = self.client.delete(self.detail_url)
        self.assertEqual(resp.status_code, 403)
        result = json.loads(resp.content)
        self.assertIn("error", result)
        # reload user from DB
        ext_user = User.objects.get(email=self.ext_user.email)
        self.assertTrue(auth.user_has_role(ext_user, CourseStaffRole(self.course.id)))

    def test_user_not_initially_enrolled(self):
        # Verify that ext_user is not enrolled in the new course before being added as a staff member.
        self.assert_not_enrolled()

    def test_remove_staff_does_not_unenroll(self):
        """Dropping the staff role leaves the course enrollment in place."""
        # Add user with staff permissions.
        self.client.post(
            self.detail_url,
            data=json.dumps({"role": "staff"}),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
        )
        self.assert_enrolled()
        # Remove user from staff on course. Will not un-enroll them from the course.
        resp = self.client.delete(
            self.detail_url,
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 204)
        self.assert_enrolled()

    def test_staff_to_instructor_still_enrolled(self):
        """Promoting staff to instructor keeps the enrollment."""
        # Add user with staff permission.
        self.client.post(
            self.detail_url,
            data=json.dumps({"role": "staff"}),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
        )
        self.assert_enrolled()
        # Now add with instructor permission. Verify still enrolled.
        resp = self.client.post(
            self.detail_url,
            data=json.dumps({"role": "instructor"}),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
        )
        self.assertEqual(resp.status_code, 204)
        self.assert_enrolled()

    def assert_not_enrolled(self):
        """ Asserts that self.ext_user is not enrolled in self.course. """
        self.assertFalse(
            CourseEnrollment.is_enrolled(self.ext_user, self.course.id),
            'Did not expect ext_user to be enrolled in course'
        )

    def assert_enrolled(self):
        """ Asserts that self.ext_user is enrolled in self.course. """
        self.assertTrue(
            CourseEnrollment.is_enrolled(self.ext_user, self.course.id),
            'User ext_user should have been enrolled in the course'
        )
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.