code stringlengths 281 23.7M |
|---|
def test_incomplete_graph(mocker: Any) -> None:
    """Constructing a Graph whose topics have no publishers/subscribers must
    log a warning listing every unused topic.

    NOTE(review): decorators appear stripped throughout this file; the bare
    `(A)` / `(B)` expressions in MyNode look like the arguments of stripped
    subscriber/publisher decorators on `my_transformer` — confirm upstream.
    """
    # Patch the logger object in the sibling `graph` module so the warning
    # emitted during MyGraph() construction can be asserted on.
    logger_module = (__name__.rsplit('.', 2)[0] + '.graph.logger')
    mock_logger = mocker.patch(logger_module)
    class MyMessage(Message):
        int_field: int
    class MyNode(Node):
        # Two topics; nothing in this test ever publishes to either.
        A = Topic(MyMessage)
        B = Topic(MyMessage)
        (A)  # NOTE(review): likely a stripped decorator argument — see docstring
        (B)  # NOTE(review): likely a stripped decorator argument — see docstring
        def my_transformer(self, message: MyMessage) -> AsyncPublisher:
            pass
    class MyChildGroup(Group):
        C = Topic(MyMessage)
        MY_NODE1: MyNode
        MY_NODE2: MyNode
        def connections(self) -> Connections:
            # C feeds node 1's input; node 1's output feeds node 2's input.
            return ((self.C, self.MY_NODE1.A), (self.MY_NODE1.B, self.MY_NODE2.A))
    class MyParentGroup(Group):
        D = Topic(MyMessage)
        E = Topic(MyMessage)
        MY_CHILD1: MyChildGroup
        MY_CHILD2: MyChildGroup
        def connections(self) -> Connections:
            return ((self.D, self.MY_CHILD1.C), (self.E, self.MY_CHILD2.MY_NODE1.A))
    class MyGraph(Graph):
        MY_COMPONENT: MyParentGroup
        def logging(self) -> Dict[(str, Topic)]:
            return {'D': self.MY_COMPONENT.D}
    # Instantiation alone should run the unused-topic analysis and warn.
    MyGraph()
    expected_message = "MyGraph has unused topics:\n\t- MY_COMPONENT/D has no publishers\n\t- MY_COMPONENT/E has no publishers\n\t- MY_COMPONENT/MY_CHILD1/C has no publishers\n\t- MY_COMPONENT/MY_CHILD1/MY_NODE1/A has no publishers\n\t- MY_COMPONENT/MY_CHILD1/MY_NODE2/B has no subscribers\n\t- MY_COMPONENT/MY_CHILD2/C has no publishers\n\t- MY_COMPONENT/MY_CHILD2/MY_NODE1/A has no publishers\n\t- MY_COMPONENT/MY_CHILD2/MY_NODE2/B has no subscribers\nThis could mean that there are publishers and/or subscribers of Cthulhu streams that Labgraph doesn't know about, and/or that data in some topics is being discarded."
    mock_logger.warning.assert_called_with(expected_message)
class ModelsFactory():
    """Factory that lazily imports and instantiates a model by name.

    Model modules are imported only when requested, so unused models (and
    their heavy dependencies) are never loaded.
    """

    # Known model names mapped to the relative module defining their `Model`.
    _MODEL_MODULES = {
        'LVD_voxels_SMPL': '.LVD_voxels_SMPL',
        'IPNet_voxels_SMPL': '.IPNet_voxels_SMPL',
        'LVD_voxels_MANO': '.LVD_voxels_MANO',
        'IPNet_voxels_MANO': '.IPNet_voxels_MANO',
        'LVD_images_SMPL': '.LVD_images_SMPL',
        'LVD_images_wmask_SMPL': '.LVD_images_wmask_SMPL',
    }

    def __init__(self):
        pass

    @staticmethod  # fix: the original had no `self` parameter, so instance calls bound `model_name` to the instance
    def get_by_name(model_name, *args, **kwargs):
        """Import the module for *model_name* and return ``Model(*args, **kwargs)``.

        Raises:
            ValueError: if *model_name* is not one of the recognized models.
        """
        import importlib
        try:
            module_path = ModelsFactory._MODEL_MODULES[model_name]
        except KeyError:
            raise ValueError('Model %s not recognized.' % model_name) from None
        module = importlib.import_module(module_path, __package__)
        model = module.Model(*args, **kwargs)
        print('Model %s was created' % model_name)
        return model
def upgrade():
    """Alembic migration: add two columns, tolerating repeated runs.

    Each ALTER is wrapped in its own try/except so the migration is
    effectively idempotent: a column that already exists surfaces as a
    driver-specific OperationalError/ProgrammingError/InternalError, which is
    deliberately swallowed so the remaining statements still run.
    """
    try:
        # Boolean flag on EntityTypes; presumably whether references are
        # accepted for the type — confirm against the model definition.
        op.add_column('EntityTypes', sa.Column('accepts_references', sa.Boolean))
    except (sa.exc.OperationalError, sa.exc.ProgrammingError):
        pass
    try:
        # Up-to-256-char original filename stored per Link row.
        op.add_column('Links', sa.Column('original_filename', sa.String(256)))
    except (sa.exc.OperationalError, sa.exc.ProgrammingError, sa.exc.InternalError):
        pass
def test_hicConvertFormat_h5_to_ginteractions():
    """Run hicConvertFormat end-to-end: reference h5 matrix -> ginteractions."""
    # Reserve an output path on disk; only the file name is needed, so the
    # handle is closed immediately (delete=False keeps the path valid).
    target = NamedTemporaryFile(suffix='.ginteractions', delete=False)
    target.close()
    arg_template = '--matrices {} --outFileName {} --inputFormat h5 --outputFormat ginteractions '
    cli_args = arg_template.format(original_matrix_h5, target.name).split()
    compute(hicConvertFormat.main, cli_args, 5)
class MrtPeer(stringify.StringifyMixin):
    """One peer entry of an MRT PEER_INDEX_TABLE record (RFC 6396 s4.3.1).

    Wire layout: a 1-byte peer type, a 4-byte BGP ID, then an IPv4 or IPv6
    peer address and a 2- or 4-byte AS number, selected by the type bits.
    """
    _HEADER_FMT = '!B4s'  # peer type octet + 4-byte BGP ID
    HEADER_SIZE = struct.calcsize(_HEADER_FMT)
    # Bits of the peer-type octet.
    IP_ADDR_FAMILY_BIT = (1 << 0)  # set -> IPv6 address, clear -> IPv4
    AS_NUMBER_SIZE_BIT = (1 << 1)  # set -> 4-byte AS number, clear -> 2-byte
    _TYPE = {'ascii': ['bgp_id', 'ip_addr']}

    def __init__(self, bgp_id, ip_addr, as_num, type_=0):
        self.type = type_
        self.bgp_id = bgp_id
        self.ip_addr = ip_addr
        self.as_num = as_num

    @classmethod  # fix: takes `cls` and calls `cls(...)` — the decorator was missing
    def parse(cls, buf):
        """Parse one peer entry from *buf*; return (MrtPeer, remaining bytes)."""
        (type_, bgp_id) = struct.unpack_from(cls._HEADER_FMT, buf)
        bgp_id = addrconv.ipv4.bin_to_text(bgp_id)
        offset = cls.HEADER_SIZE
        # Address length is dictated by the family bit in the type octet.
        if (type_ & cls.IP_ADDR_FAMILY_BIT):
            ip_addr_len = 16
        else:
            ip_addr_len = 4
        ip_addr = ip.bin_to_text(buf[offset:(offset + ip_addr_len)])
        offset += ip_addr_len
        if (type_ & cls.AS_NUMBER_SIZE_BIT):
            (as_num,) = struct.unpack_from('!I', buf, offset)
            offset += 4
        else:
            (as_num,) = struct.unpack_from('!H', buf, offset)
            offset += 2
        return (cls(bgp_id, ip_addr, as_num, type_), buf[offset:])

    def serialize(self):
        """Serialize to bytes, updating self.type bits to match the data."""
        if ip.valid_ipv6(self.ip_addr):
            self.type |= self.IP_ADDR_FAMILY_BIT
        ip_addr = ip.text_to_bin(self.ip_addr)
        # Promote to a 4-byte AS number when already flagged, or when the
        # value cannot fit into 16 bits.
        if ((self.type & self.AS_NUMBER_SIZE_BIT) or (self.as_num > 65535)):
            self.type |= self.AS_NUMBER_SIZE_BIT
            as_num = struct.pack('!I', self.as_num)
        else:
            as_num = struct.pack('!H', self.as_num)
        buf = struct.pack(self._HEADER_FMT, self.type, addrconv.ipv4.text_to_bin(self.bgp_id))
        return ((buf + ip_addr) + as_num)
class BaseDiagnostics():
    """Common diagnostics (summary statistics and plots) over MonteCarloSamples.

    Statistic functions are registered via `summaryfn` and plot functions via
    `plotfn`; `summary` / `plot` then apply every registered function to the
    queried random variables.
    """

    def __init__(self, samples: MonteCarloSamples):
        self.samples = samples
        # name -> (func, display_names) of registered summary statistics
        self.statistics_dict = {}
        # name -> (func, display_name) of registered plot functions
        self.plots_dict = {}

    def _prepare_query_list(self, query_list: Optional[List[RVIdentifier]]=None) -> List[RVIdentifier]:
        """Default to every sampled RV; otherwise validate each query exists."""
        if (query_list is None):
            return list(self.samples.keys())
        for query in query_list:
            if (not (query in self.samples)):
                raise ValueError(f'query {self._stringify_query(query)} does not exist')
        return query_list

    def summaryfn(self, func: Callable, display_names: List[str]) -> Callable:
        """Register *func* as a summary statistic; return a standalone callable for it."""
        statistics_name = func.__name__
        self.statistics_dict[statistics_name] = (func, display_names)
        return self._standalone_summary_stat_function(statistics_name, func)

    def _prepare_summary_stat_input(self, query: RVIdentifier, chain: Optional[int]=None):
        """Return samples for *query*, optionally restricted to one chain."""
        query_samples = self.samples[query]
        if (query_samples.shape[0] != 1):
            # Multiple chains: drop singleton dimensions before use.
            query_samples = query_samples.squeeze()
        if (chain is not None):
            # Keep a leading chain dimension of size 1.
            query_samples = query_samples[chain].unsqueeze(0)
        return query_samples

    def _create_table(self, query: RVIdentifier, results: List[Tensor], func_list: List[str]) -> pd.DataFrame:
        """Build a DataFrame: one row per scalar element of the RV, one column per statistic."""
        out_pd = pd.DataFrame()
        if (len(results) > 0):
            single_result_set = results[0]
            if ((single_result_set is not None) and (len(single_result_set) > 0)):
                # Iterate every scalar element of the (possibly multi-dim) RV.
                for flattened_index in range(single_result_set[0].numel()):
                    index = np.unravel_index(flattened_index, tuple(single_result_set[0].size()))
                    row_data = []
                    rowname = f'{self._stringify_query(query)}{list(index)}'
                    for result in results:
                        num_of_sets = result.size()[0]
                        for set_num in range(num_of_sets):
                            row_data.append(result[set_num][index].item())
                    cur = pd.DataFrame([row_data], columns=func_list, index=[rowname])
                    if out_pd.empty:
                        out_pd = cur
                    else:
                        out_pd = pd.concat([out_pd, cur])
        return out_pd

    def _stringify_query(self, query: RVIdentifier) -> str:
        """Human-readable name for an RV: function name plus its arguments."""
        return f'{query.function.__name__}{query.arguments}'

    def _execute_summary_stat_funcs(self, query: RVIdentifier, func_dict: Dict[(str, Tuple[(Callable, str)])], chain: Optional[int]=None, raise_warning: bool=False):
        """Apply every statistic in *func_dict* to *query* and tabulate the results."""
        frames = pd.DataFrame()
        query_results = []
        func_list = []
        queried_samples = self._prepare_summary_stat_input(query, chain)
        for (_k, (func, display_names)) in func_dict.items():
            result = func(queried_samples)
            if (result is None):
                # A statistic may decline (return None) for incompatible samples.
                if raise_warning:
                    warnings.warn(f'{display_names} cannot be calculated for the provided samples')
                continue
            if (len(display_names) <= 1):
                # Single-output statistic: add a leading "set" dimension.
                result = result.unsqueeze(0)
            query_results.append(result)
            func_list.extend(display_names)
        out_df = self._create_table(query, query_results, func_list)
        if frames.empty:
            frames = out_df
        else:
            frames = pd.concat([frames, out_df])
        return frames

    def summary(self, query_list: Optional[List[RVIdentifier]]=None, chain: Optional[int]=None) -> pd.DataFrame:
        """Summary-statistics table for the given RVs (default: all)."""
        frames = pd.DataFrame()
        query_list = self._prepare_query_list(query_list)
        for query in query_list:
            out_df = self._execute_summary_stat_funcs(query, self.statistics_dict, chain)
            frames = pd.concat([frames, out_df])
        frames.sort_index(inplace=True)
        return frames

    def _prepare_plots_input(self, query: RVIdentifier, chain: Optional[int]=None) -> Tensor:
        """Return samples for plotting, optionally restricted to one chain."""
        query_samples = self.samples[query]
        if (chain is not None):
            return query_samples[chain].unsqueeze(0)
        return query_samples

    def plotfn(self, func: Callable, display_name: str) -> Callable:
        """Register *func* as a plot function; return a standalone callable for it."""
        self.plots_dict[func.__name__] = (func, display_name)
        return self._standalone_plot_function(func.__name__, func)

    def _execute_plot_funcs(self, query: RVIdentifier, func_dict: Dict[(str, Tuple[(Callable, str)])], chain: Optional[int]=None, display: Optional[bool]=False):
        """Run every plot function in *func_dict* on *query*; return the figures."""
        figs = []
        queried_samples = self._prepare_plots_input(query, chain)
        for (_k, (func, display_name)) in func_dict.items():
            (trace, labels) = common_plots.plot_helper(queried_samples, func)
            title = f'{self._stringify_query(query)} {display_name}'
            fig = self._display_results(trace, [(title + label) for label in labels], display)
            figs.append(fig)
        return figs

    def plot(self, query_list: Optional[List[RVIdentifier]]=None, display: Optional[bool]=False, chain: Optional[int]=None):
        """Plots for the given RVs (default: all); optionally render inline."""
        figs = []
        query_list = self._prepare_query_list(query_list)
        for query in query_list:
            fig = self._execute_plot_funcs(query, self.plots_dict, chain, display)
            figs.extend(fig)
        return figs

    def _display_results(self, traces, labels: List[str], display: bool):
        """Lay traces out on a 2-column subplot grid; optionally show inline."""
        fig = make_subplots(rows=math.ceil((len(traces) / 2)), cols=2, subplot_titles=tuple(labels))
        r = 1
        for trace in traces:
            for data in trace:
                # Row/column from the running 1-based counter r.
                fig.add_trace(data, row=math.ceil((r / 2)), col=(((r - 1) % 2) + 1))
            r += 1
        if display:
            plotly.offline.iplot(fig)
        return fig

    def _standalone_plot_function(self, func_name: str, func: Callable) -> Callable:
        """Return a standalone plotting callable bound to one registered function."""
        import functools

        # NOTE(review): the original had a bare no-op `(func)` expression here —
        # almost certainly a stripped `@functools.wraps(func)` decorator; restored.
        @functools.wraps(func)
        def _wrapper(query_list: List[RVIdentifier], chain: Optional[int]=None, display: Optional[bool]=False):
            figs = []
            query_list = self._prepare_query_list(query_list)
            for query in query_list:
                fig = self._execute_plot_funcs(query, {func_name: self.plots_dict[func_name]}, chain, display)
                figs.extend(fig)
            return figs
        return _wrapper

    def _standalone_summary_stat_function(self, func_name: str, func: Callable) -> Callable:
        """Return a standalone summary-statistic callable bound to one registered function."""
        import functools

        # NOTE(review): same stripped `@functools.wraps(func)` as above; restored.
        @functools.wraps(func)
        def _wrapper(query_list: List[RVIdentifier], chain: Optional[int]=None):
            frames = pd.DataFrame()
            query_list = self._prepare_query_list(query_list)
            for query in query_list:
                out_df = self._execute_summary_stat_funcs(query, {func_name: self.statistics_dict[func_name]}, chain, True)
                frames = pd.concat([frames, out_df])
            return frames
        return _wrapper
def run_file(file_path):
    """Execute a demo script as if it were __main__, producing a screenshot.

    The screenshot is named after the demo file ("<name>.png"); the
    replace_configure_traits context manager redirects the GUI call, and
    sys.argv is patched so the demo sees its own path as argv[0].
    """
    demo_name = os.path.basename(file_path)
    screenshot_name = (demo_name.split('.')[0] + '.png')
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()
    # Renamed from `globals`, which shadowed the builtin of the same name.
    exec_globals = {'__name__': '__main__', '__file__': file_path}
    with replace_configure_traits(screenshot_name), mock.patch('sys.argv', [file_path]):
        # exec of arbitrary file content is intentional (demo runner);
        # only trusted demo files should be passed here.
        exec(content, exec_globals)
class PocketTestCase(unittest.TestCase):
    """Tests for the Alfred Pocket workflow entry point (`pocket.main`).

    setUp replaces Workflow's cache/password accessors with in-memory dicts
    (CachedData / Passwords) so no Alfred environment is required.

    Changes from the original:
    - `assertEquals` (removed in Python 3.12) replaced by `assertEqual`;
    - the repetitive run-main boilerplate extracted into `_run_main`;
    - the truncated URL literal in test_get_auth_url restored (see note there).
    """

    def setUp(self):
        # NOTE(review): this rebinds only the *local* name `pocket`; restoring
        # the module binding would need `global pocket` — confirm intent.
        pocket = pocket_backup
        CachedData.clear()
        Passwords.clear()
        pocket.Workflow.alfred_env = {'theme_background': 'rgba(40,40,40,0.1)'}
        logging.disable(logging.CRITICAL)

        # In-memory stand-ins for Alfred's cache/keychain helpers.
        def cached_data(self, key, max_age=None):
            return CachedData.get(key)
        pocket.Workflow.cached_data = cached_data

        def cache_data(self, key, data):
            CachedData[key] = data
        pocket.Workflow.cache_data = cache_data

        def get_password(self, key):
            return Passwords.get(key)
        pocket.Workflow.get_password = get_password

        def delete_password(self, key):
            if (key in Passwords):
                del Passwords[key]
        pocket.Workflow.delete_password = delete_password

        def refresh_list():
            pass
        pocket.refresh_list = refresh_list

    def _run_main(self, query, reset_items=True):
        """Invoke pocket.main with *query* as the user input, feedback suppressed.

        reset_items=False reproduces the one original test (test_main_error)
        that did not clear WF._items before running.
        """
        sys.argv = ['pocket.py', query]

        def send_feedback():
            pass
        pocket.WF.send_feedback = send_feedback
        if reset_items:
            pocket.WF._items = []
        pocket.main(None)

    def test_main_initial(self):
        pocket.WF._version.major = 'x'
        CachedData['__workflow_update_status'] = {'available': True}
        CachedData['pocket_tags'] = ['tag1']
        self._run_main('')
        self.assertEqual(len(pocket.WF._items), len(pocket.CATEGORIES))

    def test_main_search_all(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        self._run_main('e.com')
        self.assertEqual(len(pocket.WF._items), 2)

    def test_main_mylist(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        self._run_main('in:mylist ')
        self.assertEqual(len(pocket.WF._items), 3)

    def test_main_archive(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        self._run_main('in:archive ')
        # NOTE(review): assertTrue's second argument is a failure *message*,
        # so this only checks the list is non-empty; the author likely meant
        # assertEqual(len(...), 1). Left as-is to avoid changing what the
        # suite verifies. Same pattern recurs below.
        self.assertTrue(len(pocket.WF._items), 1)
        self.assertTrue(('archive.com' in pocket.WF._items[0].subtitle))

    def test_main_favorite(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        self._run_main('in:favorites ')
        self.assertTrue(len(pocket.WF._items), 1)  # see NOTE in test_main_archive
        self.assertTrue(('fniephaus.com' in pocket.WF._items[0].subtitle))

    def test_main_articles(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        self._run_main('in:articles ')
        self.assertTrue(len(pocket.WF._items), 1)  # see NOTE in test_main_archive
        self.assertTrue(('google.com' in pocket.WF._items[0].subtitle))

    def test_main_videos(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        self._run_main('in:videos ')
        self.assertTrue(len(pocket.WF._items), 1)  # see NOTE in test_main_archive
        self.assertTrue(('archive.com' in pocket.WF._items[0].subtitle))

    def test_main_images(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        self._run_main('in:images ')
        self.assertTrue(len(pocket.WF._items), 1)  # see NOTE in test_main_archive
        self.assertTrue(('github.com' in pocket.WF._items[0].subtitle))

    def test_main_mytags(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        CachedData['pocket_tags'] = ['tag1', 'tag2', 'tag3', 'tag4']
        self._run_main('in:mytags ')
        self.assertEqual(len(pocket.WF._items), 4)
        self.assertTrue(('tag1' in pocket.WF._items[0].title))

    def test_main_mytags_search(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        CachedData['pocket_tags'] = ['interesting', 'tag2', 'tag3', 'tag4']
        self._run_main('in:mytags interes')
        self.assertEqual(len(pocket.WF._items), 1)
        self.assertTrue(('interesting' in pocket.WF._items[0].title))

    def test_main_random(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        self._run_main('in:random ')
        self.assertEqual(len(pocket.WF._items), 3)

    def test_main_single_tag(self):
        CachedData['__workflow_update_status'] = {'available': False}
        CachedData['pocket_list'] = test_data.get_normal()
        CachedData['pocket_tags'] = ['mytag']
        self._run_main('in:mytags #mytag')
        self.assertEqual(len(pocket.WF._items), 1)
        self.assertTrue(('google.com' in pocket.WF._items[0].subtitle))

    def test_main_error(self):
        CachedData['__workflow_update_status'] = {'available': True}
        CachedData['pocket_error'] = 'AuthException'
        # The original did not reset WF._items here; main() must simply not raise.
        self._run_main('', reset_items=False)

    def test_main_empty(self):
        CachedData['__workflow_update_status'] = {'available': False}
        self._run_main('in:mylist ')
        self.assertEqual(len(pocket.WF._items), 1)
        self.assertTrue(('Pocket list is empty' in pocket.WF._items[0].title))

    def test_register_magic_arguments(self):
        pocket.WF = pocket.Workflow()
        self.assertTrue(('deauth' not in pocket.WF.magic_arguments))
        pocket.register_magic_arguments()
        pocket.WF.magic_arguments['deauth']()
        self.assertTrue(('deauth' in pocket.WF.magic_arguments))
        self.assertEqual(pocket.WF.magic_prefix, 'wf:')

    def test_get_links(self):
        # First call misses the cache, primes it, and returns None; the retry
        # inside get_links then reads the primed value.
        def cached_data(self, key, max_age=None):
            if ('pocket_list' not in CachedData):
                CachedData['pocket_list'] = 12345
                return None
            return CachedData.get(key)
        pocket.Workflow.cached_data = cached_data
        self.assertEqual(pocket.get_links(), 12345)
        CachedData.clear()
        self.assertEqual(pocket.get_links(tries=0), {})

    def test_filter_and_add_items(self):
        self.assertEqual(len(pocket.WF._items), 0)
        pocket.filter_and_add_items(links={}, user_input='')
        self.assertEqual(len(pocket.WF._items), 1)
        self.assertEqual(pocket.WF._items[0].title, 'No links found for "".')
        pocket.WF._items = []
        pocket.filter_and_add_items(links=[{'item_id': '1', 'given_title': 'test', 'given_url': 'url', 'time_added': '10'}], user_input='')
        self.assertEqual(len(pocket.WF._items), 1)
        self.assertEqual(pocket.WF._items[0].title, 'test')
        pocket.WF._items = []
        pocket.filter_and_add_items(links=[{'item_id': '1', 'given_title': 'test', 'resolved_title': 'test', 'given_url': 'url', 'time_added': '10', 'tags': {'alfred': {'item_id': '4444', 'tag': 'alfred'}}}], user_input='notfound')
        self.assertEqual(len(pocket.WF._items), 1)
        self.assertEqual(pocket.WF._items[0].title, 'No links found for "notfound".')

    def test_get_auth_url(self):
        # NOTE(review): the expected URL literal was truncated in this file
        # (`expected_start = '` with no closing quote). Restored to the Pocket
        # OAuth authorize endpoint — confirm against the original repository.
        expected_start = 'https://getpocket.com/auth/authorize'
        self.assertTrue(pocket.get_auth_url().startswith(expected_start))
def generate_feature(b, bin_dir, debug_dir, bap_dir):
    """Extract features for binary *b* found under *bin_dir* / *debug_dir*.

    A Config is populated with the binary, its debug-info companion, and
    (optionally) a BAP file; the Binary object then produces the features.
    Returns ([], [], [], []) on any failure (best-effort batch processing).
    """
    try:
        config = Config()
        config.BINARY_NAME = b
        config.BINARY_PATH = os.path.join(bin_dir, b)
        config.DEBUG_INFO_PATH = os.path.join(debug_dir, b)
        if (bap_dir != ''):
            config.BAP_FILE_PATH = os.path.join(bap_dir, b)
        with open(config.BINARY_PATH, 'rb') as elffile, open(config.DEBUG_INFO_PATH, 'rb') as debug_elffile:
            binary = Binary(config, elffile, debug_elffile)
            return binary.get_features()
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; any real failure yields empty features.
        return ([], [], [], [])
# NOTE(review): the bare `(HelpEntry)` below looks like the argument of a
# stripped `@admin.register(HelpEntry)` decorator — confirm against the
# original source before relying on this admin class being registered.
(HelpEntry)
class HelpEntryAdmin(admin.ModelAdmin):
    """Django admin configuration for HelpEntry objects."""
    inlines = [HelpTagInline]
    # Columns shown in the change list; id and db_key link to the edit page.
    list_display = ('id', 'db_key', 'db_help_category', 'db_lock_storage', 'db_date_created')
    list_display_links = ('id', 'db_key')
    # `^db_key` anchors the search at the start of the key.
    search_fields = ['^db_key', 'db_entrytext']
    ordering = ['db_help_category', 'db_key']
    list_filter = ['db_help_category']
    save_as = True
    save_on_top = True
    list_select_related = True
    view_on_site = False
    form = HelpEntryForm
    fieldsets = ((None, {'fields': (('db_key', 'db_help_category'), 'db_entrytext', 'db_lock_storage')}),)
def fast_urandom16(urandom=[], locker=threading.RLock()):
    """Return 16 random bytes, amortizing os.urandom calls via a shared pool.

    The mutable default arguments are intentional module-lifetime state: the
    list caches pre-cut 16-byte slices and the RLock guards pool refills.
    Each refill draws 16KB in one syscall and hands out 1024 slices.
    """
    try:
        return urandom.pop()
    except IndexError:
        with locker:
            block = os.urandom(16 * 1024)
            # Cache every 16-byte slice except the first, which is returned now.
            urandom += [block[start:(start + 16)] for start in range(16, (1024 * 16), 16)]
            return block[0:16]
def getHumans():
    """Prompt until the user picks a valid game mode; set the global `humans`.

    TYPES maps a menu letter to a player configuration; DEFAULT is presumably
    not a valid TYPES key, so the menu is shown at least once — confirm.
    """
    global humans
    answer = DEFAULT
    while (answer not in TYPES):
        print("a: 'X' human, 'O' computer")
        print("b: 'O' computer, 'X' human")
        print("c: 'X' human, 'O' human")
        print("d: 'X' computer, 'O' computer")
        # Lower-case the reply so 'A' and 'a' both match.
        answer = input("Please choose ('X' begins): ").lower()
    humans = TYPES[answer]
def make_arg_parser():
    """Build the argparse parser for the `python -m docxtpl` command line."""
    usage_text = 'python -m docxtpl [-h] [-o] [-q] {} {} {}'.format(TEMPLATE_ARG, JSON_ARG, OUTPUT_ARG)
    parser = argparse.ArgumentParser(
        usage=usage_text,
        description='Make docx file from existing template docx and json data.',
    )
    # Required positional arguments, in CLI order.
    positionals = (
        (TEMPLATE_ARG, 'The path to the template docx file.'),
        (JSON_ARG, 'The path to the json file with the data.'),
        (OUTPUT_ARG, 'The filename to save the generated docx.'),
    )
    for arg_name, help_text in positionals:
        parser.add_argument(arg_name, type=str, help=help_text)
    # Optional boolean flags; short forms are the first letter of the long name.
    parser.add_argument(
        ('-' + OVERWRITE_ARG[0]), ('--' + OVERWRITE_ARG), action='store_true',
        help='If output file already exists, overwrites without asking for confirmation',
    )
    parser.add_argument(
        ('-' + QUIET_ARG[0]), ('--' + QUIET_ARG), action='store_true',
        help='Do not display unnecessary messages',
    )
    return parser
def test_returning_a_pathlib_path(local_dummy_directory):
    """A function annotated to return FlyteDirectory may return a pathlib.Path;
    the result arrives as a FlyteDirectory whose contents are available locally.

    NOTE(review): decorators appear stripped in this file — t1/wf1 were
    presumably decorated with @task / @workflow; confirm against the original.
    """
    def t1() -> FlyteDirectory:
        return pathlib.Path(local_dummy_directory)
    def wf1() -> FlyteDirectory:
        return t1()
    wf_out = wf1()
    assert isinstance(wf_out, FlyteDirectory)
    # Listing the directory accesses it; afterwards it is marked downloaded.
    os.listdir(wf_out)
    assert wf_out._downloaded
    with open(os.path.join(wf_out.path, 'file'), 'r') as fh:
        assert (fh.read() == 'Hello world')
    # After removing the local copy, a second download() presumably does not
    # re-fetch (already marked downloaded), so the path stays absent — confirm.
    shutil.rmtree(wf_out)
    wf_out.download()
    assert (not os.path.exists(wf_out.path))
class SectorItem(scrapy.Item):
    """Scrapy item holding one scraped sector record."""
    # Sector identity and metadata.
    id = scrapy.Field()
    start_date = scrapy.Field()
    name = scrapy.Field()
    link = scrapy.Field()
    type = scrapy.Field()
    producer = scrapy.Field()
    # Associated news headline and its URL.
    news_title = scrapy.Field()
    news_link = scrapy.Field()
    # Aggregate fields (semantics defined by the spider — confirm).
    count = scrapy.Field()
    leadings = scrapy.Field()
def test_transaction_span_stack_trace_min_duration_overrides_old_config(elasticapm_client):
    """span_stack_trace_min_duration must take precedence over the deprecated
    span_frames_min_duration setting when both are configured."""
    # New option set high, old option low: with the new option winning, the
    # short span must NOT collect a stacktrace while the long span does.
    # (durations are presumably seconds vs. millisecond thresholds — confirm)
    elasticapm_client.config.update(version='1', span_stack_trace_min_duration=20, span_frames_min_duration=1)
    elasticapm_client.begin_transaction('test_type')
    with elasticapm.capture_span('noframes', duration=0.01):
        pass
    with elasticapm.capture_span('frames', duration=0.04):
        pass
    elasticapm_client.end_transaction('test')
    spans = elasticapm_client.events[constants.SPAN]
    assert (len(spans) == 2)
    assert (spans[0]['name'] == 'noframes')
    assert ('stacktrace' not in spans[0])
    assert (spans[1]['name'] == 'frames')
    assert (spans[1]['stacktrace'] is not None)
    # Lower the new option: now the short span collects a stacktrace as well.
    elasticapm_client.config.update(version='1', span_stack_trace_min_duration=5, span_frames_min_duration=1)
    elasticapm_client.begin_transaction('test_type')
    with elasticapm.capture_span('yesframes', duration=0.01):
        pass
    with elasticapm.capture_span('frames', duration=0.04):
        pass
    elasticapm_client.end_transaction('test')
    spans = elasticapm_client.events[constants.SPAN]
    # The events list accumulates across transactions: 2 earlier + 2 new.
    assert (len(spans) == 4)
    assert (spans[2]['name'] == 'yesframes')
    assert (spans[2]['stacktrace'] is not None)
    assert (spans[3]['name'] == 'frames')
    assert (spans[3]['stacktrace'] is not None)
class OptionSeriesVariablepieDragdropDraghandle(Options):
    """Options mapped to Highcharts `series.variablepie.dragDrop.dragHandle`.

    NOTE(review): the original file defined every getter/setter pair as two
    undecorated `def`s with the same name, so each second definition silently
    overwrote the first and the getters were dead code. The pairs are restored
    here as @property / @<name>.setter, which is the evident intent — confirm
    against the code generator's template.
    """

    @property
    def className(self):
        # Default CSS class applied to the drag handle.
        return self._config_get('highcharts-drag-handle')

    @className.setter
    def className(self, text: str):
        self._config(text, js_type=False)

    @property
    def color(self):
        # Default fill color of the handle.
        return self._config_get('#fff')

    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineColor(self):
        # Default border color of the handle.
        return self._config_get('rgba(0, 0, 0, 0.6)')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        # Default border width in pixels.
        return self._config_get(1)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def zIndex(self):
        # Default stacking order of the handle.
        return self._config_get(901)

    @zIndex.setter
    def zIndex(self, num: float):
        self._config(num, js_type=False)
class PrometheusPodsMetrics(PrometheusAPI):
def __init__(self):
super().__init__()
def podExists(self, pod, namespace='default'):
output = {'success': False, 'fail_reason': '', 'result': False}
try:
query = f'sum(container_last_seen{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}"}}) by (pod, instance, namespace)'
result = self.run_query(query)
if (not (result.get('status') == 'success')):
output['fail_reason'] = f'''could not get metric's value:
{query}'''
Logging.log.error(f"could not get metric's value: {query}")
return output
if (not result.get('data').get('result')):
output['fail_reason'] = f'''Query did not return any data:
{query}'''
Logging.log.error(f'Query did not return any data: {query}')
return output
output['result'] = True
output['success'] = True
except Exception as e:
output['success']: False
output['fail_reason'] = e
Logging.log.error(e)
Logging.log.exception(traceback.format_stack())
return output
def podMetrics(self, pod, node='.*', container='.*', namespace='default'):
output = {}
output['cpu'] = {'cpuLoadAvg10s': self.podCpuLoadAvg_10s(pod, node, container, namespace), 'cpuUsageAVG10mMilicores': self.podCpuUsageAvg_10m(pod, node, container, namespace), 'cpuUsageSystemAVG10mMilicores': self.podCpuUsageSystemAvg_10m(pod, node, container, namespace), 'cpuUsageUserAVG10mMilicores': self.podCpuUsageUserAvg_10m(pod, node, container, namespace), 'cpuQuotaMilicores': self.podCpuLimit(pod, node, container, namespace)}
output['memory'] = {'MemLimitBytes': self.podMemLimit(pod, node, container, namespace), 'MemCachedBytes': self.podMemCache(pod, node, container, namespace), 'MemUsageBytes': self.podMemUsage(pod, node, container, namespace), 'MemUsageMaxBytes': self.podMemUsageMax(pod, node, container, namespace)}
return output
def podMemUsage(self, pod='.*', node='.*', container='.*', namespace='default'):
output = {'success': False, 'fail_reason': '', 'result': ''}
try:
query = f'sum(container_memory_working_set_bytes{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}) by (pod, instance, namespace)'
result = self.run_query(query)
if (not (result.get('status') == 'success')):
output['fail_reason'] = f"could not get metric's value: {query}"
Logging.log.error(f"could not get metric's value: {query}")
return output
if (not result.get('data').get('result')):
output['fail_reason'] = f'Query did not return any data: {query}'
Logging.log.error(f'Query did not return any data: {query}')
return output
output['result'] = int(result.get('data').get('result')[0].get('value')[1])
output['success'] = True
except Exception as e:
output['success']: False
output['fail_reason'] = e
Logging.log.error(e)
Logging.log.exception(traceback.format_stack())
return output
def podMemUsagePerContainers(self, pod='.*', node='.*', container='.*', namespace='default'):
output = {'success': False, 'fail_reason': '', 'result': {}}
try:
query = f'sum(container_memory_working_set_bytes{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}) by (pod, instance, namespace, container)'
result = self.run_query(query)
if (not (result.get('status') == 'success')):
output['fail_reason'] = f"could not get metric's value: {query}"
Logging.log.error(f"could not get metric's value: {query}")
return output
if (not result.get('data').get('result')):
output['fail_reason'] = f'Query did not return any data: {query}'
Logging.log.error(f'Query did not return any data: {query}')
return output
for container in result.get('data').get('result'):
output['result'][container.get('metric').get('container')] = float(container.get('value')[1])
output['success'] = True
except Exception as e:
output['success']: False
output['fail_reason'] = e
Logging.log.error(e)
Logging.log.exception(traceback.format_stack())
return output
def podMemUsagePerContainers_range(self, pod='.*', node='.*', container='.*', namespace='default', range_='3h'):
output = {'success': False, 'fail_reason': '', 'result': []}
try:
query = f'container_memory_working_set_bytes{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}[{range_}]'
result = self.run_query(query)
if (not (result.get('status') == 'success')):
output['fail_reason'] = f"could not get metric's value: {query}"
Logging.log.error(f"could not get metric's value: {query}")
return output
if (not result.get('data').get('result')):
output['fail_reason'] = f'Query did not return any data: {query}'
Logging.log.error(f'Query did not return any data: {query}')
return output
timestamp_value = result.get('data').get('result')[0].get('values')
for i in timestamp_value:
output['result'].append(round(helper_.bytes_to_mb(float(i[1]))))
output['success'] = True
except Exception as e:
output['success']: False
output['fail_reason'] = e
Logging.log.error(e)
Logging.log.exception(traceback.format_stack())
return output
def podMemUsageMax(self, pod='.*', node='.*', container='.*', namespace='default'):
output = {'success': False, 'fail_reason': '', 'result': ''}
try:
query = f'sum(container_memory_max_usage_bytes{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}) by (pod, instance, namespace)'
result = self.run_query(query)
if (not (result.get('status') == 'success')):
output['fail_reason'] = f"could not get metric's value: {query}"
Logging.log.error(f"could not get metric's value: {query}")
return output
if (not result.get('data').get('result')):
output['fail_reason'] = f'Query did not return any data: {query}'
Logging.log.error(f'Query did not return any data: {query}')
return output
output['result'] = int(result.get('data').get('result')[0].get('value')[1])
output['success'] = True
except Exception as e:
output['success']: False
output['fail_reason'] = e
Logging.log.error(e)
Logging.log.exception(traceback.format_stack())
return output
def podMemLimit(self, pod='.*', node='.*', container='.*', namespace='default'):
output = {'success': False, 'fail_reason': '', 'result': ''}
try:
query = f'sum(container_spec_memory_limit_bytes{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}) by (pod, instance, namespace)'
result = self.run_query(query)
if (not (result.get('status') == 'success')):
output['fail_reason'] = f"could not get metric's value: {query}"
Logging.log.error(f"could not get metric's value: {query}")
return output
if (not result.get('data').get('result')):
output['fail_reason'] = f'Query did not return any data: {query}'
Logging.log.error(f'Query did not return any data: {query}')
return output
output['result'] = int(result.get('data').get('result')[0].get('value')[1])
output['success'] = True
except Exception as e:
output['success']: False
output['fail_reason'] = e
Logging.log.error(e)
Logging.log.exception(traceback.format_stack())
return output
def podMemCache(self, pod='.*', node='.*', container='.*', namespace='default'):
output = {'success': False, 'fail_reason': '', 'result': ''}
try:
query = f'sum(container_memory_cache{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}) by (pod, instance, namespace)'
result = self.run_query(query)
if (not (result.get('status') == 'success')):
output['fail_reason'] = f"could not get metric's value: {query}"
Logging.log.error(f"could not get metric's value: {query}")
return output
if (not result.get('data').get('result')):
output['fail_reason'] = f'Query did not return any data: {query}'
Logging.log.error(f'Query did not return any data: {query}')
return output
output['result'] = int(result.get('data').get('result')[0].get('value')[1])
output['success'] = True
except Exception as e:
output['success']: False
output['fail_reason'] = e
Logging.log.error(e)
Logging.log.exception(traceback.format_stack())
return output
def podSwapLimit(self, pod='.*', node='.*', container='.*', namespace='default'):
output = {'success': False, 'fail_reason': '', 'result': ''}
try:
query = f'sum(container_spec_memory_swap_limit_bytes{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}) by (pod, instance, namespace)'
result = self.run_query(query)
if (not (result.get('status') == 'success')):
output['fail_reason'] = f"could not get metric's value: {query}"
Logging.log.error(f"could not get metric's value: {query}")
return output
if (not result.get('data').get('result')):
output['fail_reason'] = f'Query did not return any data: {query}'
Logging.log.error(f'Query did not return any data: {query}')
return output
output['result'] = int(result.get('data').get('result')[0].get('value')[1])
output['success'] = True
except Exception as e:
output['success']: False
output['fail_reason'] = e
Logging.log.error(e)
Logging.log.exception(traceback.format_stack())
return output
def podCpuLoadAvg_10s(self, pod='.*', node='.*', container='.*', namespace='default'):
    """Return the summed 10s CPU load average of the matched containers.

    All arguments are regex fragments interpolated into the PromQL selector.
    Returns {'success': bool, 'fail_reason': str | Exception, 'result': int | ''}.
    """
    output = {'success': False, 'fail_reason': '', 'result': ''}
    try:
        query = f'sum(container_cpu_load_average_10s{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}) by (pod, instance, namespace)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        # Only the first series' instantaneous value is reported.
        output['result'] = int(result.get('data').get('result')[0].get('value')[1])
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podCpuUsageAvg_10m(self, pod='.*', node='.*', container='.*', namespace='default', avg='10m'):
    """Return the summed CPU usage rate (irate over `avg`) of the matched containers.

    The value is rounded up with math.ceil. Returns
    {'success': bool, 'fail_reason': str | Exception, 'result': int | ''}.
    """
    output = {'success': False, 'fail_reason': '', 'result': ''}
    try:
        query = f'sum(irate(container_cpu_usage_seconds_total{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}[{avg}])) by (pod, instance, namespace)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        output['result'] = math.ceil(float(result.get('data').get('result')[0].get('value')[1]))
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podCpuUsageSystemAvg_10m(self, pod='.*', node='.*', container='.*', namespace='default', avg='10m'):
    """Return the summed system-mode CPU usage rate (irate over `avg`) of the matched containers.

    The value is rounded up with math.ceil. Returns
    {'success': bool, 'fail_reason': str | Exception, 'result': int | ''}.
    """
    output = {'success': False, 'fail_reason': '', 'result': ''}
    try:
        query = f'sum(irate(container_cpu_system_seconds_total{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}[{avg}])) by (pod, instance, namespace)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        output['result'] = math.ceil(float(result.get('data').get('result')[0].get('value')[1]))
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podCpuUsageUserAvg_10m(self, pod='.*', node='.*', container='.*', namespace='default', avg='10m'):
    """Return the summed user-mode CPU usage rate (irate over `avg`) of the matched containers.

    The value is rounded up with math.ceil. Returns
    {'success': bool, 'fail_reason': str | Exception, 'result': int | ''}.
    """
    output = {'success': False, 'fail_reason': '', 'result': ''}
    try:
        query = f'sum(irate(container_cpu_user_seconds_total{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}[{avg}])) by (pod, instance, namespace)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        output['result'] = math.ceil(float(result.get('data').get('result')[0].get('value')[1]))
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podCpuLimit(self, pod='.*', node='.*', container='.*', namespace='default'):
    """Return the summed CPU quota of the matched containers.

    Returns {'success': bool, 'fail_reason': str | Exception, 'result': int | ''}.
    """
    output = {'success': False, 'fail_reason': '', 'result': ''}
    try:
        query = f'sum(container_spec_cpu_quota{{image!="", container!="", container!="POD", namespace=~"{namespace}", pod=~"{pod}", container=~"{container}", {GlobalAttrs.kubernetes_exporter_node_label}=~"{node}"}}) by (pod, instance, namespace)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        result = int(result.get('data').get('result')[0].get('value')[1])
        if result > 0:
            # Two successive integer divisions by 10 (i.e. // 100): presumably converts
            # the CFS quota (microseconds per 100ms period) to millicores -- TODO confirm.
            result = result // 100
        output['result'] = result
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podPVC(self, pod='.*', namespace='default'):
    """Return capacity/used/available (bytes) per PVC attached to the matched pods.

    'result' maps PVC name -> {'namespace', 'pod', 'volume', 'capacity', 'used',
    'available'}; sizes default to -1 when the corresponding kubelet metric is
    missing (fail_reason is recorded but processing continues).
    """
    output = {'success': False, 'fail_reason': '', 'result': {}}
    try:
        pvcs_names_query = f'sum(kube_pod_spec_volumes_persistentvolumeclaims_info{{namespace=~"{namespace}", pod=~"{pod}", container=~".*"}}) by (namespace, persistentvolumeclaim, volume, pod)'
        pvc_names_result = self.run_query(pvcs_names_query)
        if pvc_names_result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {pvcs_names_query}"
            return output
        if not pvc_names_result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {pvcs_names_query}'
            return output
        pvcs_dct = {}
        for pvc in pvc_names_result.get('data').get('result'):
            pvcs_dct[pvc.get('metric').get('persistentvolumeclaim')] = {'namespace': pvc.get('metric').get('namespace'), 'pod': pvc.get('metric').get('pod'), 'volume': pvc.get('metric').get('volume'), 'capacity': (- 1), 'used': (- 1), 'available': (- 1)}
        for pvc in pvcs_dct.keys():
            pvcs_capacity_query = f'sum(kubelet_volume_stats_capacity_bytes{{persistentvolumeclaim=~"{pvc}"}}) by (persistentvolumeclaim, namespace)'
            # Renamed from the original's shadowing reuse of pvc_names_result for clarity.
            pvcs_capacity_result = self.run_query(pvcs_capacity_query)
            if pvcs_capacity_result.get('status') != 'success':
                output['fail_reason'] = f"could not get metric's value: {pvcs_capacity_query}"
                return output
            if not pvcs_capacity_result.get('data').get('result'):
                output['fail_reason'] = f'Query did not return any data: {pvcs_capacity_query}'
            if pvcs_capacity_result.get('data').get('result'):
                pvcs_dct[pvc]['capacity'] = int(pvcs_capacity_result.get('data').get('result')[0].get('value')[1])
            pvcs_used_query = f'sum(kubelet_volume_stats_used_bytes{{persistentvolumeclaim=~"{pvc}"}}) by (persistentvolumeclaim, namespace)'
            pvcs_used_result = self.run_query(pvcs_used_query)
            if pvcs_used_result.get('status') != 'success':
                output['fail_reason'] = f"could not get metric's value: {pvcs_used_query}"
                return output
            if not pvcs_used_result.get('data').get('result'):
                output['fail_reason'] = f'Query did not return any data: {pvcs_used_query}'
            if pvcs_used_result.get('data').get('result'):
                pvcs_dct[pvc]['used'] = int(pvcs_used_result.get('data').get('result')[0].get('value')[1])
            pvcs_available_query = f'sum(kubelet_volume_stats_available_bytes{{persistentvolumeclaim=~"{pvc}"}}) by (persistentvolumeclaim, namespace)'
            pvcs_available_result = self.run_query(pvcs_available_query)
            if pvcs_available_result.get('status') != 'success':
                output['fail_reason'] = f"could not get metric's value: {pvcs_available_query}"
                return output
            if not pvcs_available_result.get('data').get('result'):
                output['fail_reason'] = f'Query did not return any data: {pvcs_available_query}'
            if pvcs_available_result.get('data').get('result'):
                pvcs_dct[pvc]['available'] = int(pvcs_available_result.get('data').get('result')[0].get('value')[1])
        output['result'] = pvcs_dct
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podNetworkReceiveBytes(self, pod='.*', namespace='default'):
    """Return per-interface receive rates (bytes/s, irate over 10m) for the matched pods.

    Returns {'success': bool, 'fail_reason': str | Exception,
    'result': {interface_name: float}}.
    """
    output = {'success': False, 'fail_reason': '', 'result': {}}
    try:
        query = f'sum(irate(container_network_receive_bytes_total{{container!="", namespace=~"{namespace}", pod=~"{pod}"}}[10m])) by (pod, instance, namespace, interface)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        interfaces = {}
        for interface in result.get('data').get('result'):
            interfaces[interface.get('metric').get('interface')] = float(interface.get('value')[1])
        output['result'] = interfaces
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podNetworkTransmitBytes(self, pod='.*', namespace='default'):
    """Return per-interface transmit rates (bytes/s, irate over 10m) for the matched pods.

    Returns {'success': bool, 'fail_reason': str | Exception,
    'result': {interface_name: float}}.
    """
    output = {'success': False, 'fail_reason': '', 'result': {}}
    try:
        query = f'sum(irate(container_network_transmit_bytes_total{{container!="", namespace=~"{namespace}", pod=~"{pod}"}}[10m])) by (pod, instance, namespace, interface)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        interfaces = {}
        for interface in result.get('data').get('result'):
            interfaces[interface.get('metric').get('interface')] = float(interface.get('value')[1])
        output['result'] = interfaces
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podPVC_table(self, pod, namespace='default'):
    """Render the pod's PVC capacity/used/available figures as a plain-text table.

    Returns ' ' when the PVC lookup fails and a short notice when the pod
    uses no PVCs; unknown sizes (-1) are shown as '?'.
    """
    pvc_lookup = self.podPVC(pod, namespace)
    if not pvc_lookup.get('success'):
        return ' '
    pvc_entries = pvc_lookup.get('result')
    if len(pvc_entries) < 1:
        return 'No PVCs used by the pod'
    rows = [['PVC', 'CAPACITY', 'USED', 'AVAILABLE']]
    for pvc_name, info in pvc_entries.items():
        # Wrap long PVC names so the column stays narrow.
        wrapped_name = '\n'.join(textwrap.wrap(pvc_name, width=23, replace_whitespace=False))
        capacity = '?' if info.get('capacity') == (- 1) else helper_.bytes_to_kb_mb_gb(info.get('capacity'))
        used = '?' if info.get('used') == (- 1) else helper_.bytes_to_kb_mb_gb(info.get('used'))
        available = '?' if info.get('available') == (- 1) else helper_.bytes_to_kb_mb_gb(info.get('available'))
        rows.append([wrapped_name, capacity, used, available])
    return tabulate(rows, headers='firstrow', tablefmt='plain', showindex=False)
def podUpTime(self, pod='.*', namespace='default', container='.*'):
    """Return the uptime in seconds (now minus container start time) of the matched pod.

    Returns {'success': bool, 'fail_reason': str | Exception, 'result': float | 0}.
    """
    output = {'success': False, 'fail_reason': '', 'result': 0}
    try:
        query = f'sum(time() - container_start_time_seconds{{pod="{pod}", container=~"{container}", namespace=~"{namespace}", container!="POD", image!=""}}) by (pod, instance, namespace, container)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        # Dead code removed: the original built an `interfaces` dict keyed on a
        # label this metric does not expose, then never used it.
        output['result'] = float(result.get('data').get('result')[0].get('value')[1])
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podFileDescriptors(self, pod='.*', namespace='default', container='.*'):
    """Return the summed open file-descriptor count of the matched containers.

    Returns {'success': bool, 'fail_reason': str | Exception, 'result': float | 0}.
    """
    output = {'success': False, 'fail_reason': '', 'result': 0}
    try:
        query = f'sum(container_file_descriptors{{pod="{pod}", container=~"{container}", namespace=~"{namespace}", container!="POD", image!=""}}) by (pod, instance, namespace, container)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        # Dead code removed: the original built an unused `interfaces` dict here.
        output['result'] = float(result.get('data').get('result')[0].get('value')[1])
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podThreads(self, pod='.*', namespace='default', container='.*'):
    """Return the summed thread count of the matched containers.

    Returns {'success': bool, 'fail_reason': str | Exception, 'result': float | 0}.
    """
    output = {'success': False, 'fail_reason': '', 'result': 0}
    try:
        query = f'sum(container_threads{{pod="{pod}", container=~"{container}", namespace=~"{namespace}", container!="POD", image!=""}}) by (pod, instance, namespace, container)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        # Dead code removed: the original built an unused `interfaces` dict here.
        output['result'] = float(result.get('data').get('result')[0].get('value')[1])
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podProcesses(self, pod='.*', namespace='default', container='.*'):
    """Return the summed process count of the matched containers.

    Returns {'success': bool, 'fail_reason': str | Exception, 'result': float | 0}.
    """
    output = {'success': False, 'fail_reason': '', 'result': 0}
    try:
        query = f'sum(container_processes{{pod="{pod}", container=~"{container}", namespace=~"{namespace}", container!="POD", image!=""}}) by (pod, instance, namespace, container)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        # Dead code removed: the original built an unused `interfaces` dict here.
        output['result'] = float(result.get('data').get('result')[0].get('value')[1])
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podStartTime(self, pod='.*', namespace='default', container='.*'):
    """Return the container start time (Unix seconds) of the matched pod.

    Returns {'success': bool, 'fail_reason': str | Exception, 'result': float | 0}.
    """
    output = {'success': False, 'fail_reason': '', 'result': 0}
    try:
        query = f'sum(container_start_time_seconds{{pod="{pod}", container!="POD", image!="", namespace=~"{namespace}", container=~"{container}"}}) by (pod, namespace, device, container)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {query}"
            Logging.log.error(f"could not get metric's value: {query}")
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {query}'
            Logging.log.error(f'Query did not return any data: {query}')
            return output
        # Dead code removed: the original built an unused `interfaces` dict here.
        output['result'] = float(result.get('data').get('result')[0].get('value')[1])
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podDiskReadBytes(self, pod, container='.*', namespace='default'):
    """Return per-device filesystem read rates (bytes/s, irate over 10m) for the pod.

    Returns {'success': bool, 'fail_reason': str | Exception,
    'result': {device: float}}.
    """
    output = {'success': False, 'fail_reason': '', 'result': ''}
    try:
        query = f'sum(irate(container_fs_reads_bytes_total{{pod="{pod}", namespace=~"{namespace}", container=~"{container}"}}[10m])) by (pod, namespace, device)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f'''could not get metric's value:
{query}'''
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'''Query did not return any data:
{query}'''
            return output
        devices = {}
        for device in result.get('data').get('result'):
            devices[device.get('metric').get('device')] = float(device.get('value')[1])
        output['result'] = devices
        output['success'] = True
    except (KeyError, AttributeError) as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def podDiskWriteBytes(self, pod, container='.*', namespace='default'):
    """Return per-device filesystem write rates (bytes/s, irate over 10m) for the pod.

    Returns {'success': bool, 'fail_reason': str | Exception,
    'result': {device: float}}.
    """
    output = {'success': False, 'fail_reason': '', 'result': ''}
    try:
        query = f'sum(irate(container_fs_writes_bytes_total{{pod="{pod}", namespace=~"{namespace}", container=~"{container}"}}[10m])) by (pod, namespace, device)'
        result = self.run_query(query)
        if result.get('status') != 'success':
            output['fail_reason'] = f'''could not get metric's value:
{query}'''
            return output
        if not result.get('data').get('result'):
            output['fail_reason'] = f'''Query did not return any data:
{query}'''
            return output
        devices = {}
        for device in result.get('data').get('result'):
            devices[device.get('metric').get('device')] = float(device.get('value')[1])
        output['result'] = devices
        output['success'] = True
    except (KeyError, AttributeError) as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def topPod(self, namespace='default', sort_by_mem_usage=False):
    """Collect per-pod memory/CPU usage and limits for a `kubectl top`-like view.

    'result' maps pod name -> {'namespace', 'instance', 'memory_usage',
    'memory_usage_max', 'memory_limit', 'cpu_limit'[, 'cpu_usage']}.
    When sort_by_mem_usage is True the usage query is wrapped in sort_desc().
    """
    output = {'success': False, 'fail_reason': '', 'result': {}}
    try:
        memory_limit_query = f'sum(container_spec_memory_limit_bytes{{namespace=~"{namespace}", image!="", container!="", container!="POD"}}) by (pod, instance, namespace)'
        memory_usage_query = f'sum(container_memory_working_set_bytes{{namespace=~"{namespace}", image!="", container!="", container!="POD"}}) by (pod, instance, namespace)'
        if sort_by_mem_usage:
            memory_usage_query = f'sort_desc(sum(container_memory_working_set_bytes{{namespace=~"{namespace}", image!="", container!="", container!="POD"}}) by (pod, instance, namespace))'
        memory_usage_max_query = f'sum(container_memory_max_usage_bytes{{namespace=~"{namespace}", image!="", container!="", container!="POD"}}) by (pod, instance, namespace)'
        cpu_limit_query = f'sum(container_spec_cpu_quota{{namespace=~"{namespace}", image!="", container!="", container!="POD"}}) by (pod, instance, namespace)'
        cpu_usage_query = f'sum(irate(container_cpu_usage_seconds_total{{namespace=~"{namespace}", image!="", container!="", container!="POD"}}[10m])) by (pod, instance, namespace)'
        # Limit queries may legitimately return no data (pods without limits),
        # so only their status is checked; usage queries must return data.
        memory_limit = self.run_query(memory_limit_query)
        if memory_limit.get('status') != 'success':
            output['fail_reason'] = f'''could not get metric value:
{memory_limit_query}'''
            return output
        memory_usage = self.run_query(memory_usage_query)
        if memory_usage.get('status') != 'success':
            output['fail_reason'] = f'''could not get metric value:
{memory_usage_query}'''
            return output
        if not memory_usage.get('data').get('result'):
            output['fail_reason'] = f'''Query did not return any data:
{memory_usage_query}'''
            return output
        memory_usage_max = self.run_query(memory_usage_max_query)
        if memory_usage_max.get('status') != 'success':
            output['fail_reason'] = f'''could not get metric value:
{memory_usage_max_query}'''
            return output
        if not memory_usage_max.get('data').get('result'):
            output['fail_reason'] = f'''Query did not return any data:
{memory_usage_max_query}'''
            return output
        cpu_limit = self.run_query(cpu_limit_query)
        if cpu_limit.get('status') != 'success':
            output['fail_reason'] = f'''could not get metric value:
{cpu_limit_query}'''
            return output
        cpu_usage = self.run_query(cpu_usage_query)
        if cpu_usage.get('status') != 'success':
            output['fail_reason'] = f'''could not get metric value:
{cpu_usage_query}'''
            return output
        if not cpu_usage.get('data').get('result'):
            output['fail_reason'] = f'''Query did not return any data:
{cpu_usage_query}'''
            return output
        dct = {}
        # NOTE(review): the original condition tested memory_usage twice; the
        # usage results are guaranteed non-empty by the early returns above, so
        # only the memory_limit result can actually gate this branch.
        if ((len(memory_usage.get('data').get('result')) > 0) and (len(memory_limit.get('data').get('result')) > 0)):
            for pod_mem_usage in memory_usage.get('data').get('result'):
                dct[pod_mem_usage.get('metric').get('pod')] = {'namespace': pod_mem_usage.get('metric').get('namespace'), 'instance': pod_mem_usage.get('metric').get('instance'), 'memory_usage': int(pod_mem_usage.get('value')[1]), 'memory_usage_max': 0, 'memory_limit': 0, 'cpu_limit': 0}
            try:
                for pod_mem_limit in memory_limit.get('data').get('result'):
                    dct[pod_mem_limit.get('metric').get('pod')]['memory_limit'] = int(pod_mem_limit.get('value')[1])
                for pod_mem_usage_max in memory_usage_max.get('data').get('result'):
                    dct[pod_mem_usage_max.get('metric').get('pod')]['memory_usage_max'] = int(pod_mem_usage_max.get('value')[1])
                for pod_cpu_limit in cpu_limit.get('data').get('result'):
                    # Dropping the last two characters of the raw value string
                    # effectively divides the quota by 100 -- TODO confirm intent.
                    dct[pod_cpu_limit.get('metric').get('pod')]['cpu_limit'] = int(pod_cpu_limit.get('value')[1][:(- 2)])
                for pod_cpu_usage in cpu_usage.get('data').get('result'):
                    dct[pod_cpu_usage.get('metric').get('pod')]['cpu_usage'] = float(('%.2f' % float(pod_cpu_usage.get('value')[1])))
            except Exception as e:
                # Best-effort enrichment: log and keep whatever was collected.
                print(f'''ERROR -- got an error while listing pods
{e}''')
                traceback.print_exc()
        output['result'] = dct
        output['success'] = True
    except (KeyError, AttributeError) as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def topPodTable(self, namespace='default', sort_by_mem_usage=False):
    """Print a `kubectl top`-like table of pod memory/CPU usage; exits(1) on failure."""
    pods_json = self.topPod(namespace=namespace, sort_by_mem_usage=sort_by_mem_usage)
    if (not pods_json.get('success')):
        # Bug fix: fail_reason may be an Exception object; str() avoids a
        # TypeError on concatenation (matches topPvcTable's handling).
        print(f'''No pods found in the '{namespace}' namespace
{((bcolors.WARNING + str(pods_json.get('fail_reason'))) + bcolors.ENDC)}''')
        exit(1)
    table = [['NAMESPACE', 'POD', 'MEM LIMIT', 'MEM USAGE', 'MEM USAGE%', 'MEM USAGE MAX', 'MEM FREE', 'CPU LIMIT', 'CPU USAGE']]
    for (pod, value) in pods_json.get('result').items():
        if (int(value.get('memory_limit')) == 0):
            # No memory limit configured for this pod.
            memory_limit = '---'
            memory_free = '---'
            memory_usage_percentage = '---'
        else:
            memory_limit = helper_.bytes_to_kb_mb_gb(value.get('memory_limit'))
            if ((value.get('memory_limit') - value.get('memory_usage')) > 0):
                memory_free = helper_.bytes_to_kb_mb_gb((value.get('memory_limit') - value.get('memory_usage')))
            else:
                # Usage exceeds the limit; show the overshoot with a minus sign.
                memory_free = f"-{helper_.bytes_to_kb_mb_gb(((value.get('memory_limit') - value.get('memory_usage')) * (- 1)))}"
            memory_usage_percentage = helper_.percentage(value.get('memory_usage'), value.get('memory_limit'))
        if (int(value.get('cpu_limit')) == 0):
            cpu_limit = '---'
            # Dead code removed: the original also assigned a `cpu_usage` local
            # here that was never read (the row below uses value['cpu_usage']).
        else:
            cpu_limit = (str(value.get('cpu_limit')) + 'm')
        row = [value.get('namespace'), pod, memory_limit, helper_.bytes_to_kb_mb_gb(value.get('memory_usage')), memory_usage_percentage, helper_.bytes_to_kb_mb_gb(value.get('memory_usage_max')), memory_free, cpu_limit, (str(value.get('cpu_usage')) + 'm')]
        table.append(row)
    out = tabulate(table, headers='firstrow', tablefmt='plain', showindex=False)
    print(out)
def topPvc(self, pod='.*', namespace='default'):
    """Collect capacity/used/available (bytes) for every PVC in the namespace.

    Unlike podPVC, the kubelet volume stats are fetched once for all PVCs
    rather than per PVC. Sizes default to -1 when a metric is missing
    (fail_reason is recorded but processing continues).
    """
    output = {'success': False, 'fail_reason': '', 'result': {}}
    try:
        pvcs_names_query = f'sum(kube_pod_spec_volumes_persistentvolumeclaims_info{{namespace=~"{namespace}", pod=~"{pod}", container=~".*"}}) by (namespace, persistentvolumeclaim, volume, pod)'
        pvc_names_result = self.run_query(pvcs_names_query)
        if pvc_names_result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {pvcs_names_query}"
            return output
        if not pvc_names_result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {pvcs_names_query}'
            return output
        pvcs_dct = {}
        for pvc in pvc_names_result.get('data').get('result'):
            pvcs_dct[pvc.get('metric').get('persistentvolumeclaim')] = {'namespace': pvc.get('metric').get('namespace'), 'pod': pvc.get('metric').get('pod'), 'volume': pvc.get('metric').get('volume'), 'capacity': (- 1), 'used': (- 1), 'available': (- 1)}
        pvcs_capacity_query = f'sum(kubelet_volume_stats_capacity_bytes{{namespace=~"{namespace}", persistentvolumeclaim=~".*"}}) by (persistentvolumeclaim, namespace)'
        pvcs_capacity_result = self.run_query(pvcs_capacity_query)
        if pvcs_capacity_result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {pvcs_capacity_query}"
            return output
        if not pvcs_capacity_result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {pvcs_capacity_query}'
        if pvcs_capacity_result.get('data').get('result'):
            for pvc_json in pvcs_capacity_result.get('data').get('result'):
                pvcs_dct[pvc_json.get('metric').get('persistentvolumeclaim')]['capacity'] = int(pvc_json.get('value')[1])
        pvcs_used_query = f'sum(kubelet_volume_stats_used_bytes{{namespace=~"{namespace}", persistentvolumeclaim=~".*"}}) by (persistentvolumeclaim, namespace)'
        pvcs_used_result = self.run_query(pvcs_used_query)
        if pvcs_used_result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {pvcs_used_query}"
            return output
        if not pvcs_used_result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {pvcs_used_query}'
        if pvcs_used_result.get('data').get('result'):
            for pvc_json in pvcs_used_result.get('data').get('result'):
                pvcs_dct[pvc_json.get('metric').get('persistentvolumeclaim')]['used'] = int(pvc_json.get('value')[1])
        pvcs_available_query = f'sum(kubelet_volume_stats_available_bytes{{namespace=~"{namespace}", persistentvolumeclaim=~".*"}}) by (persistentvolumeclaim, namespace)'
        pvcs_available_result = self.run_query(pvcs_available_query)
        if pvcs_available_result.get('status') != 'success':
            output['fail_reason'] = f"could not get metric's value: {pvcs_available_query}"
            return output
        if not pvcs_available_result.get('data').get('result'):
            output['fail_reason'] = f'Query did not return any data: {pvcs_available_query}'
        if pvcs_available_result.get('data').get('result'):
            for pvc_json in pvcs_available_result.get('data').get('result'):
                pvcs_dct[pvc_json.get('metric').get('persistentvolumeclaim')]['available'] = int(pvc_json.get('value')[1])
        output['result'] = pvcs_dct
        output['success'] = True
    except Exception as e:
        # Bug fix: was `output['success']: False` -- a bare annotation with no effect.
        output['success'] = False
        output['fail_reason'] = e  # stores the exception object itself
        Logging.log.error(e)
        Logging.log.exception(traceback.format_stack())
    return output
def topPvcTable(self, namespace='default'):
    """Print a table of PVC capacity/used/available for the namespace; exits(1) on failure."""
    pvc_json = self.topPvc(namespace=namespace)
    if not pvc_json.get('success'):
        print(f'''No pvc's found in the '{namespace}' namespace
{((bcolors.WARNING + str(pvc_json.get('fail_reason'))) + bcolors.ENDC)}''')
        exit(1)
    rows = [['NAMESPACE', 'PVC', 'VOLUME', 'CAPACITY', 'USED', 'USED%', 'FREE', 'FREE%']]
    for pvc_name, info in pvc_json.get('result').items():
        # -1 marks a metric that was unavailable; render those cells as '?'.
        if info.get('capacity') == (- 1):
            capacity = '?'
        else:
            capacity = helper_.bytes_to_kb_mb_gb(info.get('capacity'))
        if info.get('used') == (- 1):
            used, used_percentage = '?', '?'
        else:
            used = helper_.bytes_to_kb_mb_gb(info.get('used'))
            used_percentage = helper_.percentage(info.get('used'), info.get('capacity'))
        if info.get('available') == (- 1):
            available, available_percentage = '?', '?'
        else:
            available = helper_.bytes_to_kb_mb_gb(info.get('available'))
            available_percentage = helper_.percentage(info.get('available'), info.get('capacity'))
        rows.append([info.get('namespace'), pvc_name, info.get('volume'), capacity, used, used_percentage, available, available_percentage])
    print(tabulate(rows, headers='firstrow', tablefmt='plain', showindex=False))
def test_add_variable():
    """Exercise JSModule.add_variable: local names, dependency modules, globals.

    Relies on the flxtest fixture package and a shared `store` dict that
    JSModule populates with dependency modules as a side effect.
    """
    import flxtest.foo
    import flxtest.bar
    store = {}
    m = JSModule('flxtest.foo', store)
    assert (not m.variables)
    m.add_variable('Foo')
    assert ('Foo' in m.variables)
    # Creating the module pulled its dependencies into `store`; lib1 itself
    # has no deps.
    assert (not store['flxtest.lib1'].deps)
    # 'spam' is defined in lib1, so adding it logs nothing.
    with capture_log('info') as log:
        store['flxtest.lib1'].add_variable('spam')
    assert (not log)
    # 'spam' is NOT defined in lib2 -> warns about an undefined variable.
    with capture_log('info') as log:
        store['flxtest.lib2'].add_variable('spam')
    assert ((len(log) == 1) and ('undefined variable' in log[0]))
    # Declaring it global suppresses the warning.
    with capture_log('info') as log:
        store['flxtest.lib2'].add_variable('spam', is_global=True)
    assert (not log)
    m = JSModule('flxtest.bar', store)
    m.add_variable('use_lib1')
    # Using a dependency incorrectly raises.
    with raises(RuntimeError):
        m.add_variable('use_lib1_wrong')
    m.add_variable('use_lib2')
    # NOTE(review): the remainder looks like the residue of a caching/mtime
    # test -- `our_time` is repeatedly reassigned and never asserted against.
    # Presumably these calls only verify that get_js()/add_variable() run
    # without error after the sleeps; confirm original intent upstream.
    our_time = time.time()
    time.sleep(0.01)
    m = JSModule('flxtest.bar', {})
    time.sleep(0.01)
    our_time = time.time()
    m.get_js()
    our_time = time.time()
    time.sleep(0.01)
    m.add_variable('use_lib1')
    m.add_variable('AA')
    our_time = time.time()
    time.sleep(0.01)
    m.add_variable('use_lib1')
    m.add_variable('AA')
class TestPrepareRerunAccessGraphEvent():
    """Tests for prepare_rerun_graph_analytics_event with and without a cached graph."""

    def test_rerun_access_graph_event_no_previous_graph(self, privacy_request, env_a_b_c, resources):
        """Without a previously cached graph, no analytics event is produced."""
        terminal_nodes = [c_traversal_node().address]
        event = prepare_rerun_graph_analytics_event(privacy_request, env_a_b_c, terminal_nodes, resources, ActionType.access)
        assert (event is None)

    def test_rerun_access_graph_analytics_event(self, privacy_request, env_a_b, env_a_b_c, resources):
        """Caching a smaller graph first yields a rerun_access_graph event with the diff counts."""
        prior_nodes = [b_traversal_node().address]
        privacy_request.cache_access_graph(format_graph_for_caching(env_a_b, prior_nodes))
        event = prepare_rerun_graph_analytics_event(privacy_request, env_a_b_c, [c_traversal_node().address], resources, step=ActionType.access)
        assert (event.docker is True)
        assert (event.event == 'rerun_access_graph')
        assert (event.event_created_at is not None)
        assert (event.extra_data == {'prev_collection_count': 2, 'curr_collection_count': 3, 'added_collection_count': 1, 'removed_collection_count': 0, 'added_edge_count': 1, 'removed_edge_count': 0, 'already_processed_access_collection_count': 0, 'already_processed_erasure_collection_count': 0, 'skipped_added_edge_count': 0, 'privacy_request': privacy_request.id})
        assert (event.error is None)
        assert (event.status_code is None)
        assert (event.endpoint is None)
        assert (event.local_host is None)

    def test_rerun_erasure_graph_analytics_event(self, privacy_request, env_a_b, env_a_b_c, resources):
        """Same setup as the access case, but the erasure step emits rerun_erasure_graph."""
        prior_nodes = [b_traversal_node().address]
        privacy_request.cache_access_graph(format_graph_for_caching(env_a_b, prior_nodes))
        event = prepare_rerun_graph_analytics_event(privacy_request, env_a_b_c, [c_traversal_node().address], resources, step=ActionType.erasure)
        assert (event.docker is True)
        assert (event.event == 'rerun_erasure_graph')
        assert (event.event_created_at is not None)
        assert (event.extra_data == {'prev_collection_count': 2, 'curr_collection_count': 3, 'added_collection_count': 1, 'removed_collection_count': 0, 'added_edge_count': 1, 'removed_edge_count': 0, 'already_processed_access_collection_count': 0, 'already_processed_erasure_collection_count': 0, 'skipped_added_edge_count': 0, 'privacy_request': privacy_request.id})
        assert (event.error is None)
        assert (event.status_code is None)
        assert (event.endpoint is None)
        assert (event.local_host is None)
def extractPhoebetranslationBlogspotCom(item):
    """Parse a Phoebetranslation blogspot feed item into a release message.

    Returns None for previews/unparseable titles, a release message for
    recognized tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with no chapter/volume info, and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (tag to look for, series name, translation type)
    known_series = [
        ("dragon's raja", "dragon's raja", 'translated'),
        ('Thriller Paradise', 'Thriller Paradise', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tag, series_name, tl_type in known_series:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_task_failure(flask_celery):
    """A failing Celery task records both an error event and a failed transaction.

    Fix: the task decorator was garbled (`_celery.task()` as a bare statement);
    it must be `@flask_celery.task()` so `failing_task` is actually registered
    with the Celery app under test.
    """
    apm_client = flask_celery.flask_apm_client.client

    @flask_celery.task()
    def failing_task():
        raise ValueError('foo')

    t = failing_task.delay()
    assert (t.status == 'FAILURE')
    # Exactly one error event, attributed to the task and marked unhandled.
    assert (len(apm_client.events[ERROR]) == 1)
    error = apm_client.events[ERROR][0]
    assert (error['culprit'] == 'tests.contrib.celery.flask_tests.failing_task')
    assert (error['exception']['message'] == 'ValueError: foo')
    assert (error['exception']['handled'] is False)
    # The task run is also reported as a 'celery' transaction with failure outcome.
    transaction = apm_client.events[TRANSACTION][0]
    assert (transaction['name'] == 'tests.contrib.celery.flask_tests.failing_task')
    assert (transaction['type'] == 'celery')
    assert (transaction['result'] == 'FAILURE')
    assert (transaction['outcome'] == 'failure')
class MyApp(App):
    """Demo Textual app: theme toggle, labelled buttons, input echo, event logging."""

    BINDINGS = [('t', 'change_theme()', 'Muda o tema!'), ('s', 'exit()', 'Sai da aplicacao!')]

    def action_change_theme(self):
        # Toggle between dark and light themes.
        self.dark = not self.dark

    def action_exit(self):
        self.exit()

    def compose(self):
        # The label is kept on self so event handlers can update it.
        self.label = Label('[b]Sera que clicou?[/]')
        yield Header()
        yield self.label
        yield Input('Digite algo!')
        with Horizontal():
            yield Button('Vermelho!', variant='error')
            yield Button('Verde!', variant='success')
            yield Button('Amarelo!', variant='warning')
        self.footer = Footer()
        yield self.footer

    def on_button_pressed(self, event: Button.Pressed):
        self.label.update(f'[b]Clicado no {event.button.label}[/]')

    def on_input_changed(self, event: Input.Changed):
        self.label.update(f'[b]Texto no Input {event.input.value}[/]')

    def on_input_submitted(self, event: Input.Submitted):
        self.label.update(f'[b red]Texto no Input {event.input.value}[/]')

    def on_key(self, event: Key):
        self.log(f'on_key {event.key} foi precionada', event=event)

    def on_click(self, event: Click):
        self.log('on_click', event=event)

    def on_mouse_scroll_up(self, event: MouseScrollUp):
        self.log('on_mouse_scroll_up', event=event)

    def on_mouse_scroll_down(self, event: MouseScrollDown):
        self.log('on_mouse_scroll_down', event=event)
class TestOFPQueuePropNone(unittest.TestCase):
    """Tests for the OFPQueuePropNone OpenFlow queue property."""

    # Raw wire bytes and the values they encode.
    property = {'buf': b'\x00\x00', 'val': ofproto.OFPQT_NONE}
    len = {'buf': b'\x00\x08', 'val': ofproto.OFP_QUEUE_PROP_HEADER_SIZE}
    zfill = b'\x00' * 4
    c = OFPQueuePropNone()

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init(self):
        prop_val = self.property['val']
        len_val = self.len['val']
        # The class must be registered in the property dispatch table.
        cls = OFPQueuePropHeader._QUEUE_PROPERTIES[self.c.cls_prop_type]
        eq_(prop_val, self.c.cls_prop_type)
        eq_(prop_val, self.c.property)
        eq_(prop_val, cls.cls_prop_type)
        eq_(len_val, self.c.cls_prop_len)
        eq_(len_val, self.c.len)
        eq_(len_val, cls.cls_prop_len)

    def test_parser(self):
        # type + length + 4 bytes of zero padding = a full property header.
        wire = self.property['buf'] + self.len['buf'] + self.zfill
        ok_(self.c.parser(wire, 0))
def main(args=None):
    # NOTE: intentionally no docstring here — argparse uses main.__doc__ as the
    # parser description, and the original has none in this view.
    import argparse
    from pathlib import Path
    from fontTools import configLogger

    parser = argparse.ArgumentParser('fonttools voltLib.voltToFea', description=main.__doc__)
    parser.add_argument('input', metavar='INPUT', type=Path, help='input font/VTP file to process')
    parser.add_argument('featurefile', metavar='OUTPUT', type=Path, help='output feature file')
    parser.add_argument('-t', '--table', action='append', choices=TABLES, dest='tables', help='List of tables to write, by default all tables are written')
    parser.add_argument('-q', '--quiet', action='store_true', help='Suppress non-error messages')
    parser.add_argument('--traceback', action='store_true', help='Dont catch exceptions')
    options = parser.parse_args(args)

    configLogger(level='ERROR' if options.quiet else 'INFO')

    source = options.input
    font = None
    try:
        # If INPUT is a font, the VOLT project is embedded in its TSIV table.
        font = TTFont(source)
        if 'TSIV' in font:
            source = StringIO(font['TSIV'].data.decode('utf-8'))
        else:
            log.error('"TSIV" table is missing, font was not saved from VOLT?')
            return 1
    except TTLibError:
        # Not a font — treat INPUT as a plain VTP text file.
        pass

    converter = VoltToFea(source, font)
    try:
        fea = converter.convert(options.tables)
    except NotImplementedError as e:
        if options.traceback:
            raise
        location = getattr(e.args[0], 'location', None)
        message = f'"{e}" is not supported'
        if location:
            path, line, column = location
            log.error(f'{path}:{line}:{column}: {message}')
        else:
            log.error(message)
        return 1
    with open(options.featurefile, 'w') as feafile:
        feafile.write(fea)
def test_task_set_secrets(task_definition):
    """Secrets passed to set_secrets are added alongside the container's existing ones."""
    task_definition.set_secrets(((u'webserver', u'foo', u'baz'), (u'webserver', u'some-name', u'some-value')))
    container_secrets = task_definition.containers[0]['secrets']
    # 'dolor'/'sit' is a pre-existing secret; the other two were just set.
    for expected in (
        {'name': 'dolor', 'valueFrom': 'sit'},
        {'name': 'foo', 'valueFrom': 'baz'},
        {'name': 'some-name', 'valueFrom': 'some-value'},
    ):
        assert expected in container_secrets
class OptionChartJsSankeyParsing(Options):
    """Parsing options for a Chart.js sankey dataset (epyk options wrapper).

    Fix: the @property / @<name>.setter decorators were stripped (residue
    `_.setter` remained); without them each setter definition silently
    replaced its getter and attribute access did not go through _config.
    """

    @property
    def from_(self):
        # Key in the data objects holding the source node ('from' in JS).
        return self._config_get()

    @from_.setter
    def from_(self, val: str):
        # Stored under 'from' — 'from_' avoids the Python keyword.
        self._config(val, name='from')

    @property
    def to(self):
        # Key in the data objects holding the target node.
        return self._config_get()

    @to.setter
    def to(self, val: str):
        self._config(val)

    @property
    def flow(self):
        # Key in the data objects holding the flow value.
        return self._config_get()

    @flow.setter
    def flow(self, val: str):
        self._config(val)
def ensure_bam_sorted(bam_fname, by_name=False, span=50, fasta=None):
    """Heuristically check that a BAM file is sorted.

    Inspects the first `span` reads and returns False as soon as two
    consecutive reads are out of order; returns True otherwise. Checks
    name order when `by_name` is set, coordinate order (within a
    reference) otherwise.

    Fix: the original leaked the open AlignmentFile on the early
    `return False` path; the handle is now always closed.
    """
    if by_name:
        def out_of_order(read, prev):
            return not ((prev is None) or (prev.qname <= read.qname))
    else:
        def out_of_order(read, prev):
            # Reads on different references are never "out of order" here.
            return not ((prev is None) or (read.tid != prev.tid) or (prev.pos <= read.pos))

    bam = pysam.AlignmentFile(bam_fname, 'rb', reference_filename=fasta)
    try:
        last_read = None
        for read in islice(bam, span):
            if out_of_order(read, last_read):
                return False
            last_read = read
    finally:
        bam.close()
    return True
class OptionPlotoptionsTimelineLabel(Options):
    """Label options for a Highcharts timeline series (epyk options wrapper).

    Fix: the @property / @<name>.setter decorators were stripped; without
    them every setter def silently replaced its getter and plain attribute
    access bypassed the _config machinery. Getter defaults mirror the
    Highcharts documented defaults.
    """

    @property
    def boxesToAvoid(self):
        return self._config_get(None)

    @boxesToAvoid.setter
    def boxesToAvoid(self, value: Any):
        self._config(value, js_type=False)

    @property
    def connectorAllowed(self):
        return self._config_get(False)

    @connectorAllowed.setter
    def connectorAllowed(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def connectorNeighbourDistance(self):
        return self._config_get(24)

    @connectorNeighbourDistance.setter
    def connectorNeighbourDistance(self, num: float):
        self._config(num, js_type=False)

    @property
    def enabled(self):
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def format(self):
        return self._config_get('undefined')

    @format.setter
    def format(self, text: str):
        self._config(text, js_type=False)

    @property
    def formatter(self):
        return self._config_get('undefined')

    @formatter.setter
    def formatter(self, value: Any):
        self._config(value, js_type=False)

    @property
    def maxFontSize(self):
        return self._config_get(None)

    @maxFontSize.setter
    def maxFontSize(self, num: float):
        self._config(num, js_type=False)

    @property
    def minFontSize(self):
        return self._config_get(None)

    @minFontSize.setter
    def minFontSize(self, num: float):
        self._config(num, js_type=False)

    @property
    def onArea(self):
        return self._config_get(None)

    @onArea.setter
    def onArea(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def style(self) -> 'OptionPlotoptionsTimelineLabelStyle':
        # Read-only sub-option group; no setter by design.
        return self._config_sub_data('style', OptionPlotoptionsTimelineLabelStyle)

    @property
    def useHTML(self):
        return self._config_get(False)

    @useHTML.setter
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)
def run_bot() -> None:
    """Configure the Telegram bot application, register handlers, and start polling.

    Fix: the builder chain on the first line was truncated (ended with a
    dangling '.'); `.build()` is required to obtain the Application.
    """
    application = (
        ApplicationBuilder()
        .token(config.telegram_token)
        .concurrent_updates(True)
        .rate_limiter(AIORateLimiter(max_retries=5))
        .build()
    )

    # Restrict handlers to allowed usernames / user ids / group chat ids, if configured.
    user_filter = filters.ALL
    if len(config.allowed_telegram_usernames) > 0:
        usernames = [x for x in config.allowed_telegram_usernames if isinstance(x, str)]
        any_ids = [x for x in config.allowed_telegram_usernames if isinstance(x, int)]
        user_ids = [x for x in any_ids if (x > 0)]
        group_ids = [x for x in any_ids if (x < 0)]  # negative ids denote group chats
        user_filter = ((filters.User(username=usernames) | filters.User(user_id=user_ids)) | filters.Chat(chat_id=group_ids))

    application.add_handler(CommandHandler('start', start_handle, filters=user_filter))
    application.add_handler(CommandHandler('help', help_handle, filters=user_filter))
    application.add_handler(CommandHandler('help_group_chat', help_group_chat_handle, filters=user_filter))
    application.add_handler(MessageHandler(((filters.TEXT & (~ filters.COMMAND)) & user_filter), message_handle))
    application.add_handler(CommandHandler('retry', retry_handle, filters=user_filter))
    application.add_handler(CommandHandler('new', new_dialog_handle, filters=user_filter))
    application.add_handler(CommandHandler('cancel', cancel_handle, filters=user_filter))
    application.add_handler(MessageHandler((filters.VOICE & user_filter), voice_message_handle))
    application.add_handler(CommandHandler('mode', show_chat_modes_handle, filters=user_filter))
    application.add_handler(CallbackQueryHandler(show_chat_modes_callback_handle, pattern='^show_chat_modes'))
    application.add_handler(CallbackQueryHandler(set_chat_mode_handle, pattern='^set_chat_mode'))
    application.add_handler(CommandHandler('settings', settings_handle, filters=user_filter))
    application.add_handler(CallbackQueryHandler(set_settings_handle, pattern='^set_settings'))
    application.add_handler(CommandHandler('balance', show_balance_handle, filters=user_filter))
    application.add_error_handler(error_handle)

    application.run_polling()
class GETTGT():
    """Request a Kerberos TGT for a user and persist it to a .ccache file."""

    def __init__(self, target, password, domain, options):
        self.__password = password
        self.__user = target
        self.__domain = domain
        self.__aesKey = options.aesKey
        self.__options = options
        self.__kdcHost = options.dc_ip
        self.__service = options.service
        # NT/LM hashes come in colon-separated 'lm:nt' form when provided.
        if options.hashes is not None:
            self.__lmhash, self.__nthash = options.hashes.split(':')
        else:
            self.__lmhash = ''
            self.__nthash = ''

    def saveTicket(self, ticket, sessionKey):
        """Serialize the TGT into <user>.ccache in the current directory."""
        ccache_name = self.__user + '.ccache'
        logging.info('Saving ticket in %s', ccache_name)
        from impacket.krb5.ccache import CCache
        ccache = CCache()
        ccache.fromTGT(ticket, sessionKey, sessionKey)
        ccache.saveFile(ccache_name)

    def run(self):
        """Fetch the TGT from the KDC and save it."""
        userName = Principal(self.__user, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
        tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(
            clientName=userName,
            password=self.__password,
            domain=self.__domain,
            lmhash=unhexlify(self.__lmhash),
            nthash=unhexlify(self.__nthash),
            aesKey=self.__aesKey,
            kdcHost=self.__kdcHost,
            serverName=self.__service,
        )
        self.saveTicket(tgt, oldSessionKey)
@pytest.mark.parametrize('settype', ['PARAMETER', 'COMPUTATION'])
def test_values_multidim_values(tmpdir, merge_files_oneLR, settype):
    """Multidimensional attribute values are reshaped according to the dimension.

    Fix: the decorator line lost its `@pytest.mark.` prefix (bare
    `.parametrize(...)` is a syntax error).
    """
    path = os.path.join(str(tmpdir), 'values-multidim-values.dlis')
    content = [*assemble_set(settype), 'data/chap4-7/eflr/ndattrs/objattr/1-2-3-4-5-6-7-8-9-10-11-12.dlis.part', 'data/chap4-7/eflr/ndattrs/objattr/2-3.dlis.part', 'data/chap4-7/eflr/ndattrs/objattr/empty-OBNAME.dlis.part']
    merge_files_oneLR(path, content)
    with dlis.load(path) as (f, *_):
        obj = f.object(settype, 'OBJECT', 10, 0)
        # 12 values laid out with dimension [3, 2] -> values[i] is a 3x2-ish slice.
        assert (list(obj.values[0][1]) == [3, 4])
        assert (list(obj.values[1][2]) == [11, 12])
        assert (obj.dimension == [3, 2])
class HardTimeout(base_tests.SimpleDataPlane):
    """Verify a flow with hard_timeout is removed and a flow_removed message is sent.

    Fixes: two literals were truncated in the original (`random.randint(0, )`
    and a bare `msg9.buffer_id =` — both syntax errors); restored with the
    conventional oftest values. Also `keys().sort()` fails on Python 3, so
    the port list is built with sorted().
    """

    def runTest(self):
        logging.info('Running Hard_Timeout test ')
        of_ports = sorted(config['port_map'].keys())
        self.assertTrue((len(of_ports) > 1), 'Not enough ports for test')
        delete_all_flows(self.controller)
        logging.info('Inserting flow entry with hard_timeout set. Also send_flow_removed_message flag set')
        logging.info('Expecting the flow entry to delete with given hard_timeout')
        msg9 = ofp.message.flow_add()
        msg9.match.wildcards = ofp.OFPFW_ALL
        # Upper bound restored to oftest's conventional 2**53 cookie range — TODO confirm.
        msg9.cookie = random.randint(0, 9007199254740992)
        # 0xffffffff == OFP_NO_BUFFER (no buffered packet associated with the flow).
        msg9.buffer_id = 0xffffffff
        msg9.hard_timeout = 1
        msg9.flags |= ofp.OFPFF_SEND_FLOW_REM
        rv1 = self.controller.message_send(msg9)
        self.assertTrue((rv1 != (- 1)), 'Error installing flow mod')
        do_barrier(self.controller)
        verify_tablestats(self, expect_active=1)
        (response, pkt) = self.controller.poll(exp_msg=ofp.OFPT_FLOW_REMOVED, timeout=5)
        self.assertTrue((response is not None), 'Did not receive flow removed message ')
        self.assertEqual(ofp.OFPRR_HARD_TIMEOUT, response.reason, 'Flow table entry removal reason is not hard_timeout')
        self.assertEqual(1, response.duration_sec, 'Flow was not alive for 1 sec')
        sleep(1)
class TestBurnKeyCommands(EfuseTestCase):
    """espefuse `burn_key` tests for the various chip families.

    Fix: every skip marker had its `@pytest.mark.` prefix stripped (bare
    `.skipif(...)` lines are syntax errors); the decorators are restored.
    Bodies are unchanged.
    """

    @pytest.mark.skipif((arg_chip != 'esp32'), reason='ESP32-only')
    def test_burn_key_3_key_blocks(self):
        self.espefuse_py('burn_key -h')
        self.espefuse_py(f'burn_key BLOCK1 {IMAGES_DIR}/192bit', check_msg='A fatal error occurred: Incorrect key file size 24. Key file must be 32 bytes (256 bits) of raw binary key data.', ret_code=2)
        self.espefuse_py(f'burn_key BLOCK1 {IMAGES_DIR}/256bit BLOCK2 {IMAGES_DIR}/256bit_1 BLOCK3 {IMAGES_DIR}/256bit_2 --no-protect-key')
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_1')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_2')
        self.espefuse_py(f'burn_key BLOCK1 {IMAGES_DIR}/256bit BLOCK2 {IMAGES_DIR}/256bit_1 BLOCK3 {IMAGES_DIR}/256bit_2')
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_1')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_2')

    @pytest.mark.skipif((arg_chip != 'esp32c2'), reason='ESP32-C2-only')
    def test_burn_key_1_key_block(self):
        self.espefuse_py('burn_key -h')
        self.espefuse_py(f'burn_key BLOCK_KEY0 {IMAGES_DIR}/128bit XTS_AES_128_KEY', check_msg='A fatal error occurred: Incorrect key file size 16. Key file must be 32 bytes (256 bits) of raw binary key data.', ret_code=2)
        self.espefuse_py(f'burn_key BLOCK_KEY0 {IMAGES_DIR}/256bit XTS_AES_128_KEY --no-read-protect')
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit', reverse_order=True)
        self.espefuse_py(f'burn_key BLOCK_KEY0 {IMAGES_DIR}/256bit XTS_AES_128_KEY')
        output = self.espefuse_py('summary -d')
        assert ('[3 ] read_regs: ' in output)
        assert ('= ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? -/-' in output)

    @pytest.mark.skipif((arg_chip != 'esp32c2'), reason='ESP32-C2-only')
    def test_burn_key_one_key_block_with_fe_and_sb_keys(self):
        self.espefuse_py('burn_key -h')
        self.espefuse_py(f'burn_key BLOCK_KEY0 {IMAGES_DIR}/256bit XTS_AES_128_KEY BLOCK_KEY0 {IMAGES_DIR}/128bit_key SECURE_BOOT_DIGEST', check_msg="A fatal error occurred: These keypurposes are incompatible ['XTS_AES_128_KEY', 'SECURE_BOOT_DIGEST']", ret_code=2)
        self.espefuse_py(f'burn_key BLOCK_KEY0 {IMAGES_DIR}/128bit_key XTS_AES_128_KEY_DERIVED_FROM_128_EFUSE_BITS BLOCK_KEY0 {IMAGES_DIR}/128bit_key SECURE_BOOT_DIGEST --no-read-protect')
        output = self.espefuse_py('summary -d')
        assert ('[3 ] read_regs: 0c0d0e0f 08090a0b 0b0a0908 0f0e0d0c' in output)
        self.espefuse_py(f'burn_key BLOCK_KEY0 {IMAGES_DIR}/128bit_key XTS_AES_128_KEY_DERIVED_FROM_128_EFUSE_BITS BLOCK_KEY0 {IMAGES_DIR}/128bit_key SECURE_BOOT_DIGEST')
        output = self.espefuse_py('summary -d')
        assert ('[3 ] read_regs: 0b0a0908 0f0e0d0c' in output)
        assert ('= ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f -/-' in output)
        assert ('= ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? -/-' in output)
        assert ('= 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f R/-' in output)

    @pytest.mark.skipif((arg_chip not in ['esp32s2', 'esp32s3', 'esp32s3beta1', 'esp32c3', 'esp32h2beta1', 'esp32c6', 'esp32h2', 'esp32p4']), reason='Only chips with 6 keys')
    def test_burn_key_with_6_keys(self):
        cmd = f'burn_key BLOCK_KEY0 {IMAGES_DIR}/256bit XTS_AES_256_KEY_1 BLOCK_KEY1 {IMAGES_DIR}/256bit_1 XTS_AES_256_KEY_2 BLOCK_KEY2 {IMAGES_DIR}/256bit_2 XTS_AES_128_KEY'
        if ((arg_chip in ['esp32c3', 'esp32c6']) or (arg_chip in ['esp32h2', 'esp32h2beta1'])):
            cmd = cmd.replace('XTS_AES_256_KEY_1', 'XTS_AES_128_KEY')
            cmd = cmd.replace('XTS_AES_256_KEY_2', 'XTS_AES_128_KEY')
        self.espefuse_py((cmd + ' --no-read-protect --no-write-protect'))
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit', reverse_order=True)
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_1', reverse_order=True)
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_2', reverse_order=True)
        self.espefuse_py(cmd)
        output = self.espefuse_py('summary -d')
        assert ('[4 ] read_regs: ' in output)
        assert ('[5 ] read_regs: ' in output)
        assert ('[6 ] read_regs: ' in output)
        self.espefuse_py(f'burn_key BLOCK_KEY3 {IMAGES_DIR}/256bit SECURE_BOOT_DIGEST0 BLOCK_KEY4 {IMAGES_DIR}/256bit_1 SECURE_BOOT_DIGEST1 BLOCK_KEY5 {IMAGES_DIR}/256bit_2 SECURE_BOOT_DIGEST2')
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_1')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_2')

    @pytest.mark.skipif((arg_chip != 'esp32'), reason='3/4 coding scheme is only in esp32')
    def test_burn_key_with_34_coding_scheme(self):
        self._set_34_coding_scheme()
        self.espefuse_py(f'burn_key BLOCK1 {IMAGES_DIR}/256bit', check_msg='A fatal error occurred: Incorrect key file size 32. Key file must be 24 bytes (192 bits) of raw binary key data.', ret_code=2)
        self.espefuse_py(f'burn_key BLOCK1 {IMAGES_DIR}/192bit BLOCK2 {IMAGES_DIR}/192bit_1 BLOCK3 {IMAGES_DIR}/192bit_2 --no-protect-key')
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/192bit')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/192bit_1')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/192bit_2')
        self.espefuse_py(f'burn_key BLOCK1 {IMAGES_DIR}/192bit BLOCK2 {IMAGES_DIR}/192bit_1 BLOCK3 {IMAGES_DIR}/192bit_2')
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/192bit')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/192bit_1')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/192bit_2')

    @pytest.mark.skipif((arg_chip not in ['esp32s2', 'esp32s3', 'esp32p4']), reason='512 bit keys are only supported on ESP32-S2, S3, and P4')
    def test_burn_key_512bit(self):
        self.espefuse_py(f'burn_key BLOCK_KEY0 {IMAGES_DIR}/256bit_1_256bit_2_combined XTS_AES_256_KEY --no-read-protect --no-write-protect')
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_1', reverse_order=True)
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_2', reverse_order=True)

    @pytest.mark.skipif((arg_chip not in ['esp32s2', 'esp32s3', 'esp32p4']), reason='512 bit keys are only supported on ESP32-S2, S3, and P4')
    def test_burn_key_512bit_non_consecutive_blocks(self):
        self.espefuse_py(f'burn_key BLOCK_KEY2 {IMAGES_DIR}/256bit XTS_AES_128_KEY')
        self.espefuse_py(f'burn_key BLOCK_KEY4 {IMAGES_DIR}/256bit SECURE_BOOT_DIGEST0')
        self.espefuse_py(f'burn_key BLOCK_KEY1 {IMAGES_DIR}/256bit_1_256bit_2_combined XTS_AES_256_KEY --no-read-protect --no-write-protect')
        self.espefuse_py(f'burn_key BLOCK_KEY5 {IMAGES_DIR}/256bit USER --no-read-protect --no-write-protect')
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_1', reverse_order=True)
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_2', reverse_order=True)
        assert ('[5 ] read_regs: bcbd11bf b8b9babb b4b5b6b7 b0b1b2b3 acadaeaf a8a9aaab a4a5a6a7 11a1a2a3' in output)
        assert ('[7 ] read_regs: bcbd22bf b8b9babb b4b5b6b7 b0b1b2b3 acadaeaf a8a9aaab a4a5a6a7 22a1a2a3' in output)

    @pytest.mark.skipif((arg_chip not in ['esp32s2', 'esp32s3', 'esp32p4']), reason='512 bit keys are only supported on ESP32-S2, S3, and P4')
    def test_burn_key_512bit_non_consecutive_blocks_loop_around(self):
        self.espefuse_py(f'burn_key BLOCK_KEY2 {IMAGES_DIR}/256bit XTS_AES_128_KEY BLOCK_KEY3 {IMAGES_DIR}/256bit USER BLOCK_KEY4 {IMAGES_DIR}/256bit SECURE_BOOT_DIGEST0 BLOCK_KEY5 {IMAGES_DIR}/256bit SECURE_BOOT_DIGEST1 BLOCK_KEY1 {IMAGES_DIR}/256bit_1_256bit_2_combined XTS_AES_256_KEY --no-read-protect --no-write-protect')
        output = self.espefuse_py('summary -d')
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_1', reverse_order=True)
        self.check_data_block_in_log(output, f'{IMAGES_DIR}/256bit_2', reverse_order=True)
        assert ('[5 ] read_regs: bcbd11bf b8b9babb b4b5b6b7 b0b1b2b3 acadaeaf a8a9aaab a4a5a6a7 11a1a2a3' in output)
        assert ('[4 ] read_regs: bcbd22bf b8b9babb b4b5b6b7 b0b1b2b3 acadaeaf a8a9aaab a4a5a6a7 22a1a2a3' in output)

    @pytest.mark.skipif((arg_chip not in ['esp32h2', 'esp32p4']), reason='These chips support ECDSA_KEY')
    def test_burn_key_ecdsa_key(self):
        self.espefuse_py(f'burn_key BLOCK_KEY0 {S_IMAGES_DIR}/ecdsa192_secure_boot_signing_key_v2.pem ECDSA_KEY BLOCK_KEY1 {S_IMAGES_DIR}/ecdsa256_secure_boot_signing_key_v2.pem ECDSA_KEY')
        output = self.espefuse_py('summary -d')
        assert (2 == output.count('= ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? -/-'))
        assert ('[4 ] read_regs: ' in output)
        assert ('[5 ] read_regs: ' in output)

    @pytest.mark.skipif((arg_chip not in ['esp32h2', 'esp32p4']), reason='These chips support ECDSA_KEY')
    def test_burn_key_ecdsa_key_check_byte_order(self):
        self.espefuse_py(f'burn_key BLOCK_KEY0 {S_IMAGES_DIR}/ecdsa192_secure_boot_signing_key_v2.pem ECDSA_KEY BLOCK_KEY1 {S_IMAGES_DIR}/ecdsa256_secure_boot_signing_key_v2.pem ECDSA_KEY --no-read-protect')
        output = self.espefuse_py('summary -d')
        assert ('= c8 c4 5d 62 9e 05 05 bd cb 04 a4 7c 06 f5 86 14 cb 23 81 23 95 b7 71 4f 00 00 00 00 00 00 00 00 R/-' in output)
        assert ('= fc 6b ec 75 64 37 7d 3b 88 8d 34 05 ed 91 06 1b 38 c2 50 84 7a 08 9d c3 66 6a 06 90 23 8b 54 b4 R/-' in output)
        assert ('[4 ] read_regs: 625dc4c8 bd05059e 7ca404cb 1486f506 238123cb 4f71b795 ' in output)
        assert ('[5 ] read_regs: 75ec6bfc 3b7d3764 05348d88 1b0691ed 8450c238 c39d087a 90066a66 b4548b23' in output)
class TestsAchromatic(util.ColorAsserts, unittest.TestCase):
    """Achromatic detection for HSL colors."""

    def test_achromatic(self):
        """Zero/undefined saturation, or lightness at (or effectively at) 0 or 1, is achromatic."""
        achromatic_coords = [
            [270, 0.5, 0],
            [270, 0.5, 1],
            [270, 0, 0.5],
            [270, 1e-06, 0.5],
            [270, 0.5, 0.9999999],
            [270, NaN, 0],
            [270, NaN, 1],
            [270, 0.0, NaN],
            [270, 0.5, NaN],
            [270, NaN, 0.5],
            [270, NaN, NaN],
        ]
        for coords in achromatic_coords:
            self.assertEqual(Color('hsl', coords).is_achromatic(), True)
def test_pca_bigwig_lieberman_histoneMark_track():
    """hicPCA with the lieberman method and an extra histone-mark track writes the expected bigwigs."""
    out_pca1 = NamedTemporaryFile(suffix='.bw', delete=False)
    out_pca2 = NamedTemporaryFile(suffix='.bw', delete=False)
    for handle in (out_pca1, out_pca2):
        handle.close()

    matrix = ROOT + 'small_test_matrix.h5'
    extra_track = ROOT + 'bigwig_chrx_2e6_5e6.bw'
    chromosomes = 'chrX '
    args = '--matrix {} --outputFileName {} {} -f bigwig --whichEigenvectors 1 2 --method lieberman --extraTrack {} --chromosomes {}'.format(matrix, out_pca1.name, out_pca2.name, extra_track, chromosomes).split()
    compute(hicPCA.main, args, 5)

    chrom_list = ['chrX']
    assert are_files_equal_bigwig(ROOT + 'hicPCA/pca1_chip_track.bw', out_pca1.name, chrom_list)
    assert are_files_equal_bigwig(ROOT + 'hicPCA/pca2_chip_track.bw', out_pca2.name, chrom_list)

    os.unlink(out_pca1.name)
    os.unlink(out_pca2.name)
def test_slice():
    """Slicing a Search translates to Elasticsearch from/size pagination."""
    s = search.Search()
    expectations = [
        (slice(3, 10), {'from': 3, 'size': 7}),
        (slice(None, 5), {'from': 0, 'size': 5}),
        (slice(3, None), {'from': 3, 'size': 10}),
        (slice(0, 0), {'from': 0, 'size': 0}),
        (slice(20, 0), {'from': 20, 'size': 0}),  # empty window past the start
    ]
    for sl, expected in expectations:
        assert expected == s[sl].to_dict()
@pytest.mark.parametrize('abi,arguments,data,expected', (pytest.param(ABI_B, [0], None, '0xf0fdf', id='ABI_B, valid int args, no data'), pytest.param(ABI_B, [1], None, '0xf0fdf', id='ABI_B, valid int args, no data'), pytest.param(ABI_C, [1], None, '0xf0fdf', id='ABI_B, valid int args, no data'), pytest.param(ABI_C, [(b'a' + (b'\x00' * 31))], None, '0x9f3fab', id='ABI_C, valid byte args, no data'), pytest.param(ABI_C, [f"0x61{('00' * 31)}"], None, '0x9f3fab', id='ABI_C, valid hex args, no data')))
def test_contract_abi_encoding(w3, abi, arguments, data, expected):
    """encodeABI produces the expected calldata for each ABI/argument combination.

    Fix: the decorator lost its `@pytest.mark.` prefix (bare `.parametrize(...)`
    is a syntax error); the parameters already use `pytest.param`, so pytest is
    imported by the original module.
    """
    contract = w3.eth.contract(abi=abi)
    actual = contract.encodeABI('a', arguments, data=data)
    assert (actual == expected)
class MemberList(MethodView):
    """Paginated forum member list with optional user search.

    Fix: GET and POST duplicated the page/sort/order parsing block verbatim;
    it is extracted into a private helper so the two stay in sync.
    """

    form = UserSearchForm

    @staticmethod
    def _pagination_args():
        """Read page/sort/order query args and resolve them to SQLAlchemy pieces.

        Returns (page, order_func, sort_obj) where order_func is asc/desc and
        sort_obj is the User column to sort by.
        """
        page = request.args.get('page', 1, type=int)
        sort_by = request.args.get('sort_by', 'reg_date')
        order_by = request.args.get('order_by', 'asc')
        order_func = asc if (order_by == 'asc') else desc
        if (sort_by == 'reg_date'):
            sort_obj = User.id  # registration order approximated by id
        elif (sort_by == 'post_count'):
            sort_obj = User.post_count
        else:
            sort_obj = User.username
        return (page, order_func, sort_obj)

    def get(self):
        (page, order_func, sort_obj) = self._pagination_args()
        users = User.query.order_by(order_func(sort_obj)).paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
        return render_template('forum/memberlist.html', users=users, search_form=self.form())

    def post(self):
        (page, order_func, sort_obj) = self._pagination_args()
        form = self.form()
        if form.validate():
            # Valid search: show matching users instead of the full list.
            users = form.get_results().paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
            return render_template('forum/memberlist.html', users=users, search_form=form)
        users = User.query.order_by(order_func(sort_obj)).paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
        return render_template('forum/memberlist.html', users=users, search_form=form)
.usefixtures('database_interfaces')
class TestRestFirmware(RestTestBase):
def test_rest_firmware_existing(self, backend_db):
test_firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
backend_db.add_object(test_firmware)
response = self.test_client.get('/rest/firmware', follow_redirects=True).data.decode()
assert ('uids' in response)
assert (test_firmware.uid in response)
def test_offset_to_empty_response(self, backend_db):
test_firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
backend_db.add_object(test_firmware)
rv = self.test_client.get('/rest/firmware?offset=1', follow_redirects=True)
assert (b'uids' in rv.data)
assert (b'418a54d78550e8584291c96e5df352bfc1d43cf84e81187fef4962_787' not in rv.data)
def test_stable_response_on_bad_paging(self):
rv = self.test_client.get('/rest/firmware?offset=Y', follow_redirects=True)
assert (b'error_message' in rv.data)
assert (b'Malformed' in rv.data)
def test_rest_search_existing(self, backend_db):
test_firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
backend_db.add_object(test_firmware)
query = urllib.parse.quote('{"device_class": "test class"}')
rv = self.test_client.get(f'/rest/firmware?query={query}', follow_redirects=True)
assert (b'uids' in rv.data)
assert (b'418a54d78550e8584291c96e5df352bfc1d43cf84e81187fef4962_787' in rv.data)
def test_rest_search_not_existing(self, backend_db):
test_firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
backend_db.add_object(test_firmware)
query = urllib.parse.quote('{"device_class": "non-existing class"}')
rv = self.test_client.get(f'/rest/firmware?query={query}', follow_redirects=True)
assert (b'"uids": []' in rv.data)
def test_rest_upload_valid(self, monkeypatch):
monkeypatch.setattr('intercom.front_end_binding.InterComFrontEndBinding.get_available_analysis_plugins', (lambda _: ['dummy']))
rv = self.test_client.put('/rest/firmware', json=UPLOAD_DATA, follow_redirects=True)
assert (b'c1f95369a99b765e93c335067e77a7d91af3076d2d3d64aacd04e1e0a810b3ed_17' in rv.data)
assert (b'"status": 0' in rv.data)
def test_upload_unknown_plugin(self, monkeypatch):
monkeypatch.setattr('intercom.front_end_binding.InterComFrontEndBinding.get_available_analysis_plugins', (lambda _: ['plugin_1']))
data = {**UPLOAD_DATA, 'requested_analysis_systems': ['plugin_1', 'plugin_2']}
response = self.test_client.put('/rest/firmware', json=data, follow_redirects=True).json
assert ('error_message' in response)
assert ('The requested analysis plugins are not available' in response['error_message'])
assert ('plugin_2' in response['error_message'])
assert ('plugin_1' not in response['error_message'])
def test_rest_upload_invalid(self):
data = {**UPLOAD_DATA}
data.pop('version')
rv = self.test_client.put('/rest/firmware', json=data, follow_redirects=True)
assert (rv.json['message'] == 'Input payload validation failed')
assert ('version' in rv.json['errors'])
assert ("'version' is a required property" in rv.json['errors']['version'])
def test_rest_download_valid(self, backend_db):
test_firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
backend_db.add_object(test_firmware)
rv = self.test_client.get(f'/rest/firmware/{test_firmware.uid}', follow_redirects=True)
assert (b'file_type' in rv.data)
assert (b'test_type' in rv.data)
assert (b'unpacker' in rv.data)
assert (b'used_unpack_plugin' in rv.data)
def test_rest_download_invalid_uid(self, backend_db):
test_firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
backend_db.add_object(test_firmware)
rv = self.test_client.get('/rest/firmware/invalid%20uid', follow_redirects=True)
assert (b'No firmware with UID invalid uid' in rv.data)
def test_rest_download_invalid_data(self, backend_db):
test_firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
backend_db.add_object(test_firmware)
rv = self.test_client.get('/rest/firmware/', follow_redirects=True)
assert (b'404 Not Found' in rv.data)
.skip(reason='Intercom not running, thus not a single plugin known')
def test_rest_update_analysis_success(self, backend_db):
test_firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
backend_db.add_object(test_firmware)
update = urllib.parse.quote('["printable_strings"]')
rv = self.test_client.put(f'/rest/firmware/{test_firmware.uid}?update={update}', follow_redirects=True)
assert (test_firmware.uid.encode() in rv.data)
assert (b'"status": 0' in rv.data)
def test_rest_update_bad_query(self, backend_db):
test_firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
backend_db.add_object(test_firmware)
rv = self.test_client.put(f'/rest/firmware/{test_firmware.uid}?update=not_a_list', follow_redirects=True)
assert (b'"status": 1' in rv.data)
assert (b'has to be a list' in rv.data)
def test_rest_download_with_summary(self, backend_db):
    """With ?summary=true the response includes the analysis summary entries."""
    firmware = create_test_firmware(device_class='test class', device_name='test device', vendor='test vendor')
    backend_db.add_object(firmware)
    response = self.test_client.get(f'/rest/firmware/{firmware.uid}?summary=true', follow_redirects=True)
    first_summary_entry = firmware.processed_analysis['dummy']['summary'][0]
    assert first_summary_entry.encode() in response.data
def solve_tsp_simulated_annealing(distance_matrix: np.ndarray, x0: Optional[List[int]]=None, perturbation_scheme: str='two_opt', alpha: float=0.9, max_processing_time: Optional[float]=None, log_file: Optional[str]=None, verbose: bool=False) -> Tuple[(List, float)]:
    """Solve a TSP instance with simulated annealing.

    Args:
        distance_matrix: square matrix of pairwise distances.
        x0: optional initial permutation; a default is built when omitted.
        perturbation_scheme: neighborhood scheme used to generate candidate moves.
        alpha: geometric cooling factor applied after each temperature stage.
        max_processing_time: wall-clock budget in seconds (unlimited when None).
        log_file: optional path; per-iteration progress is written to it.
        verbose: also print progress to stdout.

    Returns:
        Tuple of (best permutation found, its total distance).
    """
    (x, fx) = setup_initial_solution(distance_matrix, x0)
    temp = _initial_temperature(distance_matrix, x, fx, perturbation_scheme)
    max_processing_time = (max_processing_time or inf)
    log_file_handler = (open(log_file, 'w', encoding='utf-8') if log_file else None)
    try:
        n = len(x)
        k_inner_min = n  # accepted moves required before cooling early
        k_inner_max = (MAX_INNER_ITERATIONS_MULTIPLIER * n)
        k_noimprovements = 0  # consecutive temperature stages without an accepted move
        tic = default_timer()
        stop_early = False
        while ((k_noimprovements < MAX_NON_IMPROVEMENTS) and (not stop_early)):
            k_accepted = 0
            for k in range(k_inner_max):
                if ((default_timer() - tic) > max_processing_time):
                    _print_message(TIME_LIMIT_MSG, verbose, log_file_handler)
                    stop_early = True
                    break
                xn = _perturbation(x, perturbation_scheme)
                fn = compute_permutation_distance(distance_matrix, xn)
                if _acceptance_rule(fx, fn, temp):
                    (x, fx) = (xn, fn)
                    k_accepted += 1
                    k_noimprovements = 0
                msg = f'Temperature {temp}. Current value: {fx} k: {(k + 1)}/{k_inner_max} k_accepted: {k_accepted}/{k_inner_min} k_noimprovements: {k_noimprovements}'
                _print_message(msg, verbose, log_file_handler)
                if (k_accepted >= k_inner_min):
                    break
            temp *= alpha
            k_noimprovements += (k_accepted == 0)
    finally:
        # FIX: close the log file even when the search raises (e.g. KeyboardInterrupt);
        # previously the handle leaked on any exception inside the loop.
        if log_file_handler:
            log_file_handler.close()
    return (x, fx)
# NOTE(review): the line below lost its decorator name during extraction (a
# recipe/CLI-annotation decorator). Also the `verbose` spec reads
# ('option', 'flag', bool), mixing the annotation kind and short-flag fields —
# confirm it should be ('flag', 'v', bool) like the other flags.
('ner.openai.fetch', input_path=('Path to jsonl data to annotate', 'positional', None, Path), output_path=('Path to save the output', 'positional', None, Path), labels=('Labels (comma delimited)', 'positional', None, (lambda s: s.split(','))), lang=('Language to use for tokenizer.', 'option', 'l', str), model=('GPT-3 model to use for completion', 'option', 'm', str), examples_path=('Examples file to help define the task', 'option', 'e', Path), max_examples=('Max examples to include in prompt', 'option', 'n', int), prompt_path=('Path to jinja2 prompt template', 'option', 'p', Path), batch_size=('Batch size to send to OpenAI API', 'option', 'b', int), segment=('Split sentences', 'flag', 'S', bool), verbose=('Print extra information to terminal', 'option', 'flag', bool))
def ner_openai_fetch(input_path: Path, output_path: Path, labels: List[str], lang: str='en', model: str='text-davinci-003', batch_size: int=10, segment: bool=False, examples_path: Optional[Path]=None, prompt_path: Path=DEFAULT_PROMPT_PATH, max_examples: int=2, verbose: bool=False):
    """Fetch NER annotations for a JSONL dataset from the OpenAI API and write them back out.

    Reads `input_path` (JSONL), prompts `model` with up to `max_examples`
    few-shot examples rendered through the jinja2 template at `prompt_path`,
    and writes the suggested annotations to `output_path`.
    """
    (api_key, api_org) = get_api_credentials(model)
    examples = read_prompt_examples(examples_path, example_class=NERPromptExample)
    # tokenizer-only pipeline; sentencizer added when per-sentence segmentation is requested
    nlp = spacy.blank(lang)
    if segment:
        nlp.add_pipe('sentencizer')
    openai = OpenAISuggester(response_parser=make_ner_response_parser(labels=labels, lang=lang), openai_model=model, labels=labels, max_examples=max_examples, prompt_template=load_template(prompt_path), segment=segment, verbose=verbose, openai_api_org=api_org, openai_api_key=api_key, openai_n=1, openai_retry_timeout_s=10, openai_read_timeout_s=20, openai_n_retries=10, prompt_example_class=NERPromptExample)
    for eg in examples:
        openai.add_example(eg)
    # materialize the input so tqdm can show progress, then stream through the suggester
    stream = list(srsly.read_jsonl(input_path))
    stream = openai(tqdm.tqdm(stream), batch_size=batch_size, nlp=nlp)
    srsly.write_jsonl(output_path, stream)
def verify_file(filename, sample, formula):
    """Check that the header of *filename* matches the expected sample header.

    The sample is looked up by file extension (falling back to basename).
    For ``.py`` files, a shebang-like prefix matched by formula['py_files']
    is stripped first. Returns True when the header matches, False otherwise
    (logging a diff when debug logging is enabled).
    """
    try:
        with open(filename, 'r') as handle:
            contents = handle.read()
    except Exception as exc:
        print_logs('Unable to open {0}: {1}'.format(filename, exc))
        return False
    base = os.path.basename(filename)
    extension = get_extension(filename)
    expected = sample[extension] if extension != '' else sample[base]
    if extension == 'py':
        prefix_pattern = formula.get('py_files')
        contents, _ = prefix_pattern.subn('', contents, 1)
    lines = contents.splitlines()
    if len(expected) > len(lines):
        print_logs('File {0} smaller than sample file({1} < {2})'.format(filename, len(lines), len(expected)))
        return False
    lines = lines[:len(expected)]
    if expected == lines:
        return True
    print_logs('Header in {} does not match sample file, diff:'.format(filename))
    if config.debug_logs:
        for diff_line in difflib.unified_diff(expected, lines, 'sample', filename, lineterm=''):
            print_logs(diff_line)
    return False
def list_gtk_themes():
    """Return a sorted, de-duplicated list of available GTK theme names.

    Combines themes compiled into libgtk's GResource bundle with themes found
    on disk (system data dirs, user data dir, and ~/.themes) that ship a
    gtk-3.0 or gtk-3.20 stylesheet.
    """
    builtin_themes = [
        child[:-1]  # resource children end with '/'
        for child in Gio.resources_enumerate_children('/org/gtk/libgtk/theme', Gio.ResourceFlags.NONE)
    ]
    search_dirs = [Path(data_dir) / 'themes' for data_dir in GLib.get_system_data_dirs()]
    search_dirs.append(Path(GLib.get_user_data_dir()) / 'themes')
    search_dirs.append(Path(GLib.get_home_dir()) / '.themes')
    css_candidates = ('gtk-3.0/gtk.css', 'gtk-3.0/gtk-dark.css', 'gtk-3.20/gtk.css', 'gtk-3.20/gtk-dark.css')
    fs_themes = []
    for search_dir in search_dirs:
        if not search_dir.exists():
            continue
        for theme_dir in search_dir.iterdir():
            # only keep directories shipping at least one known GTK3 stylesheet
            if not any((theme_dir / candidate).exists() for candidate in css_candidates):
                continue
            fs_themes.append(theme_dir.as_posix().split('/')[-1])
    return sorted(set(builtin_themes + fs_themes))
def process_file(input_filename, output_filename, variables, die_on_missing_variable, remove_template):
    """Render a jinja2 template (from *input_filename* or stdin) with *variables*.

    Output goes to *output_filename* (derived by stripping EXTENSION when
    omitted) or stdout when the name is '-' or empty-for-stdin. The template
    file is deleted afterwards when *remove_template* is set. Raises Fatal on
    invalid argument combinations.
    """
    if not input_filename and not remove_template:
        raise Fatal('--keep-template only makes sense if you specify an input file')
    # strict mode makes unresolved template variables an error
    undefined = jinja2.StrictUndefined if die_on_missing_variable else jinja2.Undefined
    if input_filename and not output_filename:
        if not input_filename.endswith(EXTENSION):
            raise Fatal('If no output filename is given, input filename must end in %s' % EXTENSION)
        output_filename = input_filename[:-len(EXTENSION)]
    if not output_filename:
        raise Fatal('Output filename is empty')
    if input_filename:
        rendered = _render_file(input_filename, variables, undefined)
    else:
        rendered = _render_string(stdin_read(), variables, undefined)
    if output_filename and output_filename != '-':
        with open(output_filename, 'wb') as out:
            out.write(rendered.encode('utf-8'))
    else:
        stdout_write(rendered)
    if input_filename and remove_template:
        os.unlink(input_filename)
class OptionSeriesPackedbubbleSonificationDefaultinstrumentoptionsMappingNoteduration(Options):
    """Auto-generated Highcharts option wrapper for
    series.packedbubble.sonification.defaultInstrumentOptions.mapping.noteDuration.

    NOTE(review): every option appears as a getter/setter pair with the same
    name; the `@property` / `@<name>.setter` decorators were most likely lost
    during extraction — as written each later `def` silently shadows the
    earlier one. Restore the decorators before use.
    """
    def mapFunction(self):
        # getter (no configured default)
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # setter
        self._config(value, js_type=False)
    def mapTo(self):
        # getter (no configured default)
        return self._config_get(None)
    def mapTo(self, text: str):
        # setter
        self._config(text, js_type=False)
    def max(self):
        # getter (no configured default)
        return self._config_get(None)
    def max(self, num: float):
        # setter
        self._config(num, js_type=False)
    def min(self):
        # getter (no configured default)
        return self._config_get(None)
    def min(self, num: float):
        # setter
        self._config(num, js_type=False)
    def within(self):
        # getter (no configured default)
        return self._config_get(None)
    def within(self, value: Any):
        # setter
        self._config(value, js_type=False)
class BlueprintShortTest(TestCase):
    """Exercise the blueprint shorthand helpers ``m`` (make) and ``b`` (build)."""

    def test_blueprint(self):
        """m() creates the requested quantity with sequenced related objects."""
        batch = pizza_short.m(quantity=10)()
        self.assertEqual(len(batch), 10)
        for index in (0, 1):
            self.assertEqual(batch[index].chef.first_name, 'Chef {}'.format(index))

    def test_blueprint_build(self):
        """b() builds unsaved instances, with or without FK creation."""
        for extra in ({}, {'make_fks': True}):
            batch = pizza_short.b(quantity=10, **extra)()
            self.assertEqual(len(batch), 10)
            for index in (0, 1):
                self.assertEqual(batch[index].chef.first_name, 'Chef {}'.format(index))
                self.assertEqual(batch[index].thickness, 1)

    def test_blueprint_build_override(self):
        """Call-time keyword arguments override blueprint field values."""
        for blueprint in (pizza, pizza_short):
            batch = blueprint.b(quantity=10)(thickness=2)
            self.assertEqual(len(batch), 10)
            for index in (0, 1):
                self.assertEqual(batch[index].thickness, 2)
# NOTE(review): the line below lost its decorator name during extraction
# (an argument-coercion decorator taking cursor/window-expr/name/bool specs).
_op([BlockCursorA, CustomWindowExprA('block_cursor'), NameA, BoolA])
def stage_mem(proc, block_cursor, win_expr, new_buf_name, accum=False):
    """Stage the memory window described by *win_expr* into a fresh buffer
    named *new_buf_name* around the statements at *block_cursor*.

    When *accum* is set, the staged buffer is used in accumulate-from-zero
    mode. Returns a new Procedure whose provenance is tied to *proc*.
    """
    # win_expr unpacks to (source buffer name, per-dimension window expressions)
    (buf_name, w_exprs) = win_expr
    (ir, fwd) = scheduling.DoStageMem(block_cursor._impl, buf_name, w_exprs, new_buf_name, use_accum_zero=accum)
    return Procedure(ir, _provenance_eq_Procedure=proc, _forward=fwd)
def plotHistogram(figure, plot_context: 'PlotContext', case_to_data_map, _observation_data):
    """Draw one histogram subplot per case into *figure*.

    Numeric data is binned using the square-root rule over a min/max range
    shared by all cases; non-numeric (object) data falls back to a categorical
    bar histogram. All subplots share the same count (y) limits, which may be
    overridden by the plot config's custom limits.
    """
    config = plot_context.plotConfig()
    case_list = plot_context.cases()
    if not case_list:
        # no case selected: render a single empty placeholder plot
        dummy_case_name = 'default'
        case_list = [dummy_case_name]
        case_to_data_map = {dummy_case_name: pd.DataFrame()}
    case_count = len(case_list)
    plot_context.x_axis = plot_context.VALUE_AXIS
    # BUG FIX: was `plot_context.Y_axis`, which created a brand-new attribute
    # instead of setting `y_axis` (compare the `x_axis` assignment above).
    plot_context.y_axis = plot_context.COUNT_AXIS
    if config.xLabel() is None:
        config.setXLabel('Value')
    if config.yLabel() is None:
        config.setYLabel('Count')
    use_log_scale = plot_context.log_scale
    data = {}
    minimum = None
    maximum = None
    categories = set()
    max_element_count = 0
    categorical = False
    for case, datas in case_to_data_map.items():
        if datas.empty:
            data[case] = pd.Series(dtype='float64')
            continue
        data[case] = datas[0]
        if data[case].dtype == 'object':
            # object columns may still hold numbers as strings; try to coerce
            try:
                data[case] = pd.to_numeric(data[case], errors='ignore')
            except AttributeError:
                # fallback for very old pandas without to_numeric
                data[case] = data[case].convert_objects(convert_numeric=True)
        if data[case].dtype == 'object':
            categorical = True
        if categorical:
            categories = categories.union(set(data[case].unique()))
        else:
            current_min = data[case].min()
            current_max = data[case].max()
            minimum = current_min if minimum is None else min(minimum, current_min)
            maximum = current_max if maximum is None else max(maximum, current_max)
            max_element_count = max(max_element_count, len(data[case].index))
    categories = sorted(categories)
    # square-root binning rule
    bin_count = int(ceil(sqrt(max_element_count)))
    axes = {}
    for index, case in enumerate(case_list):
        axes[case] = figure.add_subplot(case_count, 1, index + 1)
        axes[case].set_title(f'{config.title()} ({case})')
        if use_log_scale:
            axes[case].set_xscale('log')
        if not data[case].empty:
            if categorical:
                _plotCategoricalHistogram(axes[case], config, data[case], case, categories)
            else:
                _plotHistogram(axes[case], config, data[case], case, bin_count, use_log_scale, minimum, maximum)
            config.nextColor()
            PlotTools.showGrid(axes[case], plot_context)
    # align count limits across subplots, honoring custom overrides
    min_count = 0
    max_count = max(subplot.get_ylim()[1] for subplot in axes.values())
    custom_limits = plot_context.plotConfig().limits
    if custom_limits.count_maximum is not None:
        max_count = custom_limits.count_maximum
    if custom_limits.count_minimum is not None:
        min_count = custom_limits.count_minimum
    for subplot in axes.values():
        subplot.set_ylim(min_count, max_count)
        subplot.set_xlim(custom_limits.value_minimum, custom_limits.value_maximum)
def by_time(t: float, after: bool=True) -> FilterFunc:
    """Build a filter keeping events strictly after (default) or before time *t*.

    The returned function raises ValueError when given a list that is not in
    chronological order.
    """
    def func(events: List[Event]) -> List[Event]:
        if events != sorted(events, key=(lambda event: event.time)):
            raise ValueError('Event lists must be chronological')
        if after:
            return [event for event in events if event.time > t]
        return [event for event in events if event.time < t]
    return func
def extractNekosandnekosBlogspotCom(item):
    """Parse a feed item from the 'Nekos and nekos' blogspot into a release.

    Returns None for previews or items without volume/chapter information,
    a release message for known translator tags, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def strip_additional_properties(version: Version, api_contents: dict) -> dict:
    """Drop every key of *api_contents* not declared in the schema for its type.

    The stripped result is validated against the target schema before being
    returned (jsonschema.ValidationError propagates on failure).
    """
    target_schema = get_schema_file(version, api_contents['type'])
    stripped = {
        field: api_contents[field]
        for field in target_schema['properties']
        if field in api_contents
    }
    jsonschema.validate(stripped, target_schema)
    return stripped
def has_permissions(token_data: Dict[(str, Any)], client: ClientDetail, endpoint_scopes: SecurityScopes) -> bool:
    """Return True when the token grants the endpoint's scopes, either directly
    or through one of its roles.

    Both checks are evaluated unconditionally (no short-circuit), matching the
    original behavior.
    """
    direct: bool = _has_direct_scopes(token_data=token_data, client=client, endpoint_scopes=endpoint_scopes)
    via_role: bool = _has_scope_via_role(token_data=token_data, client=client, endpoint_scopes=endpoint_scopes)
    return direct or via_role
class TotalNetSensor(BaseSensor):
    """Sensor reporting combined network throughput (received + sent) per interval."""
    name = 'totalnet'
    desc = _('Total Network activity.')

    def get_value(self, sensor_data):
        """Return the formatted total network rate (sensor_data is unused)."""
        return self._fetch_net()

    def _fetch_net(self):
        """Sum byte counters over all NICs and scale by the polling interval."""
        received = 0
        sent = 0
        for iostat in ps.net_io_counters(pernic=True).values():
            received += iostat.bytes_recv
            sent += iostat.bytes_sent
        interval = SensorManager().get_interval()
        received /= interval
        sent /= interval
        return ' {:>9s}'.format(bytes_to_human(received + sent))
# NOTE(review): the two decorator lines below lost their "@pytest.mark" prefix
# during extraction — restore them before running.
.skip(reason='Table logging is now disabled')
.django_db()
def test_drf_tracking_logging(client):
    """A tracked API request creates exactly one APIRequestLog row with the
    request path and query parameters recorded."""
    endpoint_ping = client.get('/api/v1/awards/?page=1&limit=10')
    assert (endpoint_ping.status_code == status.HTTP_200_OK)
    assert (APIRequestLog.objects.count() == 1)
    assert (APIRequestLog.objects.first().path == '/api/v1/awards/')
    # NOTE(review): eval() on stored query_params — acceptable in a test on
    # self-generated data, but ast.literal_eval would be safer.
    queryparams = eval(APIRequestLog.objects.first().query_params)
    assert (queryparams['page'] == '1')
    assert (queryparams['limit'] == '10')
class RDepPriority(Digraph.Node):
    """Requirement-graph pass that propagates priorities from master nodes.

    NOTE(review): `get_type_set` and `rewrite` take no `self`/`cls` — they were
    most likely decorated with @staticmethod in the original; confirm before use.
    """
    # passes that must run before this one
    depends_on = ['RDepDependsOn', 'RDepNoDirectedCircles', 'RDepOneComponent', 'RDepSolvedBy', 'RDepMasterNodes']

    def __init__(self, config):
        Digraph.Node.__init__(self, 'RDepPriority')
        self.config = config

    def get_type_set():
        """This pass applies to requirement-dependency input modules."""
        return set([InputModuleTypes.reqdeps])

    def rewrite(reqset):
        """Walk from each master node, assigning each requirement the maximum
        weight seen along any incoming path (weight decays by the node's
        'Factor' at every step)."""
        tracer.debug('Called.')
        def handle_priorization(node, inc_weight):
            tracer.debug('Node [%s] inc_weight [%4.3f]', node.get_id(), inc_weight)
            weight = (inc_weight * node.get_value('Factor'))
            # only raise a priority, never lower it
            if ((not node.is_value_available('Priority')) or (node.get_value('Priority') < weight)):
                tracer.debug('Node [%s] set priority to [%4.3f]', node.get_id(), weight)
                node.set_value('Priority', weight)
            for out_node in node.outgoing:
                tracer.debug('Recursive call to node [%s] with weight [%4.3f]', out_node.get_id(), weight)
                handle_priorization(out_node, weight)
        for req in reqset.get_master_nodes():
            handle_priorization(req, 1.0)
# NOTE(review): the line below lost its decorator name during extraction
# (a renderer-registration decorator keyed on the metric type).
_renderer(wrap_type=ScoreDistribution)
class ScoreDistributionRenderer(MetricRenderer):
    """Render a ScoreDistribution metric as HTML widgets: a header, entropy
    counters, and a 4-way distribution plot (top-k vs other, current vs reference)."""
    def render_html(self, obj: ScoreDistribution) -> List[BaseWidgetInfo]:
        metric_result = obj.get_result()
        distr_fig = plot_4_distr(curr_1=HistogramData.from_distribution(metric_result.current_top_k_distr), curr_2=HistogramData.from_distribution(metric_result.current_other_distr), ref_1=HistogramData.from_distribution(metric_result.reference_top_k_distr), ref_2=HistogramData.from_distribution(metric_result.reference_other_distr), name_1='top_k', name_2='other', xaxis_name='scores', color_2='secondary')
        counters = [CounterData.float(label='current score entropy (top k)', value=metric_result.current_entropy, precision=4)]
        # reference entropy is optional (absent when no reference dataset was given)
        if (metric_result.reference_entropy is not None):
            counters.append(CounterData.float(label='reference score entropy (top k)', value=metric_result.reference_entropy, precision=4))
        return [header_text(label='Score Distribution'), counter(counters=counters), plotly_figure(title='', figure=distr_fig)]
class SignalMethodTask(ITask):
    """Asyncio task that runs a single workflow signal handler.

    Looks up the handler registered under `signal_name` on the workflow
    instance, awaits it with `signal_input`, and records the return value or
    raised exception on this task object. Status always ends at DONE.
    """
    task_id: str = None            # identifier used in log prefixes
    workflow_instance: object = None  # object holding _signal_methods
    signal_name: str = None        # name of the signal handler to invoke
    signal_input: List = None      # positional arguments for the handler
    exception_thrown: BaseException = None  # populated on lookup failure or handler error
    ret_value: object = None       # handler return value on success

    def start(self):
        """Schedule signal_main() as a task on the running event loop."""
        logger.debug(f'[signal-task-{self.task_id}-{self.signal_name}] Created')
        self.task = asyncio.get_event_loop().create_task(self.signal_main())

    async def signal_main(self):
        """Resolve and await the signal handler, recording the outcome."""
        logger.debug(f'[signal-task-{self.task_id}-{self.signal_name}] Running')
        # expose this task through the contextvar so the handler can find it
        current_task.set(self)
        if (not (self.signal_name in self.workflow_instance._signal_methods)):
            # unknown signal: record the failure and finish immediately
            self.status = Status.DONE
            self.exception_thrown = SignalNotFound(self.signal_name)
            logger.error(f'Signal not found: {self.signal_name}')
            return
        signal_proc = self.workflow_instance._signal_methods[self.signal_name]
        self.status = Status.RUNNING
        try:
            logger.info(f'Invoking signal {self.signal_name}({str(self.signal_input)[1:(- 1)]})')
            self.ret_value = (await signal_proc(self.workflow_instance, *self.signal_input))
            logger.info(f'Signal {self.signal_name}({str(self.signal_input)[1:(- 1)]}) returned {self.ret_value}')
            # NOTE(review): `self.decider` is not assigned anywhere in this class —
            # presumably set externally before start(); confirm.
            self.decider.complete_signal_execution(self)
        except CancelledError:
            # cancellation is part of normal shutdown; swallow it
            logger.debug('Coroutine cancelled (expected)')
        except Exception as ex:
            logger.error(f'Signal {self.signal_name}({str(self.signal_input)[1:(- 1)]}) failed', exc_info=1)
            self.exception_thrown = ex
        finally:
            self.status = Status.DONE
class OptionPlotoptionsSeriesSonificationContexttracksMappingPlaydelay(Options):
    """Auto-generated Highcharts option wrapper for
    plotOptions.series.sonification.contextTracks.mapping.playDelay.

    NOTE(review): each option appears as a same-named getter/setter pair; the
    `@property` / `@<name>.setter` decorators were most likely lost during
    extraction — each later `def` shadows the earlier one as written.
    """
    def mapFunction(self):
        # getter (no configured default)
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # setter
        self._config(value, js_type=False)
    def mapTo(self):
        # getter (no configured default)
        return self._config_get(None)
    def mapTo(self, text: str):
        # setter
        self._config(text, js_type=False)
    def max(self):
        # getter (no configured default)
        return self._config_get(None)
    def max(self, num: float):
        # setter
        self._config(num, js_type=False)
    def min(self):
        # getter (no configured default)
        return self._config_get(None)
    def min(self, num: float):
        # setter
        self._config(num, js_type=False)
    def within(self):
        # getter (no configured default)
        return self._config_get(None)
    def within(self, value: Any):
        # setter
        self._config(value, js_type=False)
class Tuples():
    """Simple value object bundling three tuples, with structural equality."""

    def __init__(self, t1: Tuple[(float, float)], t2=(1, 2, 3), t3: Tuple[(float, ...)]=(0.1, 0.2, 0.3)):
        self.t1 = t1
        self.t2 = t2
        self.t3 = t3

    def __eq__(self, other):
        """Equal iff *other* is the same type and all three tuples match."""
        if not isinstance(other, type(self)):
            return False
        return (self.t1, self.t2, self.t3) == (other.t1, other.t2, other.t3)
class FiniteElement(object):
    """Skeleton of a finite element defined by a reference cell, polynomial
    degree, and node set.

    NOTE(review): this looks like a teaching/exercise skeleton — the
    entity_nodes branch computes nodes_per_entity and then unconditionally
    raises NotImplementedError, as do tabulate/interpolate. Left as-is.
    """
    def __init__(self, cell, degree, nodes, entity_nodes=None):
        self.cell = cell
        self.degree = degree
        self.nodes = nodes
        self.entity_nodes = entity_nodes
        if entity_nodes:
            # nodes per topological entity dimension (0..cell.dim)
            self.nodes_per_entity = np.array([len(entity_nodes[d][0]) for d in range((cell.dim + 1))])
            raise NotImplementedError
        self.node_count = nodes.shape[0]
    def tabulate(self, points, grad=False):
        """Evaluate basis functions (or gradients) at *points* — not implemented."""
        raise NotImplementedError
    def interpolate(self, fn):
        """Interpolate *fn* onto the element's nodes — not implemented."""
        raise NotImplementedError
    def __repr__(self):
        return ('%s(%s, %s)' % (self.__class__.__name__, self.cell, self.degree))
def fetch_production_capacity(zone_key: ZoneKey, target_datetime: datetime, session: Session) -> (dict[(str, Any)] | None):
    """Fetch yearly installed production capacity per mode from the REE API.

    Returns a mapping mode -> {'datetime', 'value', 'source'} for the year of
    *target_datetime*, or None when the API request fails (a warning is logged).
    """
    geo_limit = ZONE_KEY_TO_GEO_LIMIT[zone_key]
    geo_ids = GEO_LIMIT_TO_GEO_IDS[geo_limit]
    # FIX: the URL literal was truncated in the original source; restored to the
    # public REE "apidatos" installed-capacity endpoint — confirm against history.
    url = 'https://apidatos.ree.es/en/datos/generacion/potencia-instalada'
    params = {'start_date': target_datetime.strftime('%Y-01-01T00:00'), 'end_date': target_datetime.strftime('%Y-12-31T23:59'), 'time_trunc': 'year', 'geo_trunc': 'electric_system', 'geo_limit': geo_limit, 'geo_ids': geo_ids, 'tecno_select': 'all'}
    r: Response = session.get(url, params=params)
    if (r.status_code != 200):
        logger.warning(f'{zone_key}: No capacity data available for year {target_datetime.year}')
        return None
    data = r.json()['included']
    capacity = {}
    for item in data:
        value: float = round(item['attributes']['total'], 0)
        if (item['type'] in MODE_MAPPING):
            mode = MODE_MAPPING[item['type']]
            if (mode in capacity):
                # several REE technologies can map to the same mode: aggregate them
                capacity[mode]['value'] += value
            else:
                capacity[mode] = {'datetime': target_datetime.strftime('%Y-%m-%d'), 'value': value, 'source': 'ree.es'}
    logger.info(f'''Fetched capacity for {zone_key} on {target_datetime.date()}:
{capacity}''')
    return capacity
# NOTE(review): the decorator line below lost its prefix during extraction
# (likely "@register.simple_tag" from a Django template tag module).
_tag
def show_article_icon(article, state):
    """Render a FontAwesome icon plus the matching counter for an article.

    *state* is matched for a '-<key>_' fragment (e.g. '-digg_count') to pick
    which counter/icon pair to show; defaults to the view counter.
    """
    # key -> (article attribute name, FontAwesome icon class)
    correspond = {'digg': ['digg_count', 'fa-thumbs-up'], 'look': ['look_count', 'fa-eye'], 'collects': ['collects_count', 'fa-star'], 'comment': ['comment_count', 'fa-comment']}
    key = 'look'
    if state:
        # extract the token between '-' and '_' (e.g. 'digg' from '-digg_count')
        word = re.search('[-](.*?)_.*?', state)
        key = word.group(1)
    attr = correspond.get(key, ['look_count', 'fa-eye'])
    number = getattr(article, attr[0])
    # mark_safe is fine here: attr/number come from code, not user input
    return mark_safe(f'''
    <i class="fa {attr[1]}"></i>
    {number}
    ''')
def split_on_groups(all_args):
    """Partition a flat CLI token list into per-command groups.

    A new group begins at each token found in SUPPORTED_COMMANDS; tokens before
    the first command form their own group. Returns (groups, commands seen).
    """
    groups = []
    used_cmds = []
    current = []
    for token in all_args:
        if token in SUPPORTED_COMMANDS:
            used_cmds.append(token)
            # close the previous group (if any) before starting a new one
            if current:
                groups.append(current)
                current = []
        current.append(token)
    if current:
        groups.append(current)
    return (groups, used_cmds)
def _matrix_repr(M):
    """Return an eval-able repr for a flint matrix, dispatching on entry type."""
    entry_type = type(M[(0, 0)])
    if entry_type is flint.fmpz:
        return f'fmpz_mat({M.nrows()}, {M.ncols()}, {M.entries()!r})'
    if entry_type is flint.fmpq:
        return f'fmpq_mat({M.nrows()}, {M.ncols()}, {M.entries()!r})'
    if entry_type is flint.nmod:
        return f'nmod_mat({M.nrows()}, {M.ncols()}, {M.entries()!r}, {M.modulus()})'
    if entry_type is flint.fmpz_mod:
        return f'fmpz_mod_mat({M.nrows()}, {M.ncols()}, {M.entries()!r}, {M.modulus()})'
    # unreachable for supported matrix types
    assert False
def test_convert_indirect_edge_to_unconditional(parser):
    """An indirect branch with a single fixed target is converted to an
    unconditional edge and the jump instruction is dropped."""
    jump = MockFixedJump(42)
    source_block = MockBlock(0, [MockEdge(0, 42, BranchType.IndirectBranch)], instructions=[jump])
    function = MockFunction([source_block, MockBlock(42, [])])
    assert parser._can_convert_single_outedge_to_unconditional(source_block)
    cfg = parser.parse(function)
    assert [node.name for node in cfg.nodes] == [0, 42]
    edge = cfg.edges[0]
    assert (edge.source.address, edge.sink.address) == (0, 42)
    assert isinstance(edge, UnconditionalEdge)
    # the jump instruction must have been removed along with the conversion
    assert len(list(cfg.instructions)) == 0
class NoHealthCheckTest(AmbassadorTest):
    """End-to-end test: with no active health checking configured, an unhealthy
    endpoint keeps receiving traffic, so roughly 20% of post-failure requests
    (the unhealthy phase) return 500."""
    def init(self):
        self.target = HealthCheckServer()
    def config(self) -> Generator[(Union[(str, Tuple[(Node, str)])], None, None)]:
        # endpoint-resolver Mapping with round_robin and NO health_checks stanza
        (yield (self, self.format('\n---\napiVersion: getambassador.io/v3alpha1\nkind: Mapping\nname: {self.target.path.k8s}-health\nhostname: "*"\nprefix: /healthcheck/\nservice: {self.target.path.fqdn}\nresolver: endpoint\nload_balancer:\n policy: round_robin\n')))
    def queries(self):
        # phase 1: warm-up, diag check, then flip one backend to unhealthy
        (yield Query(self.url('healthcheck/'), phase=1))
        (yield Query(self.url('ambassador/v0/diag/'), phase=1))
        (yield Query(self.url('healthcheck/makeUnhealthy/'), phase=1))
        # phases 2 and 3: 500 probes each, spaced out to span the test window
        for _ in range(500):
            (yield Query(self.url('healthcheck/'), expected=[200, 500], phase=2))
            time.sleep(0.06)
        for _ in range(500):
            (yield Query(self.url('healthcheck/'), expected=[200, 500], phase=3))
            time.sleep(0.06)
    def check(self):
        """Count 200s vs 500s over the 1000 probe results (indices 3..1002)."""
        valid = 0
        errors = 0
        for i in range(3, 1003):
            if (self.results[i].status == 200):
                valid += 1
            elif (self.results[i].status == 500):
                errors += 1
        msg = 'Errors: {}, Valid: {}'.format(errors, valid)
        # expect ~200 errors / ~800 successes with a generous tolerance
        margin = 100
        assert (abs((errors - 200)) < margin)
        assert (abs((valid - 800)) < margin)
# NOTE(review): the decorator line below lost its "@pytest.mark" prefix during
# extraction — restore it before running.
.django_db
def test_tas_unparsable_too_short(client, monkeypatch, elasticsearch_award_index, subaward_with_tas):
    """A malformed (too-short) TAS component in the filter must yield a 422."""
    _setup_es(client, monkeypatch, elasticsearch_award_index)
    resp = query_by_tas_subaward(client, {'require': [['011', '011-0990', '3-4-2']]})
    assert (resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY), 'Failed to return 422 Response'
class OptionSeriesTilemapStatesHover(Options):
    """Auto-generated Highcharts option wrapper for series.tilemap.states.hover.

    NOTE(review): the scalar options appear as same-named getter/setter pairs;
    the `@property` / `@<name>.setter` decorators were most likely lost during
    extraction — each later `def` shadows the earlier one as written.
    """
    def animation(self) -> 'OptionSeriesTilemapStatesHoverAnimation':
        # nested sub-option object
        return self._config_sub_data('animation', OptionSeriesTilemapStatesHoverAnimation)
    def brightness(self):
        # getter (default 0.2)
        return self._config_get(0.2)
    def brightness(self, num: float):
        # setter
        self._config(num, js_type=False)
    def enabled(self):
        # getter (default True)
        return self._config_get(True)
    def enabled(self, flag: bool):
        # setter
        self._config(flag, js_type=False)
    def halo(self) -> 'OptionSeriesTilemapStatesHoverHalo':
        # nested sub-option object
        return self._config_sub_data('halo', OptionSeriesTilemapStatesHoverHalo)
    def lineWidth(self):
        # getter (no configured default)
        return self._config_get(None)
    def lineWidth(self, num: float):
        # setter
        self._config(num, js_type=False)
    def lineWidthPlus(self):
        # getter (default 1)
        return self._config_get(1)
    def lineWidthPlus(self, num: float):
        # setter
        self._config(num, js_type=False)
    def marker(self) -> 'OptionSeriesTilemapStatesHoverMarker':
        # nested sub-option object
        return self._config_sub_data('marker', OptionSeriesTilemapStatesHoverMarker)
def extractBabelfishoutofwaterWordpressCom(item):
    """Parse a feed item from the 'babelfish out of water' WordPress site.

    Returns None for non-release posts (blog/bookkeeping tags, previews, or no
    volume/chapter info), a release message for known translator tags, and
    False when no group tag matches.
    """
    badtags = ['goodreads', 'readathon', 'Writing', 'Blog']
    # idiom fix: generator expression instead of materializing a list for any()
    if any(tmp in item['tags'] for tmp in badtags):
        return None
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if ((not (chp or vol)) or ('preview' in item['title'].lower())):
        return None
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for (tagname, name, tl_type) in tagmap:
        if (tagname in item['tags']):
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
# NOTE(review): the decorator line below lost its "@" prefix during extraction
# (a membership-required view decorator) — restore it before use.
_member_required
def set_document_thumbnail(request, uuid, metadata_uuid=None):
    """(Re)generate a document's thumbnail, optionally from a specific binary
    metadata entry, then redirect back to the document page.

    Raises Http404 when the document or the metadata entry does not exist.
    """
    uuid = UUID(uuid)
    if metadata_uuid:
        metadata_uuid = UUID(metadata_uuid)
    try:
        doc = Document.objects.all().get(uuid=uuid)
    except Document.DoesNotExist:
        raise Http404('Document with this uuid does not exist.')
    if (metadata_uuid is not None):
        # validate the metadata reference before generating the thumbnail
        try:
            m = BinaryMetadata.objects.get(pk=metadata_uuid)
        except BinaryMetadata.DoesNotExist:
            raise Http404('Binary metadata with this uuid does not exist')
    created = doc.create_thumbnail(binary_metadata_uuid=metadata_uuid, force=True)
    if created:
        doc.set_last_modified()
        messages.add_message(request, messages.SUCCESS, f'Thumbnail created.')
    else:
        messages.add_message(request, messages.WARNING, f'Thumbnail could not be created.')
    return redirect(doc)
class ValveTestMultipleOrderedTunnel(ValveTestBases.ValveTestTunnel):
    """Tunnel ACL test: two ports share a tunnel ACL; traffic must be
    re-encapsulated out an alternate stack port after a link failure."""
    # tunnel VID assigned to the ordered-output tunnel under test
    TUNNEL_ID = 2
    CONFIG = "\nacls:\n tunnel_acl:\n - rule:\n dl_type: 0x0800\n ip_proto: 1\n actions:\n output:\n - tunnel: {dp: s2, port: 1}\nvlans:\n vlan100:\n vid: 1\ndps:\n s1:\n dp_id: 0x1\n hardware: 'GenericTFM'\n stack:\n priority: 1\n interfaces:\n 1:\n native_vlan: vlan100\n acls_in: [tunnel_acl]\n 2:\n native_vlan: vlan100\n acls_in: [tunnel_acl]\n 3:\n stack: {dp: s2, port: 3}\n 4:\n stack: {dp: s2, port: 4}\n s2:\n dp_id: 0x2\n hardware: 'GenericTFM'\n interfaces:\n 1:\n native_vlan: vlan100\n 3:\n stack: {dp: s1, port: 3}\n 4:\n stack: {dp: s1, port: 4}\n"
    def test_tunnel_update_multiple_tunnels(self):
        """Both ACL'd ports tunnel via stack port 3; after port 3 goes down the
        tunnel must be recomputed to use stack port 4."""
        valve = self.valves_manager.valves[1]
        port = valve.dp.ports[3]
        self.apply_ofmsgs(valve.stack_manager.add_tunnel_acls())
        # both ingress ports initially forward out stack port 3
        self.validate_tunnel(self.DP_ID, self.DP_ID, 1, 0, 3, self.TUNNEL_ID, True, 'Did not encapsulate and forward')
        self.validate_tunnel(self.DP_ID, self.DP_ID, 2, 0, 3, self.TUNNEL_ID, True, 'Did not encapsulate and forward')
        # fail the stack link and expect a recomputed path via port 4
        self.set_port_down(port.number)
        ofmsgs = valve.stack_manager.add_tunnel_acls()
        self.assertTrue(ofmsgs, 'No tunnel ofmsgs returned after a topology change')
        self.apply_ofmsgs(ofmsgs)
        self.validate_tunnel(self.DP_ID, self.DP_ID, 1, 0, 4, self.TUNNEL_ID, True, 'Did not encapsulate and forward out re-calculated port')
        self.validate_tunnel(self.DP_ID, self.DP_ID, 1, 0, 4, self.TUNNEL_ID, True, 'Did not encapsulate and forward out re-calculated port')
class Const(Field):
    """Validation field accepting exactly one fixed value (possibly None)."""
    errors = {'only_null': 'Must be null.', 'const': "Must be the value '{const}'."}

    def __init__(self, const: typing.Any, **kwargs: typing.Any):
        # allow_null is implied by const=None and must not be passed explicitly
        assert ('allow_null' not in kwargs)
        super().__init__(**kwargs)
        self.const = const

    def validate(self, value: typing.Any) -> typing.Any:
        """Return *value* if it equals the constant, else raise a validation error."""
        if (value == self.const):
            return value
        error_code = 'only_null' if self.const is None else 'const'
        raise self.validation_error(error_code)
def update_address_links(address, method):
    """Hook: mirror Patient links on an address to the patient's Customer.

    No-op unless the Healthcare domain is active. *method* is the doc-event
    name supplied by the framework and is unused here.
    """
    if 'Healthcare' not in frappe.get_active_domains():
        return
    for link in address.links:
        if link.get('link_doctype') != 'Patient':
            continue
        customer = frappe.db.get_value('Patient', link.get('link_name'), 'customer')
        # only add the Customer link once
        if customer and not address.has_link('Customer', customer):
            address.append('links', dict(link_doctype='Customer', link_name=customer))
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the `value_duration` field and
    extends the `value_type` choices with 'duration'. Do not edit by hand."""
    dependencies = [('extra_settings', '0001_initial')]
    operations = [migrations.AddField(model_name='setting', name='value_duration', field=models.DurationField(blank=True, null=True, verbose_name='Value')), migrations.AlterField(model_name='setting', name='value_type', field=models.CharField(choices=[('bool', 'bool'), ('date', 'date'), ('datetime', 'datetime'), ('decimal', 'decimal'), ('duration', 'duration'), ('email', 'email'), ('file', 'file'), ('float', 'float'), ('image', 'image'), ('int', 'int'), ('string', 'string'), ('text', 'text'), ('time', 'time'), ('url', 'url')], max_length=20, verbose_name='Type'))]
class OptionSeriesPyramidDataDragdropDraghandle(Options):
    """Auto-generated Highcharts option wrapper for
    series.pyramid.data.dragDrop.dragHandle.

    NOTE(review): each option appears as a same-named getter/setter pair; the
    `@property` / `@<name>.setter` decorators were most likely lost during
    extraction — each later `def` shadows the earlier one as written.
    """
    def className(self):
        # getter (default CSS class)
        return self._config_get('highcharts-drag-handle')
    def className(self, text: str):
        # setter
        self._config(text, js_type=False)
    def color(self):
        # getter (default fill color)
        return self._config_get('#fff')
    def color(self, text: str):
        # setter
        self._config(text, js_type=False)
    def lineColor(self):
        # getter (default stroke color)
        return self._config_get('rgba(0, 0, 0, 0.6)')
    def lineColor(self, text: str):
        # setter
        self._config(text, js_type=False)
    def lineWidth(self):
        # getter (default 1)
        return self._config_get(1)
    def lineWidth(self, num: float):
        # setter
        self._config(num, js_type=False)
    def zIndex(self):
        # getter (default 901)
        return self._config_get(901)
    def zIndex(self, num: float):
        # setter
        self._config(num, js_type=False)
def add_background(youtube_uri, filename, citation, position):
    """Validate and register a new background video.

    Validates the YouTube URI, position ('center' or a decimal), and filename,
    then appends the entry to utils/backgrounds.json and adds the filename to
    the background choices in utils/.config.template.toml. User feedback is
    delivered via flash(); the function returns None in all cases.
    """
    # extract the 11-character YouTube video id from any common URL form
    regex = re.compile('(?:\\/|%3D|v=|vi=)([0-9A-z\\-_]{11})(?:[%#?&]|$)').search(youtube_uri)
    if (not regex):
        flash('YouTube URI is invalid!', 'error')
        return
    # FIX: the f-string literal was truncated in the original source; rebuilt as
    # the canonical watch URL from the extracted id — confirm against history.
    youtube_uri = f'https://www.youtube.com/watch?v={regex.group(1)}'
    if ((position == '') or (position == 'center')):
        position = 'center'
    elif position.isdecimal():
        position = int(position)
    else:
        flash('Position is invalid! It can be "center" or decimal number.', 'error')
        return
    regex = re.compile('^([a-zA-Z0-9\\s_-]{1,100})$').match(filename)
    if (not regex):
        flash('Filename is invalid!', 'error')
        return
    filename = filename.replace(' ', '_')
    with open('utils/backgrounds.json', 'r', encoding='utf-8') as backgrounds:
        data = json.load(backgrounds)
    if (filename in list(data.keys())):
        flash('Background video with this name already exist!', 'error')
        return
    if (youtube_uri in [data[i][0] for i in list(data.keys())]):
        flash('Background video with this YouTube URI is already added!', 'error')
        return
    with open('utils/backgrounds.json', 'r+', encoding='utf-8') as backgrounds:
        data = json.load(backgrounds)
        data[filename] = [youtube_uri, (filename + '.mp4'), citation, position]
        backgrounds.seek(0)
        json.dump(data, backgrounds, ensure_ascii=False, indent=4)
        # FIX: truncate after rewriting in place so stale trailing bytes cannot
        # corrupt the file if the new JSON is ever shorter than the old one.
        backgrounds.truncate()
    config = tomlkit.loads(Path('utils/.config.template.toml').read_text())
    config['settings']['background']['background_choice']['options'].append(filename)
    with Path('utils/.config.template.toml').open('w') as toml_file:
        toml_file.write(tomlkit.dumps(config))
    flash(f'Added "{citation}-(unknown).mp4" as a new background video!')
    return
# NOTE(review): the three lines below lost their decorator prefixes during
# extraction (two Flask route registrations and an auth-required decorator) —
# restore them before use.
('/emailable-report/export', strict_slashes=False)
('/allure-docker-service/emailable-report/export', strict_slashes=False)
_required
def emailable_report_export_endpoint():
    """Download the emailable HTML report for a project as an attachment.

    Returns 404 when the project does not exist and 400 on any other failure
    (e.g. a report generation still in progress).
    """
    try:
        project_id = resolve_project(request.args.get('project_id'))
        if (is_existent_project(project_id) is False):
            body = {'meta_data': {'message': "project_id '{}' not found".format(project_id)}}
            resp = jsonify(body)
            resp.status_code = 404
            return resp
        # refuse to export while a report generation process is running
        check_process(GENERATE_REPORT_PROCESS, project_id)
        project_path = get_project_path(project_id)
        emailable_report_path = '{}/reports/{}'.format(project_path, EMAILABLE_REPORT_FILE_NAME)
        report = send_file(emailable_report_path, as_attachment=True)
    except Exception as ex:
        message = str(ex)
        body = {'meta_data': {'message': message}}
        resp = jsonify(body)
        resp.status_code = 400
        return resp
    else:
        # success path: return the file response built in the try block
        return report
class CmdTelescope(Cmd):
    """Debugger command: dump a memory region and recursively follow any
    word that points into a valid section, GDB-telescope style."""
    keywords = ['telescope', 'tel']
    description = 'Display a specified region in the memory and follow pointers to valid addresses.'
    parser = argparse.ArgumentParser(prog=keywords[0], description=description, epilog=('Aliases: ' + ', '.join(keywords)))
    parser.add_argument('--length', '-l', type=auto_int, default=64, help='Length of the telescope dump (default: %(default)s).')
    parser.add_argument('address', type=auto_int, help='Start address of the telescope dump.')

    def telescope(self, data, depth):
        """Return the pointer chain starting at data[0:4] as [val, ..., suffix].

        Follows up to *depth* pointer hops while the 32-bit word points into a
        valid section; the final element is the printable-ASCII prefix at the
        last location (empty for a NULL word).
        """
        val = u32(data[0:4])
        if (val == 0):
            # NULL terminates the chain immediately
            return [val, '']
        if ((depth > 0) and self.isAddressInSections(val, 32)):
            newdata = self.readMem(val, 32)
            recursive_result = self.telescope(newdata, (depth - 1))
            recursive_result.insert(0, val)
            return recursive_result
        else:
            # leaf: collect the printable prefix of the raw bytes
            s = ''
            for c in data:
                if isprint(c):
                    s += c
                else:
                    break
            return [val, s]

    def work(self):
        """Parse args, confirm dumps outside known sections, and print one
        telescoped pointer chain per 4-byte word."""
        args = self.getArgs()
        if (args == None):
            return True
        if (not self.isAddressInSections(args.address, args.length)):
            answer = yesno(('Warning: Address 0x%08x (len=0x%x) is not inside a valid section. Continue?' % (args.address, args.length)))
            if (not answer):
                return False
        # read 4 extra bytes so the final word still has a full 32-bit value
        dump = self.readMem(args.address, (args.length + 4))
        if (dump == None):
            return False
        for index in range(0, (len(dump) - 4), 4):
            chain = self.telescope(dump[index:], 4)
            output = ('0x%08x: ' % (args.address + index))
            output += ' -> '.join([('0x%08x' % x) for x in chain[:(- 1)]])
            output += ((' "' + chain[(- 1)]) + '"')
            log.info(output)
        return True
def main():
    """Run Django's command-line utility for administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lojadelivros.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = "Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?"
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)
def test_that_segment_defaults_are_applied(tmpdir):
    # Parse a HISTORY_OBSERVATION with a SEGMENT (days 5-9, ERROR = 0.05) and
    # check the resulting observation errors on both sides of the boundary.
    # NOTE(review): both loops assert std == 0.1, including days 5-8 inside
    # the segment. Presumably the default error mode (RELMIN with
    # ERROR_MIN = 0.1) floors the segment's 0.05 back up to 0.1 — confirm
    # against ert's observation-config defaults before treating this as a bug.
    with tmpdir.as_cwd():
        with open('config.ert', 'w', encoding='utf-8') as fh:
            fh.writelines(dedent('\n NUM_REALIZATIONS 2\n\n ECLBASE ECLIPSE_CASE\n REFCASE ECLIPSE_CASE\n OBS_CONFIG observations\n '))
        with open('observations', 'w', encoding='utf-8') as fo:
            fo.writelines(dedent('\n HISTORY_OBSERVATION FOPR\n {\n SEGMENT SEG\n {\n START = 5;\n STOP = 9;\n ERROR = 0.05;\n };\n };\n '))
        run_sim(datetime(2014, 9, 10), [('FOPR', 'SM3/DAY', None), ('FOPRH', 'SM3/DAY', None)], days=range(10))
        observations = ErtConfig.from_file('config.ert').enkf_obs
        for i in range(1, 5):
            assert (observations['FOPR'].observations[(datetime(2014, 9, 11) + timedelta(days=i))].std == 0.1)
        for i in range(5, 9):
            assert (observations['FOPR'].observations[(datetime(2014, 9, 11) + timedelta(days=i))].std == 0.1)
def extractTwomorefreethoughtsCom(item):
    """Feed-item hook: build a release message for known tags, skip previews
    and untitled chapters."""
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Items without a chapter/volume number, or preview posts, are ignored.
    if not (chp or vol) or 'preview' in title.lower():
        return None
    releases = (('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'))
    for tagname, name, tl_type in releases:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    # No recognized tag: signal "not handled".
    return False
def extractGracegracy118BlogspotCom(item):
    """Feed-item hook: build a release message for known tags, skip previews
    and untitled chapters."""
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Items without a chapter/volume number, or preview posts, are ignored.
    if not (chp or vol) or 'preview' in title.lower():
        return None
    releases = (('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'))
    for tagname, name, tl_type in releases:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    # No recognized tag: signal "not handled".
    return False
class OptionSeriesHeatmapSonificationTracksMappingRate(Options):
    """Accessor pairs for the heatmap sonification track 'rate' mapping
    options (mapFunction, mapTo, max, min, within).

    NOTE(review): in the original, each getter/setter pair was emitted as two
    plain methods with the same name, so the second definition silently
    shadowed the getter. The '@property' / '@<name>.setter' decorators
    restored below match the standard Options accessor pattern used by this
    kind of generated code — confirm against the generator/source.
    """

    @property
    def mapFunction(self):
        """Get the 'mapFunction' option (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        """Set the 'mapFunction' option."""
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Get the 'mapTo' option (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        """Set the 'mapTo' option."""
        self._config(text, js_type=False)

    @property
    def max(self):
        """Get the 'max' option (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        """Set the 'max' option."""
        self._config(num, js_type=False)

    @property
    def min(self):
        """Get the 'min' option (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        """Set the 'min' option."""
        self._config(num, js_type=False)

    @property
    def within(self):
        """Get the 'within' option (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        """Set the 'within' option."""
        self._config(value, js_type=False)
class HttpDialogues(BaseHttpDialogues):
    """Dialogue collection in which this agent always plays the CLIENT role."""

    def __init__(self, self_address: Address, **kwargs: Any) -> None:
        """Initialize, forcing the role of every dialogue to CLIENT."""

        def _client_role(message: Message, receiver_address: Address) -> BaseDialogue.Role:
            # This connection only ever initiates requests, never serves them.
            return HttpDialogue.Role.CLIENT

        BaseHttpDialogues.__init__(self, self_address=self_address, role_from_first_message=_client_role, **kwargs)
class RetryExceptionWrapperInterceptor(grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor):
    """gRPC client interceptor that converts transport-level RpcErrors into
    Flyte exceptions and retries unary-unary calls a bounded number of times."""

    def __init__(self, max_retries: int = 3):
        # BUG FIX: the retry budget was hard-coded to 3, silently ignoring the
        # caller-supplied max_retries argument.
        self._max_retries = max_retries

    @staticmethod
    def _raise_if_exc(request: typing.Any, e: Union[(grpc.Call, grpc.Future)]):
        # BUG FIX: this helper has no `self` parameter but is invoked as
        # `self._raise_if_exc(request, e)`; without @staticmethod that call
        # passed three arguments to a two-parameter function (TypeError).
        if isinstance(e, grpc.RpcError):
            if (e.code() == grpc.StatusCode.UNAUTHENTICATED):
                raise FlyteAuthenticationException() from e
            elif (e.code() == grpc.StatusCode.ALREADY_EXISTS):
                raise FlyteEntityAlreadyExistsException() from e
            elif (e.code() == grpc.StatusCode.NOT_FOUND):
                raise FlyteEntityNotExistException() from e
            elif (e.code() == grpc.StatusCode.INVALID_ARGUMENT):
                raise FlyteInvalidInputException(request) from e
        # Anything else (or an unmapped status code) is a generic system error.
        raise FlyteSystemException() from e

    def intercept_unary_unary(self, continuation, client_call_details, request):
        """Invoke the call, retrying up to self._max_retries times whenever the
        result maps to a FlyteException; re-raise once the budget is spent."""
        retries = 0
        while True:
            fut: grpc.Future = continuation(client_call_details, request)
            e = fut.exception()
            try:
                if e:
                    self._raise_if_exc(request, e)
                return fut
            except FlyteException as e:
                if (retries == self._max_retries):
                    raise e
                retries = (retries + 1)

    def intercept_unary_stream(self, continuation, client_call_details, request):
        """Streaming calls are not retried; pass the call through unchanged."""
        c: grpc.Call = continuation(client_call_details, request)
        return c
def update_tile_conn(tileconn, key_history, wirename1, wire1, wirename2, wire2, tiles):
    """Append a canonicalized tile-to-tile wire connection entry to *tileconn*.

    The two endpoints are ordered by (wire type, shortname, grid_x, grid_y) so
    the same physical connection always produces the same record.
    Note: key_history, wirename1 and wirename2 are currently unused.
    """
    t_a = tiles[wire1['tile']]
    t_b = tiles[wire2['tile']]
    key_a = (wire1['type'], wire1['shortname'], t_a['grid_x'], t_a['grid_y'])
    key_b = (wire2['type'], wire2['shortname'], t_b['grid_x'], t_b['grid_y'])
    if key_a > key_b:
        wire1, t_a, wire2, t_b = wire2, t_b, wire1, t_a
    tileconn.append({
        'grid_deltas': [t_b['grid_x'] - t_a['grid_x'], t_b['grid_y'] - t_a['grid_y']],
        'tile_types': [t_a['type'], t_b['type']],
        'wire_pair': [wire1['shortname'], wire2['shortname']],
    })
def mark_as_notified_on_before(sha256, notification_date):
    """Record *sha256* as notified at *notification_date*, unless it is
    already recorded; commits (and exits on insert failure)."""
    if is_notified_on_before(sha256):
        return
    try:
        database_connection.execute(
            'INSERT INTO seen_sha256_hashes (sha256, notification_date) values (?, ?)',
            [str(sha256), int(notification_date)],
        )
    except Exception as err:
        print('[!] Error with storing the hash in the SQLite3 database: ' + str(err))
        sys.exit()
    finally:
        # Commit runs on both paths (the SystemExit above passes through here).
        database_connection.commit()
def migrate(config_file, current_version, target_version, out=print, i=input):
    """Self-upgrade a Rally config file from current_version to target_version.

    No-op when already current; refuses versions that are too old or newer
    than the target. ``out`` and ``i`` are kept for interface compatibility
    but are unused here.
    """
    logger = logging.getLogger(__name__)
    if current_version == target_version:
        logger.info('Config file is already at version [%s]. Skipping migration.', target_version)
        return
    if current_version < Config.EARLIEST_SUPPORTED_VERSION:
        raise exceptions.ConfigError(f'The config file in {config_file.location} is too old. Please delete it and reconfigure Rally from scratch with {PROGRAM_NAME} configure.')
    logger.info('Upgrading configuration from version [%s] to [%s].', current_version, target_version)
    if current_version >= target_version:
        raise exceptions.ConfigError(f'The existing config file is available in a later version already. Expected version <= [{target_version}] but found [{current_version}]')
    # Keep a backup, then rewrite the file in the current format.
    config_file.backup()
    config_file.store(config_file.load())
    logger.info('Successfully self-upgraded configuration to version [%s]', target_version)
class DNSClientProtocolTCP(DNSClientProtocol):
    """DNS-over-TCP client protocol: frames the query with a 2-byte length
    prefix and reassembles length-prefixed responses from the byte stream."""

    def __init__(self, dnsq, fut, clientip, logger=None):
        super().__init__(dnsq, fut, clientip, logger=logger)
        # Accumulates raw TCP bytes until a complete DNS message is framed.
        self.buffer = bytes()

    def connection_made(self, transport):
        # NOTE(review): send_helper presumably stores `transport` on self,
        # since self.transport is used right below — confirm in the base class.
        self.send_helper(transport)
        msg = self.dnsq.to_wire()
        # RFC 1035 4.2.2: TCP DNS messages carry a 16-bit big-endian length prefix.
        tcpmsg = (struct.pack('!H', len(msg)) + msg)
        self.transport.write(tcpmsg)

    def data_received(self, data):
        # handle_dns_tcp_data consumes every complete message (passing each to
        # receive_helper) and returns the leftover partial bytes.
        self.buffer = utils.handle_dns_tcp_data((self.buffer + data), self.receive_helper)

    def eof_received(self):
        # Leftover bytes at EOF mean the peer closed mid-message.
        if (len(self.buffer) > 0):
            self.logger.debug('Discard incomplete message')
        self.transport.close()
def update_marks_from_args(nodes, marks, tree, args):
    # Populate `nodes` (lists of node ids) and `marks` (parallel lists of
    # '#<n>' labels) from the CLI marking options, optionally refined
    # interactively. --mark, --mark-leaves and --mark-internals are mutually
    # exclusive.
    if args.mark:
        if (args.mark_leaves or args.mark_internals):
            exit('ERROR: incompatible marking options')
        for group in args.mark:
            marks.append([])
            nodes.append([])
            # Commas encode scope and are rewritten to semicolons so the group
            # can be split on ';': ',,,' -> mark a whole subtree, ',,' -> mark
            # a common ancestor, ',' -> separator between mark groups.
            group = group.replace(',,,', ';;;')
            group = group.replace(',,', ';;')
            group = group.replace(',', ';')
            for (mark, pregroup) in enumerate(group.split(';'), 1):
                for subgroup in pregroup.split('='):
                    if (';;' in subgroup):
                        # Two node names: resolve their common ancestor.
                        # (';;;' also contains ';;', so both variants land here.)
                        (node1, node2) = subgroup.split((';;;' if (';;;' in subgroup) else ';;'))
                        node1 = get_node(tree, node1)
                        node2 = get_node(tree, node2)
                        anc = tree.common_ancestor([node1, node2])
                        if (';;;' in subgroup):
                            # ',,,' variant: mark every node in the ancestor's subtree.
                            for node in anc.traverse():
                                marks[(- 1)].append(('#' + str(mark)))
                                nodes[(- 1)].append(node.node_id)
                        elif (';;' in subgroup):
                            # ',,' variant: mark only the common ancestor itself.
                            marks[(- 1)].append(('#' + str(mark)))
                            nodes[(- 1)].append(anc.node_id)
                    else:
                        # Single node name: mark just that node.
                        node = get_node(tree, subgroup)
                        marks[(- 1)].append(('#' + str(mark)))
                        nodes[(- 1)].append(node.node_id)
    if args.mark_leaves:
        if args.mark:
            exit('ERROR: incompatible marking options')
        # One '#1' mark per leaf, each in its own group.
        marks.extend([['#1'] for n in tree.leaves()])
        nodes.extend([[n.node_id] for n in tree.leaves()])
    if args.mark_internals:
        if args.mark:
            exit('ERROR: incompatible marking options')
        # For every internal node, mark all of its descendants with '#1'.
        marks.extend([['#1' for _ in n.descendants()] for n in tree.descendants() if (not n.is_leaf)])
        nodes.extend([[n2.node_id for n2 in n.descendants()] for n in tree.descendants() if (not n.is_leaf)])
        remove_duplicated_marks(nodes, marks, tree)
    if args.mark_gui:
        # Show current marks, then let the user add groups until they submit
        # an empty one.
        for (node, mark) in zip(nodes, marks):
            tree.mark_tree(node, marks=mark)
        interactive_mark(tree, mode='check')
        while (not False):  # NOTE(review): effectively `while True`
            (subnodes, submarks) = interactive_mark(tree, mode=('last' if marks else 'new'))
            if (not submarks):
                break
            marks.append(submarks)
            nodes.append(subnodes)
        remove_duplicated_marks(nodes, marks, tree)
def make_othervar_node(name, blk):
    """Return the OtherVarNode for *name* in blk's binary, creating and
    caching it in binary.othervar_nodes on first use."""
    binary = blk.binary
    cache = binary.othervar_nodes
    if name not in cache:
        cache[name] = OtherVarNode(binary=binary, name=name)
    return cache[name]
class OpenAIEmbeddingGenerator(object):
    """Generate text embeddings via the OpenAI Embeddings API, with helpers
    for token counting, cost estimation and loading precomputed embeddings."""

    def __init__(self, env_file_path: str):
        """Read OPENAI_API_KEY and OPENAI_GPT_MODEL from a dotenv file.

        Raises MissingEnvironmentVariableError when either is absent or empty.
        """
        load_dotenv(dotenv_path=env_file_path)
        self.OPENAI_API_KEY = os.getenv('OPENAI_API_KEY', '')
        if (not self.OPENAI_API_KEY):
            raise MissingEnvironmentVariableError('OPENAI_API_KEY')
        self.OPENAI_GPT_MODEL = os.getenv('OPENAI_GPT_MODEL', '')
        if (not self.OPENAI_GPT_MODEL):
            raise MissingEnvironmentVariableError('OPENAI_GPT_MODEL')

    def get_openai_api_key(self):
        """Return the configured API key."""
        return self.OPENAI_API_KEY

    def get_openai_gpt_model(self):
        """Return the configured GPT model name (used for token counting)."""
        return self.OPENAI_GPT_MODEL

    def load_text(self, data: List[str]):
        """Store the list of texts to embed; raises TypeError otherwise."""
        if isinstance(data, list):
            self.data = data
        else:
            raise TypeError('Data must be a list')

    def count_tokens(self):
        """Count the total tokens of the loaded texts with tiktoken."""
        model = self.get_openai_gpt_model()
        encoding = tiktoken.encoding_for_model(model)
        self.num_tokens = 0
        for d in self.data:
            tokens = encoding.encode(d)
            self.num_tokens += len(tokens)
        return self.num_tokens

    def calculate_embeddings_estimated_cost(self):
        # Price hard-coded at $0.0001 per 1K tokens — keep in sync with
        # OpenAI's current pricing for the embedding model used below.
        return ((self.count_tokens() / 1000) * 0.0001)

    def calculate_embeddings(self):
        """Embed the loaded texts in batches of 1000 and return all vectors."""
        EMBEDDING_MODEL = 'text-embedding-ada-002'
        BATCH_SIZE = 1000
        documents = [self.data[i:(i + BATCH_SIZE)] for i in range(0, len(self.data), BATCH_SIZE)]
        openai.api_key = self.get_openai_api_key()
        embeddings = []
        for docs in documents:
            response = openai.Embedding.create(model=EMBEDDING_MODEL, input=docs)
            # The API should echo inputs back in order; double-check it.
            for (i, be) in enumerate(response['data']):
                assert (i == be['index'])
            batch_embeddings = [e['embedding'] for e in response['data']]
            embeddings.extend(batch_embeddings)
        return embeddings

    def embeddings(self):
        # Lazily compute and cache the embeddings on first call.
        # NOTE(review): a plain method, not a property — callers must invoke it.
        if (not hasattr(self, '_embeddings')):
            self._embeddings = self.calculate_embeddings()
        return self._embeddings

    def generate_embedding(self, text: str):
        """Embed a single text and return its vector."""
        EMBEDDING_MODEL = 'text-embedding-ada-002'
        openai.api_key = self.get_openai_api_key()
        response = openai.Embedding.create(model=EMBEDDING_MODEL, input=[text])
        embedding = response['data'][0]['embedding']
        return embedding

    def load_embeddings_from_csv(self, embeddings_path: str, columns: List, **kwargs):
        """Load a CSV and parse the given columns from their string
        representation back into Python objects (lists of floats)."""
        data = pd.read_csv(embeddings_path, **kwargs)
        for col in columns:
            data[col] = data[col].apply(literal_eval)
        return data
class fileParser():
    """Thin wrapper around an MDAnalysis Universe providing selection-based
    file export and simple geometric measurements.

    NOTE(review): relies on module-level MDAnalysis, transformations, np,
    math, os, shutil and SelectionError.
    """

    def __init__(self, topFile, coorFile=''):
        """Load a topology (and optionally a coordinate file) into a Universe.

        Amber restart files (.rst7/.rst/.inpcrd) are loaded with an explicit
        INPCRD format hint; everything else uses MDAnalysis's own detection.
        (Dead code removed: the original computed an unused lowercase
        coorType up front before unconditionally recomputing it below.)
        """
        if coorFile == '':
            self.uObject = MDAnalysis.Universe(topFile)
        else:
            coorPostfix = os.path.splitext(coorFile)[-1]
            if coorPostfix in ('.rst7', '.rst', '.inpcrd'):
                self.uObject = MDAnalysis.Universe(topFile, coorFile, format='INPCRD')
            else:
                self.uObject = MDAnalysis.Universe(topFile, coorFile)
        # tempfactors (the B-factor column) are needed by setBeta().
        self.uObject.add_TopologyAttr('tempfactors')
        self.topPath = topFile

    def _select(self, selection):
        """Return the atoms matching *selection*; raise on an empty result."""
        atoms = self.uObject.select_atoms(selection)
        if len(atoms) == 0:
            raise SelectionError('Empty selection!')
        return atoms

    def saveFile(self, selection, targetPath, targetType, saveTop=False, topPath=''):
        """Write the selected atoms to targetPath in the given format; when
        saveTop is set (requires selection == 'all'), also copy the topology."""
        atoms = self._select(selection)
        atoms.write(targetPath, targetType, bonds=None)
        if saveTop:
            assert (selection == 'all')
            shutil.copyfile(self.topPath, topPath)

    def saveNDX(self, selections, names, targetPath, nonHydrogen=True):
        """Append one named GROMACS index group per selection to targetPath.

        When nonHydrogen is true, hydrogens (atom name H*) are excluded.
        """
        assert (len(selections) == len(names))
        HString = 'and not (name H*)' if nonHydrogen else ''
        for (selection, name) in zip(selections, names):
            atoms = self.uObject.select_atoms(f'{selection} {HString}')
            if (len(atoms) == 0):
                raise SelectionError('Empty selection!')
            atoms.write(targetPath, 'ndx', name=name, mode='a')

    def getResid(self, selection):
        """Return the 1-based residue indices of the selection, comma-joined."""
        atoms = self._select(selection)
        return ','.join([str(num + 1) for num in atoms.residues.ix])

    def measureMinmax(self, selection):
        """Return [[min_x, min_y, min_z], [max_x, max_y, max_z]] of the selection."""
        atoms = self._select(selection)
        xyz_array = np.transpose(atoms.positions)
        mins = [np.min(xyz_array[i]) for i in range(3)]
        maxs = [np.max(xyz_array[i]) for i in range(3)]
        return np.array([mins, maxs])

    def measureCenter(self, selection):
        """Return the unweighted geometric center [x, y, z] of the selection."""
        atoms = self._select(selection)
        xyz_array = np.transpose(atoms.positions)
        return np.array([np.average(xyz_array[i]) for i in range(3)])

    def measureDistance(self, selection1, selection2):
        """Distance between the geometric centers of two selections, rounded
        to one decimal."""
        center1 = self.measureCenter(selection1)
        center2 = self.measureCenter(selection2)
        return round(np.linalg.norm(center2 - center1), 1)

    def measurePBC(self):
        """Return [extent_vector, center] computed from the all-atom bounding box."""
        minmaxArray = self.measureMinmax('all')
        vec = minmaxArray[1] - minmaxArray[0]
        center = self.measureCenter('all')
        return np.array((vec, center))

    def measurePolarAngles(self, selectionPro, selectionLig):
        """Return two whole-degree angles (as floats) describing the direction
        from the protein center to the ligand center: the polar angle against
        -y and the azimuth in the x-z plane."""
        vector = self.measureCenter(selectionLig) - self.measureCenter(selectionPro)
        vector /= np.linalg.norm(vector)
        theta = float(int(math.degrees(np.arccos(-vector[1]))))
        phi = float(int(math.degrees(np.arctan2(vector[2], vector[0]))))
        return (theta, phi)

    def setBeta(self, selection, beta):
        """Set the B-factor (tempfactor) of the selected atoms to *beta*."""
        atoms = self._select(selection)
        atoms.tempfactors = beta

    def moveSystem(self, moveVector):
        """Translate every atom by *moveVector*."""
        allAtoms = self.uObject.select_atoms('all')
        transformations.translate(moveVector)(allAtoms)

    def rotateSystem(self, axis, degrees):
        """Rotate every atom by *degrees* around the global 'x', 'y' or 'z' axis."""
        axisVectors = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}
        assert axis in axisVectors
        allAtoms = self.uObject.select_atoms('all')
        transformations.rotate.rotateby(degrees, axisVectors[axis], ag=allAtoms)(allAtoms)

    def centerSystem(self):
        """Translate the system so its bounding-box center moves to the origin."""
        vec = self.measurePBC()[1] * (-1.0)
        self.moveSystem(vec)
def test_decimal_precision_is_greater_than_scale():
    """A decimal logical type whose scale exceeds its precision is rejected.

    BUG FIX: the original body began with a lone "'" — an unterminated string
    literal (syntax error), evidently a docstring mangled during extraction;
    restored as a proper docstring.
    """
    schema = {'type': 'record', 'name': 'test_scale_is_an_int', 'fields': [{'name': 'field', 'type': {'logicalType': 'decimal', 'precision': 5, 'scale': 10, 'type': 'bytes'}}]}
    with pytest.raises(SchemaParseException, match='decimal scale must be less than or equal to'):
        parse_schema(schema)
class Station(object):
    """A station within a system: facility flags, pad size, distance from the
    arrival star and trade-data bookkeeping."""

    __slots__ = ('ID', 'system', 'dbname', 'lsFromStar', 'market', 'blackMarket', 'shipyard', 'maxPadSize', 'outfitting', 'rearm', 'refuel', 'repair', 'planetary', 'fleet', 'odyssey', 'itemCount', 'dataAge')

    def __init__(self, ID, system, dbname, lsFromStar, market, blackMarket, shipyard, maxPadSize, outfitting, rearm, refuel, repair, planetary, fleet, odyssey, itemCount=0, dataAge=None):
        self.ID = ID
        self.system = system
        self.dbname = dbname
        self.lsFromStar = int(lsFromStar)
        # A station with recorded price items is known to have a market,
        # regardless of the raw flag value.
        self.market = 'Y' if itemCount != 0 else market
        self.blackMarket = blackMarket
        self.shipyard = shipyard
        self.maxPadSize = maxPadSize
        self.outfitting = outfitting
        self.rearm = rearm
        self.refuel = refuel
        self.repair = repair
        self.planetary = planetary
        self.fleet = fleet
        self.odyssey = odyssey
        self.itemCount = itemCount
        self.dataAge = dataAge
        # Register this station with its owning system.
        system.stations = system.stations + (self,)

    def name(self, detail=0):
        """Return 'System/Station'; *detail* is accepted but unused."""
        return '%s/%s' % (self.system.dbname, self.dbname)

    def checkPadSize(self, maxPadSize):
        """True when no pad-size filter is given or ours is in the filter."""
        if not maxPadSize:
            return True
        return self.maxPadSize in maxPadSize

    def checkPlanetary(self, planetary):
        """True when no planetary filter is given or ours is in the filter."""
        if not planetary:
            return True
        return self.planetary in planetary

    def checkFleet(self, fleet):
        """True when no fleet-carrier filter is given or ours is in the filter."""
        if not fleet:
            return True
        return self.fleet in fleet

    def checkOdyssey(self, odyssey):
        """True when no odyssey filter is given or ours is in the filter."""
        if not odyssey:
            return True
        return self.odyssey in odyssey

    def distFromStar(self, addSuffix=False):
        """Format the arrival distance for display (ls, K-ls, or ly)."""
        ls = self.lsFromStar
        if not ls:
            return 'Unk' if addSuffix else '?'
        suffix = 'ls' if addSuffix else ''
        if ls < 1000:
            return '{:n}'.format(ls) + suffix
        if ls < 10000:
            return '{:.2f}K'.format(ls / 1000) + suffix
        if ls < 1000000:
            return '{:n}K'.format(int(ls / 1000)) + suffix
        # Extremely distant stations: convert light-seconds to light-years.
        return '{:.2f}ly'.format(ls / (((365 * 24) * 60) * 60))

    def isTrading(self):
        """True when the station has price data or a known market."""
        return self.itemCount > 0 or self.market == 'Y'

    def itemDataAgeStr(self):
        """Fixed-width age of the price data, or '-' when unknown."""
        if self.itemCount and self.dataAge:
            return '{:7.2f}'.format(self.dataAge)
        return '-'

    def str(self):
        """Return 'System/Station'."""
        return '%s/%s' % (self.system.dbname, self.dbname)
# NOTE(review): the line below appears to be a class decorator (e.g.
# '@...WebInterfaceUnitTestConfig(...)') that lost its prefix during
# extraction — as written it is a syntax error; confirm against the original.
.WebInterfaceUnitTestConfig(intercom_mock_class=IntercomMock, database_mock_class=DbMock)
class TestAppShowAnalysis():
    """Web-interface tests for the /analysis/<uid> views."""

    def test_app_show_analysis_get_valid_fw(self, test_client):
        # The analysis page of a known firmware shows its UID, plugin tooltips
        # and general info; unset/epoch release dates render as 'unknown'.
        result = test_client.get(f'/analysis/{TEST_FW.uid}').data
        assert ((b'<strong>UID:</strong> ' + make_bytes(TEST_FW.uid)) in result)
        assert (b'data-toggle="tooltip" title="mandatory plugin description"' in result)
        assert (b'data-toggle="tooltip" title="optional plugin description"' in result)
        assert (b'test text' in result), 'general info: file type is missing'
        assert (b'1970-01-01' not in result)
        assert (b'unknown' in result)
        result = test_client.get(f'/analysis/{TEST_FW_2.uid}').data
        assert (b'unknown' not in result)
        assert (b'2000-01-01' in result)

    def test_app_show_analysis_file_with_preview(self, test_client):
        # Text files additionally get a content preview section.
        result = test_client.get(f'/analysis/{TEST_TEXT_FILE.uid}').data
        assert ((b'<strong>UID:</strong> ' + make_bytes(TEST_TEXT_FILE.uid)) in result)
        assert (b'Preview' in result)

    def test_app_show_analysis_invalid_analysis(self, test_client):
        # Requesting a non-existent analysis plugin renders an error page.
        result = test_client.get(f'/analysis/{TEST_FW.uid}/this_analysis_does_not_exist/ro/{TEST_FW.uid}').data
        assert (b'Error!' in result)

    def test_app_single_file_analysis(self, test_client, intercom_task_list):
        # POSTing selected plugins schedules an analysis task via intercom
        # and redirects (302 FOUND).
        result = test_client.get(f'/analysis/{TEST_FW.uid}')
        assert (b'Add new analysis' in result.data)
        assert (b'Update analysis' in result.data)
        assert (not intercom_task_list)
        post_new = test_client.post(f'/analysis/{TEST_FW.uid}', content_type='multipart/form-data', data={'analysis_systems': ['plugin_a', 'plugin_b']})
        assert (post_new.status_code == HTTPStatus.FOUND)
        assert intercom_task_list
        assert (intercom_task_list[0].scheduled_analysis == ['plugin_a', 'plugin_b'])

    def test_app_failed_analysis(self, test_client):
        # Failed analyses are listed with their failure reason in danger style.
        template = test_client.get(f'/analysis/{FAILED_FO.uid}/failed_analysis').data.decode()
        assert ('Failed' in template)
        assert ('reason for fail' in template)
        assert ('class="table-danger"' in template), 'failed result should be rendered in "danger" style'
# NOTE(review): the line below appears to be a '@pytest.mark.sphinx(...)'
# decorator that lost its prefix during extraction — confirm against the
# original source.
.sphinx(buildername='html', srcdir=os.path.join(SOURCE_DIR, 'include_from_rst'), freshenv=True)
def test_include_from_rst(app, status, warning, get_sphinx_app_doctree):
    # Build the 'include_from_rst' fixture project, require a clean build
    # (no warnings), and regression-check the resulting doctree XML.
    app.build()
    assert ('build succeeded' in status.getvalue())
    warnings = warning.getvalue().strip()
    assert (warnings == '')
    get_sphinx_app_doctree(app, docname='index', regress=True, regress_ext='.xml')
def test_generate_gpu_scene_with_references_before_generating_gpu_of_references_first(create_test_data, store_local_session, create_pymel, create_maya_env):
    # Generating a GPU representation of a look-dev scene must fail with a
    # RuntimeError while its referenced model versions have no GPU repr yet,
    # and the error message must name the offending reference's file path.
    data = create_test_data
    gen = RepresentationGenerator(version=data['building1_yapi_look_dev_main_v001'])
    with pytest.raises(RuntimeError) as cm:
        gen.generate_gpu()
    assert (str(cm.value) == 'Please generate the GPU Representation of the references first!!!\n{}'.format(data['building1_yapi_model_main_v003'].absolute_full_path))
class KeyBindingEditor(Editor):
    """Editor for a single key binding: captures a key press, warns on
    duplicate assignments, and stores the key name as the editor value."""

    # Whether the bound control currently has keyboard focus.
    has_focus = Bool(False)
    # Fired by the control when a key is pressed.
    key = Event()
    # Fired to clear the current binding.
    clear = Event()

    def init(self, parent):
        # Create the wx control that captures key events for this editor.
        self.control = KeyBindingCtrl(self, parent, size=wx.Size(160, 19))

    def update_editor(self):
        # Repaint the control so it reflects the current value.
        self.control.Refresh()

    def _key_changed(self, event):
        # Translate the wx key event into a key name; if that key is already
        # bound elsewhere, ask the user before reassigning it.
        binding = self.object
        key_name = key_event_to_name(event)
        cur_binding = binding.owner.key_binding_for(binding, key_name)
        if (cur_binding is not None):
            result = confirm(parent=self.control, message=f'''{key_name!r} has already been assigned to '{cur_binding.description}'.
Do you wish to continue?''', title='Duplicate Key Definition')
            if (result != YES):
                return
        self.value = key_name

    def _clear_changed(self):
        # Reset the binding to "no key assigned".
        self.value = ''
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.