hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9cfb1818f6d53d232b4a27bfb9cdc6fede1c9ca7
| 3,470
|
py
|
Python
|
datasets/utils/batch_collator.py
|
Nik-V9/AirObject
|
5937e64531f08449e81d2c90e3c6643727efbaf0
|
[
"BSD-3-Clause"
] | 9
|
2022-03-15T17:28:48.000Z
|
2022-03-29T12:32:28.000Z
|
datasets/utils/batch_collator.py
|
Nik-V9/AirObject
|
5937e64531f08449e81d2c90e3c6643727efbaf0
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T06:03:14.000Z
|
2022-03-29T13:38:29.000Z
|
datasets/utils/batch_collator.py
|
Nik-V9/AirObject
|
5937e64531f08449e81d2c90e3c6643727efbaf0
|
[
"BSD-3-Clause"
] | 1
|
2022-03-15T19:34:06.000Z
|
2022-03-15T19:34:06.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import torch
import re
import collections
from torch._six import string_classes
class BatchCollator(object):
'''
pack dict batch
'''
def __init__(self):
super(BatchCollator,self).__init__()
def __call__(self, batch):
data= {}
size = len(batch)
for key in batch[0]:
l = []
for i in range(size):
l = l + [batch[i][key]]
data[key] = l
return data
# def vis_custom_collate(batch):
# r"""Puts each tensor data field into a tensor with outer dimension batch size
# and Puts list data into list with length batch size"""
# tensors = []
# if len(batch[0]) == 3:
# list_1 = []
# list_2 = []
# elif len(batch[0]) == 2:
# list_1 = []
# for i in range(len(batch)):
# tensors.append(batch[i][0])
# if len(batch[0]) == 3:
# list_1.append(batch[i][1])
# list_2.append(batch[i][2])
# elif len(batch[0]) == 2:
# list_1.append(batch[i][1])
# tensor = torch.stack(tensors, 0)
# if len(batch[0]) == 3:
# return tensor, list_1, list_2
# elif len(batch[0]) == 2:
# return tensor, list_1
# else:
# return tensor
def vis_custom_collate(batch):
r"""Puts each tensor data field into a tensor with outer dimension batch size
and Puts list data into list with length batch size"""
if len(batch[0]) == 3:
tensors = []
list_1 = []
list_2 = []
if len(batch[0]) == 2:
list_1 = []
list_2 = []
elif len(batch[0]) == 1:
list_1 = []
for i in range(len(batch)):
if len(batch[0]) == 3:
tensors.append(batch[i][0])
list_1.append(batch[i][1])
list_2.append(batch[i][2])
elif len(batch[0]) == 2:
list_1.append(batch[i][0])
list_2.append(batch[i][1])
elif len(batch[0]) == 1:
list_1.append(batch[i][0])
if len(batch[0]) == 3:
tensor = torch.stack(tensors, 0)
return tensor, list_1, list_2
elif len(batch[0]) == 2:
return list_1, list_2
elif len(batch[0]) == 1:
return list_1
def eval_custom_collate(batch):
r"""Puts each tensor data field into a tensor with outer dimension batch size
and Puts list data into list with length batch size"""
if len(batch[0]) == 4:
tensors = []
list_1 = []
list_2 = []
list_3 = []
elif len(batch[0]) == 3:
list_1 = []
list_2 = []
list_3 = []
elif len(batch[0]) == 2:
list_1 = []
list_2 = []
elif len(batch[0]) == 1:
list_1 = []
for i in range(len(batch)):
if len(batch[0]) == 4:
tensors.append(batch[i][0])
list_1.append(batch[i][1])
list_2.append(batch[i][2])
list_3.append(batch[i][3])
elif len(batch[0]) == 3:
list_1.append(batch[i][0])
list_2.append(batch[i][1])
list_3.append(batch[i][2])
elif len(batch[0]) == 2:
list_1.append(batch[i][0])
list_2.append(batch[i][1])
elif len(batch[0]) == 1:
list_1.append(batch[i][0])
if len(batch[0]) == 4:
tensor = torch.stack(tensors, 0)
return tensor, list_1, list_2, list_3
elif len(batch[0]) == 3:
return list_1, list_2, list_3
elif len(batch[0]) == 2:
return list_1, list_2
elif len(batch[0]) == 1:
return list_1
| 26.48855
| 83
| 0.535735
| 522
| 3,470
| 3.425287
| 0.124521
| 0.138702
| 0.135906
| 0.123602
| 0.82774
| 0.78915
| 0.771253
| 0.770134
| 0.748322
| 0.714765
| 0
| 0.054077
| 0.307205
| 3,470
| 131
| 84
| 26.48855
| 0.689684
| 0.295965
| 0
| 0.682353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047059
| false
| 0
| 0.058824
| 0
| 0.211765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1433c0b5164e9c828d920d22386a8b1886e756a8
| 44
|
py
|
Python
|
kolab/kotonoha/__init__.py
|
roy029/kolab
|
10a3054da5e7c96c575de1336056eee65368c087
|
[
"MIT"
] | null | null | null |
kolab/kotonoha/__init__.py
|
roy029/kolab
|
10a3054da5e7c96c575de1336056eee65368c087
|
[
"MIT"
] | 1
|
2021-11-14T05:38:27.000Z
|
2021-11-14T05:38:27.000Z
|
kolab/kotonoha/__init__.py
|
roy029/kolab
|
10a3054da5e7c96c575de1336056eee65368c087
|
[
"MIT"
] | 7
|
2020-11-02T13:05:44.000Z
|
2022-01-09T11:06:04.000Z
|
from kolab.kotonoha.kotonoha import Kotonoha
| 44
| 44
| 0.886364
| 6
| 44
| 6.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 44
| 1
| 44
| 44
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1438d464ff364b51b9b470dcf117f81f08ef05a2
| 170
|
py
|
Python
|
Multiprocessing/single.py
|
commoncdp2021/Gun-Gaja-Gun
|
95295f4ad97500d424b90c270bba6360f455844a
|
[
"MIT"
] | 171
|
2015-01-20T04:13:35.000Z
|
2022-03-14T17:17:40.000Z
|
Multiprocessing/single.py
|
commoncdp2021/Gun-Gaja-Gun
|
95295f4ad97500d424b90c270bba6360f455844a
|
[
"MIT"
] | 4
|
2017-04-29T20:11:09.000Z
|
2017-05-08T16:49:15.000Z
|
Multiprocessing/single.py
|
commoncdp2021/Gun-Gaja-Gun
|
95295f4ad97500d424b90c270bba6360f455844a
|
[
"MIT"
] | 112
|
2015-04-28T21:08:11.000Z
|
2022-03-16T23:09:25.000Z
|
#!/usr/bin/python
from gen_rand import gen_random_data
if __name__ == '__main__':
gen_random_data()
gen_random_data()
gen_random_data()
gen_random_data()
| 21.25
| 36
| 0.723529
| 25
| 170
| 4.16
| 0.48
| 0.432692
| 0.625
| 0.461538
| 0.5
| 0.5
| 0.5
| 0.5
| 0.5
| 0
| 0
| 0
| 0.170588
| 170
| 8
| 37
| 21.25
| 0.737589
| 0.094118
| 0
| 0.666667
| 0
| 0
| 0.051948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1ae99a5d72be923af2aa8187ae85d0023500663d
| 199
|
py
|
Python
|
spacy_dbpedia_spotlight/util.py
|
oroszgy/spacy-dbpedia-spotlight
|
49c78916d1bf48fc243f4ebc8352748bcbc70596
|
[
"MIT"
] | 51
|
2021-02-14T04:57:46.000Z
|
2022-03-30T08:57:16.000Z
|
spacy_dbpedia_spotlight/util.py
|
oroszgy/spacy-dbpedia-spotlight
|
49c78916d1bf48fc243f4ebc8352748bcbc70596
|
[
"MIT"
] | 7
|
2021-04-08T07:25:19.000Z
|
2022-03-17T16:36:45.000Z
|
spacy_dbpedia_spotlight/util.py
|
oroszgy/spacy-dbpedia-spotlight
|
49c78916d1bf48fc243f4ebc8352748bcbc70596
|
[
"MIT"
] | 8
|
2021-02-14T09:57:28.000Z
|
2022-02-24T14:19:34.000Z
|
try: # Python 3.8
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata # noqa: F401
pkg_meta = importlib_metadata.metadata(__name__.split(".")[0])
| 28.428571
| 62
| 0.758794
| 25
| 199
| 5.72
| 0.68
| 0.475524
| 0.321678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035294
| 0.145729
| 199
| 7
| 62
| 28.428571
| 0.805882
| 0.105528
| 0
| 0
| 0
| 0
| 0.005682
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1af946057e6ad728350757d9d6f19a1fb8b32d3b
| 3,192
|
py
|
Python
|
colour/utilities/__init__.py
|
jchwei/colour
|
2b2ad0a0f2052a1a0b4b076b489687235e804fdf
|
[
"BSD-3-Clause"
] | null | null | null |
colour/utilities/__init__.py
|
jchwei/colour
|
2b2ad0a0f2052a1a0b4b076b489687235e804fdf
|
[
"BSD-3-Clause"
] | null | null | null |
colour/utilities/__init__.py
|
jchwei/colour
|
2b2ad0a0f2052a1a0b4b076b489687235e804fdf
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .data_structures import Lookup, Structure, CaseInsensitiveMapping
from .common import (
handle_numpy_errors, ignore_numpy_errors, raise_numpy_errors,
print_numpy_errors, warn_numpy_errors, ignore_python_warnings, batch,
disable_multiprocessing, multiprocessing_pool, is_matplotlib_installed,
is_networkx_installed, is_openimageio_installed, is_pandas_installed,
is_iterable, is_string, is_numeric, is_integer, is_sibling, filter_kwargs,
filter_mapping, first_item, get_domain_range_scale, set_domain_range_scale,
domain_range_scale, to_domain_1, to_domain_10, to_domain_100,
to_domain_degrees, to_domain_int, from_range_1, from_range_10,
from_range_100, from_range_degrees, from_range_int)
from .array import (as_array, as_int_array, as_float_array, as_numeric, as_int,
as_float, as_namedtuple, closest_indexes, closest,
normalise_maximum, interval, is_uniform, in_array, tstack,
tsplit, row_as_diagonal, dot_vector, dot_matrix, orient,
centroid, linear_conversion, lerp, fill_nan, ndarray_write)
from .metrics import metric_mse, metric_psnr
from .verbose import (
ColourWarning, ColourUsageWarning, ColourRuntimeWarning, message_box,
show_warning, warning, runtime_warning, usage_warning, filter_warnings,
suppress_warnings, numpy_print_options, ANCILLARY_COLOUR_SCIENCE_PACKAGES,
ANCILLARY_RUNTIME_PACKAGES, ANCILLARY_DEVELOPMENT_PACKAGES,
ANCILLARY_EXTRAS_PACKAGES, describe_environment)
__all__ = ['Lookup', 'Structure', 'CaseInsensitiveMapping']
__all__ += [
'handle_numpy_errors', 'ignore_numpy_errors', 'raise_numpy_errors',
'print_numpy_errors', 'warn_numpy_errors', 'ignore_python_warnings',
'batch', 'disable_multiprocessing', 'multiprocessing_pool',
'is_matplotlib_installed', 'is_networkx_installed',
'is_openimageio_installed', 'is_pandas_installed', 'is_iterable',
'is_string', 'is_numeric', 'is_integer', 'is_sibling', 'filter_kwargs',
'filter_mapping', 'first_item', 'get_domain_range_scale',
'set_domain_range_scale', 'domain_range_scale', 'to_domain_1',
'to_domain_10', 'to_domain_100', 'to_domain_degrees', 'to_domain_int',
'from_range_1', 'from_range_10', 'from_range_100', 'from_range_degrees',
'from_range_int'
]
__all__ += [
'as_array', 'as_int_array', 'as_float_array', 'as_numeric', 'as_int',
'as_float', 'as_namedtuple', 'closest_indexes', 'closest',
'normalise_maximum', 'interval', 'is_uniform', 'in_array', 'tstack',
'tsplit', 'row_as_diagonal', 'dot_vector', 'dot_matrix', 'orient',
'centroid', 'linear_conversion', 'fill_nan', 'lerp', 'ndarray_write'
]
__all__ += ['metric_mse', 'metric_psnr']
__all__ += [
'ColourWarning', 'ColourUsageWarning', 'ColourRuntimeWarning',
'message_box', 'show_warning', 'warning', 'runtime_warning',
'usage_warning', 'filter_warnings', 'suppress_warnings',
'numpy_print_options', 'ANCILLARY_COLOUR_SCIENCE_PACKAGES',
'ANCILLARY_RUNTIME_PACKAGES', 'ANCILLARY_DEVELOPMENT_PACKAGES',
'ANCILLARY_EXTRAS_PACKAGES', 'describe_environment'
]
| 54.101695
| 79
| 0.758145
| 383
| 3,192
| 5.738903
| 0.274151
| 0.050046
| 0.043676
| 0.020928
| 0.862602
| 0.862602
| 0.862602
| 0.862602
| 0.862602
| 0.862602
| 0
| 0.009042
| 0.133772
| 3,192
| 58
| 80
| 55.034483
| 0.785895
| 0.006579
| 0
| 0.055556
| 0
| 0
| 0.355002
| 0.092458
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2162d1e841500e62dfd06303a3a876e6ba9a27a5
| 298
|
py
|
Python
|
src/wai/annotations/core/component/_ProcessorComponent.py
|
waikato-ufdl/wai-annotations-core
|
bac3429e9488efb456972c74f9d462f951c4af3d
|
[
"Apache-2.0"
] | null | null | null |
src/wai/annotations/core/component/_ProcessorComponent.py
|
waikato-ufdl/wai-annotations-core
|
bac3429e9488efb456972c74f9d462f951c4af3d
|
[
"Apache-2.0"
] | 3
|
2021-06-30T23:42:47.000Z
|
2022-03-01T03:45:07.000Z
|
src/wai/annotations/core/component/_ProcessorComponent.py
|
waikato-ufdl/wai-annotations-core
|
bac3429e9488efb456972c74f9d462f951c4af3d
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC
from ..stream import StreamProcessor, InputElementType, OutputElementType
from ._Component import Component
class ProcessorComponent(
StreamProcessor[InputElementType, OutputElementType],
Component,
ABC
):
"""
Base class for plugin ISPs.
"""
pass
| 18.625
| 73
| 0.731544
| 27
| 298
| 8.037037
| 0.555556
| 0.285714
| 0.442396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201342
| 298
| 15
| 74
| 19.866667
| 0.911765
| 0.090604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.111111
| 0.333333
| 0
| 0.444444
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
0d30eb8acda686db12da3797b3b61a5d034fe73a
| 38
|
py
|
Python
|
web-service/kagan.py
|
ResearcherOne/example-repo
|
894e3c5d5948fbbadcf2651f75cbc928e119093e
|
[
"MIT"
] | null | null | null |
web-service/kagan.py
|
ResearcherOne/example-repo
|
894e3c5d5948fbbadcf2651f75cbc928e119093e
|
[
"MIT"
] | null | null | null |
web-service/kagan.py
|
ResearcherOne/example-repo
|
894e3c5d5948fbbadcf2651f75cbc928e119093e
|
[
"MIT"
] | null | null | null |
print("yeni kagan ozellik çalışıyor")
| 19
| 37
| 0.789474
| 5
| 38
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
0d45beee999e9b43d68c430f2c97a8268cc934fa
| 25
|
py
|
Python
|
src/musescore/__init__.py
|
ryanrudes/musescore
|
4e38e1b269cb370c6bb6f3bf964fa12b942b6dd5
|
[
"MIT"
] | null | null | null |
src/musescore/__init__.py
|
ryanrudes/musescore
|
4e38e1b269cb370c6bb6f3bf964fa12b942b6dd5
|
[
"MIT"
] | null | null | null |
src/musescore/__init__.py
|
ryanrudes/musescore
|
4e38e1b269cb370c6bb6f3bf964fa12b942b6dd5
|
[
"MIT"
] | null | null | null |
from .musescore import *
| 12.5
| 24
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b49a3072d8f4af2bc6caae36776692324e75deae
| 14,138
|
py
|
Python
|
tests/test_vector.py
|
cs207FinalProjectGroup/cs207-FinalProject
|
faa78f023df43c13f2ccd4711835c4313f193c9b
|
[
"MIT"
] | null | null | null |
tests/test_vector.py
|
cs207FinalProjectGroup/cs207-FinalProject
|
faa78f023df43c13f2ccd4711835c4313f193c9b
|
[
"MIT"
] | null | null | null |
tests/test_vector.py
|
cs207FinalProjectGroup/cs207-FinalProject
|
faa78f023df43c13f2ccd4711835c4313f193c9b
|
[
"MIT"
] | null | null | null |
import sys
import os
import numpy as np
import pytest
sys.path.append('..')
import autodiff as ad
def test_create_vector():
v = ad.create_vector('v', [1, 2])
assert(v[0].getValue() == 1)
assert(v[1].getValue() == 2)
derivs = ad.get_deriv(v)
assert(np.array_equal(np.array([deriv.get('v1', 0) for deriv in derivs]), np.array([1, 0])))
assert(np.array_equal(np.array([deriv.get('v2', 0) for deriv in derivs]), np.array([0, 1])))
jacobian = ad.get_jacobian(v, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))
jacobian = ad.get_jacobian(v, ['v1', 'v2', 'hello'])
assert(np.array_equal(jacobian, np.array([[1, 0, 0], [0, 1, 0]])))
v = ad.create_vector('v', [1, 2], [3, 4])
assert(v[0].getValue() == 1)
assert(v[1].getValue() == 2)
derivs = ad.get_deriv(v)
assert(np.array_equal(np.array([deriv.get('v1', 0) for deriv in derivs]), np.array([3, 0])))
assert(np.array_equal(np.array([deriv.get('v2', 0) for deriv in derivs]), np.array([0, 4])))
jacobian = ad.get_jacobian(v, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[3, 0], [0, 4]])))
jacobian = ad.get_jacobian(v, ['v1', 'v2', 'hello'])
assert(np.array_equal(jacobian, np.array([[3, 0, 0], [0, 4, 0]])))
with pytest.raises(Exception):
v = ad.create_vector('v', [1, 2], [3, 4, 5])
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v = np.array([x, y])
assert(np.array_equal(ad.get_value(v), np.array([1, 2])))
jacobian = ad.get_jacobian(v, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[0, 0], [0, 0]])))
jacobian = ad.get_jacobian(v, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v = np.array([x, 2 * y])
assert(np.array_equal(ad.get_value(v), np.array([1, 4])))
jacobian = ad.get_jacobian(v, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[1, 0], [0, 2]])))
jacobian = ad.get_jacobian(v, ['y', 'x'])
assert(np.array_equal(jacobian, np.array([[0, 1], [2, 0]])))
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v = np.array([x + y, 2 * y])
assert(np.array_equal(ad.get_value(v), np.array([3, 4])))
jacobian = ad.get_jacobian(v, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[1, 1], [0, 2]])))
jacobian = ad.get_jacobian(v, ['y', 'x'])
assert(np.array_equal(jacobian, np.array([[1, 1], [2, 0]])))
def test_add():
v1 = ad.create_vector('v', [1, 2])
v2 = ad.create_vector('v', [1, 5])
v3 = v1 + v2
assert(v3[0].getValue() == 2)
assert(v3[1].getValue() == 7)
jacobian = ad.get_jacobian(v3, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[2, 0], [0, 2]])))
v1 = ad.create_vector('v', [1, 2])
v2 = v1 + 10
assert(v2[0].getValue() == 11)
assert(v2[1].getValue() == 12)
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))
v1 = ad.create_vector('v', [1, 2])
v2 = ad.Scalar('v2', 4)
v3 = ad.Scalar('v1', 7)
v4 = v1 + np.array([v2, v3])
assert(v4[0].getValue() == 5)
assert(v4[1].getValue() == 9)
jacobian = ad.get_jacobian(v4, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[1, 1], [1, 1]])))
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v1 = np.array([x, y])
v2 = ad.create_vector('v', [1, 5])
v3 = v1 + v2
assert(v3[0].getValue() == 2)
assert(v3[1].getValue() == 7)
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v1 = np.array([x, y])
v2 = np.array([x + y, x])
v3 = v1 + v2
assert(v3[0].getValue() == 4)
assert(v3[1].getValue() == 3)
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[2, 1], [1, 1]])))
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v1 = np.array([x, y])
v2 = np.array([y, 10])
v3 = v1 + v2
assert(v3[0].getValue() == 3)
assert(v3[1].getValue() == 12)
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[1, 1], [0, 1]])))
def test_mul():
v1 = ad.create_vector('v', [1, 2])
v2 = ad.create_vector('w', [3, 5])
v3 = v1 * v2
assert(v3[0].getValue() == 3)
assert(v3[1].getValue() == 10)
jacobian = ad.get_jacobian(v3, ['v1', 'v2', 'w1', 'w2'])
assert(np.array_equal(jacobian, np.array([[3, 0, 1, 0], [0, 5, 0, 2]])))
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v = ad.Scalar('v', 3)
v1 = np.array([x, y])
v2 = np.array([v, 3 * v])
v3 = v1 * v2
assert(v3[0].getValue() == 3)
assert(v3[1].getValue() == 18)
jacobian = ad.get_jacobian(v3, ['x', 'y', 'v'])
assert(np.array_equal(jacobian, np.array([[3, 0, 1], [0, 9, 6]])))
v1 = ad.create_vector('v', [2, 3])
v3 = v1 * v1
assert(v3[0].getValue() == 4)
assert(v3[1].getValue() == 9)
jacobian = ad.get_jacobian(v3, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[4, 0], [0, 6]])))
v1 = ad.create_vector('v', [1, 2])
v2 = v1 * 10
assert(v2[0].getValue() == 10)
assert(v2[1].getValue() == 20)
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[10, 0], [0, 10]])))
x = ad.Scalar('x', 5)
y = ad.Scalar('y', 2)
v1 = np.array([x, y])
v2 = np.array([x * y, (x + y)])
v3 = v1 * v2
assert(v3[0].getValue() == 50)
assert(v3[1].getValue() == 14)
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[20, 25], [2, 9]])))
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v1 = np.array([x, y])
v2 = np.array([y, 10])
v3 = v1 * v2
assert(v3[0].getValue() == 2)
assert(v3[1].getValue() == 20)
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[2, 1], [0, 10]])))
def test_neg():
v1 = ad.create_vector('v', [1, 2])
v2 = -v1
assert(v2[0].getValue() == -1)
assert(v2[1].getValue() == -2)
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[-1, 0], [0, -1]])))
v3 = -v2
assert(v3[0].getValue() == 1)
assert(v3[1].getValue() == 2)
jacobian = ad.get_jacobian(v3, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))
v1 = ad.create_vector('v', [1, 2])
v2 = -1 * -v1
assert(v2[0].getValue() == 1)
assert(v2[1].getValue() == 2)
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))
def test_sub():
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v1 = np.array([x, y])
v2 = np.array([y, x])
v3 = v1 - v2
assert(v3[0].getValue() == -1)
assert(v3[1].getValue() == 1)
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[1, -1], [-1, 1]])))
v1 = ad.create_vector('v', [1, 2])
v2 = v1 - 10
assert(v2[0].getValue() == -9)
assert(v2[1].getValue() == -8)
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v1 = np.array([x, y])
v2 = ad.create_vector('v', [1, 5])
v3 = v1 - v2
assert(v3[0].getValue() == 0)
assert(v3[1].getValue() == -3)
jacobian = ad.get_jacobian(v3, ['x', 'y', 'v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[1, 0, -1, 0], [0, 1, 0, -1]])))
x = ad.Scalar('x', 1)
y = ad.Scalar('y', 2)
v1 = np.array([x, y])
v2 = np.array([y, 10])
v3 = v1 - v2
assert(v3[0].getValue() == -1)
assert(v3[1].getValue() == -8)
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[1, -1], [0, 1]])))
def test_pow():
v1 = ad.create_vector('v', [2, 5])
v2 = v1 ** 2
assert(v2[0].getValue() == 4)
assert(v2[1].getValue() == 25)
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[4, 0], [0, 10]])))
x = ad.Scalar('x', 2)
y = ad.Scalar('y', 5)
v1 = np.array([x, y])
v2 = v1 ** 2
assert(v2[0].getValue() == 4)
assert(v2[1].getValue() == 25)
jacobian = ad.get_jacobian(v2, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[4, 0], [0, 10]])))
x = ad.Scalar('x', 2)
y = ad.Scalar('y', 3)
v1 = np.array([x, y])
v2 = (v1 ** 2) ** 3
assert(v2[0].getValue() == 64)
assert(v2[1].getValue() == 729)
jacobian = ad.get_jacobian(v2, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[6 * (2 ** 5), 0], [0, 6 * (3 ** 5)]])))
x = ad.Scalar('x', 2)
y = ad.Scalar('y', 3)
v1 = np.array([x, y])
v2 = np.array([y, 2])
v3 = v1 ** v2
assert(v3[0].getValue() == 8)
assert(v3[1].getValue() == 9)
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[12, np.log(2) * 8], [0, 6]])))
def test_rpow():
v1 = ad.create_vector('v', [2, 5])
v2 = 2 ** v1
assert(v2[0].getValue() == 4)
assert(v2[1].getValue() == 32)
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[np.log(2) * 4, 0], [0, np.log(2) * 32]])))
x = ad.Scalar('x', 2)
y = ad.Scalar('y', 5)
v1 = np.array([x, y])
v2 = 2 ** v1
assert(v2[0].getValue() == 4)
assert(v2[1].getValue() == 32)
jacobian = ad.get_jacobian(v2, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[np.log(2) * 4, 0], [0, np.log(2) * 32]])))
x = ad.Scalar('x', 2)
y = ad.Scalar('y', 3)
v1 = np.array([x, y])
v2 = 2 ** (2 * v1)
assert(v2[0].getValue() == 16)
assert(v2[1].getValue() == 64)
jacobian = ad.get_jacobian(v2, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[np.log(2) * 32, 0], [0, np.log(2) * 128]])))
x = ad.Scalar('x', 2)
y = ad.Scalar('y', 3)
v1 = np.array([x, y])
v2 = (2 ** 2) ** v1
assert(v2[0].getValue() == 16)
assert(v2[1].getValue() == 64)
jacobian = ad.get_jacobian(v2, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[np.log(2) * (2 ** 4) * 2, 0], [0, np.log(2) * (2 ** 6) * 2]])))
x = ad.Scalar('x', 2)
y = ad.Scalar('y', 3)
v1 = np.array([x + y, x])
v2 = (2 ** 2) ** v1
assert(v2[0].getValue() == 2 ** 10)
assert(v2[1].getValue() == 16)
jacobian = ad.get_jacobian(v2, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[np.log(2) * (2 ** 10) * 2, np.log(2) * (2 ** 10) * 2], [np.log(2) * (2 ** 4) * 2, 0]])))
x = ad.Scalar('x', 2)
y = ad.Scalar('y', 3)
v1 = np.array([x + y, x])
v2 = 2 ** (2 * v1)
assert(v2[0].getValue() == 2 ** 10)
assert(v2[1].getValue() == 16)
jacobian = ad.get_jacobian(v2, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[np.log(2) * (2 ** 10) * 2, np.log(2) * (2 ** 10) * 2], [np.log(2) * (2 ** 4) * 2, 0]])))
def test_exp():
v1 = ad.create_vector('v', [2, 5])
v2 = ad.exp(v1)
assert(np.isclose(v2[0].getValue(), np.exp(2)))
assert(np.isclose(v2[1].getValue(), np.exp(5)))
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[np.exp(2), 0], [0, np.exp(5)]])))
v1 = ad.create_vector('v', [2, 5])
v2 = ad.exp(2 * v1)
assert(np.isclose(v2[0].getValue(), np.exp(4)))
assert(np.isclose(v2[1].getValue(), np.exp(10)))
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, 2 * np.array([[np.exp(4), 0], [0, np.exp(10)]])))
x = ad.Scalar('x', 2)
y = ad.Scalar('y', 3)
v1 = ad.exp(np.array([x + y, x * y]))
assert(np.isclose(v1[0].getValue(), np.exp(5)))
assert(np.isclose(v1[1].getValue(), np.exp(6)))
jacobian = ad.get_jacobian(v1, ['x', 'y'])
assert(np.array_equal(jacobian, np.array([[np.exp(5), np.exp(5)], [3 * np.exp(6), 2 * np.exp(6)]])))
def test_sin():
v1 = ad.create_vector('v', [0, 100])
v2 = ad.sin(v1)
assert(v2[0].getValue() == 0)
assert(np.isclose(v2[1].getValue(), np.sin(100)))
jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
assert(np.array_equal(jacobian, np.array([[1, 0], [0, np.cos(100)]])))
v1 = ad.Scalar('x', 4)
v2 = ad.Scalar('y', 10)
v3 = ad.sin(np.array([v1, v2])) / ad.sin(np.array([v1, v2]))
assert(np.isclose(v3[0].getValue(), 1))
assert(np.isclose(v3[1].getValue(), 1))
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.isclose(jacobian, np.array([[0, 0], [0, 0]])).all())
v1 = ad.Scalar('x', 4)
v2 = ad.Scalar('y', 10)
v3 = ad.sin(np.array([v1, v2])) ** 2
assert(np.isclose(v3[0].getValue(), np.sin(4) ** 2))
assert(np.isclose(v3[1].getValue(), np.sin(10) ** 2))
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.isclose(jacobian, np.array([[2 * np.sin(4) * np.cos(4), 0], [0, 2 * np.sin(10) * np.cos(10)]])).all())
v1 = ad.Scalar('x', 4)
v2 = ad.Scalar('y', 10)
v3 = ad.sin(np.array([v1 * v2, v1 + v2])) ** 2
assert(np.isclose(v3[0].getValue(), np.sin(40) ** 2))
assert(np.isclose(v3[1].getValue(), np.sin(14) ** 2))
jacobian = ad.get_jacobian(v3, ['x', 'y'])
assert(np.isclose(jacobian, np.array([[2 * np.sin(40) * np.cos(40) * 10, 2 * np.sin(40) * np.cos(40) * 4],
[2 * np.sin(14) * np.cos(14), 2 * np.sin(14) * np.cos(14)]])).all())
def test_cos():
    """Elementwise cosine: values and jacobian (d/dx cos x = -sin x)."""
    vec = ad.create_vector('v', [0, 100])
    cosed = ad.cos(vec)
    assert cosed[0].getValue() == 1
    assert np.isclose(cosed[1].getValue(), np.cos(100))
    jac = ad.get_jacobian(cosed, ['v1', 'v2'])
    assert np.isclose(jac, np.array([[0, 0], [0, -np.sin(100)]])).all()
def test_tan():
    """Elementwise tangent: values and jacobian (d/dx tan x = sec^2 x)."""
    vec = ad.create_vector('v', [0, 100])
    tanned = ad.tan(vec)
    assert tanned[0].getValue() == 0
    assert np.isclose(tanned[1].getValue(), np.tan(100))
    jac = ad.get_jacobian(tanned, ['v1', 'v2'])
    assert np.isclose(jac, np.array([[1, 0], [0, 1 / np.cos(100) ** 2]])).all()
| 34.99505
| 135
| 0.52419
| 2,377
| 14,138
| 3.059739
| 0.034077
| 0.137632
| 0.089372
| 0.123745
| 0.908153
| 0.901416
| 0.888629
| 0.845731
| 0.831019
| 0.788808
| 0
| 0.088121
| 0.215801
| 14,138
| 403
| 136
| 35.081886
| 0.567872
| 0.001061
| 0
| 0.581395
| 0
| 0
| 0.017357
| 0
| 0
| 0
| 0
| 0
| 0.392442
| 1
| 0.031977
| false
| 0
| 0.014535
| 0
| 0.046512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b4a46380b4f9d03328634433e3bf1ca35dfad80e
| 345
|
py
|
Python
|
literal/apps/authentication/dto.py
|
spanickroon/Text-From-Photo-Django-API
|
e1ef79c90a443cc3e606dec9e1c531aa5943ca59
|
[
"MIT"
] | null | null | null |
literal/apps/authentication/dto.py
|
spanickroon/Text-From-Photo-Django-API
|
e1ef79c90a443cc3e606dec9e1c531aa5943ca59
|
[
"MIT"
] | null | null | null |
literal/apps/authentication/dto.py
|
spanickroon/Text-From-Photo-Django-API
|
e1ef79c90a443cc3e606dec9e1c531aa5943ca59
|
[
"MIT"
] | 1
|
2021-06-08T18:06:21.000Z
|
2021-06-08T18:06:21.000Z
|
import typing
from pydantic import BaseModel
class AuthenticationDTO(BaseModel):
    """Authentication payload; every field is optional (may be omitted/None)."""

    username: typing.Optional[str]
    email: typing.Optional[str]
    token: typing.Optional[str]
    password: typing.Optional[str]
class RegisterDTO(BaseModel):
    """Registration payload; both fields are required."""

    username: str
    token: str
class LoginDTO(BaseModel):
    """Login payload; both fields are required."""

    username: str
    token: str
| 16.428571
| 35
| 0.721739
| 39
| 345
| 6.384615
| 0.384615
| 0.2249
| 0.273092
| 0.200803
| 0.2249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194203
| 345
| 20
| 36
| 17.25
| 0.895683
| 0
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.076923
| 0.153846
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
b4bc77f4a06b433cbc8b8d1fc881bd78002c499f
| 31
|
py
|
Python
|
paperscraper/scholar/__init__.py
|
henrykrumb/paperscraper
|
31abb49701b90bfb5107b46e82941068d242ec38
|
[
"MIT"
] | 16
|
2020-11-11T15:06:04.000Z
|
2022-03-22T07:39:47.000Z
|
paperscraper/scholar/__init__.py
|
henrykrumb/paperscraper
|
31abb49701b90bfb5107b46e82941068d242ec38
|
[
"MIT"
] | 8
|
2020-11-11T09:35:48.000Z
|
2021-12-02T11:36:07.000Z
|
paperscraper/scholar/__init__.py
|
henrykrumb/paperscraper
|
31abb49701b90bfb5107b46e82941068d242ec38
|
[
"MIT"
] | 7
|
2021-03-20T09:10:39.000Z
|
2022-01-06T21:12:02.000Z
|
from .scholar import * # noqa
| 15.5
| 30
| 0.677419
| 4
| 31
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 31
| 1
| 31
| 31
| 0.875
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b4d81b6bf97dcbf05cfb55609351e0afe983e80f
| 6,608
|
py
|
Python
|
tests/test_mock_twilio.py
|
gregziegan/eviction-tracker
|
4db4bacb9675f985cf2f4a747855491e9036ad28
|
[
"BSD-3-Clause"
] | 5
|
2021-09-15T08:06:59.000Z
|
2022-01-26T21:25:50.000Z
|
tests/test_mock_twilio.py
|
gregziegan/eviction-tracker
|
4db4bacb9675f985cf2f4a747855491e9036ad28
|
[
"BSD-3-Clause"
] | 18
|
2022-01-14T17:15:53.000Z
|
2022-02-14T07:33:53.000Z
|
tests/test_mock_twilio.py
|
thebritican/eviction-tracker
|
1e34d509b0410d61de6abd6be521c53951fa038a
|
[
"BSD-3-Clause"
] | 1
|
2021-09-15T01:46:57.000Z
|
2021-09-15T01:46:57.000Z
|
import unittest
import json
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_testing import TestCase
from eviction_tracker.detainer_warrants.models import PhoneNumberVerification
from eviction_tracker.database import db
from eviction_tracker.app import create_app
from eviction_tracker.commands import validate_phone_number, twilio_client
class MockTwilioLookup:
    """Stand-in for a Twilio phone-lookup result.

    Exposes each key of the given dict as an instance attribute, mirroring
    the attribute-style access the real Twilio client object provides.
    """

    def __init__(self, dictionary):
        # Promote every key/value pair of the payload to an attribute.
        for key, value in dictionary.items():
            setattr(self, key, value)

    # Marked @staticmethod: it takes no self/cls, and the original plain
    # function would break if ever invoked on an instance.
    @staticmethod
    def from_fixture(file_name):
        """Build a lookup object from a JSON fixture file at *file_name*."""
        with open(file_name) as twilio_response:
            phone_dict = json.load(twilio_response)
            return MockTwilioLookup(phone_dict)
class TestTwilioResponse(TestCase):
    """Round-trip tests: build PhoneNumberVerification records from mocked
    Twilio lookup payloads, persist them, and verify every mapped column."""

    def create_app(self):
        """Flask app wired to a dedicated throwaway test database."""
        app = create_app(self)
        app.config['TESTING'] = True
        app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://eviction_tracker_test:junkdata@localhost:5432/eviction_tracker_test'
        app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
        return app

    def setUp(self):
        db.create_all()

    def tearDown(self):
        db.session.remove()
        db.drop_all()

    def test_insert_phone_with_caller_name(self):
        '''
        Testing json response with caller_name but null carrier
        '''
        twilio_response = MockTwilioLookup.from_fixture(
            'tests/fixtures/phone_number_with_caller_name.json')
        phone_number = PhoneNumberVerification.from_twilio_response(
            twilio_response)
        db.session.add(phone_number)
        db.session.commit()
        phone_number_entry = db.session.query(PhoneNumberVerification).first()
        # The caller_name sub-dict maps onto the name-related columns.
        self.assertEqual(
            twilio_response.caller_name['caller_name'], phone_number_entry.caller_name)
        self.assertEqual(
            twilio_response.caller_name['caller_type'], phone_number_entry.caller_type)
        self.assertEqual(
            twilio_response.caller_name['error_code'], phone_number_entry.name_error_code)
        # carrier is null in this fixture, so every carrier column is null too.
        self.assertEqual(twilio_response.carrier,
                         phone_number_entry.carrier_error_code)
        self.assertEqual(twilio_response.carrier,
                         phone_number_entry.mobile_country_code)
        self.assertEqual(twilio_response.carrier,
                         phone_number_entry.mobile_network_code)
        self.assertEqual(twilio_response.carrier,
                         phone_number_entry.carrier_name)
        self.assertEqual(twilio_response.carrier,
                         phone_number_entry.phone_type)
        self.assertEqual(twilio_response.country_code,
                         phone_number_entry.country_code)
        self.assertEqual(twilio_response.national_format,
                         phone_number_entry.national_format)
        self.assertEqual(twilio_response.phone_number,
                         phone_number_entry.phone_number)

    def test_insert_phone_missing_caller_name(self):
        '''
        Testing json response with carrier but null caller_name
        '''
        twilio_response = MockTwilioLookup.from_fixture(
            'tests/fixtures/phone_number_missing_caller_name.json')
        # NOTE(review): the original built a second, unused record from the
        # same response; that duplicate from_twilio_response call was dropped.
        output_missing_name = PhoneNumberVerification.from_twilio_response(
            twilio_response)
        db.session.add(output_missing_name)
        db.session.commit()
        phone_number_entry = db.session.query(PhoneNumberVerification).first()
        # caller_name is null in this fixture, so the name columns are null.
        self.assertEqual(twilio_response.caller_name,
                         phone_number_entry.caller_name)
        self.assertEqual(twilio_response.caller_name,
                         phone_number_entry.caller_type)
        self.assertEqual(twilio_response.caller_name,
                         phone_number_entry.name_error_code)
        self.assertEqual(
            twilio_response.carrier['error_code'], phone_number_entry.carrier_error_code)
        self.assertEqual(
            twilio_response.carrier['mobile_country_code'], phone_number_entry.mobile_country_code)
        self.assertEqual(
            twilio_response.carrier['mobile_network_code'], phone_number_entry.mobile_network_code)
        self.assertEqual(twilio_response.carrier['name'],
                         phone_number_entry.carrier_name)
        self.assertEqual(twilio_response.carrier['type'],
                         phone_number_entry.phone_type)
        self.assertEqual(twilio_response.country_code,
                         phone_number_entry.country_code)
        self.assertEqual(twilio_response.national_format,
                         phone_number_entry.national_format)
        self.assertEqual(twilio_response.phone_number,
                         phone_number_entry.phone_number)

    def test_insert_phone_with_all_data(self):
        '''
        Testing json response with both caller_name and carrier populated
        '''
        twilio_response = MockTwilioLookup.from_fixture(
            'tests/fixtures/phone_number_with_all_data.json')
        phone_number = PhoneNumberVerification.from_twilio_response(
            twilio_response)
        db.session.add(phone_number)
        db.session.commit()
        phone_number_entry = db.session.query(PhoneNumberVerification).first()
        self.assertEqual(
            twilio_response.caller_name['caller_name'], phone_number_entry.caller_name)
        self.assertEqual(
            twilio_response.caller_name['caller_type'], phone_number_entry.caller_type)
        self.assertEqual(
            twilio_response.caller_name['error_code'], phone_number_entry.name_error_code)
        self.assertEqual(
            twilio_response.carrier['error_code'], phone_number_entry.carrier_error_code)
        self.assertEqual(
            twilio_response.carrier['mobile_country_code'], phone_number_entry.mobile_country_code)
        self.assertEqual(
            twilio_response.carrier['mobile_network_code'], phone_number_entry.mobile_network_code)
        self.assertEqual(twilio_response.carrier['name'],
                         phone_number_entry.carrier_name)
        self.assertEqual(twilio_response.carrier['type'],
                         phone_number_entry.phone_type)
        self.assertEqual(twilio_response.country_code,
                         phone_number_entry.country_code)
        self.assertEqual(twilio_response.national_format,
                         phone_number_entry.national_format)
        self.assertEqual(twilio_response.phone_number,
                         phone_number_entry.phone_number)
| 43.473684
| 139
| 0.684171
| 707
| 6,608
| 6.005658
| 0.127298
| 0.132124
| 0.135657
| 0.225389
| 0.786858
| 0.780735
| 0.780735
| 0.769666
| 0.769666
| 0.738577
| 0
| 0.001003
| 0.24546
| 6,608
| 151
| 140
| 43.761589
| 0.850582
| 0.025272
| 0
| 0.639344
| 0
| 0
| 0.074086
| 0.045362
| 0
| 0
| 0
| 0
| 0.270492
| 1
| 0.065574
| false
| 0
| 0.081967
| 0
| 0.180328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2ecd03821371367e9341268cd6de6a5ce70ed726
| 139
|
py
|
Python
|
venv/Lib/site-packages/pandas/tseries/api.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | 1
|
2021-02-06T21:00:00.000Z
|
2021-02-06T21:00:00.000Z
|
venv/Lib/site-packages/pandas/tseries/api.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tseries/api.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | 1
|
2021-04-26T22:41:56.000Z
|
2021-04-26T22:41:56.000Z
|
"""
Timeseries API
"""
# flake8: noqa
from pandas.tseries.frequencies import infer_freq
import pandas.tseries.offsets as offsets
| 15.444444
| 50
| 0.733813
| 17
| 139
| 5.941176
| 0.764706
| 0.257426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008772
| 0.179856
| 139
| 8
| 51
| 17.375
| 0.877193
| 0.201439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2589c35befb45e449414d67ee4ba5a7414b89d1e
| 41,979
|
py
|
Python
|
19.py
|
Swizec/advent-of-code-2017
|
efcdac9ddb81dd1db4a7bc0945ff4c13f6f15513
|
[
"MIT"
] | 5
|
2017-12-02T08:55:59.000Z
|
2018-01-11T05:52:18.000Z
|
19.py
|
Swizec/advent-of-code-2017
|
efcdac9ddb81dd1db4a7bc0945ff4c13f6f15513
|
[
"MIT"
] | null | null | null |
19.py
|
Swizec/advent-of-code-2017
|
efcdac9ddb81dd1db4a7bc0945ff4c13f6f15513
|
[
"MIT"
] | 1
|
2020-06-18T19:27:02.000Z
|
2020-06-18T19:27:02.000Z
|
# Example routing diagram from the Advent of Code 2017 day 19 statement.
# Leading whitespace is significant — the walker enters at the top-row "|" —
# so the padding (stripped by a whitespace-mangling paste) is restored here.
testinput = """
     |          
     |  +--+    
     A  |  C    
 F---|----E|--+ 
     |  |  |  D 
     +B-+  +--+ 
"""
input = """
|
+-------------------------------+ +-------------+ +-+ +---------------------+ +-------+ +-----------------------------------------+ +-----+
| | | | | | | | | | | | | | |
+-----+ +-----------------------------------|---+ | | | | +---+ +-----|---------------+ +---------------+ +-----+ +---|---------------------|-------+ +---|-+ +-+
| | | | | | | | | | | | | | | | | | | | | | | | | | |
+-|---+ +-----------------|---------------|---+ | +---------|-|-+ | +---|---|---------------------+ +---+ | | | +-|-----|-|---|-----------------+ +---------|-------|-+
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +-----------------|-------------------|-----|-|---------|-------|---+ | | +-----------|-----|-----------+ | | | | +---+ +-----|-|---+
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| +-------------------|---------------|-----------|-----------|-+ | +-------------------------|---+ +-|---------|---------|-|-----|-----------|-----------------------------------|-|-+ |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-+ | +---+ +-----------------------------------|---------------------+ +---------------------------|-----------|---|-----------------------------------|-------+ | +---------+ | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +-----------------------------------|---------------|-------------|-----------|-----------+ | | | | | | +---|-|-----|-----------+ | +---------------------+ | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | +-----------------|-----------------|-|-|-----------|-----------|-----|---|-----|-|---+ | +-|---|-------|-------|---+ +---|---|-+ +-----|-----------|---|-+ |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +---|---|---------------------------+ | | | | | +---------|-|---+ | | | | | | | | | +-+ | | +---|---|------P|-+ | | | | +-+ | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | +-|---|-|---------------|---------|---------|-+ | | | +---|-----|-------|-----------|-----+ +---------------------|-------------|-----------|-------|-----+ | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | +-------------------|---------|---------|-|-+ | | | | | | | | | | | | | | | | +-|-----|---|-------------------------+ +-|---+ +-+
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | +-----|---------------------|-+ | | | | +-|-------------|-----+ | | | | | | | | | +-----------|---|---+ | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | Y | | | | | | | | |
| | | | | | | | +---+ +-----------|-|-------+ | | | +-----|---|-----|-----------------------------+ +---------------|-----|---------------|-------+ | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-|-|-----|---|-------------|-|-|-------|-|---|-----|---|-------------|-|---+ +---------|-----|-----------------------+ +-----|-----------|---|-------+ +-----+ | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +-------|-------------|---+ +-------------|-|-|-+ | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-----|-|-----|-|-------|-|-----------------|-------------|-|-----+ | +---+ +-------------------------------------|V----------------|-----------|---------------------------|---------------|-----+
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | +-----------|-+ | | | | | | | | | | +-|-----|---------|-----|-----------+ | | +-|---------------------------------|-|-+ | | | | +-------+
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| +-|-|---|-|-|-------|---|---|-----|-----|-|---+ +-----|-|-----|-|-+ | | | | | | | | +---+ | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| +-|-----|---|-|----Q----|-----+ | U +-----|-------|-----|-|-------|---|---+ | | | | | | | +-----|-|-----------------------------------|---------|---------|-----+ |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | +-------------------|-----|-|-----------|-|-----------------------|---------|-------------|---------------------|-----------+ | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | +-|-------+ | | | +-|-----|---|-|---------|---|-|-----|-+ | | +-|---|-----|-----------+ | | | | | | | | | | | | | +-|-|-------+ | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | +---------|-------|-----|-----------------------|---------|---------+ | | | +-----|-----+ | | | | | +-+ | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | +-|-|-------------|---+ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| +-|-|---|-------------------------+ | | | | | | | | | | | | | +-------|-----|---|-+ | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | +-----------|-----------|-----------+ | +-------------------+ | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | +---|---+ | | | | | | | | | | | | | | | | | +-----|-------------|---+ | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | +---|-|-|-|---------------------|-|-------------------|-+ | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | +-|-----|-------------|-----|-|-----------|-----|---+ +-----|---------------------|---|-------+ | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | +---+ | | | | | | | | | | | | +-|-----+ | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +-+ | | +-|-----------|---------|-|---|---|-------------------|-------|-----------|-----|---|-----------|-------------+ | +-------|-|-----|-----|-------|-----------|-|-----------+ | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | +-|---|-----|---------+ +-+ | | +-|-----------|----B--------|-+ | | | +-+ | +---+ | +-|-|---+ | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | +-----------|-----|---|-|-+ | | | +-----|-------------|-|---------|-----|---|-----------|-+ | | +-|-+ | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | +---|-----|---------|---------------------------|-----------|---|-----------+ | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | +---|-----|---------------+ | | | | | | | | +-|-----|---------|-|-------|---|-|-|-----|---|---+ | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | +-------|-|-------|-----+ | +-----------|---|---------|-------|---------|-----|-------|-|-|-----|---|-|-+ | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | +-----------|---|---------|-|-----|-+ | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +---|-----------------|-|-+ | | | | +---|-+ +---------|-------------------------|---+ | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | +-+ | +-|---------|---------|-----------------------------|-----|---------------|-------------|-|-----------------+ +-|---|-------|---+ | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | +---|-----|---|-----+ | +---|-------|-----|-------|-|---------|---------------------|---|---------|---------------------------|---|-|-|---|---------------|---+ +-+ | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | +-----+ +-+ | | | | | | | | | | | | +-----|-|-----+ | | | | +-|-----------|---------------|-------------------------|-----|-|---+ | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| +-+ | | | +---------|---|-|---+ | | | | | | +-|-------+ | | | | | | | | | | | | +-----+ | | | +-|-----------+ | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | +---------------|-|-|---|-----|-----------------------------------------------|-|---|---------------|---|-----+ | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | +-------------------|-----|-------|---+ | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | +-|-|---------|-----------------|-------------------|-|---|-----------------|---|---------|---------+ | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +-----|-----|-------+ | | | | | | | | | | | | | | | | | | | | | | | | | | | | +---|-|-|---|-------|-+ | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +---------|-----|-|---|---|-------|-|-----------|-----------------------|-----|-|---------------|-------------|-----------|---------|-------------------|-------|---|---|-|---|-|---|-----+
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-+ | | | | | | | | | | +-------+ | | | | +-----|-------+ | | | | | | | | | +-----------|---|-----------|-|-+ +-+ | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-----+ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-----|-|-------|-------------------|-|-------|-----------|-|---|---------------------|-----|---|-------------------------|---------|---+ | | | | | | | | | | | | +-----------+ |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | +-|-|---------|-----------|---|---|---------------------|-------------------+ | +-+ | | | +-----------|-+ | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-----|-----|-----------------|-----|---------|-------------|---------------------|---|-+ +-|-|-------------|-------------|-------------|-|-----+ | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| +-------+ | | | | | | | | | | | | | | | | | | | +---|---|-----------|---|-------+ | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | +---------------|---|-----|-------------+ | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | +-|-----|---|-------|---------|---|---|---|-------|-----|-----------|-------------------------------+ | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | +---+ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| +-|-|-------|-----+ | | | | | | | | | | | +-----|---+ | | | | | | | | | | | +-|-----------|---|---|-|---|-|---------|-|-----|-|-+ | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | +---|-|-+ | | | | | | | | | +-------|---|---------+ | +---+ | | | | +-----|-+ | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | +-----|---|-|-|-|---------------|-----|-|-------|-----|-|-|-------|-----|-----------------------------+ | | | | | | +-----|-+ | | | | +-+ | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | +---|-|-|-------------------|-|-|-------|-------|---------|-------|-|---------------|-------------|-|---------------------+ | | | | | | +-----|-------+ | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | +---------|-----|---|-|-------+ | | | | +-+ | +---|-|-+ +---------|-|-|-----------|---+ | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | +-------------|-------+ +-------------------+ | | | +-------|---|-|-------+ | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | +-------|---------|-|-|-+ | | | | +-------|---------|-----|---|---|---+ | +-+ | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | +-------|---|-----|-|---|---------------------------|-------------|---|-|-----|-----|---|-------|-|---|---+ | | | | | | | | |
| | | | | | | | | | | | F | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | +---+ | | | | | +-----|---|---------------------------------------|---------|---+ +-|-----------|-----+ | +-+ | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | A | | | | | | | | | | | | | | | | | | | | | |
+---|-|-+ | | +---------|---|---|---+ | | | | | | | | | | | | | | +-----------------------------------------+ | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +-----------|-+ +-|-|-|-|-|---------------------|-------|-|-|-----------|---|-----|-|---------|---|-----------------|---------|---------------|-----|---|---------------|-----|-|-+ | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-------|-|-|-------------+ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | +---------------+ | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | +-------------|-----------------|-----------|---|-----|-|---------|-------------------------+ | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | +-|-+ | | | | | | | | | | | | | | | | | | | | | | +---|-------------------|-|-----|---|-------------|-|-----|-----+ | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | +-|---|-+ | | | | | | | | | | | | | | +-+ | | | | | | | | +---------|-----|-----|---|-----|-------|-------|-|---+ | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+---|---+ | | | | | | +-+ | | | | | | | | | | | | | | | | | | | | | | | +-|-----|---+ +-----------+ | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+---+ +---|-+ | | | | | | | | | | +---------|-+ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-------|---------|---------|-|---|-|-----|-----------|-----------|---|--W------------|---------------------|-----|-------------+ | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | +---|-|---|-----|-+ | | | +---+ | | | | | +-|---------------------|---|-------|-----------------------------------------|-----|---------|-----|-|-----|-----|---|-------|-+ |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | +-|-----|---+ | | +-----|---|---|-|---------|---|-----+ | +-+ | | | | | | | | | | | | | | | | | | +-----+ | | | | +-|-|-+ |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | +---------|-+ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | +---|-+ | | | | | | | | | | | +-----|-----|-|---|-----------+ | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +---|-----|-------|---|---|---|-|-|-+ | | | | | +---|-----|-----|-----------------|---|-----|-------------|---------------------|-----------|-------------------|---|-----+ | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-------------------|-|-+ | | | | | | | | | | | +---------|---------------|-------|---------|-------------+ +-------+ +-|-+ | | | | | +-|-|-------|-+ |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | +-|-|-+ | | | | | | | | | | | +-------|---------------------|-----------|-----|-------------------------------+ | | | | | | | | +-+ |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | +-|-|---------|-----+ | | | | | | | +-------------|-------|-------------|-----|-----------------------------------|-|---------|-----------------------|-+ | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | +-----|-----|-------------------+ | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| +-------+ | | | +-----|-|-|-|---------------|---|---+ +---------------|-----+ +---------|---+ | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | +-+ | | | +---+ +-----|-----------|---------|-----------------|-------------------------------+ | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | +-|-|-------+ | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | +-----------|-|-------|-----|---------+ | | +-------------|-----|-------------------------|-|-|-+ | | | +-+
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+-----+ +---+ +-------------------|-----------------+ | +-|-+ +-------------------------------+ | | +-------------------|---------------|---+ | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +---------|-|-----------|-----------------------------------+ +-----|-----------------------------------------------------------|-|-------------|-----------------|-+ | | | +---+
| | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | +-----|---|-------------|-----------------------------------------------|---------------------------|-----------------|---------|---|-+ | | +-----------------|-+ | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | |
+-----------+ +-----+ | | | +-+ +-----------|-----------|-----------------------------------------------+ | +---+ | +-----------------------------|-+ | | |
| | | | | | | | | | | | | | | | | |
+-------|---|-----------+ | | | | | | +-------------------------+ +-------------------+ +-+ | | | | | |
| | | | | | | | | | | | | | | | | |
| | | | | | +---------------------------------------------------------------------------------------------------+ | | | | | | |
| | | | | | | | | | | | | | | |
+-------|---+ | +--------------------------------T|-------------------------------------------|-----------------------------------------------------------------|-|-----|-------+ |
| | | | | | | | | | | |
+-----------------------------------------------------------|---------------------------------------------------------------------------------------|-------------------+ +-+ +-----+ |
| | | | | | | |
+---------------+ +-+ +-------------------------------------------+ +---------------------+
"""
def followpath(input):
    """Walk an ASCII routing diagram (Advent-of-Code 2017 day 19 style).

    Starts at the '|' in the first row heading down, follows '-', '|'
    crossings and turns at '+', collecting any letters passed on the way.
    The walk ends when it leaves the grid or steps onto a blank cell.

    :param input: multi-line string containing the diagram
    :return: tuple (steps, letters) — number of cells visited and the
             letters encountered, in order, as a list
    """
    uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    linelen = max(len(line) for line in input.split("\n"))
    # Pad every non-empty row to equal width so column indexing is safe.
    grid = [line.ljust(linelen, " ") for line in input.split("\n") if len(line) > 0]
    pos = (0, grid[0].index("|"))
    vx, vy = 0, 1
    letters = []
    steps = 0
    # Fixes vs. original: removed the Python-2 debug `print` statement
    # (a syntax error under Python 3) and guarded the x-1 / y-1 neighbor
    # probes so they cannot wrap around to the far edge of the grid.
    while True:
        y, x = pos
        if y < 0 or y >= len(grid) or x < 0 or x >= linelen or grid[y][x] == " ":
            return steps, letters
        if grid[y][x] == "+":
            if vy != 0:
                # Turning horizontally: go left if a path/letter continues there.
                left = grid[y][x - 1] if x > 0 else " "
                if left == "-" or left in uppercase:
                    vy, vx = 0, -1
                else:
                    vy, vx = 0, 1
            else:
                # Turning vertically: go up if a path/letter continues there.
                up = grid[y - 1][x] if y > 0 else " "
                if up == "|" or up in uppercase:
                    vy, vx = -1, 0
                else:
                    vy, vx = 1, 0
        elif grid[y][x] not in ["-", "|"]:
            # Anything that is not plumbing or blank is a letter to collect.
            letters.append(grid[y][x])
        steps += 1
        pos = (y + vy, x + vx)
# Run the walker on the puzzle input and report steps + collected letters.
# Single-argument print with parentheses is valid (and prints identically)
# under both Python 2 and Python 3, unlike the original `print a, b` form.
steps, letters = followpath(input)
print("%d %s" % (steps, "".join(letters)))
| 162.081081
| 201
| 0.012578
| 170
| 41,979
| 3.105882
| 0.305882
| 0.090909
| 0.079545
| 0.05303
| 0.113636
| 0.075758
| 0
| 0
| 0
| 0
| 0
| 0.001329
| 0.605803
| 41,979
| 259
| 202
| 162.081081
| 0.030578
| 0.000715
| 0
| 0.405738
| 0
| 0.659836
| 0.971988
| 0.194226
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.008197
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
259c1b66ef692b4331473a57e392a26935871c50
| 151
|
py
|
Python
|
Project_Codev0.1/Class-diagram_Classes/class Reccomendation.py
|
cyberseihis/Wallsource
|
4bd981e75c3ebf97c9673ffb80147ef2bdf7d61a
|
[
"MIT"
] | null | null | null |
Project_Codev0.1/Class-diagram_Classes/class Reccomendation.py
|
cyberseihis/Wallsource
|
4bd981e75c3ebf97c9673ffb80147ef2bdf7d61a
|
[
"MIT"
] | null | null | null |
Project_Codev0.1/Class-diagram_Classes/class Reccomendation.py
|
cyberseihis/Wallsource
|
4bd981e75c3ebf97c9673ffb80147ef2bdf7d61a
|
[
"MIT"
] | null | null | null |
class Reccomandation:
    """Simple wrapper around a piece of recommendation text.

    NOTE(review): the class name looks like a misspelling of
    "Recommendation"; kept unchanged because callers may reference it.
    """

    def __init__(self, input_text):
        # Raw text this recommendation wraps.
        self.text = input_text

    def __get__(self, name):
        """Return the attribute called *name*.

        Bug fix: the original returned ``self.name``, which always looked
        up the literal attribute "name" (raising AttributeError) instead
        of the attribute the caller asked for. Missing attributes still
        raise AttributeError, as before.
        """
        return getattr(self, name)
| 16.777778
| 35
| 0.622517
| 18
| 151
| 4.666667
| 0.555556
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.298013
| 151
| 8
| 36
| 18.875
| 0.792453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d3307d21c0104cfa76eb4e09db2fdc6d925d6393
| 8,682
|
py
|
Python
|
thenewboston_node/business_logic/tests/test_blockchain/test_add_block.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | 30
|
2021-03-05T22:08:17.000Z
|
2021-09-23T02:45:45.000Z
|
thenewboston_node/business_logic/tests/test_blockchain/test_add_block.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | 148
|
2021-03-05T23:37:50.000Z
|
2021-11-02T02:18:58.000Z
|
thenewboston_node/business_logic/tests/test_blockchain/test_add_block.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | 14
|
2021-03-05T21:58:46.000Z
|
2021-10-15T17:27:52.000Z
|
import pytest
from thenewboston_node.business_logic.blockchain.base import BlockchainBase
from thenewboston_node.business_logic.exceptions import ValidationError
from thenewboston_node.business_logic.models import Block, NodeDeclarationSignedChangeRequest
from thenewboston_node.core.utils.cryptography import KeyPair, derive_public_key
@pytest.mark.parametrize('blockchain_argument_name', ('memory_blockchain', 'file_blockchain'))
def test_can_add_block(
    file_blockchain: BlockchainBase,
    memory_blockchain: BlockchainBase,
    treasury_account_key_pair: KeyPair,
    user_account_key_pair: KeyPair,
    preferred_node,
    blockchain_argument_name,
    primary_validator_key_pair,
):
    """Coin-transfer blocks update balances and fees; duplicates are rejected.

    Parametrized over both the memory- and file-backed blockchain fixtures.
    Assertions depend on the exact sequence of added blocks.
    """
    # Resolve the implementation under test from the parametrized fixture name.
    blockchain: BlockchainBase = locals()[blockchain_argument_name]
    treasury_account_number = treasury_account_key_pair.public
    treasury_initial_balance = blockchain.get_account_current_balance(treasury_account_number)
    assert treasury_initial_balance is not None
    user_account_number = user_account_key_pair.public
    primary_validator = blockchain.get_primary_validator()
    assert primary_validator
    pv_account_number = primary_validator.identifier
    assert pv_account_number
    preferred_node_account_number = preferred_node.identifier
    # Distinct non-zero fees so each recipient's balance change is attributable.
    assert primary_validator.fee_amount > 0
    assert preferred_node.fee_amount > 0
    assert primary_validator.fee_amount != preferred_node.fee_amount
    total_fees = primary_validator.fee_amount + preferred_node.fee_amount
    pv_signing_key = primary_validator_key_pair.private
    assert derive_public_key(pv_signing_key) == pv_account_number
    # Block 0: treasury -> user, 30 coins.
    block0 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=user_account_number,
        amount=30,
        request_signing_key=treasury_account_key_pair.private,
        pv_signing_key=pv_signing_key,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block0)
    # Sender pays amount + fees; PV and preferred node each collect their fee.
    assert blockchain.get_account_current_balance(user_account_number) == 30
    assert blockchain.get_account_current_balance(
        treasury_account_number
    ) == treasury_initial_balance - 30 - total_fees
    assert blockchain.get_account_current_balance(preferred_node_account_number) == preferred_node.fee_amount
    assert blockchain.get_account_current_balance(pv_account_number) == primary_validator.fee_amount
    # Re-adding the same block must fail with a stale block number.
    with pytest.raises(ValidationError, match='Block number must be equal to next block number.*'):
        blockchain.add_block(block0)
    # Block 1: treasury -> user again, 10 coins; fees accrue a second time.
    block1 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=user_account_number,
        amount=10,
        request_signing_key=treasury_account_key_pair.private,
        pv_signing_key=pv_signing_key,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block1)
    assert blockchain.get_account_current_balance(user_account_number) == 40
    assert blockchain.get_account_current_balance(treasury_account_number
                                                  ) == (treasury_initial_balance - 30 - 10 - 2 * total_fees)
    assert blockchain.get_account_current_balance(preferred_node_account_number) == preferred_node.fee_amount * 2
    assert blockchain.get_account_current_balance(pv_account_number) == primary_validator.fee_amount * 2
    # Block 2: user -> treasury, 5 coins; now the user pays the fees.
    block2 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=treasury_account_number,
        amount=5,
        request_signing_key=user_account_key_pair.private,
        pv_signing_key=pv_signing_key,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block2)
    assert blockchain.get_account_current_balance(user_account_number) == 40 - 5 - total_fees
    assert blockchain.get_account_current_balance(treasury_account_number
                                                  ) == (treasury_initial_balance - 30 - 10 + 5 - 2 * total_fees)
    assert blockchain.get_account_current_balance(preferred_node_account_number) == preferred_node.fee_amount * 3
    assert blockchain.get_account_current_balance(pv_account_number) == primary_validator.fee_amount * 3
@pytest.mark.parametrize('blockchain_argument_name', ('memory_blockchain', 'file_blockchain'))
def test_can_add_coin_transfer_block(
    memory_blockchain: BlockchainBase,
    file_blockchain: BlockchainBase,
    treasury_account_key_pair: KeyPair,
    user_account_key_pair: KeyPair,
    primary_validator_key_pair: KeyPair,
    preferred_node_key_pair: KeyPair,
    preferred_node,
    blockchain_argument_name,
):
    """Same flow as test_can_add_block, but with hard-coded fee amounts.

    Assumes the fixtures configure a preferred-node fee of 1 and a primary
    validator fee of 4 (asserted below via the explicit balance checks).
    """
    # Resolve the implementation under test from the parametrized fixture name.
    blockchain: BlockchainBase = locals()[blockchain_argument_name]
    treasury_account = treasury_account_key_pair.public
    treasury_initial_balance = blockchain.get_account_current_balance(treasury_account)
    assert treasury_initial_balance is not None
    user_account = user_account_key_pair.public
    pv_account = primary_validator_key_pair.public
    node_account = preferred_node_key_pair.public
    # node fee (1) + primary validator fee (4)
    total_fees = 1 + 4
    block0 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=user_account,
        amount=30,
        request_signing_key=treasury_account_key_pair.private,
        pv_signing_key=primary_validator_key_pair.private,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block0)
    assert blockchain.get_account_current_balance(user_account) == 30
    assert blockchain.get_account_current_balance(node_account) == 1
    assert blockchain.get_account_current_balance(pv_account) == 4
    assert blockchain.get_account_current_balance(treasury_account) == treasury_initial_balance - 30 - total_fees
    # Re-adding the same block must fail with a stale block number.
    with pytest.raises(ValidationError, match='Block number must be equal to next block number.*'):
        blockchain.add_block(block0)
    block1 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=user_account,
        amount=10,
        request_signing_key=treasury_account_key_pair.private,
        pv_signing_key=primary_validator_key_pair.private,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block1)
    assert blockchain.get_account_current_balance(user_account) == 40
    assert blockchain.get_account_current_balance(
        treasury_account
    ) == treasury_initial_balance - 30 - 10 - 2 * total_fees
    assert blockchain.get_account_current_balance(node_account) == 1 * 2
    assert blockchain.get_account_current_balance(pv_account) == 4 * 2
    # Transfer back: user -> treasury, user now pays the fees.
    block2 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=treasury_account,
        amount=5,
        request_signing_key=user_account_key_pair.private,
        pv_signing_key=primary_validator_key_pair.private,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block2)
    assert blockchain.get_account_current_balance(user_account) == 40 - 5 - total_fees
    assert blockchain.get_account_current_balance(
        treasury_account
    ) == treasury_initial_balance - 30 - 10 + 5 - 2 * total_fees
    assert blockchain.get_account_current_balance(node_account) == 1 * 3
    assert blockchain.get_account_current_balance(pv_account) == 4 * 3
@pytest.mark.parametrize('blockchain_argument_name', ('memory_blockchain', 'file_blockchain'))
def test_can_add_node_declaration_block(
    memory_blockchain: BlockchainBase,
    file_blockchain: BlockchainBase,
    user_account_key_pair: KeyPair,
    blockchain_argument_name,
    primary_validator_key_pair,
):
    """Node declarations are applied on add_block and survive a blockchain-state
    snapshot; a later declaration for the same account replaces the earlier one."""
    # Resolve the implementation under test from the parametrized fixture name.
    blockchain: BlockchainBase = locals()[blockchain_argument_name]
    user_account = user_account_key_pair.public
    # First declaration: single network address.
    request0 = NodeDeclarationSignedChangeRequest.create(
        network_addresses=['http://127.0.0.1'], fee_amount=3, signing_key=user_account_key_pair.private
    )
    block0 = Block.create_from_signed_change_request(blockchain, request0, primary_validator_key_pair.private)
    blockchain.add_block(block0)
    assert blockchain.get_node_by_identifier(user_account) == request0.message.node
    # The declared node must also be visible in the snapshotted state.
    blockchain.snapshot_blockchain_state()
    assert blockchain.get_last_blockchain_state().get_node(user_account) == request0.message.node
    # Second declaration for the same account: new address list supersedes the old.
    request1 = NodeDeclarationSignedChangeRequest.create(
        network_addresses=['http://127.0.0.2', 'http://192.168.0.34'],
        fee_amount=3,
        signing_key=user_account_key_pair.private
    )
    block1 = Block.create_from_signed_change_request(blockchain, request1, primary_validator_key_pair.private)
    blockchain.add_block(block1)
    assert blockchain.get_node_by_identifier(user_account) == request1.message.node
    blockchain.snapshot_blockchain_state()
    assert blockchain.get_last_blockchain_state().get_node(user_account) == request1.message.node
| 45.21875
| 113
| 0.773209
| 1,040
| 8,682
| 6.009615
| 0.097115
| 0.06448
| 0.08512
| 0.11232
| 0.89808
| 0.85056
| 0.84288
| 0.80048
| 0.70384
| 0.65424
| 0
| 0.016023
| 0.15895
| 8,682
| 191
| 114
| 45.455497
| 0.839907
| 0
| 0
| 0.547619
| 0
| 0
| 0.036512
| 0.008293
| 0
| 0
| 0
| 0
| 0.214286
| 1
| 0.017857
| false
| 0
| 0.029762
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d3a4ce7631a1d2f8712096aeea00cd3b9efe12f9
| 129
|
py
|
Python
|
7KYU/largest_pair_sum.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
7KYU/largest_pair_sum.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
7KYU/largest_pair_sum.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
from typing import List
def largest_pair_sum(numbers: List[int]) -> int:
    """Return the sum of the two largest elements of *numbers*.

    Degenerate inputs behave like the original: a single element sums to
    itself, an empty list sums to 0.
    """
    ascending = sorted(numbers)
    return sum(ascending[-2:])
| 21.5
| 49
| 0.697674
| 19
| 129
| 4.631579
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.170543
| 129
| 6
| 50
| 21.5
| 0.813084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
d3ab10e48c1dfedbe1ccb75edc1456fd8323c7b9
| 85
|
py
|
Python
|
dist/book/codes/105-1.py
|
EManualResource/book-python-basic
|
a6f9e985b8765f9e8dbc7a0bea82243545d3fa06
|
[
"Apache-2.0"
] | null | null | null |
dist/book/codes/105-1.py
|
EManualResource/book-python-basic
|
a6f9e985b8765f9e8dbc7a0bea82243545d3fa06
|
[
"Apache-2.0"
] | null | null | null |
dist/book/codes/105-1.py
|
EManualResource/book-python-basic
|
a6f9e985b8765f9e8dbc7a0bea82243545d3fa06
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
#coding:utf-8
"""
请计算:19+2*4-8/2
"""
# Evaluate the expression from the module docstring: 19 + 2*4 - 8/2 == 23.
# Fix: the original used the Python-2 `print a` statement, a syntax error
# under Python 3; `print(a)` behaves identically on both versions.
# NOTE(review): under Python 3 true division makes this 23.0 rather than 23.
a = 19+2*4-8/2
print(a)
| 8.5
| 22
| 0.552941
| 21
| 85
| 2.238095
| 0.619048
| 0.12766
| 0.170213
| 0.212766
| 0.255319
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183099
| 0.164706
| 85
| 9
| 23
| 9.444444
| 0.478873
| 0.388235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6ca9d10dc3e84e365fb6a632584a5fe9fc698a9e
| 75
|
py
|
Python
|
localite/api.py
|
stim-devices/dev-localite
|
66edf74047c73393f7be9b21b86792980045d01d
|
[
"MIT"
] | null | null | null |
localite/api.py
|
stim-devices/dev-localite
|
66edf74047c73393f7be9b21b86792980045d01d
|
[
"MIT"
] | 6
|
2019-10-16T07:07:14.000Z
|
2022-01-24T10:42:00.000Z
|
localite/api.py
|
stim-devices/dev-localite
|
66edf74047c73393f7be9b21b86792980045d01d
|
[
"MIT"
] | 3
|
2019-10-22T06:30:37.000Z
|
2021-12-09T12:07:28.000Z
|
from localite.flow.mitm import start, kill
from localite.coil import Coil
| 18.75
| 42
| 0.813333
| 12
| 75
| 5.083333
| 0.666667
| 0.393443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 75
| 3
| 43
| 25
| 0.938462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9f03d0c4cf40f4ea4b84456d57dcb7f92d75b9bd
| 54,734
|
py
|
Python
|
stemdl/runtime.py
|
nlaanait/stemdl
|
ae82905ae7e96bdb43a6626dbf73a63a5bb8b85b
|
[
"MIT"
] | null | null | null |
stemdl/runtime.py
|
nlaanait/stemdl
|
ae82905ae7e96bdb43a6626dbf73a63a5bb8b85b
|
[
"MIT"
] | null | null | null |
stemdl/runtime.py
|
nlaanait/stemdl
|
ae82905ae7e96bdb43a6626dbf73a63a5bb8b85b
|
[
"MIT"
] | null | null | null |
"""
Created on 10/9/17.
@author: Numan Laanait, Michael Matheson
email: laanaitn@ornl.gov, mathesonm@ornl.gov
"""
import time
from datetime import datetime
import os
import sys
import re
import numpy as np
import math
from itertools import chain
from multiprocessing import cpu_count
from copy import deepcopy
#TF
import tensorflow as tf
from collections import OrderedDict
import horovod.tensorflow as hvd
from tensorflow.python.client import timeline
#from tensorflow.contrib.compiler import xla
# stemdl
from . import network
from . import inputs
from . import optimizers
from . import lr_policies
from . import losses
# Silence TensorFlow's info/warning output; only errors reach the log.
tf.logging.set_verbosity(tf.logging.ERROR)
def tensorflow_version_tuple():
    """Return the running TensorFlow version as (major:int, minor:int, patch:str)."""
    major, minor, patch = tf.__version__.split('.')
    return (int(major), int(minor), patch)
def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
                                    initializer=None, regularizer=None,
                                    trainable=True,
                                    *args, **kwargs):
    """Custom variable getter for mixed-precision training.

    Trainable variables are stored in float32 ("master weights") and cast
    to the requested dtype (e.g. float16) on read, so weight updates keep
    full precision. Intended for use as ``custom_getter`` on a
    ``tf.variable_scope``.
    """
    # Force float32 storage for trainable variables regardless of the
    # dtype the caller asked for.
    storage_dtype = tf.float32 if trainable else dtype
    variable = getter(name, shape, dtype=storage_dtype,
                      initializer=initializer, regularizer=regularizer,
                      trainable=trainable,
                      *args, **kwargs)
    if trainable and dtype != tf.float32:
        # Hand back a low-precision view; the float32 master copy is
        # what is actually stored and updated.
        variable = tf.cast(variable, dtype)
    return variable
class TrainHelper:
    """Bookkeeping helper for the training loop: step/epoch accounting,
    timing, summary writing, run banner and periodic stats logging.

    Relies on the module-level ``hvd`` (horovod) for rank/size and on
    ``print_rank`` so output is emitted on rank 0 only.
    """

    def __init__(self, params, saver, writer, net_ops, last_step=0, log_freq=1):
        # params: run configuration dict (batch_size, checkpt_dir, ...).
        self.params = params
        # Global step already completed (non-zero on restart from checkpoint).
        self.last_step = last_step
        # Estimated ops per example, used for FLOPS reporting.
        self.net_ops = net_ops
        self.start_time = time.time()
        self.cumm_time = time.time()
        self.saver = saver
        self.writer = writer
        # Epochs completed so far across all horovod ranks.
        self.elapsed_epochs = self.last_step * self.params['batch_size'] * 1.0 * hvd.size() / \
            self.params['NUM_EXAMPLES_PER_EPOCH']
        self.log_freq = log_freq

    def before_run(self):
        """Advance the step counter and reset per-step timing/epoch stats."""
        self.last_step += 1
        self.start_time = time.time()
        self.elapsed_epochs = self.last_step * self.params['batch_size'] * 1.0 * hvd.size() / \
            self.params['NUM_EXAMPLES_PER_EPOCH']
        # call to hvd forces global namespace into class on purpose.

    def write_summaries(self, summary):
        """Write a serialized summary to the checkpoint dir (rank 0 only)."""
        if hvd.rank() == 0:
            with tf.summary.FileWriter(self.params['checkpt_dir']) as summary_writer:
                summary_writer.add_summary(summary, global_step=self.last_step)
        print_rank('Saved Summaries.')

    def save_checkpoint(self):
        # Checkpointing is handled directly in the training loop.
        pass

    def run_summary(self):
        """Print a one-time banner describing the run configuration."""
        tfversion = tensorflow_version_tuple()
        print_rank('TensorFlow ... %i.%i.%s' % tfversion)
        # LSB_* variables are set by the LSF batch scheduler when present.
        if 'LSB_JOBNAME' in os.environ:
            print_rank('job name ... %s' % os.environ['LSB_JOBNAME'])
        if 'LSB_JOBID' in os.environ:
            print_rank('job number ... %s' % os.environ['LSB_JOBID'])
        if 'LSB_OUTPUTFILE' in os.environ:
            print_rank('job output ... %s' % os.environ['LSB_OUTPUTFILE'])
        print_rank('number of ranks ... %d' % hvd.size())
        print_rank('network_config ... %s' % self.params['network_config'])
        print_rank('batch_size ... %d' % self.params['batch_size'])
        print_rank(' ... %d total' % (self.params['batch_size'] * hvd.size()))
        print_rank('data type ... %s' % ('fp16' if self.params['IMAGE_FP16'] else 'fp32'))
        print_rank('data_dir ... %s' % self.params['data_dir'])
        print_rank('input_flags ... %s' % self.params['input_flags'])
        print_rank('hyper_params ... %s' % self.params['hyper_params'])
        print_rank('checkpt_dir ... %s' % self.params['checkpt_dir'])
        print_rank('')
        print_rank('command line ... %s' % self.params['cmdline'])
        print_rank('')

    @staticmethod
    def save_trace(run_metadata, trace_dir, trace_step):
        # Writing trace to json file. open with chrome://tracing
        trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        with open(trace_dir + '/timeline_' + str(trace_step) + '.ctf.' + str(hvd.rank()) + '.json', 'w') as f:
            f.write(trace.generate_chrome_trace_format(show_memory=True, show_dataflow=True))
        print_rank('Run & Saved GPU Trace.')

    def log_stats(self, loss_value, learning_rate):
        """Log step timing, throughput and (estimated) FLOPS for this step."""
        self.nanloss(loss_value)
        t = time.time()
        duration = t - self.start_time
        examples_per_sec = self.params['batch_size'] * hvd.size() / duration
        # cumm_time temporarily holds the mean wall time per logged interval.
        self.cumm_time = (time.time() - self.cumm_time) / self.log_freq
        flops = self.net_ops * examples_per_sec
        avg_flops = self.net_ops * self.params['batch_size'] * hvd.size() / self.cumm_time
        format_str = (
            'time= %.1f, step= %d, epoch= %2.2e, loss= %.3e, lr= %.2e, step_time= %2.2f sec, ranks= %d, examples/sec= %.1f, flops = %3.2e, average_time= %2.2f, average_flops= %3.3e')
        print_rank(format_str % (t - self.params['start_time'], self.last_step, self.elapsed_epochs,
                                 loss_value, learning_rate, duration, hvd.size(), examples_per_sec, flops, self.cumm_time, avg_flops))
        # Reset to a timestamp for the next interval measurement.
        self.cumm_time = time.time()

    @staticmethod
    def nanloss(loss_value):
        """Warn (rank 0) when the loss has gone NaN; does not abort."""
        if np.isnan(loss_value):
            print_rank('loss is nan...')
            # sys.exit(0)
class TrainHelper_YNet(TrainHelper):
    """TrainHelper variant that also reports YNet's auxiliary losses
    (inverter, real/imaginary decoder branches, regularization)."""

    def log_stats(self, loss_value, aux_losses, learning_rate):
        # NOTE(review): signature differs from TrainHelper.log_stats (extra
        # ``aux_losses`` positional arg), so callers must know which helper
        # class they hold.
        t = time.time()
        duration = t - self.start_time
        examples_per_sec = self.params['batch_size'] * hvd.size() / duration
        self.cumm_time = (time.time() - self.cumm_time) / self.log_freq
        flops = self.net_ops * examples_per_sec
        avg_flops = self.net_ops * self.params['batch_size'] * hvd.size() / self.cumm_time
        # aux_losses is expected as (inverter, decoder_re, decoder_im, reg).
        loss_inv, loss_dec_re, loss_dec_im, loss_reg = aux_losses
        self.nanloss(loss_value)
        format_str = (
            'time= %.1f, step= %2.2e, epoch= %2.2e, lr= %.2e, loss=%.3e, loss_inv= %.2e, loss_dec_im=%.2e, loss_dec_re=%.2e, loss_reg=%.2e, step_time= %2.2f sec, ranks= %d, examples/sec= %.1f')
        print_rank(format_str % (t - self.params['start_time'], self.last_step, self.elapsed_epochs,
                                 learning_rate, loss_value, loss_inv, loss_dec_im, loss_dec_re, loss_reg, duration, hvd.size(), examples_per_sec))
        self.cumm_time = time.time()
def print_rank(*args, **kwargs):
    """print() only on horovod rank 0, to avoid duplicated output across ranks."""
    if hvd.rank() != 0:
        return
    print(*args, **kwargs)
def train(network_config, hyper_params, params, gpu_id=None):
    """
    Train the network for a number of steps using horovod and asynchronous I/O staging ops.
    :param network_config: OrderedDict, network configuration
    :param hyper_params: OrderedDict, hyper_parameters
    :param params: dict
    :param gpu_id: optional int, GPU to pin; defaults to hvd.local_rank()
    :return: (val_results, loss_results) — lists of (step, value) tuples
    """
    # NOTE(review): leading indentation was lost in this dump; the nesting
    # below (especially the tf.device scope extents) was reconstructed from
    # syntax — verify against the upstream source.
    #########################
    # Start Session #
    #########################
    # Config file for tf.Session()
    config = tf.ConfigProto(allow_soft_placement=params['allow_soft_placement'],
                            log_device_placement=params['log_device_placement'],
                            )
    config.gpu_options.allow_growth = True
    if gpu_id is None:
        gpu_id = hvd.local_rank()
    config.gpu_options.visible_device_list = str(gpu_id)
    config.gpu_options.force_gpu_compatible = True
    config.intra_op_parallelism_threads = 6
    config.inter_op_parallelism_threads = max(1, cpu_count() // 6)
    #config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    #jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
    # JIT causes gcc errors on dgx-dl and is built without on Summit.
    sess = tf.Session(config=config)
    ############################
    # Setting up Checkpointing #
    ###########################
    last_step = 0
    if params['restart']:
        # Check if training is a restart from checkpoint
        ckpt = tf.train.get_checkpoint_state(params['checkpt_dir'])
        if ckpt is None:
            print_rank('<ERROR> Could not restart from checkpoint %s' % params['checkpt_dir'])
        else:
            # Step number is encoded in the checkpoint filename suffix.
            last_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print_rank("Restoring from previous checkpoint @ step=%d" % last_step)
    global_step = tf.Variable(last_step, name='global_step', trainable=False)
    ############################################
    # Setup Graph, Input pipeline and optimizer#
    ############################################
    # Start building the graph
    # Setup data stream
    with tf.device(params['CPU_ID']):
        with tf.name_scope('Input') as _:
            if params['filetype'] == 'tfrecord':
                dset = inputs.DatasetTFRecords(params, dataset=params['dataset'], debug=False)
            elif params['filetype'] == 'lmdb':
                dset = inputs.DatasetLMDB(params, dataset=params['dataset'], debug=params['debug'])
            images, labels = dset.minibatch()
            # Staging images on host
            staging_op, (images, labels) = dset.stage([images, labels])
    with tf.device('/gpu:%d' % hvd.local_rank()):
        # Copy images from host to device
        gpucopy_op, (images, labels) = dset.stage([images, labels])
        IO_ops = [staging_op, gpucopy_op]
    ##################
    # Building Model#
    ##################
    # Build model, forward propagate, and calculate loss
    scope = 'model'
    summary = False
    if params['debug']:
        summary = True
    print_rank('Starting up queue of images+labels: %s, %s ' % (format(images.get_shape()),
                                                                format(labels.get_shape())))
    with tf.variable_scope(scope,
                           # Force all variables to be stored as float32
                           custom_getter=float32_variable_storage_getter) as _:
        # Setup Neural Net — the network class is selected by configuration.
        if params['network_class'] == 'resnet':
            n_net = network.ResNet(scope, params, hyper_params, network_config, images, labels,
                                   operation='train', summary=summary, verbose=False)
        if params['network_class'] == 'cnn':
            n_net = network.ConvNet(scope, params, hyper_params, network_config, images, labels,
                                    operation='train', summary=summary, verbose=True)
        if params['network_class'] == 'fcdensenet':
            n_net = network.FCDenseNet(scope, params, hyper_params, network_config, images, labels,
                                       operation='train', summary=summary, verbose=True)
        if params['network_class'] == 'fcnet':
            n_net = network.FCNet(scope, params, hyper_params, network_config, images, labels,
                                  operation='train', summary=summary, verbose=True)
        if params['network_class'] == 'YNet':
            n_net = network.YNet(scope, params, hyper_params, network_config, images, labels,
                                 operation='train', summary=summary, verbose=True)
        ###### XLA compilation #########
        #if params['network_class'] == 'fcdensenet':
        #    def wrap_n_net(*args):
        #        images, labels = args
        #        n_net = network.FCDenseNet(scope, params, hyper_params, network_config, images, labels,
        #                                   operation='train', summary=False, verbose=True)
        #        n_net.build_model()
        #        return n_net.model_output
        #
        #    n_net.model_output = xla.compile(wrap_n_net, inputs=[images, labels])
        ##############################
        # Build it and propagate images through it.
        n_net.build_model()
        # calculate the total loss
        total_loss, loss_averages_op = losses.calc_loss(n_net, scope, hyper_params, params, labels, step=global_step, images=images, summary=summary)
        #get summaries, except for the one produced by string_input_producer
        if summary: summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
        # print_rank([scope.name for scope in n_net.scopes])
    #######################################
    # Apply Gradients and setup train op #
    #######################################
    # get learning policy
    def learning_policy_func(step):
        # Warmup + decay schedule from lr_policies.
        return lr_policies.decay_warmup(params, hyper_params, step)
    ## TODO: implement other policies in lr_policies
    # Gradient accumulation: skip the weight update except every iter_size steps.
    iter_size = params.get('accumulate_step', 0)
    skip_update_cond = tf.cast(tf.floormod(global_step, tf.constant(iter_size, dtype=tf.int32)), tf.bool)
    if params['IMAGE_FP16']:
        opt_type = 'mixed'
    else:
        opt_type = tf.float32
    # setup optimizer
    opt_dict = hyper_params['optimization']['params']
    train_opt, learning_rate = optimizers.optimize_loss(total_loss, hyper_params['optimization']['name'],
                                                        opt_dict, learning_policy_func, run_params=params, hyper_params=hyper_params, iter_size=iter_size, dtype=opt_type,
                                                        loss_scaling=hyper_params.get('loss_scaling', 1.0),
                                                        skip_update_cond=skip_update_cond,
                                                        on_horovod=True, model_scopes=n_net.scopes)
    # Gather all training related ops into a single one.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    increment_op = tf.assign_add(global_step, 1)
    # Exponential moving average of the trainable variables.
    ema = tf.train.ExponentialMovingAverage(decay=0.9, num_updates=global_step)
    all_ops = tf.group(*([train_opt] + update_ops + IO_ops + [increment_op]))
    with tf.control_dependencies([all_ops]):
        # Running train_op therefore runs optimizer + updates + I/O + step increment.
        train_op = ema.apply(tf.trainable_variables())
        # train_op = tf.no_op(name='train')
    ########################
    # Setting up Summaries #
    ########################
    # Stats and summaries
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    # if hvd.rank() == 0:
    summary_writer = tf.summary.FileWriter(os.path.join(params['checkpt_dir'], str(hvd.rank())), sess.graph)
    # Add Summary histograms for trainable variables and their gradients
    if params['debug']:
        if hyper_params['network_type'] == 'inverter':
            # NHWC transpose for image summaries (model output is NCHW).
            predic = tf.transpose(n_net.model_output, perm=[0, 2, 3, 1])
            tf.summary.image("outputs", predic, max_outputs=4)
            tf.summary.image("targets", tf.transpose(labels, perm=[0, 2, 3, 1]), max_outputs=4)
            tf.summary.image("inputs", tf.transpose(tf.reduce_mean(images, axis=1, keepdims=True), perm=[0, 2, 3, 1]), max_outputs=4)
        elif hyper_params['network_type'] == 'YNet':
            predic_inverter = tf.transpose(n_net.model_output['inverter'], perm=[0, 2, 3, 1])
            tf.summary.image("output_inverter", predic_inverter, max_outputs=2)
            predic_decoder_RE = tf.transpose(n_net.model_output['decoder_RE'], perm=[0, 2, 3, 1])
            predic_decoder_IM = tf.transpose(n_net.model_output['decoder_IM'], perm=[0, 2, 3, 1])
            tf.summary.image("output_decoder_RE", predic_decoder_RE, max_outputs=2)
            tf.summary.image("output_decoder_IM", predic_decoder_IM, max_outputs=2)
            new_labels = tf.unstack(labels, axis=1)
            for label, tag in zip(new_labels, ['potential', 'probe_RE', 'probe_IM']):
                label = tf.expand_dims(label, axis=-1)
                # label = tf.transpose(label, perm=[0,2,3,1])
                tf.summary.image(tag, label, max_outputs=2)
            tf.summary.image("inputs", tf.transpose(tf.reduce_mean(images, axis=1, keepdims=True), perm=[0, 2, 3, 1]), max_outputs=4)
    summary_merged = tf.summary.merge_all()
    ###############################
    # Setting up training session #
    ###############################
    #Initialize variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Sync
    print_rank('Syncing horovod ranks...')
    sync_op = hvd.broadcast_global_variables(0)
    sess.run(sync_op)
    # prefill pipeline first
    print_rank('Prefilling I/O pipeline...')
    for i in range(len(IO_ops)):
        sess.run(IO_ops[:i + 1])
    # Saver and Checkpoint restore
    checkpoint_file = os.path.join(params['checkpt_dir'], 'model.ckpt')
    saver = tf.train.Saver(max_to_keep=None, save_relative_paths=True)
    # Check if training is a restart from checkpoint
    if params['restart'] and ckpt is not None:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print_rank("Restoring from previous checkpoint @ step=%d" % last_step)
    # Train
    train_elf = TrainHelper(params, saver, summary_writer, n_net.get_ops(), last_step=last_step, log_freq=params['log_frequency'])
    saveStep = params['save_step']
    validateStep = params['validate_step']
    summaryStep = params['summary_step']
    train_elf.run_summary()
    maxSteps = params['max_steps']
    logFreq = params['log_frequency']
    traceStep = params['trace_step']
    maxTime = params.get('max_time', 1e12)
    val_results = []
    loss_results = []
    loss_value = 1e10
    val = 1e10
    while train_elf.last_step < maxSteps:
        train_elf.before_run()
        # Decide which periodic actions fire on this step.
        doLog = bool(train_elf.last_step % logFreq == 0)
        doSave = bool(train_elf.last_step % saveStep == 0)
        doSumm = bool(train_elf.last_step % summaryStep == 0 and params['debug'])
        doTrace = bool(train_elf.last_step == traceStep and params['gpu_trace'])
        doValidate = bool(train_elf.last_step % validateStep == 0)
        doFinish = bool(train_elf.start_time - params['start_time'] > maxTime)
        if train_elf.last_step == 1 and params['debug']:
            summary = sess.run([train_op, summary_merged])[-1]
            train_elf.write_summaries(summary)
        elif not doLog and not doSave and not doTrace and not doSumm:
            # Common case: just run one training step.
            sess.run(train_op)
        elif doLog and not doSave and not doSumm:
            _, loss_value, lr = sess.run([train_op, total_loss, learning_rate])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats(loss_value, lr)
        elif doLog and doSumm and doSave:
            _, summary, loss_value, lr = sess.run([train_op, summary_merged, total_loss, learning_rate])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats(loss_value, lr)
            train_elf.write_summaries(summary)
            if hvd.rank() == 0:
                saver.save(sess, checkpoint_file, global_step=train_elf.last_step)
            print_rank('Saved Checkpoint.')
        elif doLog and doSumm:
            _, summary, loss_value, lr = sess.run([train_op, summary_merged, total_loss, learning_rate])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats(loss_value, lr)
            train_elf.write_summaries(summary)
        elif doSumm:
            summary = sess.run([train_op, summary_merged])[-1]
            train_elf.write_summaries(summary)
        elif doSave:
            if hvd.rank() == 0:
                saver.save(sess, checkpoint_file, global_step=train_elf.last_step)
            print_rank('Saved Checkpoint.')
        elif doTrace:
            # Traced step: collect full run metadata, then advance the counter.
            sess.run(train_op, options=run_options, run_metadata=run_metadata)
            train_elf.save_trace(run_metadata, params['trace_dir'], params['trace_step'])
            train_elf.before_run()
        # Here we do validation:
        if doValidate:
            val = validate(network_config, hyper_params, params, sess, dset, num_batches=50)
            val_results.append((train_elf.last_step, val))
        if doFinish:
            # Wall-clock budget exceeded: tear down and return what we have.
            #val = validate(network_config, hyper_params, params, sess, dset, num_batches=50)
            #val_results.append((train_elf.last_step, val))
            tf.reset_default_graph()
            tf.keras.backend.clear_session()
            sess.close()
            return val_results, loss_results
        if np.isnan(loss_value):
            # Abort the loop on divergence; final bookkeeping happens below.
            break
    val_results.append((train_elf.last_step, val))
    tf.reset_default_graph()
    tf.keras.backend.clear_session()
    sess.close()
    return val_results, loss_results
def train_YNet(network_config, hyper_params, params, gpu_id=None):
    """
    Train the network for a number of steps using horovod and asynchronous I/O staging ops.
    :param network_config: OrderedDict, network configuration
    :param hyper_params: OrderedDict, hyper_parameters
    :param params: dict
    :param gpu_id: int or None; when None the horovod local rank is used
    :return: (val_results, loss_results) — lists of (step, value) tuples
    """
    #########################
    # Start Session         #
    #########################
    # Config file for tf.Session()
    config = tf.ConfigProto(allow_soft_placement=params['allow_soft_placement'],
                            log_device_placement=params['log_device_placement'],
                            )
    config.gpu_options.allow_growth = True
    if gpu_id is None:
        gpu_id = hvd.local_rank()
    config.gpu_options.visible_device_list = str(gpu_id)
    config.gpu_options.force_gpu_compatible = True
    config.intra_op_parallelism_threads = 6
    config.inter_op_parallelism_threads = max(1, cpu_count()//6)
    #config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    #jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
    # JIT causes gcc errors on dgx-dl and is built without on Summit.
    sess = tf.Session(config=config)
    ############################
    # Setting up Checkpointing #
    ###########################
    last_step = 0
    if params[ 'restart' ] :
        # Check if training is a restart from checkpoint
        ckpt = tf.train.get_checkpoint_state(params[ 'checkpt_dir' ] )
        if ckpt is None :
            print_rank( '<ERROR> Could not restart from checkpoint %s' % params[ 'checkpt_dir' ])
        else :
            # Checkpoint filenames end in "-<global_step>"; recover the step count.
            last_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print_rank("Restoring from previous checkpoint @ step=%d" %last_step)
    global_step = tf.Variable(last_step, name='global_step',trainable=False)
    ############################################
    # Setup Graph, Input pipeline and optimizer#
    ############################################
    # Start building the graph
    # Setup data stream
    with tf.device(params['CPU_ID']):
        with tf.name_scope('Input') as _:
            if params['filetype'] == 'tfrecord':
                dset = inputs.DatasetTFRecords(params, dataset=params['dataset'], debug=False)
            elif params['filetype'] == 'lmdb':
                dset = inputs.DatasetLMDB(params, dataset=params['dataset'], debug=params['debug'])
            images, labels = dset.minibatch()
            # Staging images on host
            staging_op, (images, labels) = dset.stage([images, labels])
    with tf.device('/gpu:%d' % hvd.local_rank()):
        # Copy images from host to device
        gpucopy_op, (images, labels) = dset.stage([images, labels])
        IO_ops = [staging_op, gpucopy_op]
    ##################
    # Building Model#
    ##################
    # Build model, forward propagate, and calculate loss
    scope = 'model'
    summary = False
    if params['debug']:
        summary = True
    print_rank('Starting up queue of images+labels: %s, %s ' % (format(images.get_shape()),
                                                                format(labels.get_shape())))
    with tf.variable_scope(scope,
            # Force all variables to be stored as float32
            custom_getter=float32_variable_storage_getter) as _:
        # Setup Neural Net
        n_net = network.YNet(scope, params, hyper_params, network_config, images, labels,
                             operation='train', summary=summary, verbose=True)
        ###### XLA compilation #########
        #if params['network_class'] == 'fcdensenet':
        #    def wrap_n_net(*args):
        #        images, labels = args
        #        n_net = network.FCDenseNet(scope, params, hyper_params, network_config, images, labels,
        #                operation='train', summary=False, verbose=True)
        #        n_net.build_model()
        #        return n_net.model_output
        #
        #    n_net.model_output = xla.compile(wrap_n_net, inputs=[images, labels])
        ##############################
        # Build it and propagate images through it.
        n_net.build_model()
        # # Stop gradients
        # stop_op = tf.stop_gradient(n_net.model_output['encoder'])
        # calculate the total loss
        # psi_out_true is fed via feed_dict during the inner regularization pass below.
        psi_out_true = images
        constr_loss = losses.get_YNet_constraint(n_net, hyper_params, params, images, weight=10)
        total_loss, _, indv_losses = losses.calc_loss(n_net, scope, hyper_params, params, labels, step=global_step, images=images, summary=summary)
        #get summaries, except for the one produced by string_input_producer
        if summary: summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
    # print_rank([scope.name for scope in n_net.scopes])
    #######################################
    # Apply Gradients and setup train op #
    #######################################
    # optimizer for unsupervised step
    # Only variables under a 'CVAE' scope participate in the constraint update.
    var_list = [itm for itm in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if 'CVAE' in str(itm.name)]
    reg_hyper = deepcopy(hyper_params)
    reg_hyper['initial_learning_rate'] = 1e-1
    def learning_policy_func_reg(step):
        return lr_policies.decay_warmup(params, reg_hyper, step)
    # NOTE(review): iter_size defaults to 0 here, and tf.floormod with a 0
    # modulus is undefined — presumably 'accumulate_step' is always present
    # in params when this path is used; TODO confirm.
    iter_size = params.get('accumulate_step', 0)
    skip_update_cond = tf.cast(tf.floormod(global_step, tf.constant(iter_size, dtype=tf.int32)), tf.bool)
    if params['IMAGE_FP16']:
        opt_type='mixed'
    else:
        opt_type=tf.float32
    reg_opt, learning_rate = optimizers.optimize_loss(constr_loss, 'Momentum',
            {'momentum': 0.9}, learning_policy_func_reg, var_list=var_list, run_params=params, hyper_params=reg_hyper, iter_size=iter_size, dtype=opt_type,
            loss_scaling=1.0,
            skip_update_cond=skip_update_cond,
            on_horovod=True, model_scopes=None)
    # optimizer for supervised step
    def learning_policy_func(step):
        return lr_policies.decay_warmup(params, hyper_params, step)
    ## TODO: implement other policies in lr_policies
    opt_dict = hyper_params['optimization']['params']
    # learning_rate is rebound here; the reg optimizer's rate above is discarded.
    train_opt, learning_rate = optimizers.optimize_loss(total_loss, hyper_params['optimization']['name'],
            opt_dict, learning_policy_func, run_params=params, hyper_params=hyper_params, iter_size=iter_size, dtype=opt_type,
            loss_scaling=hyper_params.get('loss_scaling',1.0),
            skip_update_cond=skip_update_cond,
            on_horovod=True, model_scopes=n_net.scopes)
    # Gather unsupervised training ops
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    ema = tf.train.ExponentialMovingAverage(decay=0.9, num_updates=global_step)
    # NOTE(review): this increment_op is rebuilt identically a few lines below;
    # this first assign_add node appears to be unused.
    increment_op = tf.assign_add(global_step, 1)
    # NOTE(review): update_ops (a list) is passed as a single argument to
    # tf.group here — verify tf.group accepts the nested list as intended.
    with tf.control_dependencies([tf.group(*[reg_opt, update_ops])]):
        reg_op = ema.apply(var_list=var_list)
    # Gather supervised training related ops into a single one.
    increment_op = tf.assign_add(global_step, 1)
    all_ops = tf.group(*([train_opt] + update_ops + IO_ops + [increment_op]))
    with tf.control_dependencies([all_ops]):
        train_op = ema.apply(tf.trainable_variables())
    ########################
    # Setting up Summaries #
    ########################
    # Stats and summaries
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    # if hvd.rank() == 0:
    summary_writer = tf.summary.FileWriter(os.path.join(params['checkpt_dir'], str(hvd.rank())), sess.graph)
    # Add Summary histograms for trainable variables and their gradients
    if params['debug']:
        # Model outputs are NCHW; transpose to NHWC for tf.summary.image.
        predic_inverter = tf.transpose(n_net.model_output['inverter'], perm=[0,2,3,1])
        tf.summary.image("output_inverter", predic_inverter, max_outputs=2)
        predic_decoder_RE = tf.transpose(n_net.model_output['decoder_RE'], perm=[0,2,3,1])
        predic_decoder_IM = tf.transpose(n_net.model_output['decoder_IM'], perm=[0,2,3,1])
        tf.summary.image("output_decoder_RE", predic_decoder_RE, max_outputs=2)
        tf.summary.image("output_decoder_IM", predic_decoder_IM, max_outputs=2)
        new_labels = tf.unstack(labels, axis=1)
        for label, tag in zip(new_labels, ['potential', 'probe_RE', 'probe_IM']):
            label = tf.expand_dims(label, axis=-1)
            # label = tf.transpose(label, perm=[0,2,3,1])
            tf.summary.image(tag, label, max_outputs=2)
        tf.summary.image("inputs", tf.transpose(tf.reduce_mean(images, axis=1, keepdims=True), perm=[0,2,3,1]), max_outputs=4)
    summary_merged = tf.summary.merge_all()
    ###############################
    # Setting up training session #
    ###############################
    #Initialize variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Sync
    print_rank('Syncing horovod ranks...')
    sync_op = hvd.broadcast_global_variables(0)
    sess.run(sync_op)
    # prefill pipeline first
    print_rank('Prefilling I/O pipeline...')
    for i in range(len(IO_ops)):
        sess.run(IO_ops[:i + 1])
    # Saver and Checkpoint restore
    checkpoint_file = os.path.join(params[ 'checkpt_dir' ], 'model.ckpt')
    saver = tf.train.Saver(max_to_keep=None, save_relative_paths=True)
    # Check if training is a restart from checkpoint
    if params['restart'] and ckpt is not None:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print_rank("Restoring from previous checkpoint @ step=%d" % last_step)
    # Train
    train_elf = TrainHelper_YNet(params, saver, summary_writer, n_net.get_ops(), last_step=last_step, log_freq=params['log_frequency'])
    saveStep = params['save_step']
    validateStep = params['validate_step']
    summaryStep = params['summary_step']
    train_elf.run_summary()
    maxSteps  = params[ 'max_steps' ]
    logFreq   = params[ 'log_frequency' ]
    traceStep = params[ 'trace_step' ]
    maxTime = params.get('max_time', 1e12)
    inner_loop = hyper_params.get('inner_iter', 1e12)
    val_results = []
    loss_results = []
    loss_value = 1e10
    val = 1e10
    current_batch = np.zeros(images.shape.as_list(), dtype=np.float32)
    batch_buffer = []
    while train_elf.last_step < maxSteps :
        # batch_buffer.append(images.eval(session=sess))
        train_elf.before_run()
        doLog      = bool(train_elf.last_step % logFreq == 0)
        doSave     = bool(train_elf.last_step % saveStep == 0)
        doSumm     = bool(train_elf.last_step % summaryStep == 0 and params['debug'])
        doTrace    = bool(train_elf.last_step == traceStep and params['gpu_trace'])
        doValidate = bool(train_elf.last_step % validateStep == 0)
        doFinish   = bool(train_elf.start_time - params['start_time'] > maxTime)
        # The flags above can overlap; only the first matching branch below
        # runs, so branch order decides which combination of log/summary/save
        # happens at a given step.
        if train_elf.last_step == 1 and params['debug']:
            _, summary, current_batch = sess.run([train_op, summary_merged, images])
            train_elf.write_summaries( summary )
        elif not doLog and not doSave and not doTrace and not doSumm:
            _, current_batch = sess.run([train_op, images])
        elif doLog and not doSave and not doSumm:
            _, lr, loss_value, aux_losses, current_batch = sess.run( [ train_op, learning_rate, total_loss, indv_losses, images])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats( loss_value, aux_losses, lr)
        elif doLog and doSumm and doSave :
            _, summary, loss_value, aux_losses, lr, current_batch = sess.run( [ train_op, summary_merged, total_loss, indv_losses,
                                                                                learning_rate, images ])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats( loss_value, aux_losses, lr )
            train_elf.write_summaries( summary )
            if hvd.rank( ) == 0 :
                saver.save(sess, checkpoint_file, global_step=train_elf.last_step)
            print_rank('Saved Checkpoint.')
        elif doLog and doSumm :
            _, summary, loss_value, aux_losses, lr, current_batch = sess.run( [ train_op, summary_merged, total_loss, indv_losses, learning_rate, images ])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats( loss_value, aux_losses, lr )
            train_elf.write_summaries( summary )
        elif doSumm:
            _, summary, current_batch = sess.run([train_op, summary_merged, images])
            train_elf.write_summaries( summary )
        elif doSave :
            if hvd.rank( ) == 0 :
                saver.save(sess, checkpoint_file, global_step=train_elf.last_step)
            print_rank('Saved Checkpoint.')
        elif doTrace :
            sess.run(train_op, options=run_options, run_metadata=run_metadata)
            train_elf.save_trace(run_metadata, params[ 'trace_dir' ], params[ 'trace_step' ] )
            train_elf.before_run()
        # Here we do validation:
        if doValidate:
            val = validate(network_config, hyper_params, params, sess, dset, num_batches=50)
            val_results.append((train_elf.last_step,val))
        if doFinish:
            #val = validate(network_config, hyper_params, params, sess, dset, num_batches=50)
            #val_results.append((train_elf.last_step, val))
            tf.reset_default_graph()
            tf.keras.backend.clear_session()
            sess.close()
            return val_results, loss_results
        if np.isnan(loss_value):
            break
        # Inner (unsupervised) regularization pass: when inner_iter is small,
        # replay the buffered input batches through reg_op every inner_loop
        # steps, feeding them back through the psi_out_true placeholder.
        if inner_loop < 100:
            batch_buffer.append(current_batch)
            if bool(train_elf.last_step % inner_loop == 0 and train_elf.last_step >= 10):
                for itr, current_batch in enumerate(batch_buffer):
                    _, constr_val = sess.run([reg_op, constr_loss], feed_dict={psi_out_true:current_batch})
                    if doLog:
                        print_rank('\t\tstep={}, reg iter={}, constr_loss={:2.3e}'.format(train_elf.last_step, itr, constr_val))
                del batch_buffer
                batch_buffer = []
    # Record the most recent validation value at exit, then tear down.
    val_results.append((train_elf.last_step,val))
    tf.reset_default_graph()
    tf.keras.backend.clear_session()
    sess.close()
    return val_results, loss_results
def validate(network_config, hyper_params, params, sess, dset, num_batches=10):
    """
    Runs validation with current weights on the live training session.
    :param network_config: OrderedDict, network configuration
    :param hyper_params: OrderedDict, hyper-parameters
    :param params: dict, run parameters
    :param sess: tf.Session of the ongoing training run
    :param dset: dataset object; switched to 'eval' mode here
    :param num_batches: number of validation batches, default 10. If None,
        the full dataset (dset.num_samples) is used.
    :return: network-type dependent metric (per-class MSE array, accuracy
        array, or scalar reconstruction error), or None.
    """
    print_rank("Running Validation ..." )
    with tf.device(params['CPU_ID']):
        # Get Test data
        dset.set_mode(mode='eval')
        images, labels = dset.minibatch()
        # Staging images on host
        staging_op, (images, labels) = dset.stage([images, labels])
    with tf.device('/gpu:%d' % hvd.local_rank()):
        # Copy images from host to device
        gpucopy_op, (images, labels) = dset.stage([images, labels])
    IO_ops = [staging_op, gpucopy_op]
    scope = 'model'
    summary = False
    # prefill pipeline first
    print_rank('Prefilling I/O pipeline...')
    for i in range(len(IO_ops)):
        sess.run(IO_ops[:i + 1])
    # reuse=True: share weights with the training graph built under 'model'.
    with tf.variable_scope(scope, reuse=True) as _:
        # Setup Neural Net
        params['IMAGE_FP16'] = False
        if images.dtype != tf.float32:
            images = tf.cast(images, tf.float32)
        # Setup Neural Net
        if params['network_class'] == 'resnet':
            n_net = network.ResNet(scope, params, hyper_params, network_config, images, labels,
                                   operation='eval', summary=False, verbose=False)
        if params['network_class'] == 'cnn':
            n_net = network.ConvNet(scope, params, hyper_params, network_config, images, labels,
                                    operation='eval', summary=False, verbose=False)
        if params['network_class'] == 'fcdensenet':
            n_net = network.FCDenseNet(scope, params, hyper_params, network_config, images, labels,
                                       operation='eval', summary=False, verbose=False)
        if params['network_class'] == 'fcnet':
            n_net = network.FCNet(scope, params, hyper_params, network_config, images, labels,
                                  operation='eval', summary=summary, verbose=True)
        if params['network_class'] == 'YNet':
            n_net = network.YNet(scope, params, hyper_params, network_config, images, labels,
                                 operation='eval', summary=summary, verbose=True)
        # Build it and propagate images through it.
        n_net.build_model()
        # Calculate predictions
        if hyper_params['network_type'] == 'regressor' or hyper_params['network_type'] == 'classifier':
            labels_shape = labels.get_shape().as_list()
            layer_params={'bias':labels_shape[-1], 'weights':labels_shape[-1],'regularize':False}
            logits = losses.fully_connected(n_net, layer_params, params['batch_size'],
                                            name='linear',reuse=None)
        else:
            pass
            #TODO: implement prediction layer for hybrid network
    # Do evaluation
    result = None
    if hyper_params['network_type'] == 'regressor':
        validation_error = tf.losses.mean_squared_error(labels, predictions=logits, reduction=tf.losses.Reduction.NONE)
        # Average validation error over the batches
        errors = np.array([sess.run(validation_error) for _ in range(num_batches)])
        errors = errors.reshape(-1, params['NUM_CLASSES'])
        avg_errors = errors.mean(0)
        result = avg_errors
        print_rank('Validation MSE: %s' % format(avg_errors))
    elif hyper_params['network_type'] == 'classifier':
        labels = tf.argmax(labels, axis=1)
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
        in_top_1_op = tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32)
        in_top_5_op = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
        eval_ops = [in_top_1_op, in_top_5_op, cross_entropy]
        output = np.array([sess.run(eval_ops) for _ in range(num_batches)])
        accuracy = output[:,:2]
        val_loss = output[:,-1]
        accuracy = accuracy.sum(axis=(0,-1))/(num_batches*params['batch_size'])*100
        val_loss = val_loss.sum()/(num_batches*params['batch_size'])
        result = accuracy
        print_rank('Validation Accuracy (.pct), Top-1: %2.2f , Top-5: %2.2f, Loss: %2.2f' %(accuracy[0], accuracy[1], val_loss))
    elif hyper_params['network_type'] == 'hybrid':
        #TODO: implement evaluation call for hybrid network
        print('not implemented')
    elif hyper_params['network_type'] == 'YNet':
        loss_params = hyper_params['loss_function']
        #model_output = tf.concat([n_net.model_output[subnet] for subnet in ['inverter', 'decoder_RE', 'decoder_IM']], axis=1)
        model_output = [n_net.model_output[subnet] for subnet in ['inverter', 'decoder_RE', 'decoder_IM']]
        labels = [tf.expand_dims(itm, axis=1) for itm in tf.unstack(labels, axis=1)]
        if loss_params['type'] == 'MSE_PAIR':
            errors = [tf.losses.mean_pairwise_squared_error(tf.cast(label, tf.float32), out)
                      for label, out in zip(labels, model_output)]
            errors = tf.stack(errors)
            loss_label= loss_params['type']
        elif loss_params['type'] == 'ABS_DIFF':
            loss_label= 'ABS_DIFF'
            # NOTE(review): labels/model_output are Python lists here; tf.cast
            # relies on implicit tensor conversion — confirm shapes stack.
            errors = tf.losses.absolute_difference(tf.cast(labels, tf.float32), tf.cast(model_output, tf.float32), reduction=tf.losses.Reduction.SUM)
        elif loss_params['type'] == 'MSE':
            errors = tf.losses.mean_squared_error(tf.cast(labels, tf.float32), tf.cast(model_output, tf.float32), reduction=tf.losses.Reduction.SUM)
            loss_label= loss_params['type']
        errors = tf.expand_dims(errors,axis=0)
        error_averaging = hvd.allreduce(errors)
        if num_batches is not None:
            num_samples = num_batches
        else:
            # BUGFIX: this branch was `elif num_batches > dset.num_samples:`,
            # which is only reached when num_batches is None and then raises
            # TypeError (None > int) in Python 3. Use the dataset size as the
            # fallback, mirroring the 'inverter' branch below.
            num_samples = dset.num_samples
        errors = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(num_samples//params['batch_size'])])
        result = errors.mean(0)
        print_rank('Validation Reconstruction Error %s: '% loss_label, result)
    elif hyper_params['network_type'] == 'inverter':
        loss_params = hyper_params['loss_function']
        if labels.shape.as_list()[1] > 1:
            # Keep only the first channel (potential); drop probe_RE/probe_IM.
            labels, _, _ = [tf.expand_dims(itm, axis=1) for itm in tf.unstack(labels, axis=1)]
        if loss_params['type'] == 'MSE_PAIR':
            errors = tf.losses.mean_pairwise_squared_error(tf.cast(labels, tf.float32), tf.cast(n_net.model_output, tf.float32))
            loss_label= loss_params['type']
        elif loss_params['type'] == 'rMSE':
            # Relative L2-norm error in percent.
            labels = tf.cast(labels, tf.float32)
            l2_true = tf.sqrt(tf.reduce_sum(labels ** 2, axis=[1,2,3]))
            l2_output = tf.sqrt(tf.reduce_sum(n_net.model_output **2, axis = [1,2,3]))
            errors = tf.reduce_mean(tf.abs(l2_true - l2_output)/l2_true)
            errors *= 100
            loss_label= loss_params['type']
        else:
            loss_label= 'ABS_DIFF'
            errors = tf.losses.absolute_difference(tf.cast(labels, tf.float32), tf.cast(n_net.model_output, tf.float32), reduction=tf.losses.Reduction.MEAN)
        errors = tf.expand_dims(errors,axis=0)
        error_averaging = hvd.allreduce(errors, average=True)
        if num_batches is not None:
            num_samples = num_batches
        else:
            num_samples = dset.num_samples
        errors = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(num_samples//params['batch_size'])])
        result = errors.mean()
        print_rank('Validation Reconstruction Error %s: %3.3e' % (loss_label, result))
        tf.summary.scalar("Validation_loss_label_%s" % loss_label, tf.constant(errors.mean()))
    return result
def validate_ckpt(network_config, hyper_params, params, num_batches=None,
                  last_model= False, sleep=-1):
    """
    Runs evaluation with current weights
    :param params:
    :param hyper_params:
    :param network_config:
    :param num_batches: default 100.
    :param last_model: if True, evaluate only the newest checkpoint.
    :params sleep: number of seconds to sleep. for single eval pass sleep<0.
    :return:
    """
    #########################
    # Start Session         #
    #########################
    # Config file for tf.Session()
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False,
                            )
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    config.intra_op_parallelism_threads = 1
    # config.inter_op_parallelism_threads = 12
    sess = tf.Session(config=config)
    # Get Test data
    with tf.device(params['CPU_ID']):
        with tf.name_scope('Input') as _:
            if params['filetype'] == 'tfrecord':
                dset = inputs.DatasetTFRecords(params, dataset=params['dataset'], debug=False)
            elif params['filetype'] == 'lmdb':
                dset = inputs.DatasetLMDB(params, dataset=params['dataset'], debug=params['debug'])
            images, labels = dset.minibatch()
            # Staging images on host
            staging_op, (images, labels) = dset.stage([images, labels])
    with tf.device('/gpu:%d' % hvd.local_rank()):
        # Copy images from host to device
        gpucopy_op, (images, labels) = dset.stage([images, labels])
    IO_ops = [staging_op, gpucopy_op]
    scope='model'
    with tf.variable_scope(
            scope,
            # Force all variables to be stored as float32
            custom_getter=float32_variable_storage_getter) as _:
        # Setup Neural Net
        if params['network_class'] == 'resnet':
            n_net = network.ResNet(scope, params, hyper_params, network_config, tf.cast(images, tf.float32), labels,
                                   operation='eval_ckpt', summary=False, verbose=False)
        if params['network_class'] == 'cnn':
            n_net = network.ConvNet(scope, params, hyper_params, network_config, tf.cast(images, tf.float32), labels,
                                    operation='eval_ckpt', summary=False, verbose=False)
        if params['network_class'] == 'fcdensenet':
            n_net = network.FCDenseNet(scope, params, hyper_params, network_config, tf.cast(images, tf.float32),
                                       labels, operation='eval_ckpt', summary=False, verbose=True)
        if params['network_class'] == 'fcnet':
            n_net = network.FCNet(scope, params, hyper_params, network_config, images, labels,
                                  operation='eval_ckpt', summary=False, verbose=True)
        if params['network_class'] == 'YNet':
            n_net = network.YNet(scope, params, hyper_params, network_config, images, labels,
                                 operation='eval_ckpt', summary=False, verbose=True)
        # Build it and propagate images through it.
        n_net.build_model()
    # Calculate predictions
    # NOTE(review): the prediction-layer construction below is commented out,
    # but `logits` is still referenced in the 'regressor' and 'classifier'
    # branches further down — those branches would raise NameError if taken.
    #if hyper_params['network_type'] == 'regressor' or hyper_params['network_type'] == 'classifier':
    #    labels_shape = labels.get_shape().as_list()
    #    layer_params={'bias':labels_shape[-1], 'weights':labels_shape[-1],'regularize':False}
    #    logits = fully_connected(n_net, layer_params, params['batch_size'],
    #                            name='linear',reuse=None)
    #else:
    #    pass
    # Initialize variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Sync
    sync_op = hvd.broadcast_global_variables(0)
    sess.run(sync_op)
    # prefill pipeline first
    print_rank('Prefilling I/O pipeline...')
    for i in range(len(IO_ops)):
        sess.run(IO_ops[:i + 1])
    # restore from moving averages
    ema = tf.train.ExponentialMovingAverage(0.9999)
    vars_to_restore = ema.variables_to_restore()
    saver = tf.train.Saver(var_list=vars_to_restore)
    # saver = tf.train.Saver()
    # Find models in checkpoint directory
    # Checkpoints are discovered by their ".meta" files; the step number is
    # parsed from the "model.ckpt-<step>.meta" filename.
    dirs = np.array(os.listdir(params['checkpt_dir']))
    pattern = re.compile("meta")
    steps = np.array([bool(re.search(pattern,itm)) for itm in dirs])
    saved_steps = dirs[steps]
    model_steps = np.array([int(itm.split('.')[1].split('-')[-1]) for itm in saved_steps])
    model_steps = np.sort(model_steps)
    ckpt_paths = [os.path.join(params['checkpt_dir'], "model.ckpt-%s" % step) for step in model_steps]
    if last_model:
        ckpt_paths = [ckpt_paths[-1]]
        model_steps = [model_steps[-1]]
    if params['output']:
        output_dir = os.path.join(os.getcwd(), 'outputs_%s' % params['checkpt_dir'].split('/')[-1])
        if not os.path.exists(output_dir):
            tf.gfile.MakeDirs(output_dir)
    # Validate Models
    # NOTE(review): the loop variable `ckpt` is unused — the restore path is
    # rebuilt from last_step instead.
    for ckpt, last_step in zip(ckpt_paths, model_steps):
        #
        saver.restore(sess, os.path.join(params['checkpt_dir'],"model.ckpt-%s" %format(last_step)))
        print_rank("Restoring from previous checkpoint @ step=%d" % last_step)
        # Validate model
        # TODO: add hybrid validation and check that it works correctly for previous
        if hyper_params['network_type'] == 'regressor':
            validation_error = tf.losses.mean_squared_error(labels, predictions=logits, reduction=tf.losses.Reduction.NONE)
            # Average validation error over batches
            errors = np.array([sess.run([IO_ops, validation_error])[-1] for _ in range(num_batches)])
            errors = errors.reshape(-1, params['NUM_CLASSES'])
            avg_errors = errors.mean(0)
            print_rank('Validation MSE: %s' % format(avg_errors))
        elif hyper_params['network_type'] == 'classifier':
            # Average validation accuracies over batches
            label = tf.argmax(labels, axis=1)
            in_top_1_op = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32)
            in_top_5_op = tf.cast(tf.nn.in_top_k(logits, label, 5), tf.float32)
            eval_ops = [in_top_1_op,in_top_5_op]
            output = np.array([sess.run([IO_ops,eval_ops])[-1] for _ in range(num_batches)])
            accuracy = output.sum(axis=(0,-1))/(num_batches*params['batch_size'])*100
            print_rank('Validation Accuracy (.pct), Top-1: %2.2f , Top-5: %2.2f' %(accuracy[0], accuracy[1]))
        elif hyper_params['network_type'] == 'hybrid':
            pass
        elif hyper_params['network_type'] == 'inverter':
            if labels.shape.as_list()[1] > 1:
                # Keep only the first label channel; drop the rest.
                labels, _, _ = [tf.expand_dims(itm, axis=1) for itm in tf.unstack(labels, axis=1)]
            loss_params = hyper_params['loss_function']
            if params['output']:
                # Dump raw model outputs and labels to .npy files instead of
                # computing an error metric.
                output = tf.cast(n_net.model_output, tf.float32)
                print('output shape',output.get_shape().as_list())
                if num_batches is not None:
                    num_samples = num_batches
                else:
                    num_samples = dset.num_samples
                for idx in range(num_samples):
                    output_arr, label_arr = sess.run([IO_ops, n_net.model_output, labels])[-2:]
                    #label_arr = sess.run([IO_ops, labels])[-1]
                    np.save(os.path.join(output_dir,'label_%d_%d_%s.npy' % (idx, hvd.rank(), format(last_step))), label_arr)
                    np.save(os.path.join(output_dir,'output_%d_%d_%s.npy' % (idx, hvd.rank(), format(last_step))), output_arr)
            else:
                if loss_params['type'] == 'MSE_PAIR':
                    errors = tf.losses.mean_pairwise_squared_error(tf.cast(labels, tf.float32), tf.cast(n_net.model_output, tf.float32))
                    loss_label= loss_params['type']
                else:
                    loss_label= 'ABS_DIFF'
                    errors = tf.losses.absolute_difference(tf.cast(labels, tf.float32), tf.cast(n_net.model_output, tf.float32), reduction=tf.losses.Reduction.MEAN)
                errors = tf.expand_dims(errors,axis=0)
                error_averaging = hvd.allreduce(errors)
                if num_batches is not None:
                    num_samples = num_batches
                else:
                    num_samples = dset.num_samples
                error = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(num_samples)])
                print_rank('Validation Reconstruction Error %s: %3.3e' % (loss_label, error.mean()))
        elif hyper_params['network_type'] == 'YNet':
            loss_params = hyper_params['loss_function']
            # Concatenate all three YNet heads along the channel axis.
            model_output = tf.concat([n_net.model_output[subnet] for subnet in ['inverter', 'decoder_RE', 'decoder_IM']], axis=1)
            if params['output']:
                output = tf.cast(model_output, tf.float32)
                print('output shape',output.get_shape().as_list())
                if num_batches is not None:
                    num_samples = num_batches
                else:
                    num_samples = dset.num_samples
                for idx in range(num_samples):
                    output_arr, label_arr = sess.run([IO_ops, model_output, labels])[-2:]
                    #label_arr = sess.run([IO_ops, labels])[-1]
                    np.save(os.path.join(output_dir,'label_%d_%d_%s.npy' % (idx, hvd.rank(), format(last_step))), label_arr)
                    np.save(os.path.join(output_dir,'output_%d_%d_%s.npy' % (idx, hvd.rank(), format(last_step))), output_arr)
            else:
                if loss_params['type'] == 'MSE_PAIR':
                    errors = tf.losses.mean_pairwise_squared_error(tf.cast(labels, tf.float32), tf.cast(model_output, tf.float32))
                    loss_label= loss_params['type']
                else:
                    loss_label= 'ABS_DIFF'
                    errors = tf.losses.absolute_difference(tf.cast(labels, tf.float32), tf.cast(model_output, tf.float32), reduction=tf.losses.Reduction.MEAN)
                #errors = tf.expand_dims(errors,axis=0)
                #error_averaging = hvd.allreduce(errors)
                error_averaging = errors
                if num_batches is not None:
                    num_samples = num_batches
                else:
                    num_samples = dset.num_samples
                #error = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(4)])
                error = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(num_samples)])
                print('Rank=%d, Validation Reconstruction Error %s: %3.3e' % (hvd.rank(),loss_label, error.mean()))
                #print_rank('Validation Reconstruction Error %s: %3.3e' % (loss_label, error.mean()))
        # sleep<0: single evaluation pass; otherwise poll for new checkpoints.
        if sleep < 0:
            break
        else:
            print_rank('sleeping for %d s ...' % sleep)
            time.sleep(sleep)
| 49.533032
| 189
| 0.59886
| 6,676
| 54,734
| 4.674805
| 0.083283
| 0.024672
| 0.01961
| 0.016918
| 0.815694
| 0.794482
| 0.764683
| 0.754334
| 0.747413
| 0.738857
| 0
| 0.011013
| 0.271732
| 54,734
| 1,104
| 190
| 49.577899
| 0.771932
| 0.12442
| 0
| 0.691799
| 0
| 0.005291
| 0.098224
| 0.001909
| 0
| 0
| 0
| 0.003623
| 0
| 1
| 0.025132
| false
| 0.003968
| 0.025132
| 0.003968
| 0.066138
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9f7f9527ef909a08ce64bf7e9249c0a2bfd1e0b8
| 29
|
py
|
Python
|
myp/package/__init__.py
|
YunisDEV/py-scripts
|
c9eccffa3e69bb32a46fef94e0517a149f3701ea
|
[
"MIT"
] | 2
|
2021-04-03T14:16:16.000Z
|
2021-04-03T15:38:32.000Z
|
myp/package/__init__.py
|
YunisDEV/py-scripts
|
c9eccffa3e69bb32a46fef94e0517a149f3701ea
|
[
"MIT"
] | null | null | null |
myp/package/__init__.py
|
YunisDEV/py-scripts
|
c9eccffa3e69bb32a46fef94e0517a149f3701ea
|
[
"MIT"
] | 2
|
2021-04-15T10:28:28.000Z
|
2021-04-28T19:22:16.000Z
|
from .reader import MYPReader
| 29
| 29
| 0.862069
| 4
| 29
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4c884d4e81d696f03d50742d4521eae2c4b56d8a
| 211
|
py
|
Python
|
parser/team02/proyec/Valor/Valor.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/team02/proyec/Valor/Valor.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/team02/proyec/Valor/Valor.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
from ast.Expresion import Expresion
class Valor(Expresion):
    """AST node for a literal value."""
    def __init__(self,value,line,column):
        # BUGFIX: was `self.value = valor` — NameError, the parameter is
        # named `value`. Also keep line/column for error reporting instead
        # of silently discarding them.
        self.value = value
        self.line = line
        self.column = column
    def getValor(self,entorno,tree):
        """Return the wrapped literal value (environment/tree are unused)."""
        return self.value
| 23.444444
| 42
| 0.63981
| 25
| 211
| 5.24
| 0.64
| 0.206107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.274882
| 211
| 8
| 43
| 26.375
| 0.856209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
4c902ee7c18b790e653d820da759ee90d287a130
| 154
|
py
|
Python
|
dolfyn/meta/api_dumb.py
|
aidanbharath/dolfyn
|
7c8c62a780ae310b1ffdf04592fa77f400b04334
|
[
"Apache-2.0"
] | 28
|
2016-03-07T16:31:34.000Z
|
2022-03-29T03:28:36.000Z
|
dolfyn/meta/api_dumb.py
|
aidanbharath/dolfyn
|
7c8c62a780ae310b1ffdf04592fa77f400b04334
|
[
"Apache-2.0"
] | 85
|
2015-09-04T15:51:26.000Z
|
2022-03-29T20:45:08.000Z
|
dolfyn/meta/api_dumb.py
|
aidanbharath/dolfyn
|
7c8c62a780ae310b1ffdf04592fa77f400b04334
|
[
"Apache-2.0"
] | 27
|
2016-04-02T04:02:10.000Z
|
2022-03-26T02:45:06.000Z
|
valid=False
def marray(arr, *args, **kwargs):
    """No-op stand-in for the real ``marray``: ignore all extra arguments
    and hand *arr* back unchanged."""
    return arr
def unitsDict(*args, **kwargs):
    """Dumb API stub: accept anything, always yield ``None``."""
    return None
def varMeta(*args, **kwargs):
    """Dumb API stub: accept anything, always yield ``None``."""
    return None
| 14
| 31
| 0.668831
| 21
| 154
| 4.904762
| 0.52381
| 0.291262
| 0.466019
| 0.38835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 154
| 10
| 32
| 15.4
| 0.81746
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.428571
| 0.857143
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
4cac182d3495b48c2694e7a50f14cbba4414441f
| 46,231
|
py
|
Python
|
cddm/_core_nb.py
|
IJSComplexMatter/cddm
|
f4d7521ad88271027c61743b2e8a2355a40cb117
|
[
"MIT"
] | 4
|
2021-01-30T12:26:58.000Z
|
2021-11-04T15:26:21.000Z
|
cddm/_core_nb.py
|
IJSComplexMatter/cddm
|
f4d7521ad88271027c61743b2e8a2355a40cb117
|
[
"MIT"
] | 2
|
2020-03-12T15:24:04.000Z
|
2021-06-30T10:53:32.000Z
|
cddm/_core_nb.py
|
IJSComplexMatter/cddm
|
f4d7521ad88271027c61743b2e8a2355a40cb117
|
[
"MIT"
] | 4
|
2020-02-13T10:19:01.000Z
|
2021-06-18T18:52:55.000Z
|
"""
Low level numba functions
"""
from __future__ import absolute_import, print_function, division
import numpy as np
import numba as nb
from cddm.conf import C,F, I64, NUMBA_TARGET, NUMBA_FASTMATH, NUMBA_CACHE
from cddm.fft import _fft, _ifft
from cddm.decorators import doc_inherit
#Some useful functions
@nb.vectorize([F(C)], target = "cpu", cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _abs2(x):
    """Squared magnitude |x|**2 of complex data (CPU-target variant)."""
    re = x.real
    im = x.imag
    return re * re + im * im
@nb.vectorize([F(C)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def abs2(x):
    """Squared magnitude |x|**2 of complex data."""
    re = x.real
    im = x.imag
    return re * re + im * im
@nb.vectorize([F(F,F),C(C,C)], target = "cpu", cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _mean(a,b):
    """Mean value 0.5*(a+b) of two arrays (cpu variant)."""
    return 0.5 * (a+b)
@nb.vectorize([F(F,F),C(C,C)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def mean(a,b):
    """Mean value 0.5*(a+b) of two arrays."""
    return 0.5 * (a+b)
@nb.vectorize([F(F,F),C(C,C)], target = "cpu", cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _choose(a,b):
    """Returns a or b at random with equal probability (cpu variant)."""
    r = np.random.rand()
    if r >= 0.5:
        return a
    else:
        return b
@nb.vectorize([F(F,F),C(C,C)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def choose(a,b):
    """Returns a or b at random with equal probability."""
    r = np.random.rand()
    if r >= 0.5:
        return a
    else:
        return b
@nb.guvectorize([(F[:],F[:]),(C[:],C[:])],"(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def convolve(a, out):
    """Convolves input array with kernel [0.25,0.5,0.25].

    The two edge samples are copied through unfiltered. Writes to ``out``
    are delayed by one iteration so the filter stays correct when ``out``
    aliases ``a`` (in-place operation).
    """
    n = len(out)
    assert n > 2
    result = a[0]
    for i in range(1,n-1):
        out[i-1] = result
        result = 0.25*(a[i-1]+2*a[i]+a[i+1])
    # flush the last interior value computed in the loop
    out[i] = result
    out[-1] = a[-1]
# @nb.guvectorize([(F[:],F[:]),(C[:],C[:])],"(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def convolve(a, out):
# """Convolves input array with kernel [0.25,0.5,0.25]"""
# n = len(out)
# assert n > 2
# for i in range(n):
# out[i] = a[i]
@nb.guvectorize([(F[:],F[:],F[:],F[:]),(F[:],F[:],C[:],C[:])],"(n),(m),(m)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def interpolate(x_new, x,y,out):
    """Linear interpolation of samples (x, y) at the points x_new.

    Points left of the domain are extrapolated from the first segment,
    points right of the domain from the last two samples. Assumes x is
    sorted ascending — TODO confirm against callers.
    """
    assert len(x) >= 2
    for i in range(len(x_new)):
        xi = x_new[i]
        for j in range(1,len(x)):
            x0 = x[j-1]
            x1 = x[j]
            if xi <= x1:
                #interpolate or extrapolate backward
                deltay = y[j] - y[j-1]
                deltax = x1 - x0
                out[i] = (xi - x0) * deltay/ deltax + y[j-1]
                break
            #extrapolate forward (only the final segment's write survives)
            if xi > x1:
                deltay = y[-1] - y[-2]
                deltax = x1 - x0
                out[i] = (xi - x0) * deltay/deltax + y[-2]
@nb.guvectorize([(I64[:],I64[:],F[:],F[:]),(I64[:],I64[:],C[:],C[:])],"(n), (m),(m)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _log_interpolate(x_new, x,y, out):
    """Linear interpolation in semilogx space.

    Abscissa values > 1 are mapped through log before interpolating;
    values <= 1 are treated linearly, giving a hybrid lin/log axis.
    Points beyond the last sample are extrapolated from the final segment.
    """
    assert len(x) >= 2
    for i in range(len(x_new)):
        xi = x_new[i]
        log = False
        if xi > 1:
            xi = np.log(xi)
            log = True
        for j in range(1,len(x)):
            x0 = x[j-1]
            x1 = x[j]
            if x0 >= 1 and log == True:
                # segment fully in the log region: work in log coordinates
                x0 = np.log(x0)
                x1 = np.log(x1)
            # bracket test uses the original (un-logged) coordinates
            if x_new[i] <= x[j]:
                #interpolate or extrapolate backward
                deltay = y[j] - y[j-1]
                deltax = x1-x0
                out[i] = (xi - x0) * deltay / deltax + y[j-1]
                break
            #extrapolate forward for data points outside of the domain
            if xi > x1:
                deltay = y[-1] - y[-2]
                deltax = x1 - x0
                out[i] = (xi - x0) * deltay/deltax + y[-2]
def log_interpolate(x_new, x, y, out=None):
    """Linear interpolation in semilogx space."""
    # numba issue #4793: the guvectorized kernel may emit a spurious
    # divide-by-zero warning, so suppress it around the call.
    with np.errstate(divide='ignore'):
        result = _log_interpolate(x_new, x, y, out)
    return result
log_interpolate.__doc__ = _log_interpolate.__doc__
@nb.guvectorize([(F[:],F[:])],"(n)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _median(array, out):
    """Performs a running 3-point median filter on float data.

    Endpoint samples are copied through unchanged. Writes are delayed by
    one iteration so the filter stays correct when out aliases array.
    """
    n = len(array)
    assert n > 2
    result_out = array[0]
    out[-1] = array[-1]
    for i in range(1,n-1):
        # median of array[i-1], array[i], array[i+1] via pairwise compares
        if array[i] < array[i+1]:
            if array[i] < array[i-1]:
                result = min(array[i+1],array[i-1])
            else:
                result = array[i]
        else:
            if array[i] < array[i-1]:
                result = array[i]
            else:
                result = max(array[i+1],array[i-1])
        out[i-1] = result_out
        result_out = result
    # flush the last interior value
    out[i] = result_out
    #out[n-1] = result_out
    #out[0] = out[1]
def median(array, out = None):
    """Performs median filter of complex or float data."""
    array = np.asarray(array)
    if not np.iscomplexobj(array):
        return _median(array, out)
    if out is None:
        out = np.empty_like(array)
    # complex data: filter the real and imaginary channels independently,
    # writing through the .real/.imag views of the output
    _median(array.real, out.real)
    _median(array.imag, out.imag)
    return out
@nb.vectorize([F(F,F,F)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _weighted_sum_real(x, y, weight):
    """Weighted sum x*w + (1-w)*y for float data."""
    return x * weight + (1.- weight) * y
@nb.vectorize([C(C,C,C)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _weighted_sum_complex(x, y, weight):
    """Weighted sum with independent real/imag weights (weight is complex)."""
    real = x.real * weight.real + (1.- weight.real) * y.real
    imag = x.imag * weight.imag + (1.- weight.imag) * y.imag
    return real + 1j * imag
def weighted_sum(x, y, weight, out = None):
    """Performs weighted sum of two data sets, given the weight data.
    Weight must be normalized between 0 and 1. Performs:
    `x * weight + (1.- weight) * y`
    """
    # complex weight applies independent real/imag weighting
    kernel = _weighted_sum_complex if np.iscomplexobj(weight) else _weighted_sum_real
    return kernel(x, y, weight, out)
@nb.guvectorize([(F[:],F[:])],"(n)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _median_slow(array, out):
    """Performs 3-point median filter. Slow reference implementation
    (sort-based), kept for testing; endpoints are copied unchanged."""
    n = len(array)
    assert n > 2
    for i in range(1,n-1):
        median = np.sort(array[i-1:i+2])[1]
        out[i] = median
    out[0] = array[0]
    out[-1] = array[-1]
    #out[0] = out[1]
    #out[n-1] = out[n-2]
@nb.guvectorize([(F[:],F[:])],"(n)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def decreasing(array, out):
    """Performs decreasing filter. Each next element must be smaller or equal;
    larger inputs are clamped to the running value. A NaN running value
    restarts the filter at the current input."""
    n = len(array)
    for i in range(n):
        if i == 0:
            out[0] = array[0]
        else:
            if array[i] < out[i-1] or np.isnan(out[i-1]):
                out[i] = array[i]
            else:
                out[i] = out[i-1]
@nb.guvectorize([(F[:],F[:])],"(n)->(n)", target = "cpu", cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def increasing(array,out):
    """Performs increasing filter. Each next element must be greater or equal;
    smaller inputs are clamped to the running value. A NaN running value
    restarts the filter at the current input."""
    n = len(array)
    # BUG FIX: the loop previously started at range(1, n), which made the
    # `i == 0` branch unreachable and left out[0] uninitialized. Mirror the
    # sibling `decreasing` filter, which iterates range(n).
    for i in range(n):
        if i == 0:
            out[0] = array[0]
        else:
            if array[i] > out[i-1] or np.isnan(out[i-1]):
                out[i] = array[i]
            else:
                out[i] = out[i-1]
#------------------------------------------------
# low level numba-optimized computation functions
#------------------------------------------------
@nb.jit([(C[:],C[:], F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_add_vec(xv,yv, out):
    """Accumulates the elementwise real cross product Re(conj(x)*y) into out."""
    for j in range(xv.shape[0]):
        x = xv[j]
        y = yv[j]
        #calculate cross product
        tmp = x.real * y.real + x.imag * y.imag
        #add
        out[j] = out[j] + tmp
@nb.jit([(C[:],C[:], C[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_complex_add_vec(xv,yv, out):
    """Accumulates the elementwise complex cross product y*conj(x) into out."""
    for j in range(xv.shape[0]):
        x = xv[j]
        y = yv[j]
        #calculate cross product
        tmp = y * np.conj(x)
        #add
        out[j] = out[j] + tmp
@nb.jit([(C[:],C[:]), (F[:],F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_vec(x, out):
    """Accumulates x elementwise into out."""
    for j in range(x.shape[0]):
        out[j] = out[j] + x[j]
@nb.jit([(C[:],C[:],F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_squaresum_vec(x, y, out):
    """Accumulates |x|**2 + |y|**2 elementwise into out."""
    for j in range(x.shape[0]):
        xx = x[j]
        yy = y[j]
        tmp = xx.real * xx.real + xx.imag* xx.imag
        tmp = tmp + yy.real * yy.real + yy.imag * yy.imag
        out[j] = out[j] + tmp
@nb.jit([(C[:],C[:],F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_stats_vec(x, out1, out2):
    """Accumulates x into out1 (sum) and |x|**2 into out2 (sum of squares)."""
    for j in range(x.shape[0]):
        out1[j] = out1[j] + x[j]
        out2[j] = out2[j] + x[j].real * x[j].real + x[j].imag * x[j].imag
@nb.guvectorize([(C[:,:],C[:],F[:])],"(m,n)->(n),(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _calc_stats_vec(f,out1, out2):
    """Accumulates per-column sum (out1) and sum of squares (out2) over rows of f."""
    for i in range(f.shape[0]):
        _add_stats_vec(f[i],out1,out2)
@nb.guvectorize([(C[:,:],C[:,:],I64[:],I64[:],F[:,:],F[:,:])],"(l,k),(n,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_vec(f1,f2,t1,t2,dummy,out):
    """Accumulates real cross-correlation of row vectors f1, f2 sampled at
    times t1, t2 into out[m], where m = |t2[j]-t1[i]|; lags beyond the
    output length are skipped."""
    for i in range(f1.shape[0]):
        for j in range(f2.shape[0]):
            m=abs(t2[j]-t1[i])
            if m < out.shape[0]:
                _cross_corr_add_vec(f1[i],f2[j], out[m])
@nb.guvectorize([(C[:,:],C[:,:],I64[:],I64[:],C[:,:],C[:,:])],"(l,k),(n,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_complex_vec(f1,f2,t1,t2,dummy,out):
    """Accumulates complex (two-sided) cross-correlation into out; the
    signed lag m indexes out directly, so negative lags wrap to the end."""
    for i in range(f1.shape[0]):
        for j in range(f2.shape[0]):
            m=t2[j]-t1[i]
            if abs(m) < (out.shape[0]+1)//2:
                _cross_corr_complex_add_vec(f1[i],f2[j], out[m])
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],F[:],F[:])],"(m),(n),(m),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr(x,y,t1,t2,dummy,out):
    """Scalar variant of _cross_corr_vec: accumulates Re(conj(x)*y) into
    out[m] for each pair with lag m = |t2[j]-t1[i]| < len(out)."""
    for i in range(x.shape[0]):
        for j in range(y.shape[0]):
            #m = abs(j-i)
            m=abs(t2[j]-t1[i])
            if m < out.shape[0]:
                tmp = x[i].real * y[j].real + x[i].imag * y[j].imag
                out[m] += tmp
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],C[:],C[:])],"(m),(n),(m),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_complex(x,y,t1,t2,dummy,out):
    """Scalar variant of _cross_corr_complex_vec: accumulates conj(x)*y
    into out[m] at signed lag m (negative lags wrap to the end)."""
    for i in range(x.shape[0]):
        for j in range(y.shape[0]):
            m = t2[j]-t1[i]
            if abs(m) < (out.shape[0]+1)//2:
                tmp = np.conj(x[i]) *y[j]
                out[m] += tmp
@nb.jit([(C[:],C[:], F[:])], nopython = True)
def _cross_corr_add(xv,yv, out):
    """Sums Re(conj(x)*y) over all elements into the scalar slot out[0]."""
    for j in range(xv.shape[0]):
        x = xv[j]
        y = yv[j]
        #calculate cross product
        tmp = x.real * y.real + x.imag * y.imag
        #add
        out[0] = out[0] + tmp
@nb.jit([(C[:],C[:], C[:])], nopython = True)
def _cross_corr_complex_add(xv,yv, out):
    """Sums y*conj(x) over all elements into the scalar slot out[0]."""
    for j in range(xv.shape[0]):
        x = xv[j]
        y = yv[j]
        #calculate cross product
        tmp = y * np.conj(x)
        #add
        out[0] = out[0] + tmp
@nb.guvectorize([(C[:],C[:],F[:],F[:])],"(n),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_regular(x,y,dummy,out):
    """Real cross-correlation for regularly sampled data: out[i] sums both
    orderings (x against shifted y and vice versa) for each lag i."""
    for i in range(out.shape[0]):
        n = x.shape[0] - i
        _cross_corr_add(x[0:n],y[i:], out[i:i+1])
        if i > 0:
            _cross_corr_add(y[0:n],x[i:], out[i:i+1])
@nb.guvectorize([(C[:],C[:],C[:],C[:])],"(n),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_complex_regular(x,y,dummy,out):
    """Complex two-sided cross-correlation for regular data: positive lags
    fill the front of out, negative lags fill the back."""
    for i in range((out.shape[0]+1)//2):
        n = x.shape[0] - i
        _cross_corr_complex_add(x[0:n],y[i:], out[i:i+1])
        if i > 0:
            j = out.shape[0] - i
            _cross_corr_complex_add(x[i:], y[0:n], out[j:j+1])
@nb.guvectorize([(C[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_regular(x,dummy,out):
    """Real auto-correlation for regularly sampled data, one lag per out[i]."""
    for i in range(out.shape[0]):
        n = x.shape[0] - i
        _cross_corr_add(x[i:],x[0:n], out[i:i+1])
@nb.guvectorize([(C[:],C[:],C[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_complex_regular(x,dummy,out):
    """Complex auto-correlation for regularly sampled data, one lag per out[i]."""
    for i in range(out.shape[0]):
        n = x.shape[0] - i
        _cross_corr_complex_add(x[0:n],x[i:], out[i:i+1])
@nb.guvectorize([(C[:,:],I64[:],F[:,:],F[:,:])],"(l,k),(l),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_vec(f,t,dummy,out):
    """Accumulates real auto-correlation of row vectors f (sample times t)
    into out[m] for lags m = |t[j]-t[i]| < len(out)."""
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m= abs(t[j]-t[i])
            if m < out.shape[0]:
                _cross_corr_add_vec(f[i],f[j], out[m])
            #else just skip calculation
@nb.guvectorize([(C[:,:],I64[:],C[:,:],C[:,:])],"(l,k),(l),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_complex_vec(f,t,dummy,out):
    """Complex auto-correlation of row vectors f; negative lags are
    accumulated with swapped operands (i.e. as the complex conjugate)."""
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m= t[j]-t[i]
            if m >= 0:
                if m < out.shape[0]:
                    _cross_corr_complex_add_vec(f[i],f[j], out[m])
            else:
                m = abs(m)
                if m < out.shape[0]:
                    #negative tau, so store complex conjugate
                    _cross_corr_complex_add_vec(f[j],f[i], out[m])
@nb.guvectorize([(C[:],I64[:],F[:],F[:])],"(l),(l),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr(f,t,dummy,out):
    """Scalar variant of _auto_corr_vec: accumulates Re(conj(f_i)*f_j)
    into out[m] for lags m = |t[j]-t[i]| < len(out)."""
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m= abs(t[j]-t[i])
            if m < out.shape[0]:
                tmp = f[i].real * f[j].real + f[i].imag * f[j].imag
                out[m] += tmp
@nb.guvectorize([(C[:],I64[:],C[:],C[:])],"(l),(l),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_complex(f,t,dummy,out):
    """Scalar variant of _auto_corr_complex_vec: accumulates conj(f_i)*f_j,
    storing the conjugate product for negative lags."""
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m = t[j]-t[i]
            if m >= 0:
                if m < out.shape[0]:
                    tmp = np.conj(f[i]) * f[j]
                    out[m] += tmp
            else:
                m = abs(m)
                if m < out.shape[0]:
                    tmp = f[i] * np.conj(f[j])
                    out[m] += tmp
@nb.jit([(C[:],C[:], F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff_add_vec(xv,yv, out):
    """Accumulates the elementwise squared difference |x-y|**2 into out."""
    for j in range(xv.shape[0]):
        x = xv[j]
        y = yv[j]
        #calculate squared difference
        tmp = x-y
        d = tmp.real*tmp.real + tmp.imag*tmp.imag
        #add
        out[j] = out[j] + d
@nb.guvectorize([(C[:,:],C[:,:],I64[:],I64[:],F[:,:],F[:,:])],"(l,k),(n,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff_vec(f1,f2,t1,t2,dummy,out):
    """Accumulates the structure function |f2-f1|**2 of row vectors into
    out[m] for lags m = |t2[j]-t1[i]| < len(out)."""
    for i in range(f1.shape[0]):
        for j in range(f2.shape[0]):
            m=abs(t2[j]-t1[i])
            if m < out.shape[0]:
                _cross_diff_add_vec(f2[j],f1[i], out[m])
            #else just skip calculation
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],F[:],F[:])],"(m),(n),(m),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff(x,y,t1,t2,dummy,out):
    """Scalar variant of _cross_diff_vec: accumulates |y-x|**2 into out[m]."""
    for i in range(x.shape[0]):
        for j in range(y.shape[0]):
            #m = abs(j-i)
            m=abs(t2[j]-t1[i])
            if m < out.shape[0]:
                tmp = y[j]-x[i]
                d = tmp.real*tmp.real + tmp.imag*tmp.imag
                out[m] += d
@nb.jit([(C[:],C[:], F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff_add(xv,yv, out):
    """Sums |x-y|**2 over all elements into the scalar slot out[0]."""
    for j in range(xv.shape[0]):
        tmp = xv[j] - yv[j]
        d = tmp.real*tmp.real + tmp.imag*tmp.imag
        out[0] += d
@nb.guvectorize([(C[:],C[:],F[:],F[:])],"(n),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff_regular(x,y,dummy,out):
    """Structure function for regularly sampled data: out[i] sums both
    orderings of the squared difference at lag i."""
    for i in range(out.shape[0]):
        n = x.shape[0] - i
        _cross_diff_add(y[i:],x[0:n], out[i:i+1])
        if i > 0:
            _cross_diff_add(y[0:n],x[i:], out[i:i+1])
@nb.guvectorize([(C[:,:],I64[:],F[:,:],F[:,:])],"(l,k),(l),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_diff_vec(f,t,dummy,out):
    """Auto structure function of row vectors f (times t): accumulates
    |f[j]-f[i]|**2 into out[m] for lags m = |t[j]-t[i]| < len(out)."""
    for i in range(f.shape[0]):
        for j in range(i,f.shape[0]):
            m=abs(t[j]-t[i])
            if m < out.shape[0]:
                _cross_diff_add_vec(f[j],f[i], out[m])
            #else just skip calculation
@nb.guvectorize([(C[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_diff_regular(x,dummy,out):
    """Auto structure function for regularly sampled data, one lag per out[i]."""
    for i in range(out.shape[0]):
        n = x.shape[0] - i
        _cross_diff_add(x[i:],x[0:n], out[i:i+1])
@nb.guvectorize([(C[:],I64[:],F[:],F[:])],"(l),(l),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_diff(f,t,dummy,out):
    """Scalar variant of _auto_diff_vec: accumulates |f[j]-f[i]|**2 into out[m]."""
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m=abs(t[j]-t[i])
            if m < out.shape[0]:
                tmp = f[j] - f[i]
                d = tmp.real*tmp.real + tmp.imag*tmp.imag
                out[m] += d
@nb.guvectorize([(C[:,:],I64[:],I64[:],C[:,:],C[:,:]),(F[:,:],I64[:],I64[:],F[:,:],F[:,:])],"(l,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_vec(f,t1,t2,dummy,out):
    """Accumulates f[i] into out[m] once per pair (i, j) with lag
    m = |t2[j]-t1[i]| < len(out) (delay-resolved running sum)."""
    for i in range(t1.shape[0]):
        for j in range(t2.shape[0]):
            m = abs(t2[j]-t1[i])
            if m < out.shape[0]:
                _add_vec(f[i], out[m])
@nb.guvectorize([(C[:,:],I64[:],I64[:],C[:,:],C[:,:]),(F[:,:],I64[:],I64[:],F[:,:],F[:,:])],"(l,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex_vec(f,t1,t2,dummy,out):
    """Two-sided variant of _cross_sum_vec: the signed lag m indexes out
    directly, so negative lags wrap to the end of the output."""
    for i in range(t1.shape[0]):
        for j in range(t2.shape[0]):
            m = t2[j]-t1[i]
            if abs(m) < (out.shape[0]+1)//2:
                _add_vec(f[i], out[m])
@nb.guvectorize([(C[:],I64[:],I64[:],C[:],C[:]),(F[:],I64[:],I64[:],F[:],F[:])],"(l),(l),(n),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum(f,t1,t2,dummy,out):
    """Scalar variant of _cross_sum_vec: adds f[i] to out[m] per pair
    with lag m = |t2[j]-t1[i]| < len(out)."""
    for i in range(t1.shape[0]):
        for j in range(t2.shape[0]):
            m = abs(t2[j]-t1[i])
            if m < out.shape[0]:
                out[m] += f[i]
@nb.guvectorize([(C[:],I64[:],I64[:],C[:],C[:]),(F[:],I64[:],I64[:],F[:],F[:])],"(l),(l),(n),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex(f,t1,t2,dummy,out):
    """Scalar two-sided variant of _cross_sum: signed lag indexes out
    directly (negative lags wrap to the end)."""
    for i in range(t1.shape[0]):
        for j in range(t2.shape[0]):
            m = t2[j]-t1[i]
            if abs(m) < (out.shape[0]+1)//2:
                out[m] += f[i]
@nb.guvectorize([(C[:],C[:],C[:]), (F[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_regular(x,dummy,out):
    """Delay-resolved sum for regularly sampled data, computed recursively:
    the lag-0 total is built once, then each larger lag drops the two
    boundary samples (x[i-1] and x[n]) from the running value ``prev``."""
    for i in range(out.shape[0]):
        n = x.shape[0] - i
        if i == 0:
            tmp = out[0]
            tmp = tmp*0.
            for j in range(x.shape[0]):
                tmp = tmp + x[j]
            # both orderings contribute at lag 0
            prev = tmp*2
            out[0] += tmp
        if i > 0:
            prev = prev - x[i-1] - x[n]
            out[i] += prev
@nb.guvectorize([(C[:],C[:],C[:]), (F[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex_regular_inverted(x,dummy,out):
    """Two-sided delay-resolved sum (recursive scheme as in
    _cross_sum_regular) with the positive/negative lag assignment
    inverted relative to _cross_sum_complex_regular: prev1 (head-trimmed
    sum) goes to out[i], prev2 (tail-trimmed sum) to out[-i]."""
    for i in range((out.shape[0]+1)//2):
        n = x.shape[0] - i
        if i == 0:
            tmp = out[0]
            tmp = tmp*0.
            for j in range(x.shape[0]):
                tmp = tmp + x[j]
            prev1 = tmp
            prev2 = tmp
            out[0] += tmp
        if i > 0:
            prev1 = prev1 - x[i-1]
            prev2 = prev2 - x[n]
            out[i] += prev1
            out[-i] += prev2
@nb.guvectorize([(C[:],C[:],C[:]), (F[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex_regular(x,dummy,out):
    """Two-sided delay-resolved sum (recursive scheme as in
    _cross_sum_regular): prev2 (tail-trimmed sum) goes to out[i],
    prev1 (head-trimmed sum) to out[-i]."""
    for i in range((out.shape[0]+1)//2):
        n = x.shape[0] - i
        if i == 0:
            tmp = out[0]
            tmp = tmp*0.
            for j in range(x.shape[0]):
                tmp = tmp + x[j]
            prev1 = tmp
            prev2 = tmp
            out[0] += tmp
        if i > 0:
            prev1 = prev1 - x[i-1]
            prev2 = prev2 - x[n]
            out[i] += prev2
            out[-i] += prev1
@nb.guvectorize([(C[:],C[:],F[:],F[:]),(C[:],C[:],C[:],C[:])],"(n),(n),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_fft_regular(x, y, dummy, out):
    """FFT-based cross-correlation for regularly sampled data.

    Inputs are zero-padded to twice their length so the circular
    correlation becomes linear. For complex out both lag signs are kept
    (negative lags at the end); for real out they are folded together.
    """
    out_length = len(dummy)
    if np.iscomplexobj(out):
        # two-sided output: only (k+1)//2 distinct positive lags
        out_length = (out_length+1)//2
    length = len(x)
    tmp1 = np.empty((length*2), x.dtype)
    tmp1[0:length] = x
    tmp1[length:] = 0.
    tmp2 = np.empty((length*2), y.dtype)
    tmp2[0:length] = y
    tmp2[length:] = 0.
    x = _fft(tmp1, overwrite_x = True)
    y = _fft(tmp2, overwrite_x = True)
    x = np.conj(x)*y
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:out_length] += _out[:out_length]
        out[-1:-out_length:-1] += _out[-1:-out_length:-1]
    else:
        # real output: fold negative lags onto the positive ones
        out[:] += _out[:out_length].real
        out[1:] += _out[-1:-out_length:-1].real
@nb.guvectorize([(C[:],F[:],F[:]),(C[:],C[:],C[:])],"(n),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_fft_regular(x, dummy, out):
    """FFT-based auto-correlation for regularly sampled data; the input is
    zero-padded to twice its length to linearize the circular correlation."""
    out_length = len(dummy)
    length = len(x)
    tmp = np.empty((length*2), x.dtype)
    tmp[0:length] = x
    tmp[length:] = 0.
    x = _fft(tmp, overwrite_x = True)
    # power spectrum |X|^2
    x = x*np.conj(x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:] += _out[:out_length]
    else:
        out[:] += _out[:out_length].real
@nb.jit([(C[:],I64[:],C[:]),(F[:],I64[:],C[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _fill_data(x,t, out):
    """Scatters samples x into out at index positions t, ignoring indices
    that fall outside out."""
    for i in range(t.shape[0]):
        m = t[i]
        if m < out.shape[0]:
            out[m] = x[i]
@nb.jit([(I64[:],C[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _fill_ones(t, out):
    """Marks out[t[i]] = 1 for each in-range index (occurrence indicator)."""
    for i in range(t.shape[0]):
        m = t[i]
        if m < out.shape[0]:
            out[m] = 1.
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],I64[:],F[:],F[:]),(C[:],C[:],I64[:],I64[:],I64[:],C[:],C[:])],"(n),(n),(n),(n),(),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_fft(x, y, t1,t2,length, dummy, out):
    """FFT-based cross-correlation of irregularly sampled data: samples
    are scattered onto zero-filled grids of size 2*length at their time
    indices, then correlated as in _cross_corr_fft_regular."""
    out_length = len(dummy)
    if np.iscomplexobj(out):
        out_length = (out_length+1)//2
    tmp1 = np.zeros((length*2), x.dtype)
    _fill_data(x,t1, tmp1)
    #tmp1[list(t1)] = x
    tmp2 = np.zeros((length*2), y.dtype)
    _fill_data(y,t2, tmp2)
    #tmp2[list(t2)] = y
    x = _fft(tmp1, overwrite_x = True)
    y = _fft(tmp2, overwrite_x = True)
    np.conj(x, out = x)
    np.multiply(x,y, out = x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:out_length] += _out[:out_length]
        out[-1:-out_length:-1] += _out[-1:-out_length:-1]
    else:
        # real output: fold negative lags onto the positive ones
        out[:] += _out[:out_length].real
        out[1:] += _out[-1:-out_length:-1].real
@nb.guvectorize([(C[:],I64[:],I64[:],F[:],F[:]),(C[:],I64[:],I64[:],C[:],C[:])],"(n),(n),(),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_fft(x, t,length, dummy, out):
    """FFT-based auto-correlation of irregularly sampled data: samples are
    scattered onto a zero-filled grid of size 2*length at their time
    indices, then the power spectrum is inverse-transformed."""
    out_length = len(dummy)
    tmp = np.zeros((length*2), x.dtype)
    _fill_data(x,t, tmp)
    #tmp[list(t)] = x
    x = _fft(tmp, overwrite_x = True)
    y = np.conj(x)
    np.multiply(x,y, out = x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:] += _out[:out_length]
    else:
        out[:] += _out[:out_length].real
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],F[:],F[:]), (C[:],C[:],I64[:],I64[:],C[:],C[:])],"(n),(m),(n),(),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_fft(x, y,t,length, dummy, out):
    """FFT-based delay-resolved sum of irregular data: x is scattered onto
    a time grid and correlated against the pre-transformed spectrum y."""
    out_length = len(dummy)
    tmp1 = np.zeros((length*2), x.dtype)
    _fill_data(x,t, tmp1)
    x = _fft(tmp1, overwrite_x = True)
    np.multiply(np.conj(x),y, out = x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:] += np.conj(_out[:out_length])
        out[1:] += np.conj(_out[-1:-out_length:-1])
    else:
        out[:] += _out[:out_length].real
        out[1:] += _out[-1:-out_length:-1].real
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],F[:],F[:]),(C[:],C[:],I64[:],I64[:],C[:],C[:])],"(n),(m),(n),(),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex_fft(x, y,t,length, dummy, out):
    """Two-sided variant of _cross_sum_fft: positive lags fill the front
    of out, negative lags the back (only (k+1)//2 lags per side)."""
    out_length = len(dummy)
    out_length = (out_length+1)//2
    tmp1 = np.zeros((length*2), x.dtype)
    _fill_data(x,t, tmp1)
    x = _fft(tmp1, overwrite_x = True)
    np.multiply(np.conj(x),y, out = x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:out_length] += np.conj(_out[:out_length])
        out[-1:-out_length:-1] += np.conj(_out[-1:-out_length:-1])
    else:
        out[:out_length] += _out[:out_length].real
        out[-1:-out_length:-1] += _out[-1:-out_length:-1].real
#-----------------------------
# occurence count functions
@nb.jit(nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_count_cross(t1,t2,n):
    """Counts occurrences of each absolute delay |t2[jj]-t1[ii]| in the
    histogram n; delays beyond len(n) are ignored."""
    for ii in range(t1.shape[0]):
        for jj in range(t2.shape[0]):
            m = abs(t2[jj] - t1[ii])
            if m < len(n):
                n[m] += 1
@nb.jit(nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_count_cross_complex(t1,t2,n):
    """Counts occurrences of each signed delay t2[jj]-t1[ii] in the
    two-sided histogram n; negative delays wrap to the end of n."""
    # Use integer // instead of float / (same comparison result for the
    # integer lengths used here), matching the (k+1)//2 bound of the other
    # two-sided kernels, and hoist the loop-invariant bound.
    half = (len(n)+1)//2
    for ii in range(t1.shape[0]):
        for jj in range(t2.shape[0]):
            m = t2[jj] - t1[ii]
            if abs(m) < half:
                n[m] += 1
# @nb.jit([(I64[:],I64[:],I64[:,:],I64[:,:])],cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def cross_tau_time(t1,t2,tn1,tn2):
# assert len(t1) == len(t2)
# assert tn1.shape == tn2.shape
# count = np.zeros((tn1.shape[0],),tn1.dtype)
# for ii in range(t1.shape[0]):
# for jj in range(t2.shape[0]):
# m = abs(t1[ii] - t2[jj])
# if m < tn1.shape[0]:
# i = count[m]
# if i < tn1.shape[1]:
# tn1[m,i] = t1[ii]
# tn2[m,i] = t2[jj]
# count[m] +=1
@nb.jit([(I64[:],I64[:],I64[:,:],I64[:,:])],cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def cross_tau_times(t1,t2,tpos,tneg):
    """For each delay m = t1[ii]-t2[jj] records the time t1[ii] at which it
    occurs: positive delays go to row tpos[m], negative to tneg[-m]. Rows
    are filled left to right up to their capacity; further occurrences of
    a delay are silently dropped."""
    assert len(t1) == len(t2)
    # BUG FIX: previously asserted `tpos.shape == tpos.shape` (always true);
    # the intent is clearly that both output tables match in shape.
    assert tpos.shape == tneg.shape
    count_pos = np.zeros((tpos.shape[0],),tpos.dtype)
    count_neg = np.zeros((tneg.shape[0],),tneg.dtype)
    for ii in range(t1.shape[0]):
        for jj in range(t2.shape[0]):
            m = t1[ii] - t2[jj]
            if abs(m) < tpos.shape[0]:
                if m >= 0:
                    i = count_pos[m]
                    if i < tpos.shape[1]:
                        tpos[m,i] = t1[ii]
                        count_pos[m] +=1
                else:
                    m = -m
                    i = count_neg[m]
                    if i < tneg.shape[1]:
                        tneg[m,i] = t1[ii]
                        count_neg[m] +=1
@nb.jit([(I64[:],I64[:,:])],cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def auto_tau_times(t,tpos):
    """For each non-negative delay m = t[jj]-t[ii] (jj >= ii) records the
    time t[ii] in row tpos[m], up to the row's capacity. Assumes t is
    sorted ascending (asserted via m >= 0)."""
    count_pos = np.zeros((tpos.shape[0],),tpos.dtype)
    for ii in range(t.shape[0]):
        for jj in range(ii,t.shape[0]):
            m = t[jj] - t[ii]
            assert m >= 0
            if m < tpos.shape[0]:
                i = count_pos[m]
                if i < tpos.shape[1]:
                    tpos[m,i] = t[ii]
                    count_pos[m] +=1
def cross_count_mixed(t1,t2, n, period):
    """Builds the pairwise delay-count tables for mixed cross analysis.

    Collects, for each of the n delays, the times at which positive and
    negative delays occur (via cross_tau_times), then histograms the
    delays within and between those time sets.

    NOTE(review): rows are initialized to -1 as "empty", but the masks use
    `> 0`, which also excludes a legitimate time value of 0 — confirm
    whether `>= 0` was intended.
    """
    pos = np.empty((n,len(t1)//period * 2),int)
    pos[...] = -1
    neg = np.empty((n,len(t1)//period * 2),int)
    neg[...] = -1
    cross_tau_times(t1,t2,pos,neg)
    count_pos_pos = np.zeros((n, 2*n), int)
    count_neg_neg = np.zeros((n, 2*n), int)
    count_pos_neg = np.zeros((n, 2*n), int)
    for i in range(n):
        pmask = pos[i] > 0
        _add_count_cross(pos[i,pmask],pos[i,pmask],count_pos_pos[i])
        nmask = neg[i] > 0
        _add_count_cross(neg[i,nmask],neg[i,nmask],count_neg_neg[i])
        _add_count_cross(pos[i,pmask],neg[i,nmask],count_pos_neg[i])
    return count_pos_pos,count_neg_neg,count_pos_neg
def auto_count_mixed(t, n, period):
    """Builds the pairwise delay-count table for mixed auto analysis:
    collects occurrence times per delay (auto_tau_times) and histograms
    the delays among them. See the `> 0` mask caveat in cross_count_mixed."""
    pos = np.empty((n,len(t)//period * 2),int)
    pos[...] = -1
    auto_tau_times(t,pos)
    count = np.zeros((n, 2*n), int)
    for i in range(n):
        mask = pos[i] > 0
        _add_count_cross(pos[i,mask],pos[i,mask],count[i])
    return count
@nb.jit(nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_count_auto(t,n):
    """Counts occurrences of each absolute delay |t[ii]-t[jj]| (jj >= ii)
    in the histogram n; delays beyond len(n) are ignored."""
    for ii in range(t.shape[0]):
        for jj in range(ii,t.shape[0]):
            m = abs(t[ii] - t[jj])
            if m < len(n):
                n[m] += 1
#normalization functions
#-----------------------
# complex inf: sentinel returned by the complex normalizers when count == 0
CINF = C(np.inf + np.inf*1j)
@nb.vectorize([F(F,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_baseline_real(data, count, bg1, bg2):
    """data/count minus the background term Re(conj(bg1)*bg2)."""
    return data/count - (bg1.real * bg2.real + bg1.imag * bg2.imag)
@nb.vectorize([C(C,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_baseline_complex(data, count, bg1, bg2):
    """data/count minus the background term bg2*conj(bg1); CINF where count == 0."""
    if count != 0:
        return data/count - bg2 * np.conj(bg1)
    else:
        return CINF
def normalize_corr_baseline(data, count, bg1, bg2, out = None):
    """Baseline normalization of correlation data; dispatches to the
    complex or real kernel based on the dtype of data."""
    if np.iscomplexobj(data):
        kernel = _normalize_corr_baseline_complex
    else:
        kernel = _normalize_corr_baseline_real
    return kernel(data, count, bg1, bg2, out)
@nb.vectorize([F(F,I64,C,C,F,F)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_baseline_real(data, count, bg1, bg2, var, sq):
    """Baseline normalization of the real structure function:
    (data - sq/2)/count + |bg1-bg2|**2/2 + var."""
    tmp = data - 0.5 * sq
    tmp = tmp/count
    d = (bg1.real - bg2.real)
    d2 = d*d
    d = (bg1.imag - bg2.imag)
    d2 = d2 + d*d
    return tmp + (0.5 * d2) + var
@nb.vectorize([C(C,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_baseline_complex(data, count, bg1, bg2, var, sq):
    """Complex-valued baseline normalization of the structure function;
    real and imaginary channels carry their own background terms.
    Returns CINF where count == 0."""
    tmp = data - 0.5 * sq * (1. + 1j)
    if count != 0:
        tmp = tmp/count
    else:
        tmp = CINF
    # real-channel background |bg1 - bg2|^2 / 2
    d = (bg1.real - bg2.real)
    d2 = d*d
    d = (bg1.imag - bg2.imag)
    d2 = d2 + d*d
    real = (0.5 * d2) + var
    # imaginary-channel background
    d = (bg1.imag + bg2.real)
    d2 = d*d
    d = (bg1.real - bg2.imag)
    d2 = d2 + d*d
    imag = (0.5 * d2) + var
    return tmp + real + (1j* imag)
def normalize_struct_baseline(data, count, bg1, bg2, var, sq, out = None):
    """Baseline normalization of structure-function data; dispatches to
    the complex or real kernel based on the dtype of data."""
    if np.iscomplexobj(data):
        kernel = _normalize_struct_baseline_complex
    else:
        kernel = _normalize_struct_baseline_real
    return kernel(data, count, bg1, bg2, var, sq, out)
@nb.vectorize([F(F,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_compensated_real(data, count, m1, m2):
    """Compensated normalization: (data - Re(conj(m1)*m2)/count)/count."""
    tmp = m1.real * m2.real + m1.imag * m2.imag
    tmp = tmp/count
    tmp = data - tmp
    return tmp/count
@nb.vectorize([C(C,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_compensated_complex(data, count, m1, m2):
    """Compensated normalization: (data - m2*conj(m1)/count)/count;
    CINF where count == 0."""
    if count != 0:
        tmp = m2 * np.conj(m1)
        tmp = tmp/count
        tmp = data - tmp
        return tmp/count
    else:
        return CINF
def normalize_corr_compensated(data, count, m1, m2, out = None):
    """Compensated normalization of correlation data; dispatches to the
    complex or real kernel based on the dtype of data."""
    if np.iscomplexobj(data):
        kernel = _normalize_corr_compensated_complex
    else:
        kernel = _normalize_corr_compensated_real
    return kernel(data, count, m1, m2, out)
@nb.vectorize([F(F,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_compensated_subtracted_real(data, count, bg1, bg2, m1, m2):
    """Compensated normalization with background subtraction (real output):
    adds back the product of the background-corrected means."""
    tmp = m1.real * m2.real + m1.imag * m2.imag
    tmp = tmp/count
    tmp = data - tmp
    tmp = tmp/count
    tmp += (m1.real/count - bg1.real)*(m2.real/count - bg2.real)
    tmp += (m1.imag/count - bg1.imag)*(m2.imag/count - bg2.imag)
    return tmp
@nb.vectorize([C(C,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_compensated_subtracted_complex(data, count, bg1, bg2, m1, m2):
    """Complex variant of the compensated+subtracted normalization;
    CINF where count == 0."""
    if count != 0:
        tmp = m2 * np.conj(m1)
        tmp = tmp/count
        tmp = data - tmp
        tmp = tmp/count
        tmp += np.conj(m1/count-bg1)*(m2/count-bg2)
        return tmp
    else:
        return CINF
def normalize_corr_compensated_subtracted(data, count, bg1, bg2, m1, m2, out = None):
    """Compensated+subtracted normalization of correlation data; dispatches
    to the complex or real kernel based on the dtype of data."""
    if np.iscomplexobj(data):
        kernel = _normalize_corr_compensated_subtracted_complex
    else:
        kernel = _normalize_corr_compensated_subtracted_real
    return kernel(data, count, bg1, bg2, m1, m2, out)
@nb.vectorize([F(F,I64,F,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_compensated_real(data, count, var, sq, m1, m2):
    """Compensated normalization of the real structure function:
    (data + |m1-m2|**2/(2*count) - sq/2)/count + var."""
    tmp = (m1.real-m2.real)* (m1.real- m2.real)
    tmp = tmp + (m1.imag-m2.imag)* (m1.imag- m2.imag)
    tmp = 0.5*tmp/count
    tmp = data + tmp - 0.5 * sq
    return tmp/count + var
@nb.vectorize([C(C,I64,F,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_compensated_complex(data, count, var, sq, m1, m2):
    """Complex variant of the compensated structure-function normalization;
    real and imaginary channels get independent compensation terms.
    Returns CINF where count == 0."""
    if count == 0:
        return CINF
    else:
        # real-channel compensation from |m1 - m2|^2
        real = (m1.real-m2.real)* (m1.real- m2.real)
        real = real + (m1.imag-m2.imag)* (m1.imag- m2.imag)
        real = 0.5*real/count - 0.5 * sq
        real = real/count + var
        # imaginary-channel compensation
        imag = (m1.imag+m2.real)* (m1.imag+ m2.real)
        imag = imag + (m1.real-m2.imag)* (m1.real- m2.imag)
        imag = 0.5*imag/count - 0.5 * sq
        imag = imag/count + var
        c = real + 1j* imag
        return data/count + c
def normalize_struct_compensated(data, count, var, sq, m1,m2, out = None):
    """Compensated normalization of structure-function data; dispatches to
    the complex or real kernel based on the dtype of data."""
    if np.iscomplexobj(data):
        kernel = _normalize_struct_compensated_complex
    else:
        kernel = _normalize_struct_compensated_real
    return kernel(data, count, var, sq, m1, m2, out)
@nb.vectorize([F(F,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_subtracted_real(data, count, bg1, bg2, m1, m2):
    """Background-subtracted normalization (real output): removes the
    cross terms with the means and adds back Re(conj(bg1)*bg2)."""
    tmp = data
    tmp = tmp - bg1.real * m2.real - bg1.imag * m2.imag
    tmp = tmp - bg2.real * m1.real - bg2.imag * m1.imag
    return tmp/count + bg1.real * bg2.real + bg1.imag * bg2.imag
@nb.vectorize([C(C,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_subtracted_complex(data, count, bg1, bg2, m1, m2):
    """Complex variant of the background-subtracted normalization;
    CINF where count == 0."""
    tmp = data
    tmp = tmp - np.conj(bg1) * m2 - np.conj(m1) * bg2
    if count != 0:
        return tmp/count + np.conj(bg1) * bg2
    else:
        return CINF
def normalize_corr_subtracted(data, count, bg1, bg2, m1,m2, out = None):
    """Background-subtracted normalization of correlation data; dispatches
    to the complex or real kernel based on the dtype of data."""
    if np.iscomplexobj(data):
        kernel = _normalize_corr_subtracted_complex
    else:
        kernel = _normalize_corr_subtracted_real
    return kernel(data, count, bg1, bg2, m1, m2, out)
@nb.vectorize([F(F,I64,C,C,F,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_subtracted_real(data, count, bg1, bg2, var, sq, m1, m2):
    """Background-subtracted normalization of the real structure function."""
    tmp = data - 0.5 * sq
    tmp = tmp + (m1.real-m2.real)* (bg1.real- bg2.real)
    tmp = tmp + (m1.imag-m2.imag)* (bg1.imag- bg2.imag)
    tmp = tmp/count
    d = (bg1.real - bg2.real)
    d2 = d*d
    d = (bg1.imag - bg2.imag)
    d2 = d2 + d*d
    return tmp - (0.5 * d2) + var
@nb.vectorize([C(C,I64,C,C,F,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_subtracted_complex(data, count, bg1, bg2, var, sq, m1, m2):
    """Complex variant of the background-subtracted structure-function
    normalization; real and imaginary channels carry independent
    background terms. Returns CINF where count == 0."""
    if count == 0:
        return CINF
    else:
        tmp = data - 0.5 * sq * (1. + 1j)
        # cross terms of means with backgrounds, per channel
        real = (m1.real-m2.real)* (bg1.real- bg2.real)
        real += (m1.imag-m2.imag)* (bg1.imag- bg2.imag)
        imag = (m1.imag + m2.real)* (bg1.imag + bg2.real)
        imag += (m1.real-m2.imag)* (bg1.real- bg2.imag)
        tmp = tmp + real + imag * 1j
        tmp = tmp/count
        # background squared-difference terms, per channel
        d = (bg2.real - bg1.real)
        d2 = d*d
        d = (bg1.imag - bg2.imag)
        d2 = d2 + d*d
        real = (0.5 * d2) - var
        d = (bg1.imag + bg2.real)
        d2 = d*d
        d = (bg1.real - bg2.imag)
        d2 = d2 + d*d
        imag = (0.5 * d2) - var
        c = real + 1j* imag
        return tmp - c
def normalize_struct_subtracted(data, count, bg1, bg2, var, m1,m2, sq, out = None):
    """Background-subtracted normalization of structure-function data;
    dispatches to the complex or real kernel based on the dtype of data.

    NOTE(review): the vectorized kernels are declared with the argument
    order (data, count, bg1, bg2, var, sq, m1, m2); the previous code
    forwarded (..., var, m1, m2, sq, ...), feeding the complex mean m1
    into the float sq slot. Arguments are now forwarded in kernel order
    while keeping this wrapper's public signature unchanged.
    """
    if np.iscomplexobj(data):
        return _normalize_struct_subtracted_complex(data, count, bg1, bg2, var, sq, m1, m2, out)
    else:
        return _normalize_struct_subtracted_real(data, count, bg1, bg2, var, sq, m1, m2, out)
@nb.vectorize([F(F,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_0(data, count, bg1, bg2):
    """Method-0 cross-correlation normalization: data/count - Re(conj(bg1)*bg2)."""
    return data/count - (bg1.real * bg2.real + bg1.imag * bg2.imag)
@nb.vectorize([F(F,I64,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_cdiff_1(data, count, d):
    """Method-1 difference normalization: data/count - |d|**2."""
    return data/count - (d.real * d.real + d.imag*d.imag)
@nb.vectorize([F(F,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_2(data, count, bg1, bg2, m1, m2):
    """Method-2 normalization: subtract mean-background cross terms, then
    add back Re(conj(bg1)*bg2)."""
    tmp = data
    tmp = tmp - bg1.real * m2.real - bg1.imag * m2.imag
    tmp = tmp - bg2.real * m1.real - bg2.imag * m1.imag
    return tmp/count + bg1.real * bg2.real + bg1.imag * bg2.imag
@nb.vectorize([F(F,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_2b(data, count, m1, m2):
    """Variant of mode-2 normalization using only the means: removes the
    count-scaled mean cross term, then normalizes by count."""
    cross = (m1.real * m2.real + m1.imag * m2.imag) / count
    return (data - cross) / count
@nb.vectorize([F(F,I64,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_cdiff_3(data, count, dm, m1, m2):
    """Difference-function normalization with a mean-drift correction."""
    diff = m2 - m1
    # Real part of dm * conj(m2 - m1).
    cross = dm.real * diff.real + dm.imag * diff.imag
    mag2 = dm.real * dm.real + dm.imag * dm.imag
    return (data - 2 * cross) / count + mag2
@nb.vectorize([F(F,I64,C,C,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_3(data, count, bg1, bg2, sq, m1, m2):
    """Mode-3 cross-correlation normalization (structure-function style):
    like _normalize_struct_subtracted_real but without the variance offset."""
    # Real/imag parts of the background difference bg1 - bg2.
    dr = bg1.real - bg2.real
    di = bg1.imag - bg2.imag
    num = data - 0.5 * sq
    num += (m1.real - m2.real) * dr
    num += (m1.imag - m2.imag) * di
    # Remove half of |bg1 - bg2|^2 after count normalization.
    return num / count - 0.5 * (dr * dr + di * di)
@nb.vectorize([F(F,I64,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_3b(data, count, sq, m1, m2):
    """Variant of mode-3 normalization using only the means m1, m2."""
    # |m1 - m2|^2, scaled by 0.5/count before being folded into the sum.
    dr = m1.real - m2.real
    di = m1.imag - m2.imag
    mag2 = dr * dr + di * di
    return (data + 0.5 * mag2 / count - 0.5 * sq) / count
@nb.vectorize([F(F,I64,C,C,F)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_1(data, count, bg1, bg2, sq):
    """Mode-1 cross-correlation normalization: removes half the square term
    and adds back half of |bg1 - bg2|^2."""
    dr = bg1.real - bg2.real
    di = bg1.imag - bg2.imag
    return (data - 0.5 * sq) / count + 0.5 * (dr * dr + di * di)
#because of numba bug, this does not work for np.nan inputs
# @nb.jit([F(F,F)], cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def _weight_from_g(g,delta):
# tmp1 = 2*g
# tmp2 = g**2 + 1 + 2*delta**2
# return tmp1/tmp2
@nb.vectorize([F(F,F),C(C,F)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def weight_from_g(g, delta):
    """Computes weight for weighted normalization from normalized and scaled
    correlation function"""
    # |g|^2; .real/.imag work for both the float and the complex signature.
    mag2 = g.real ** 2 + g.imag ** 2
    return (2 * g) / (mag2 + 1 + delta ** 2)
@nb.vectorize([F(C,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def weight_prime_from_g(g,delta, b1, b2):
    """Computes weight for weighted normalization from normalized and scaled
    correlation function.

    Extends :func:`weight_from_g` with two complex terms ``b1``/``b2``
    (presumably normalized background amplitudes -- confirm against callers);
    the weight is a ratio of the generalized numerator and denominator below.
    """
    # s1 = |b1|^2
    s1 = b1.real * b1.real + b1.imag * b1.imag
    # s2 = |b2|^2
    s2 = b2.real * b2.real + b2.imag * b2.imag
    # r = Re(conj(b2)*b1)
    r = b1.real * b2.real + b1.imag * b2.imag
    #i = Im(conj(b2)*b1)
    i = b2.real * b1.imag - b2.imag * b1.real
    d2 = delta**2
    # g2 = |g|^2
    g2 = g.real**2 + g.imag**2
    tmp1 = 2 * g.real + 2 * r + (s1 + s2) * g.real
    tmp2 = g2 + 1 + d2 + s1 + s2 + (s2 - s1) * delta + 2 * r * g.real + 2 * i * g.imag
    return tmp1/tmp2
def weight_prime_from_d(d, delta, b1, b2):
    """Computes weight for weighted normalization from the normalized
    difference (structure) function ``d``, via g = 1 - d/2."""
    return weight_prime_from_g(1 - d/2., delta, b1, b2)
def weight_from_d(d, delta):
    """Computes weight for weighted normalization from the normalized
    difference (structure) function ``d``, via g = 1 - d/2."""
    return weight_from_g(1 - d/2., delta)
@nb.vectorize([F(F,C,F)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def sigma_weighted(w,g,delta):
    """Computes standard deviation of the weighted normalization."""
    re2 = g.real ** 2
    mag2 = re2 + g.imag ** 2
    d2 = delta ** 2
    # Variance of the weighted estimator; clip-free square root of the
    # quadratic form in the weight w.
    inner = w ** 2 * (mag2 + 1 + d2) - 4 * w * g.real + 2 * re2 - mag2 + 1 - d2
    return (0.5 * inner) ** 0.5
@nb.vectorize([F(F,C,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def sigma_prime_weighted(w,g,delta, b1, b2):
    """Computes standard deviation of the weighted normalization, generalized
    with the complex terms ``b1``/``b2`` (same role as in
    :func:`weight_prime_from_g` -- presumably background amplitudes, confirm
    against callers)."""
    # g2 = Re(g)^2, c2 = |g|^2
    g2 = g.real**2
    c2 = g2 + g.imag**2
    d2 = delta**2
    # s1 = |b1|^2
    s1 = b1.real * b1.real + b1.imag * b1.imag
    # s2 = |b2|^2
    s2 = b2.real * b2.real + b2.imag * b2.imag
    # r = Re(conj(b2)*b1)
    r = b1.real * b2.real + b1.imag * b2.imag
    #i = Im(conj(b2)*b1)
    i = b2.real * b1.imag - b2.imag * b1.real
    # Quadratic form in w, generalizing sigma_weighted with the b1/b2 terms.
    return (0.5 * (w**2 * (c2 + 1 + d2 + s1 + s2 + (s2 - s1) * delta + 2 * r * g.real + 2 * i * g.imag) \
        - 4 * w * (g.real + r + 0.5 * (s1 + s2) * g.real ) \
        + 2*g2 - c2 + 1 - d2 + s1 + s2 - (s2 - s1) * delta + 2 * r * g.real - 2 * i * g.imag))**0.5
@nb.jit
def _g(a,index):
    """Returns a[|index|], or 0. when |index| is outside the array.

    Bug fix: the bounds check was ``index > len(a)``, which let an index equal
    to ``len(a)`` fall through to ``a[index]`` -- one element past the end
    (out-of-bounds access in nopython mode).  The check is now ``>=``.
    """
    index = abs(index)
    if index >= len(a):
        return 0.
    else:
        return a[index]
# @nb.guvectorize([(F[:],F[:],F[:],F[:],I64[:,:],I64[:,:],I64[:,:],F[:])],"(),(n),(),(),(n,m),(n,m),(n,m)->(n)",target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def sigma_weighted_cross_general(weight,g,noise, delta, pp,pm,mm, out):
# w = weight[0]
# d2 = delta[0]**2
# b2 = noise[0]**2
# for i in range(len(g)):
# g2 = g[i]**2
# out[i] = (0.5 * (w**2 * (g2 + 1 + b2 + 2 * d2) - 4 * w * g[i] + g2 + 1 - d2))
# #correction terms, skipping p = 0 because it was computed above.
# for p in range(1,pp.shape[1]):
# tmp = (pp[i, p] + mm[i, p])*(_g(g, p)**2 + _g(g, p + i) * _g(g, p-i))
# tmp += pm[i, p] *(_g(g, p + i)**2 + _g(g, p - i)**2 + _g(g, p)*_g(g, p + 2 * i) + _g(g, p)*_g(g, p - 2 * i))
# tmp -= 2*w * (pp[i,p] + mm[i,p])* (_g(g,p+i)*_g(g,p) + _g(g,p-i)*_g(g,p))
# tmp -= 2*w * pm[i,p]* (_g(g,p+i)*_g(g,p) + _g(g,p-i)*_g(g,p) + _g(g,p+i)*_g(g,p+2*i) + _g(g,p-i)*_g(g,p-2*i) )
# tmp += w**2 * (pp[i,p] + mm[i,p])* (_g(g,p)**2 + _g(g,p+i)**2 + _g(g,p-i)**2)
# tmp += w**2 * pm[i,p] * (_g(g,p)**2 +_g(g,p-i)**2 + _g(g,p+i)**2 + 0.5* _g(g,p+2*i)**2+ 0.5* _g(g,p-2*i)**2 )
# out[i] = out[i] + 0.5 * tmp / (pp[i,0] + mm[i,0])
# out[i] = out[i] ** 0.5
# @nb.guvectorize([(F[:],F[:],F[:],I64[:,:],F[:])],"(n),(n),(n),(n,m)->(n)",target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def sigma_weighted_auto_general(weight,g,noise, pp, out):
# for i in range(len(g)):
# w = weight[i]
# b2 = noise[i]**2
# g2 = g[i]**2
# out[i] = 0.
# out[i] = (0.5 * (w**2 * (g2 + 1 + b2) - 4 * w * g[i] + g2 + 1))
# #correction terms, skipping p = 0 because it was computed above.
# for p in range(1,pp.shape[1]):
# tmp = pp[i, p] * (_g(g, p)**2 + _g(g, p + i) * _g(g, p-i))
# tmp -= 2*w * pp[i,p] * (_g(g,p+i)*_g(g,p) + _g(g,p-i)*_g(g,p))
# tmp += w**2 * pp[i,p] * (_g(g,p)**2 + 0.5*_g(g,p+i)**2 + 0.5*_g(g,p-i)**2)
# out[i] = out[i] + 0.5 * tmp / pp[i,0]
# out[i] = out[i] ** 0.5
| 36.259608
| 197
| 0.531137
| 7,328
| 46,231
| 3.226801
| 0.039438
| 0.009896
| 0.058361
| 0.089487
| 0.84564
| 0.817009
| 0.781401
| 0.75573
| 0.714793
| 0.688742
| 0
| 0.040988
| 0.271203
| 46,231
| 1,274
| 198
| 36.288069
| 0.66082
| 0.111094
| 0
| 0.59116
| 0
| 0.003315
| 0.017597
| 0.008786
| 0
| 0
| 0
| 0
| 0.00884
| 1
| 0.114917
| false
| 0
| 0.00663
| 0.00442
| 0.19558
| 0.001105
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4cc297220bb6209cd8957ec4a0d72a854f9d3a0e
| 120
|
py
|
Python
|
packages/api-server/api_server/models/tortoise_models/dispenser_state.py
|
Sald-for-Communication-and-IT/rmf-web
|
ec5996ab0b06440d7147170f3030b14c73d26116
|
[
"Apache-2.0"
] | 23
|
2021-04-13T23:01:12.000Z
|
2022-03-21T02:15:24.000Z
|
packages/api-server/api_server/models/tortoise_models/dispenser_state.py
|
Sald-for-Communication-and-IT/rmf-web
|
ec5996ab0b06440d7147170f3030b14c73d26116
|
[
"Apache-2.0"
] | 326
|
2021-03-10T17:32:17.000Z
|
2022-03-30T04:42:14.000Z
|
packages/api-server/api_server/models/tortoise_models/dispenser_state.py
|
Sald-for-Communication-and-IT/rmf-web
|
ec5996ab0b06440d7147170f3030b14c73d26116
|
[
"Apache-2.0"
] | 13
|
2021-04-10T10:33:36.000Z
|
2022-02-22T15:39:58.000Z
|
from tortoise.models import Model
from .json_mixin import JsonMixin
class DispenserState(Model, JsonMixin):
pass
| 15
| 39
| 0.791667
| 15
| 120
| 6.266667
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158333
| 120
| 7
| 40
| 17.142857
| 0.930693
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
4cfc4cc18de867d30548767817a505712ffadee7
| 3,354
|
py
|
Python
|
src/main/python/aut/udfs.py
|
ruebot/aut
|
4200482d4c1e0238898f1ecb4e765f52a936a846
|
[
"Apache-2.0"
] | 113
|
2017-08-01T15:33:37.000Z
|
2022-03-11T14:19:36.000Z
|
src/main/python/aut/udfs.py
|
ruebot/aut
|
4200482d4c1e0238898f1ecb4e765f52a936a846
|
[
"Apache-2.0"
] | 510
|
2017-07-06T10:33:55.000Z
|
2022-03-29T13:40:11.000Z
|
src/main/python/aut/udfs.py
|
ruebot/aut
|
4200482d4c1e0238898f1ecb4e765f52a936a846
|
[
"Apache-2.0"
] | 36
|
2017-09-20T03:32:52.000Z
|
2021-11-23T18:10:30.000Z
|
from pyspark import SparkContext
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.functions import col
def compute_image_size(col):
    """Apply the JVM ``computeImageSize`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.computeImageSize().apply(_to_seq(sc, [col], _to_java_column)))
def compute_md5(col):
    """Apply the JVM ``computeMD5`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.computeMD5().apply(_to_seq(sc, [col], _to_java_column)))
def compute_sha1(col):
    """Apply the JVM ``computeSHA1`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.computeSHA1().apply(_to_seq(sc, [col], _to_java_column)))
def detect_language(col):
    """Apply the JVM ``detectLanguage`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.detectLanguage().apply(_to_seq(sc, [col], _to_java_column)))
def detect_mime_type_tika(col):
    """Apply the JVM ``detectMimeTypeTika`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.detectMimeTypeTika().apply(_to_seq(sc, [col], _to_java_column)))
def extract_boilerplate(col):
    """Apply the JVM ``extractBoilerpipeText`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.extractBoilerpipeText().apply(_to_seq(sc, [col], _to_java_column)))
def extract_date(col, dates):
    """Apply the JVM ``extractDate`` UDF to ``col`` with the ``dates`` filter.

    Bug fix: ``dates`` was accepted but never forwarded to the UDF; it is now
    passed through like the other two-argument wrappers in this module
    (cf. ``extract_links`` and ``get_extension_mime``).
    """
    sc = SparkContext.getOrCreate()
    udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractDate().apply
    return Column(udf(_to_seq(sc, [col, dates], _to_java_column)))
def extract_domain(col):
    """Apply the JVM ``extractDomain`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.extractDomain().apply(_to_seq(sc, [col], _to_java_column)))
def extract_image_links(col, image_links):
    """Apply the JVM ``extractImageLinks`` UDF to ``col`` and ``image_links``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.extractImageLinks().apply(_to_seq(sc, [col, image_links], _to_java_column)))
def extract_links(col, links):
    """Apply the JVM ``extractLinks`` UDF to ``col`` and ``links``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.extractLinks().apply(_to_seq(sc, [col, links], _to_java_column)))
def get_extension_mime(col, mime):
    """Apply the JVM ``getExtensionMime`` UDF to ``col`` and ``mime``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.getExtensionMime().apply(_to_seq(sc, [col, mime], _to_java_column)))
def remove_http_header(col):
    """Apply the JVM ``removeHTTPHeader`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.removeHTTPHeader().apply(_to_seq(sc, [col], _to_java_column)))
def remove_html(col):
    """Apply the JVM ``removeHTML`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.removeHTML().apply(_to_seq(sc, [col], _to_java_column)))
def remove_prefix_www(col):
    """Apply the JVM ``removePrefixWWW`` UDF to ``col``."""
    sc = SparkContext.getOrCreate()
    pkg = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package
    return Column(pkg.removePrefixWWW().apply(_to_seq(sc, [col], _to_java_column)))
| 31.055556
| 88
| 0.706619
| 415
| 3,354
| 5.440964
| 0.144578
| 0.039858
| 0.079717
| 0.173605
| 0.793623
| 0.763508
| 0.763508
| 0.763508
| 0.723649
| 0.723649
| 0
| 0.00142
| 0.160107
| 3,354
| 107
| 89
| 31.345794
| 0.800142
| 0
| 0
| 0.481013
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.177215
| false
| 0
| 0.037975
| 0
| 0.392405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
980647cf4e23cbacd0782dbd7c70aa88115cdac8
| 177
|
py
|
Python
|
phileo/signals.py
|
dheeru0198/phileo
|
625dd239621ea15ba978d89eff00962930e1a68c
|
[
"BSD-3-Clause"
] | null | null | null |
phileo/signals.py
|
dheeru0198/phileo
|
625dd239621ea15ba978d89eff00962930e1a68c
|
[
"BSD-3-Clause"
] | null | null | null |
phileo/signals.py
|
dheeru0198/phileo
|
625dd239621ea15ba978d89eff00962930e1a68c
|
[
"BSD-3-Clause"
] | 1
|
2018-09-19T05:03:24.000Z
|
2018-09-19T05:03:24.000Z
|
import django.dispatch
# Signal presumably sent when an object is liked; receivers get ``like`` and
# ``request`` keyword arguments.  NOTE(review): ``providing_args`` is purely
# documentary and was removed in Django 4.0 -- confirm the targeted version.
object_liked = django.dispatch.Signal(providing_args=["like", "request"])
# Signal presumably sent when a like is removed; receivers get ``object`` and
# ``request`` keyword arguments.
object_unliked = django.dispatch.Signal(providing_args=["object", "request"])
| 29.5
| 77
| 0.779661
| 21
| 177
| 6.380952
| 0.52381
| 0.313433
| 0.298507
| 0.432836
| 0.492537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 177
| 5
| 78
| 35.4
| 0.812121
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e21ad4f612ca8c6a4b0a1d557998a14212650661
| 3,242
|
py
|
Python
|
Python/stochastic.py
|
Hieuqng/Stochastic-Modelling
|
d81f37a3d6e361ec417564a1b67046f70e8c1998
|
[
"MIT"
] | null | null | null |
Python/stochastic.py
|
Hieuqng/Stochastic-Modelling
|
d81f37a3d6e361ec417564a1b67046f70e8c1998
|
[
"MIT"
] | null | null | null |
Python/stochastic.py
|
Hieuqng/Stochastic-Modelling
|
d81f37a3d6e361ec417564a1b67046f70e8c1998
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
from scipy.stats import norm
def bachelier(So, K, sigma, T, option_type):
    '''
    Calculate European option price using Bachelier model:
    dSt = sigma * S0 * dWt
    St = S0*(1 + sigma*Wt)

    Parameter
    ---------
    So: float
        price of underlying asset at time 0
    K: float
        strike price of option
    sigma: float
        variance of Brownian motion
    T: float
        length of time
    option_type: str
        type of European option.
        Including: van call/put (vanilla), con call/put (cash-or-nothing), aon call/put (asset-or-nothing)

    Return
    ------
    val: value of the option at time 0

    Raises
    ------
    ValueError: if option_type is not one of the six supported strings.
    '''
    # Standardized moneyness; well defined for So == K (xs == 0), so no ATM
    # special case is needed.  Bug fix: the previous `So == K` shortcut
    # returned the *vanilla* ATM value sigma*So*sqrt(T/(2*pi)) for every
    # option type, which is wrong for the con/aon variants (e.g. an ATM
    # 'con call' must be norm.cdf(0) == 0.5).
    xs = (K - So) / (So * sigma * np.sqrt(T))
    sig = So * sigma * np.sqrt(T)  # absolute volatility term, hoisted
    if option_type == 'van call':
        val = (So - K) * norm.cdf(-xs) + sig * norm.pdf(-xs)
    elif option_type == 'van put':
        val = (K - So) * norm.cdf(xs) + sig * norm.pdf(xs)
    elif option_type == 'con call':
        val = norm.cdf(-xs)
    elif option_type == 'con put':
        val = norm.cdf(xs)
    elif option_type == 'aon call':
        val = So * norm.cdf(-xs) + sig * norm.pdf(-xs)
    elif option_type == 'aon put':
        val = So * norm.cdf(xs) - sig * norm.pdf(xs)
    else:
        raise ValueError("Option type is invalid. " +
                         "Should be either 'van call', 'van put', 'con call', 'con put', 'aon call', or 'aon put'")
    return val
def black_scholes(So, K, r, sigma, T, option_type):
    '''
    Calculate European option price using Black-Scholes (1973) model:
    dSt = r*dSt + sigma*St*dWt
    St = S0*exp{(r-sigma^2/2)t + sigma*Wt}

    Parameter
    ---------
    So: float
        price of underlying asset at time 0
    K: float
        strike price of option
    r: float
        drift of St
    sigma: float
        variance of Brownian motion
    T: float
        length of time
    option_type: str
        type of European option.
        Including: van call/put (vanilla), con call/put (cash-or-nothing), aon call/put (asset-or-nothing)

    Return
    ------
    val: value of the option at time 0

    Raises
    ------
    ValueError: if option_type is not one of the six supported strings.
    '''
    # d1/d2 are well defined for So == K (the log term is 0).  Bug fix: the
    # previous `So == K` shortcut returned sigma*So*sqrt(T/(2*pi)), which is
    # only the small-sigma, r -> 0 approximation of the vanilla ATM price and
    # ignored option_type entirely; the exact formulas below now apply to all
    # inputs.
    d1 = (np.log(So/K) + (r + sigma**2/2)*T) / (sigma*np.sqrt(T))
    d2 = d1 - sigma*np.sqrt(T)
    disc = np.exp(-r*T)  # discount factor e^{-rT}
    if option_type == 'van call':
        val = So*norm.cdf(d1) - K*disc*norm.cdf(d2)
    elif option_type == 'van put':
        val = -So*norm.cdf(-d1) + K*disc*norm.cdf(-d2)
    elif option_type == 'con call':
        val = disc * norm.cdf(d2)
    elif option_type == 'con put':
        val = disc * norm.cdf(-d2)
    elif option_type == 'aon call':
        val = So*norm.cdf(d1)
    elif option_type == 'aon put':
        val = So*norm.cdf(-d1)
    else:
        raise ValueError("Option type is invalid. " +
                         "Should be either 'van call', 'van put', 'con call', 'con put', 'aon call', or 'aon put'")
    return val
| 30.018519
| 116
| 0.534547
| 485
| 3,242
| 3.538144
| 0.17732
| 0.104895
| 0.081585
| 0.048951
| 0.888695
| 0.887529
| 0.859557
| 0.837413
| 0.837413
| 0.734848
| 0
| 0.013471
| 0.313078
| 3,242
| 108
| 117
| 30.018519
| 0.757072
| 0.325725
| 0
| 0.577778
| 0
| 0.044444
| 0.156942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.044444
| 0
| 0.177778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e254b05b71d66f384b33227e8cf7c7658f2a37a9
| 126
|
py
|
Python
|
src/utils.py
|
Filco306/ds-project-template
|
7f4f435aefbcdef34ca9d585cb3944569a5f466f
|
[
"Apache-2.0"
] | null | null | null |
src/utils.py
|
Filco306/ds-project-template
|
7f4f435aefbcdef34ca9d585cb3944569a5f466f
|
[
"Apache-2.0"
] | null | null | null |
src/utils.py
|
Filco306/ds-project-template
|
7f4f435aefbcdef34ca9d585cb3944569a5f466f
|
[
"Apache-2.0"
] | null | null | null |
import os
def fix_filename(filename):
    """Prefix ``filename`` with the ``config`` directory unless its first six
    characters are already ``"config"``."""
    if filename[:6] == "config":
        return filename
    return os.path.join("config", filename)
| 21
| 85
| 0.714286
| 18
| 126
| 4.944444
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.150794
| 126
| 5
| 86
| 25.2
| 0.82243
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
e25afe3d353504a87d808eaaaeb498cdabcf5f12
| 29
|
py
|
Python
|
sparse/_compressed/__init__.py
|
pettni/sparse
|
06f420daf8a88e8328e8464a462c9907601e6b01
|
[
"BSD-3-Clause"
] | null | null | null |
sparse/_compressed/__init__.py
|
pettni/sparse
|
06f420daf8a88e8328e8464a462c9907601e6b01
|
[
"BSD-3-Clause"
] | null | null | null |
sparse/_compressed/__init__.py
|
pettni/sparse
|
06f420daf8a88e8328e8464a462c9907601e6b01
|
[
"BSD-3-Clause"
] | null | null | null |
from .compressed import GXCS
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e268f1b60846dc7677f46bfe44bac5575ce543c3
| 28,089
|
py
|
Python
|
pyLMS7002Soapy/LMS7002_DCCAL.py
|
Surfndez/pyLMS7002Soapy
|
ea230dcb12048007300477e1e2e4decc5414f954
|
[
"Apache-2.0"
] | 46
|
2016-11-29T05:10:36.000Z
|
2021-10-31T19:27:46.000Z
|
pyLMS7002M/LMS7002_DCCAL.py
|
myriadrf/pyLMS7002M
|
b866deea1f05dba44c9ed1a1a4666352b811b66b
|
[
"Apache-2.0"
] | 2
|
2017-04-15T21:36:01.000Z
|
2017-06-08T09:44:26.000Z
|
pyLMS7002Soapy/LMS7002_DCCAL.py
|
Surfndez/pyLMS7002Soapy
|
ea230dcb12048007300477e1e2e4decc5414f954
|
[
"Apache-2.0"
] | 16
|
2016-11-28T20:47:55.000Z
|
2021-04-07T01:48:20.000Z
|
#***************************************************************
#* Name: LMS7002_DCCAL.py
#* Purpose: Class implementing LMS7002 DCCAL functions
#* Author: Lime Microsystems ()
#* Created: 2017-02-10
#* Copyright: Lime Microsystems (limemicro.com)
#* License:
#**************************************************************
from LMS7002_base import *
class LMS7002_DCCAL(LMS7002_base):
__slots__ = [] # Used to generate error on typos
    def __init__(self, chip):
        # Parent LMS7002 chip object; provides chipID/chipIDMR3 and the
        # register I/O used via _readReg/_writeReg in this class.
        self.chip = chip
        # NOTE(review): channel is set but not used by the DCCAL bitfields
        # below -- presumably required by the LMS7002_base helpers; confirm.
        self.channel = None
        # Register-name prefix, presumably consumed by the LMS7002_base
        # register lookup -- confirm in LMS7002_base.
        self.prefix = "DCCAL_"
#
# DCCAL_CFG (0x05C0)
#
# DCMODE
@property
def DCMODE(self):
"""
Get the value of DCMODE
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'DCMODE')
else:
raise ValueError("Bitfield DCMODE is not supported on chip version "+str(self.chip.chipID))
@DCMODE.setter
def DCMODE(self, value):
"""
Set the value of DCMODE
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1, 'MANUAL', 'AUTO']:
raise ValueError("Value must be [0,1,'MANUAL','AUTO']")
if value==0 or value=='MANUAL':
val = 0
else:
val = 1
self._writeReg('CFG', 'DCMODE', val)
else:
raise ValueError("Bitfield DCMODE is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_RXB
@property
def PD_DCDAC_RXB(self):
"""
Get the value of PD_DCDAC_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_RXB')
else:
raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_RXB.setter
def PD_DCDAC_RXB(self, value):
"""
Set the value of PD_DCDAC_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_RXB', value)
else:
raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_RXA
@property
def PD_DCDAC_RXA(self):
"""
Get the value of PD_DCDAC_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_RXA')
else:
raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_RXA.setter
def PD_DCDAC_RXA(self, value):
"""
Set the value of PD_DCDAC_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_RXA', value)
else:
raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_TXB
@property
def PD_DCDAC_TXB(self):
"""
Get the value of PD_DCDAC_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_TXB')
else:
raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_TXB.setter
def PD_DCDAC_TXB(self, value):
"""
Set the value of PD_DCDAC_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_TXB', value)
else:
raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_TXA
@property
def PD_DCDAC_TXA(self):
"""
Get the value of PD_DCDAC_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_TXA')
else:
raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_TXA.setter
def PD_DCDAC_TXA(self, value):
"""
Set the value of PD_DCDAC_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_TXA', value)
else:
raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_RXB
@property
def PD_DCCMP_RXB(self):
"""
Get the value of PD_DCCMP_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_RXB')
else:
raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_RXB.setter
def PD_DCCMP_RXB(self, value):
"""
Set the value of PD_DCCMP_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_RXB', value)
else:
raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_RXA
@property
def PD_DCCMP_RXA(self):
"""
Get the value of PD_DCCMP_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_RXA')
else:
raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_RXA.setter
def PD_DCCMP_RXA(self, value):
"""
Set the value of PD_DCCMP_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_RXA', value)
else:
raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_TXB
@property
def PD_DCCMP_TXB(self):
"""
Get the value of PD_DCCMP_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_TXB')
else:
raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_TXB.setter
def PD_DCCMP_TXB(self, value):
"""
Set the value of PD_DCCMP_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_TXB', value)
else:
raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_TXA
@property
def PD_DCCMP_TXA(self):
"""
Get the value of PD_DCCMP_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_TXA')
else:
raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_TXA.setter
def PD_DCCMP_TXA(self, value):
"""
Set the value of PD_DCCMP_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_TXA', value)
else:
raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_STAT (0x05C1)
#
# DCCAL_CALSTATUS<7:0>
@property
def DCCAL_CALSTATUS(self):
"""
Get the value of DCCAL_CALSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CALSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CALSTATUS.setter
def DCCAL_CALSTATUS(self, value):
"""
Set the value of DCCAL_CALSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CALSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
# DCCAL_CMPSTATUS<7:0>
@property
def DCCAL_CMPSTATUS(self):
"""
Get the value of DCCAL_CMPSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CMPSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CMPSTATUS.setter
def DCCAL_CMPSTATUS(self, value):
"""
Set the value of DCCAL_CMPSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CMPSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_CFG2 (0x05C2)
#
# DCCAL_CMPCFG<7:0>
@property
def DCCAL_CMPCFG(self):
"""
Get the value of DCCAL_CMPCFG<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_CMPCFG<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CMPCFG.setter
def DCCAL_CMPCFG(self, value):
"""
Set the value of DCCAL_CMPCFG<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_CMPCFG<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version "+str(self.chip.chipID))
# DCCAL_START<7:0>
@property
def DCCAL_START(self):
"""
Get the value of DCCAL_START<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_START<7:0>')
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_START.setter
def DCCAL_START(self, value):
"""
Set the value of DCCAL_START<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_START<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version "+str(self.chip.chipID))
def startRXBQ(self):
"""
Starts RXBQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<7
self.DCCAL_START = 0
def startRXBI(self):
"""
Starts RXBI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<6
self.DCCAL_START = 0
def startRXAQ(self):
"""
Starts RXAQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<5
self.DCCAL_START = 0
def startRXAI(self):
"""
Starts RXAI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<4
self.DCCAL_START = 0
def startTXBQ(self):
"""
Starts TXBQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<3
self.DCCAL_START = 0
def startTXBI(self):
"""
Starts TXBI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<2
self.DCCAL_START = 0
def startTXAQ(self):
"""
Starts TXAQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<1
self.DCCAL_START = 0
def startTXAI(self):
"""
Starts TXAI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1
self.DCCAL_START = 0
#
# DCCAL_TXAI (0x05C3)
#
@property
def DC_TXAI(self):
"""
Get the value of DC_TXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAI', 'DCRD_TXAI', 0)
self._writeReg('TXAI', 'DCRD_TXAI', 1)
self._writeReg('TXAI', 'DCRD_TXAI', 0)
val = self._readReg('TXAI', 'DC_TXAI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAI is not supported on chip version "+str(self.chip.chipID))
@DC_TXAI.setter
def DC_TXAI(self, value):
"""
Set the value of DC_TXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAI', 'DC_TXAI<10:0>', val)
self._writeReg('TXAI', 'DCWR_TXAI', 0)
self._writeReg('TXAI', 'DCWR_TXAI', 1)
self._writeReg('TXAI', 'DCWR_TXAI', 0)
else:
raise ValueError("Bitfield TXAI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXAQ (0x05C4)
#
@property
def DC_TXAQ(self):
"""
Get the value of DC_TXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAQ', 'DCRD_TXAQ', 0)
self._writeReg('TXAQ', 'DCRD_TXAQ', 1)
self._writeReg('TXAQ', 'DCRD_TXAQ', 0)
val = self._readReg('TXAQ', 'DC_TXAQ<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAQ is not supported on chip version "+str(self.chip.chipID))
@DC_TXAQ.setter
def DC_TXAQ(self, value):
"""
Set the value of DC_TXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAQ', 'DC_TXAQ<10:0>', val)
self._writeReg('TXAQ', 'DCWR_TXAQ', 0)
self._writeReg('TXAQ', 'DCWR_TXAQ', 1)
self._writeReg('TXAQ', 'DCWR_TXAQ', 0)
else:
raise ValueError("Bitfield TXAQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXBI (0x05C5)
#
@property
def DC_TXBI(self):
"""
Get the value of DC_TXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXBI', 'DCRD_TXBI', 0)
self._writeReg('TXBI', 'DCRD_TXBI', 1)
self._writeReg('TXBI', 'DCRD_TXBI', 0)
val = self._readReg('TXBI', 'DC_TXBI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXBI is not supported on chip version "+str(self.chip.chipID))
@DC_TXBI.setter
def DC_TXBI(self, value):
"""
Set the value of DC_TXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXBI', 'DC_TXBI<10:0>', val)
self._writeReg('TXBI', 'DCWR_TXBI', 0)
self._writeReg('TXBI', 'DCWR_TXBI', 1)
self._writeReg('TXBI', 'DCWR_TXBI', 0)
else:
raise ValueError("Bitfield TXBI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXBQ (0x05C6)
#
@property
def DC_TXBQ(self):
"""
Get the value of DC_TXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXBQ', 'DCRD_TXBQ', 0)
self._writeReg('TXBQ', 'DCRD_TXBQ', 1)
self._writeReg('TXBQ', 'DCRD_TXBQ', 0)
val = self._readReg('TXBQ', 'DC_TXBQ<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXBQ is not supported on chip version "+str(self.chip.chipID))
@DC_TXBQ.setter
def DC_TXBQ(self, value):
"""
Set the value of DC_TXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXBQ', 'DC_TXBQ<10:0>', val)
self._writeReg('TXBQ', 'DCWR_TXBQ', 0)
self._writeReg('TXBQ', 'DCWR_TXBQ', 1)
self._writeReg('TXBQ', 'DCWR_TXBQ', 0)
else:
raise ValueError("Bitfield TXBQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXAI (0x05C7)
#
@property
def DC_RXAI(self):
"""
Get the value of DC_RXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXAI', 'DCRD_RXAI', 0)
self._writeReg('RXAI', 'DCRD_RXAI', 1)
self._writeReg('RXAI', 'DCRD_RXAI', 0)
val = self._readReg('RXAI', 'DC_RXAI<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXAI is not supported on chip version "+str(self.chip.chipID))
@DC_RXAI.setter
def DC_RXAI(self, value):
"""
Set the value of DC_RXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXAI', 'DC_RXAI<6:0>', val)
self._writeReg('RXAI', 'DCWR_RXAI', 0)
self._writeReg('RXAI', 'DCWR_RXAI', 1)
self._writeReg('RXAI', 'DCWR_RXAI', 0)
else:
raise ValueError("Bitfield RXAI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXAQ (0x05C8)
#
@property
def DC_RXAQ(self):
"""
Get the value of DC_RXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXAQ', 'DCRD_RXAQ', 0)
self._writeReg('RXAQ', 'DCRD_RXAQ', 1)
self._writeReg('RXAQ', 'DCRD_RXAQ', 0)
val = self._readReg('RXAQ', 'DC_RXAQ<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXAQ is not supported on chip version "+str(self.chip.chipID))
@DC_RXAQ.setter
def DC_RXAQ(self, value):
"""
Set the value of DC_RXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXAQ', 'DC_RXAQ<6:0>', val)
self._writeReg('RXAQ', 'DCWR_RXAQ', 0)
self._writeReg('RXAQ', 'DCWR_RXAQ', 1)
self._writeReg('RXAQ', 'DCWR_RXAQ', 0)
else:
raise ValueError("Bitfield RXAQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXBI (0x05C9)
#
@property
def DC_RXBI(self):
"""
Get the value of DC_RXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXBI', 'DCRD_RXBI', 0)
self._writeReg('RXBI', 'DCRD_RXBI', 1)
self._writeReg('RXBI', 'DCRD_RXBI', 0)
val = self._readReg('RXBI', 'DC_RXBI<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXBI is not supported on chip version "+str(self.chip.chipID))
@DC_RXBI.setter
def DC_RXBI(self, value):
"""
Set the value of DC_RXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXBI', 'DC_RXBI<6:0>', val)
self._writeReg('RXBI', 'DCWR_RXBI', 0)
self._writeReg('RXBI', 'DCWR_RXBI', 1)
self._writeReg('RXBI', 'DCWR_RXBI', 0)
else:
raise ValueError("Bitfield RXBI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXBQ (0x05CA)
#
@property
def DC_RXBQ(self):
"""
Get the value of DC_RXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXBQ', 'DCRD_RXBQ', 0)
self._writeReg('RXBQ', 'DCRD_RXBQ', 1)
self._writeReg('RXBQ', 'DCRD_RXBQ', 0)
val = self._readReg('RXBQ', 'DC_RXBQ<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXBQ is not supported on chip version "+str(self.chip.chipID))
@DC_RXBQ.setter
def DC_RXBQ(self, value):
"""
Set the value of DC_RXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXBQ', 'DC_RXBQ<6:0>', val)
self._writeReg('RXBQ', 'DCWR_RXBQ', 0)
self._writeReg('RXBQ', 'DCWR_RXBQ', 1)
self._writeReg('RXBQ', 'DCWR_RXBQ', 0)
else:
raise ValueError("Bitfield RXBQ is not supported on chip version "+str(self.chip.chipID))
# DC_RXCDIV<7:0>
@property
def DC_RXCDIV(self):
"""
Get the value of DC_RXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CLKDIV', 'DC_RXCDIV<7:0>')
else:
raise ValueError("Bitfield DC_RXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
@DC_RXCDIV.setter
def DC_RXCDIV(self, value):
"""
Set the value of DC_RXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CLKDIV', 'DC_RXCDIV<7:0>', value)
else:
raise ValueError("Bitfield DC_RXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
# DC_TXCDIV<7:0>
@property
def DC_TXCDIV(self):
"""
Get the value of DC_TXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CLKDIV', 'DC_TXCDIV<7:0>')
else:
raise ValueError("Bitfield DC_TXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
@DC_TXCDIV.setter
def DC_TXCDIV(self, value):
"""
Set the value of DC_TXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CLKDIV', 'DC_TXCDIV<7:0>', value)
else:
raise ValueError("Bitfield DC_TXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_RXB<2:0>
@property
def HYSCMP_RXB(self):
"""
Get the value of HYSCMP_RXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_RXB<2:0>')
else:
raise ValueError("Bitfield HYSCMP_RXB<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_RXB.setter
def HYSCMP_RXB(self, value):
"""
Set the value of HYSCMP_RXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_RXB<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_RXB<2:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_RXA<2:0>
@property
def HYSCMP_RXA(self):
"""
Get the value of HYSCMP_RXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_RXA<2:0>')
else:
raise ValueError("Bitfield HYSCMP_RXA<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_RXA.setter
def HYSCMP_RXA(self, value):
"""
Set the value of HYSCMP_RXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_RXA<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_RXA<2:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_TXB<2:0>
@property
def HYSCMP_TXB(self):
"""
Get the value of HYSCMP_TXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_TXB<2:0>')
else:
raise ValueError("Bitfield HYSCMP_TXB<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_TXB.setter
def HYSCMP_TXB(self, value):
"""
Set the value of HYSCMP_TXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_TXB<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_TXB<2:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_TXA<2:0>
@property
def HYSCMP_TXA(self):
"""
Get the value of HYSCMP_TXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_TXA<2:0>')
else:
raise ValueError("Bitfield HYSCMP_TXA<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_TXA.setter
def HYSCMP_TXA(self, value):
"""
Set the value of HYSCMP_TXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_TXA<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_TXA<2:0> is not supported on chip version "+str(self.chip.chipID))
| 34.047273
| 117
| 0.546904
| 3,465
| 28,089
| 4.282828
| 0.037807
| 0.08841
| 0.101887
| 0.058221
| 0.899461
| 0.804245
| 0.745081
| 0.716779
| 0.703976
| 0.675404
| 0
| 0.032383
| 0.327174
| 28,089
| 824
| 118
| 34.088592
| 0.752844
| 0.095411
| 0
| 0.597938
| 0
| 0
| 0.214749
| 0.000878
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129897
| false
| 0
| 0.002062
| 0
| 0.191753
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e27e86f2b271ce013027a7b84740529b93fb34b8
| 27
|
py
|
Python
|
littlecheck/__init__.py
|
faho/littlecheck
|
12cad505fa6c3c4b45e5e2a0d25b043435ed0cee
|
[
"CC0-1.0"
] | 26
|
2019-06-08T22:04:46.000Z
|
2022-01-11T22:08:04.000Z
|
littlecheck/__init__.py
|
0verk1ll/littlecheck
|
5f6c024fbdf6654e7851d3fd756a6d56e167476e
|
[
"CC0-1.0"
] | 7
|
2019-06-24T15:36:59.000Z
|
2022-01-28T11:10:00.000Z
|
littlecheck/__init__.py
|
0verk1ll/littlecheck
|
5f6c024fbdf6654e7851d3fd756a6d56e167476e
|
[
"CC0-1.0"
] | 3
|
2019-06-24T15:38:18.000Z
|
2021-03-21T21:24:28.000Z
|
from .littlecheck import *
| 13.5
| 26
| 0.777778
| 3
| 27
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e28fc3f6eb0d1260175698af5dc43fccdbf6b620
| 99
|
py
|
Python
|
grid/utils.py
|
parthatom/Grid
|
7ca680b545b1fd6955ca2f3d2e0088df9d7391f4
|
[
"Apache-2.0"
] | null | null | null |
grid/utils.py
|
parthatom/Grid
|
7ca680b545b1fd6955ca2f3d2e0088df9d7391f4
|
[
"Apache-2.0"
] | null | null | null |
grid/utils.py
|
parthatom/Grid
|
7ca680b545b1fd6955ca2f3d2e0088df9d7391f4
|
[
"Apache-2.0"
] | 1
|
2019-07-03T12:01:51.000Z
|
2019-07-03T12:01:51.000Z
|
"""Utility functions."""
import os
def exec_os_cmd(command):
    """Run *command* in a shell and return its captured stdout as a string.

    NOTE(review): the command string is handed to the shell verbatim --
    never pass untrusted input here (shell-injection risk).
    """
    # Use the pipe as a context manager so it is closed even if read()
    # raises; the original never closed the pipe object.
    with os.popen(command) as pipe:
        return pipe.read()
| 14.142857
| 35
| 0.69697
| 14
| 99
| 4.785714
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141414
| 99
| 6
| 36
| 16.5
| 0.788235
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
2c373df7b9914d27f5ce9ba2731bf4095689d632
| 214
|
py
|
Python
|
flask_secret_generator.py
|
amahlaka/SpotifyPythonControl
|
a95679acc4ec11c7b8141217d3ff76b8040bd866
|
[
"MIT"
] | 1
|
2018-10-11T17:12:00.000Z
|
2018-10-11T17:12:00.000Z
|
flask_secret_generator.py
|
amahlaka/SpotifyPythonControl
|
a95679acc4ec11c7b8141217d3ff76b8040bd866
|
[
"MIT"
] | 3
|
2018-10-11T17:09:54.000Z
|
2018-10-11T18:01:47.000Z
|
flask_secret_generator.py
|
amahlaka/SpotifyPythonControl
|
a95679acc4ec11c7b8141217d3ff76b8040bd866
|
[
"MIT"
] | null | null | null |
import random
import string
def generate_activation_token(length=64):
    """Generate, print, and return a cryptographically secure random token.

    The token is *length* characters (default 64, as before) drawn from
    ASCII letters and digits via random.SystemRandom, which uses the OS
    entropy source.  The original discarded the token after printing it;
    returning it lets callers actually use the value.
    """
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    secret = ''.join(rng.choice(alphabet) for _ in range(length))
    print(secret)
    return secret

generate_activation_token()
| 35.666667
| 107
| 0.775701
| 27
| 214
| 5.925926
| 0.703704
| 0.225
| 0.2875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010526
| 0.11215
| 214
| 6
| 108
| 35.666667
| 0.831579
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.5
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e2be6c37006936281446d9bdbd0cd3feacbb29b0
| 8,246
|
py
|
Python
|
data/results/centralities/pagerank/networkX_pagerank_performance.py
|
cassinius/graphinius
|
cb7191671f867432da1707627eeda23ed4397c8d
|
[
"MIT"
] | 17
|
2019-12-09T13:14:32.000Z
|
2021-06-22T05:28:34.000Z
|
data/results/centralities/pagerank/networkX_pagerank_performance.py
|
cassinius/Graphinius
|
cb7191671f867432da1707627eeda23ed4397c8d
|
[
"MIT"
] | 89
|
2017-03-05T19:56:35.000Z
|
2019-08-12T15:54:27.000Z
|
data/results/centralities/pagerank/networkX_pagerank_performance.py
|
cassinius/Graphinius
|
cb7191671f867432da1707627eeda23ed4397c8d
|
[
"MIT"
] | 7
|
2017-03-05T03:03:14.000Z
|
2018-11-22T22:46:47.000Z
|
import networkx as nx
from networkx import pagerank, pagerank_numpy, pagerank_scipy
import time
import json

output_folder = 'comparison_selected'

GRAPH_SIZES = ("300", "1K", "20K")


def _banner(title_line):
    """Print a three-line section header identical to the original output."""
    print("========================================")
    print(title_line)
    print("========================================")


def _time_pagerank(pr_func, graph, label, result_file, **pr_kwargs):
    """Run one PageRank variant, print its duration, dump scores to JSON.

    pr_func     -- pagerank / pagerank_numpy / pagerank_scipy
    graph       -- graph to rank (alpha fixed at 0.85, as in the original)
    label       -- text spliced into the progress message
    result_file -- output file name inside `output_folder`
    pr_kwargs   -- extra keyword arguments (e.g. weight="weight")
    """
    start = time.time()
    scores = pr_func(graph, alpha=0.85, **pr_kwargs)
    duration = (time.time() - start) * 1000
    print("PageRank " + label + " took " + str(duration) + " ms.")
    # The original wrote `file.close` without calling it, leaking the
    # handle; a context manager closes it deterministically.
    with open(output_folder + '/' + result_file, 'w') as out:
        out.write(json.dumps(scores))


'''
Unweighted graphs
'''
_banner("========== UNWEIGHTED GRAPHS ===========")
unweighted = {
    size: nx.read_edgelist('../../social_network_edges_' + size + '.csv',
                           create_using=nx.DiGraph())
    for size in GRAPH_SIZES
}

for size in GRAPH_SIZES:
    _time_pagerank(pagerank, unweighted[size],
                   "on ~" + size + " node social net",
                   'pagerank_social_network_edges_' + size + '.csv_results.json')

'''
NUMPY - Unweighted
'''
_banner("========= NUMPY - UNWEIGHTED ===========")
for size in GRAPH_SIZES:
    _time_pagerank(pagerank_numpy, unweighted[size],
                   "NUMPY on ~" + size + " node social net",
                   'pagerank_numpy_social_network_edges_' + size + '.csv_results.json')

'''
SCIPY - Unweighted
'''
_banner("========= SCIPY - UNWEIGHTED ===========")
for size in GRAPH_SIZES:
    _time_pagerank(pagerank_scipy, unweighted[size],
                   "SCIPY on ~" + size + " node social net",
                   'pagerank_scipy_social_network_edges_' + size + '.csv_results.json')

'''
Weighted graphs
'''
_banner("=========== WEIGHTED GRAPHS ============")
weighted = {
    size: nx.read_weighted_edgelist(
        '../../social_network_edges_' + size + '_weighted.csv',
        create_using=nx.DiGraph())
    for size in GRAPH_SIZES
}

for size in GRAPH_SIZES:
    _time_pagerank(pagerank, weighted[size],
                   "on ~" + size + " node weighted social net",
                   'pagerank_social_network_edges_' + size + '.csv_weighted_results.json',
                   weight="weight")

'''
NUMPY - Weighted
'''
_banner("=========== NUMPY - WEIGHTED ===========")
# The 20K weighted run was commented out in the original (presumably too
# costly for the dense NumPy method -- confirm before re-enabling):
#   _time_pagerank(pagerank_numpy, weighted["20K"],
#                  "NUMPY on ~20K node social net",
#                  'pagerank_numpy_social_network_edges_20K_weighted.csv_results.json')
for size in ("300", "1K"):
    _time_pagerank(pagerank_numpy, weighted[size],
                   "NUMPY on ~" + size + " node social net",
                   'pagerank_numpy_social_network_edges_' + size + '_weighted.csv_results.json')

'''
SCIPY - Weighted
'''
_banner("=========== SCIPY - WEIGHTED ===========")
for size in GRAPH_SIZES:
    _time_pagerank(pagerank_scipy, weighted[size],
                   "SCIPY on ~" + size + " node social net",
                   'pagerank_scipy_social_network_edges_' + size + '_weighted.csv_results.json')

# print("Dimensions of graph: ")
# print("#Nodes: " + str(nx.number_of_nodes(weighted["20K"])))
# print("#Edges: " + str(nx.number_of_edges(weighted["20K"])))
# print(weighted["300"].edges(data = True))
| 35.390558
| 123
| 0.679845
| 1,182
| 8,246
| 4.492386
| 0.050761
| 0.054237
| 0.081356
| 0.050847
| 0.921469
| 0.9
| 0.877401
| 0.861582
| 0.800188
| 0.798682
| 0
| 0.045528
| 0.102353
| 8,246
| 232
| 124
| 35.543103
| 0.671845
| 0.068033
| 0
| 0.630303
| 0
| 0
| 0.372888
| 0.227218
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.024242
| 0
| 0.024242
| 0.212121
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e2ead9fbe8037b6ce63b56848fa4aec2781a3c95
| 102
|
py
|
Python
|
hello_world.py
|
albertonietos/git-project
|
24445c3eeb46df9fabb315b5f87e0f08b22ebbd5
|
[
"MIT"
] | null | null | null |
hello_world.py
|
albertonietos/git-project
|
24445c3eeb46df9fabb315b5f87e0f08b22ebbd5
|
[
"MIT"
] | null | null | null |
hello_world.py
|
albertonietos/git-project
|
24445c3eeb46df9fabb315b5f87e0f08b22ebbd5
|
[
"MIT"
] | null | null | null |
# Print the three greeting lines in order.
for message in (
    "Hello world!",
    "Hello darkness my old friend",
    "I've come to talk with you again",
):
    print(message)
| 25.5
| 41
| 0.72549
| 18
| 102
| 4.111111
| 0.833333
| 0.27027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 102
| 3
| 42
| 34
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
392e05c3cb7ca0b9906eb2a9d156a5d75c879181
| 23,812
|
py
|
Python
|
methods/smoking-behavior.py
|
wdempsey/sense2stop-lvm
|
ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2
|
[
"CECILL-B"
] | 1
|
2020-04-18T11:16:02.000Z
|
2020-04-18T11:16:02.000Z
|
methods/smoking-behavior.py
|
wdempsey/sense2stop-lvm
|
ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2
|
[
"CECILL-B"
] | 6
|
2020-04-13T18:38:04.000Z
|
2022-03-12T00:55:56.000Z
|
methods/smoking-behavior.py
|
wdempsey/sense2stop-lvm
|
ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2
|
[
"CECILL-B"
] | 1
|
2020-07-02T04:47:00.000Z
|
2020-07-02T04:47:00.000Z
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Summary
#
# * ADD LATER
# * ADD LATER
# * ADD LATER
# %% [markdown]
# # Estimation
# %%
import pymc3 as pm
import arviz as az
import pandas as pd
import numpy as np
from datetime import datetime
import os
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
# %% [markdown]
# Only self-report data will be used to estimate time between events for now.
# %%
data_selfreport = pd.read_csv(os.path.join(os.path.realpath(dir_data), 'work_with_datapoints.csv'))
use_this_data = data_selfreport
# %% [markdown]
# Let's define the distribution of censored data.
# %%
def exponential_log_complementary_cdf(x, lam):
    """Log survival function of the exponential distribution.

    For rate ``lam``, log(1 - F(x)) = log(exp(-lam * x)) = -lam * x.
    """
    return -(lam * x)
# %% [markdown]
# Let's pull out variables that will be used in all models.
# %%
def _as_float_array(column_name):
    """Return the named column of ``use_this_data`` as a float64 numpy array."""
    return use_this_data[column_name].values.astype(float)

# Censoring indicator, outcome, and period indicator used by every model below.
censored = use_this_data['censored'].values.astype(bool)
time_to_next_event = _as_float_array('time_to_next_event')
is_post_quit = _as_float_array('is_post_quit')
# %% [markdown]
# Let's pull out features we have constructed.
# %%
# Features applicable to pre- and post-quit periods
day_within_period = _as_float_array('day_within_period')
hours_since_previous_sr_within_day = _as_float_array('hours_since_previous_sr_within_day')
hours_since_previous_sr_within_period = _as_float_array('hours_since_previous_sr_within_period')
is_first_sr_within_day = _as_float_array('is_first_sr_within_day')
is_first_sr_within_period = _as_float_array('is_first_sr_within_period')
order_within_day = _as_float_array('order_within_day')
order_within_period = _as_float_array('order_within_period')
hours_since_start_of_study = _as_float_array('hours_since_start_of_study')
hours_since_start_of_period = _as_float_array('hours_since_start_of_period')
hour_of_day = _as_float_array('hour_of_day')
sleep = _as_float_array('sleep')  # 1=if between 1am to 6am, 0=outside of this time
# Features applicable only to the post-quit period
is_within24hours_quit = _as_float_array('is_within24hours_quit')
is_within48hours_quit = _as_float_array('is_within48hours_quit')
is_within72hours_quit = _as_float_array('is_within72hours_quit')
# %% [markdown]
# ## Model 1
# %%
with pm.Model() as model:
    # -------------------------------------------------------------------------
    # Priors: weakly informative Normals on the log-rate (log-lambda) scale
    # -------------------------------------------------------------------------
    beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
    beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
    beta_prequit_day = pm.Normal('beta_prequit_day', mu=0, sd=10)
    beta_postquit_day = pm.Normal('beta_postquit_day', mu=0, sd=10)
    # -------------------------------------------------------------------------
    # Likelihood: exponential waiting times with a log-linear rate in
    # day-within-period, with separate intercept/slope pre- vs post-quit.
    # Censored rows contribute through the log survival function (pm.Potential).
    # -------------------------------------------------------------------------
    loglamb_observed = (
        beta_prequit*(1-is_post_quit[~censored]) + beta_prequit_day*day_within_period[~censored]*(1-is_post_quit[~censored])
        + beta_postquit*is_post_quit[~censored] + beta_postquit_day*day_within_period[~censored]*is_post_quit[~censored]
    )
    lamb_observed = np.exp(loglamb_observed)
    Y_hat_observed = pm.Exponential('Y_hat_observed', lam=lamb_observed, observed=time_to_next_event[~censored])
    loglamb_censored = (
        beta_prequit*(1-is_post_quit[censored]) + beta_prequit_day*day_within_period[censored]*(1-is_post_quit[censored])
        + beta_postquit*is_post_quit[censored] + beta_postquit_day*day_within_period[censored]*is_post_quit[censored]
    )
    lamb_censored = np.exp(loglamb_censored)
    Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x=time_to_next_event[censored], lam=lamb_censored))

# Sample from posterior distribution
with model:
    posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
# Posterior summary on the log scale with a 95% credible interval
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
# Posterior summary of exp(beta) with a central 95% interval.
# BUGFIX: the lower bound previously used q=.125 (12.5th percentile), which is
# inconsistent with the q=.975 upper bound; q=.025 gives the intended 95% interval.
summary_expscale = {'mean': [np.mean(np.exp(posterior_samples['beta_prequit_day'])), np.mean(np.exp(posterior_samples['beta_postquit_day']))],
                    'LB': [np.quantile(np.exp(posterior_samples['beta_prequit_day']), q=.025), np.quantile(np.exp(posterior_samples['beta_postquit_day']), q=.025)],
                    'UB': [np.quantile(np.exp(posterior_samples['beta_prequit_day']), q=.975), np.quantile(np.exp(posterior_samples['beta_postquit_day']), q=.975)]}
summary_expscale = pd.DataFrame(summary_expscale)
summary_expscale.index = ['exp_beta_prequit_day','exp_beta_postquit_day']
summary_expscale
# %%
pm.traceplot(posterior_samples)
# %%
# Remove variables from the workspace before fitting the next model
del model, posterior_samples, model_summary_logscale
# %% [markdown]
# ## Model 2
# %%
# Feature of interest: hours elapsed since the previous self-report in this period
feature1 = hours_since_previous_sr_within_period
# %%
with pm.Model() as model:
    # -------------------------------------------------------------------------
    # Priors: weakly informative Normals on the log-rate scale
    # -------------------------------------------------------------------------
    beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
    beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
    beta_prequit_feature1 = pm.Normal('beta_prequit_feature1', mu=0, sd=10)
    beta_postquit_feature1 = pm.Normal('beta_postquit_feature1', mu=0, sd=10)
    # -------------------------------------------------------------------------
    # Likelihood: exponential waiting times with a log-linear rate in feature1;
    # censored rows contribute through the log survival function.
    # -------------------------------------------------------------------------
    loglamb_observed = (
        beta_prequit*(1-is_post_quit[~censored]) + beta_postquit*is_post_quit[~censored]
    )
    loglamb_observed_features = (
        beta_prequit_feature1*feature1[~censored]*(1-is_post_quit[~censored]) +
        beta_postquit_feature1*feature1[~censored]*is_post_quit[~censored]
    )
    lamb_observed = np.exp(loglamb_observed + loglamb_observed_features)
    Y_hat_observed = pm.Exponential('Y_hat_observed', lam=lamb_observed, observed=time_to_next_event[~censored])
    loglamb_censored = (
        beta_prequit*(1-is_post_quit[censored]) + beta_postquit*is_post_quit[censored]
    )
    loglamb_censored_features = (
        beta_prequit_feature1*feature1[censored]*(1-is_post_quit[censored]) +
        beta_postquit_feature1*feature1[censored]*is_post_quit[censored]
    )
    lamb_censored = np.exp(loglamb_censored + loglamb_censored_features)
    Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x=time_to_next_event[censored], lam=lamb_censored))
#%%
# Sample from posterior distribution
with model:
    posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
# Posterior summary on the log scale with a 95% credible interval
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
# Posterior summary of exp(beta) with a central 95% interval.
# BUGFIX: lower bound was q=.125; q=.025 matches the q=.975 upper bound.
posterior_samples_expscale_prequit_feature1 = np.exp(posterior_samples['beta_prequit_feature1'])
posterior_samples_expscale_postquit_feature1 = np.exp(posterior_samples['beta_postquit_feature1'])
model_summary_expscale = {'mean': [np.mean(posterior_samples_expscale_prequit_feature1), np.mean(posterior_samples_expscale_postquit_feature1)],
                          'LB': [np.quantile(posterior_samples_expscale_prequit_feature1, q=.025), np.quantile(posterior_samples_expscale_postquit_feature1, q=.025)],
                          'UB': [np.quantile(posterior_samples_expscale_prequit_feature1, q=.975), np.quantile(posterior_samples_expscale_postquit_feature1, q=.975)]}
model_summary_expscale = pd.DataFrame(model_summary_expscale)
model_summary_expscale.index = ['exp_beta_prequit_feature1', 'exp_beta_postquit_feature1']
model_summary_expscale
# %%
# Pre- vs post-quit difference in the feature1 slope, on the exp scale.
diff_prepost_feature1 = posterior_samples['beta_postquit_feature1'] - posterior_samples['beta_prequit_feature1']
exp_diff_prepost_feature1 = np.exp(diff_prepost_feature1)
diff_summary_expscale = {'mean': [np.mean(exp_diff_prepost_feature1)],
                         'LB': [np.quantile(exp_diff_prepost_feature1, q=.025)],
                         'UB': [np.quantile(exp_diff_prepost_feature1, q=.975)]}
diff_summary_expscale = pd.DataFrame(diff_summary_expscale)
diff_summary_expscale.index = ['exp_diff_prepost_feature1']
diff_summary_expscale
# %%
pm.traceplot(posterior_samples)
# %% [markdown]
# ## Model 3
# %%
# Features of interest: first-48-hours indicator and hours since previous self-report
feature1 = is_within48hours_quit
feature2 = hours_since_previous_sr_within_period
# %%
with pm.Model() as model:
    # -------------------------------------------------------------------------
    # Priors: weakly informative Normals on the log-rate scale
    # -------------------------------------------------------------------------
    beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
    beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
    beta_postquit_feature1 = pm.Normal('beta_postquit_feature1', mu=0, sd=10)
    beta_prequit_feature2 = pm.Normal('beta_prequit_feature2', mu=0, sd=10)
    beta_postquit_feature2 = pm.Normal('beta_postquit_feature2', mu=0, sd=10)
    beta_postquit_feature_product = pm.Normal('beta_postquit_feature_product', mu=0, sd=10)
    # -------------------------------------------------------------------------
    # Likelihood: exponential waiting times with main effects for both features
    # and a post-quit-only feature1*feature2 interaction; censored rows enter
    # through the log survival function.
    # -------------------------------------------------------------------------
    loglamb_observed = (
        beta_prequit*(1-is_post_quit[~censored]) + beta_postquit*is_post_quit[~censored]
    )
    loglamb_observed_features1 = (
        beta_postquit_feature1*feature1[~censored]*is_post_quit[~censored] +
        beta_prequit_feature2*feature2[~censored]*(1-is_post_quit[~censored]) +
        beta_postquit_feature2*feature2[~censored]*is_post_quit[~censored] +
        beta_postquit_feature_product*feature1[~censored]*feature2[~censored]*is_post_quit[~censored]
    )
    lamb_observed = np.exp(loglamb_observed + loglamb_observed_features1)
    Y_hat_observed = pm.Exponential('Y_hat_observed', lam=lamb_observed, observed=time_to_next_event[~censored])
    loglamb_censored = (
        beta_prequit*(1-is_post_quit[censored]) + beta_postquit*is_post_quit[censored]
    )
    loglamb_censored_features1 = (
        beta_postquit_feature1*feature1[censored]*is_post_quit[censored] +
        beta_prequit_feature2*feature2[censored]*(1-is_post_quit[censored]) +
        beta_postquit_feature2*feature2[censored]*is_post_quit[censored] +
        beta_postquit_feature_product*feature1[censored]*feature2[censored]*is_post_quit[censored]
    )
    lamb_censored = np.exp(loglamb_censored + loglamb_censored_features1)
    Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x=time_to_next_event[censored], lam=lamb_censored))

# Sample from posterior distribution
with model:
    posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
# Posterior summary on the log scale with a 95% credible interval
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
# Slope of hours since previous self-report within period:
# Difference between within first 48 hours in post-quit period vs. after first 48 hours in post-quit period
diff_feature_postquitwithin48_postquitafter48 = posterior_samples['beta_postquit_feature_product']
exp_diff_feature_postquitwithin48_postquitafter48 = np.exp(diff_feature_postquitwithin48_postquitafter48)
# Difference between within first 48 hours in post-quit period vs. pre-quit
diff_feature_postquitwithin48_prequit = posterior_samples['beta_postquit_feature2'] + posterior_samples['beta_postquit_feature_product'] - posterior_samples['beta_prequit_feature2']
exp_diff_feature_postquitwithin48_prequit = np.exp(diff_feature_postquitwithin48_prequit)
# Difference between after 48 hours in post-quit period vs. pre-quit
diff_feature_postquitafter48_prequit = posterior_samples['beta_postquit_feature2'] - posterior_samples['beta_prequit_feature2']
exp_diff_feature_postquitafter48_prequit = np.exp(diff_feature_postquitafter48_prequit)
# BUGFIX: lower bound was q=.125; q=.025 matches the q=.975 upper bound for a 95% interval.
diff_summary_expscale = {'mean': [np.mean(exp_diff_feature_postquitwithin48_postquitafter48), np.mean(exp_diff_feature_postquitwithin48_prequit), np.mean(exp_diff_feature_postquitafter48_prequit)],
                         'LB': [np.quantile(exp_diff_feature_postquitwithin48_postquitafter48, q=.025), np.quantile(exp_diff_feature_postquitwithin48_prequit, q=.025), np.quantile(exp_diff_feature_postquitafter48_prequit, q=.025)],
                         'UB': [np.quantile(exp_diff_feature_postquitwithin48_postquitafter48, q=.975), np.quantile(exp_diff_feature_postquitwithin48_prequit, q=.975), np.quantile(exp_diff_feature_postquitafter48_prequit, q=.975)]}
diff_summary_expscale = pd.DataFrame(diff_summary_expscale)
diff_summary_expscale.index = ['exp_diff_feature_postquitwithin48_postquitafter48','exp_diff_feature_postquitwithin48_prequit','exp_diff_feature_postquitafter48_prequit']
diff_summary_expscale
# %%
pm.traceplot(posterior_samples)
# %%
# Remove variables from the workspace before fitting the next model
del model, posterior_samples, model_summary_logscale
# %% [markdown]
# ## Model 4
# %%
# Feature of interest: order of the self-report within the day
feature1 = order_within_day
# %%
with pm.Model() as model:
    # -------------------------------------------------------------------------
    # Priors: weakly informative Normals on the log-rate scale
    # -------------------------------------------------------------------------
    beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
    beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
    beta_prequit_feature1 = pm.Normal('beta_prequit_feature1', mu=0, sd=10)
    beta_postquit_feature1 = pm.Normal('beta_postquit_feature1', mu=0, sd=10)
    # -------------------------------------------------------------------------
    # Likelihood: exponential waiting times with a log-linear rate in feature1;
    # censored rows contribute through the log survival function.
    # -------------------------------------------------------------------------
    loglamb_observed = (
        beta_prequit*(1-is_post_quit[~censored]) + beta_postquit*is_post_quit[~censored]
    )
    loglamb_observed_features = (
        beta_prequit_feature1*feature1[~censored]*(1-is_post_quit[~censored]) +
        beta_postquit_feature1*feature1[~censored]*is_post_quit[~censored]
    )
    lamb_observed = np.exp(loglamb_observed + loglamb_observed_features)
    Y_hat_observed = pm.Exponential('Y_hat_observed', lam=lamb_observed, observed=time_to_next_event[~censored])
    loglamb_censored = (
        beta_prequit*(1-is_post_quit[censored]) + beta_postquit*is_post_quit[censored]
    )
    loglamb_censored_features = (
        beta_prequit_feature1*feature1[censored]*(1-is_post_quit[censored]) +
        beta_postquit_feature1*feature1[censored]*is_post_quit[censored]
    )
    lamb_censored = np.exp(loglamb_censored + loglamb_censored_features)
    Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x=time_to_next_event[censored], lam=lamb_censored))
#%%
# Sample from posterior distribution
with model:
    posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
# Posterior summary on the log scale with a 95% credible interval
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
# Posterior summary of exp(beta) with a central 95% interval.
# BUGFIX: lower bound was q=.125; q=.025 matches the q=.975 upper bound.
posterior_samples_expscale_prequit_feature1 = np.exp(posterior_samples['beta_prequit_feature1'])
posterior_samples_expscale_postquit_feature1 = np.exp(posterior_samples['beta_postquit_feature1'])
model_summary_expscale = {'mean': [np.mean(posterior_samples_expscale_prequit_feature1), np.mean(posterior_samples_expscale_postquit_feature1)],
                          'LB': [np.quantile(posterior_samples_expscale_prequit_feature1, q=.025), np.quantile(posterior_samples_expscale_postquit_feature1, q=.025)],
                          'UB': [np.quantile(posterior_samples_expscale_prequit_feature1, q=.975), np.quantile(posterior_samples_expscale_postquit_feature1, q=.975)]}
model_summary_expscale = pd.DataFrame(model_summary_expscale)
model_summary_expscale.index = ['exp_beta_prequit_feature1', 'exp_beta_postquit_feature1']
model_summary_expscale
# %%
# Difference between pre-quit and post-quit periods:
# time to first self-report
diff_prepost_feature1 = posterior_samples['beta_postquit_feature1'] - posterior_samples['beta_prequit_feature1']
exp_diff_prepost_feature1 = np.exp(diff_prepost_feature1)
diff_summary_expscale = {'mean': [np.mean(exp_diff_prepost_feature1)],
                         'LB': [np.quantile(exp_diff_prepost_feature1, q=.025)],
                         'UB': [np.quantile(exp_diff_prepost_feature1, q=.975)]}
diff_summary_expscale = pd.DataFrame(diff_summary_expscale)
diff_summary_expscale.index = ['exp_diff_prepost_feature1']
diff_summary_expscale
# %%
pm.traceplot(posterior_samples)
# %% [markdown]
# ## Model 5
# %%
# Features of interest: first-48-hours indicator and order within day
feature1 = is_within48hours_quit
feature2 = order_within_day
# %%
with pm.Model() as model:
    # -------------------------------------------------------------------------
    # Priors: weakly informative Normals on the log-rate scale
    # -------------------------------------------------------------------------
    beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
    beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
    beta_postquit_feature1 = pm.Normal('beta_postquit_feature1', mu=0, sd=10)
    beta_prequit_feature2 = pm.Normal('beta_prequit_feature2', mu=0, sd=10)
    beta_postquit_feature2 = pm.Normal('beta_postquit_feature2', mu=0, sd=10)
    beta_postquit_feature_product = pm.Normal('beta_postquit_feature_product', mu=0, sd=10)
    # -------------------------------------------------------------------------
    # Likelihood: exponential waiting times with main effects for both features
    # and a post-quit-only feature1*feature2 interaction; censored rows enter
    # through the log survival function.
    # -------------------------------------------------------------------------
    loglamb_observed = (
        beta_prequit*(1-is_post_quit[~censored]) + beta_postquit*is_post_quit[~censored]
    )
    loglamb_observed_features1 = (
        beta_postquit_feature1*feature1[~censored]*is_post_quit[~censored] +
        beta_prequit_feature2*feature2[~censored]*(1-is_post_quit[~censored]) +
        beta_postquit_feature2*feature2[~censored]*is_post_quit[~censored] +
        beta_postquit_feature_product*feature1[~censored]*feature2[~censored]*is_post_quit[~censored]
    )
    lamb_observed = np.exp(loglamb_observed + loglamb_observed_features1)
    Y_hat_observed = pm.Exponential('Y_hat_observed', lam=lamb_observed, observed=time_to_next_event[~censored])
    loglamb_censored = (
        beta_prequit*(1-is_post_quit[censored]) + beta_postquit*is_post_quit[censored]
    )
    loglamb_censored_features1 = (
        beta_postquit_feature1*feature1[censored]*is_post_quit[censored] +
        beta_prequit_feature2*feature2[censored]*(1-is_post_quit[censored]) +
        beta_postquit_feature2*feature2[censored]*is_post_quit[censored] +
        beta_postquit_feature_product*feature1[censored]*feature2[censored]*is_post_quit[censored]
    )
    lamb_censored = np.exp(loglamb_censored + loglamb_censored_features1)
    Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x=time_to_next_event[censored], lam=lamb_censored))

# Sample from posterior distribution
with model:
    posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
# Posterior summary on the log scale with a 95% credible interval
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
# Posterior summary of exp(beta) with a central 95% interval.
# BUGFIX: lower bound was q=.125; q=.025 matches the q=.975 upper bound.
posterior_samples_expscale_postquit_feature1 = np.exp(posterior_samples['beta_postquit_feature1'])
posterior_samples_expscale_prequit_feature2 = np.exp(posterior_samples['beta_prequit_feature2'])
posterior_samples_expscale_postquit_feature2 = np.exp(posterior_samples['beta_postquit_feature2'])
posterior_samples_expscale_postquit_feature_product = np.exp(posterior_samples['beta_postquit_feature_product'])
model_summary_expscale = {'mean': [np.mean(posterior_samples_expscale_postquit_feature1),
                                   np.mean(posterior_samples_expscale_prequit_feature2),
                                   np.mean(posterior_samples_expscale_postquit_feature2),
                                   np.mean(posterior_samples_expscale_postquit_feature_product)],
                          'LB': [np.quantile(posterior_samples_expscale_postquit_feature1, q=.025),
                                 np.quantile(posterior_samples_expscale_prequit_feature2, q=.025),
                                 np.quantile(posterior_samples_expscale_postquit_feature2, q=.025),
                                 np.quantile(posterior_samples_expscale_postquit_feature_product, q=.025)],
                          'UB': [np.quantile(posterior_samples_expscale_postquit_feature1, q=.975),
                                 np.quantile(posterior_samples_expscale_prequit_feature2, q=.975),
                                 np.quantile(posterior_samples_expscale_postquit_feature2, q=.975),
                                 np.quantile(posterior_samples_expscale_postquit_feature_product, q=.975)]}
model_summary_expscale = pd.DataFrame(model_summary_expscale)
model_summary_expscale.index = ['exp_beta_postquit_feature1','exp_beta_prequit_feature2', 'exp_beta_postquit_feature2','exp_beta_postquit_feature_product']
model_summary_expscale
# %%
# Time to first self-report within period:
# Difference between within first 48 hours in post-quit period vs. after first 48 hours in post-quit period
diff_feature_postquitwithin48_postquitafter48 = posterior_samples['beta_postquit_feature_product']
exp_diff_feature_postquitwithin48_postquitafter48 = np.exp(diff_feature_postquitwithin48_postquitafter48)
# Difference between within first 48 hours in post-quit period vs. pre-quit
diff_feature_postquitwithin48_prequit = posterior_samples['beta_postquit_feature2'] + posterior_samples['beta_postquit_feature_product'] - posterior_samples['beta_prequit_feature2']
exp_diff_feature_postquitwithin48_prequit = np.exp(diff_feature_postquitwithin48_prequit)
# Difference between after 48 hours in post-quit period vs. pre-quit
diff_feature_postquitafter48_prequit = posterior_samples['beta_postquit_feature2'] - posterior_samples['beta_prequit_feature2']
exp_diff_feature_postquitafter48_prequit = np.exp(diff_feature_postquitafter48_prequit)
diff_summary_expscale = {'mean': [np.mean(exp_diff_feature_postquitwithin48_postquitafter48), np.mean(exp_diff_feature_postquitwithin48_prequit), np.mean(exp_diff_feature_postquitafter48_prequit)],
                         'LB': [np.quantile(exp_diff_feature_postquitwithin48_postquitafter48, q=.025), np.quantile(exp_diff_feature_postquitwithin48_prequit, q=.025), np.quantile(exp_diff_feature_postquitafter48_prequit, q=.025)],
                         'UB': [np.quantile(exp_diff_feature_postquitwithin48_postquitafter48, q=.975), np.quantile(exp_diff_feature_postquitwithin48_prequit, q=.975), np.quantile(exp_diff_feature_postquitafter48_prequit, q=.975)]}
diff_summary_expscale = pd.DataFrame(diff_summary_expscale)
diff_summary_expscale.index = ['exp_diff_feature_postquitwithin48_postquitafter48','exp_diff_feature_postquitwithin48_prequit','exp_diff_feature_postquitafter48_prequit']
diff_summary_expscale
# %%
# Remove variables from the workspace
del model, posterior_samples, model_summary_logscale
# %%
# %%
| 45.442748
| 232
| 0.711532
| 2,902
| 23,812
| 5.425224
| 0.065128
| 0.060976
| 0.031758
| 0.054878
| 0.915396
| 0.883575
| 0.863504
| 0.825711
| 0.810023
| 0.784172
| 0
| 0.029365
| 0.126197
| 23,812
| 523
| 233
| 45.529637
| 0.727303
| 0.140265
| 0
| 0.696296
| 0
| 0
| 0.118318
| 0.080108
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003704
| false
| 0
| 0.022222
| 0
| 0.02963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1a32c7cf858e7ec3d8b83fbe23815676d192ee60
| 221
|
py
|
Python
|
geotrek/trekking/tests/__init__.py
|
camillemonchicourt/Geotrek
|
c33eac7e4479e3aa5b16608c0aa7665c4a72e9a1
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/trekking/tests/__init__.py
|
camillemonchicourt/Geotrek
|
c33eac7e4479e3aa5b16608c0aa7665c4a72e9a1
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/trekking/tests/__init__.py
|
camillemonchicourt/Geotrek
|
c33eac7e4479e3aa5b16608c0aa7665c4a72e9a1
|
[
"BSD-2-Clause"
] | null | null | null |
# pylint: disable=W0401
from .base import *
from .test_views import *
from .test_filters import *
from .test_translation import *
from .test_trek_relationship import *
from .test_models import *
from .test_admin import *
| 24.555556
| 37
| 0.782805
| 31
| 221
| 5.354839
| 0.451613
| 0.361446
| 0.506024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021053
| 0.140271
| 221
| 9
| 38
| 24.555556
| 0.852632
| 0.095023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1a5e5fb217baebfcedf4e50cad55013f61e8299e
| 38
|
py
|
Python
|
__init__.py
|
Vladimir37/finam_stock_data
|
51a1e1b8d21068d8ad0177166e14cb87b4f3f66e
|
[
"MIT"
] | 6
|
2016-10-18T04:17:43.000Z
|
2022-02-21T18:48:52.000Z
|
__init__.py
|
Vladimir37/finam_stock_data
|
51a1e1b8d21068d8ad0177166e14cb87b4f3f66e
|
[
"MIT"
] | null | null | null |
__init__.py
|
Vladimir37/finam_stock_data
|
51a1e1b8d21068d8ad0177166e14cb87b4f3f66e
|
[
"MIT"
] | 5
|
2017-01-04T14:26:52.000Z
|
2019-04-01T07:33:07.000Z
|
from .finam_stock_data import get_data
| 38
| 38
| 0.894737
| 7
| 38
| 4.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1a6013ce91f944eb86511be5a0b350651a855a3e
| 64
|
py
|
Python
|
deep_learn/dataset/sampler/__init__.py
|
ImbesatRizvi/Accio
|
b0ad2d245f4f7c42d85b722db9fad435c0d06a99
|
[
"Apache-2.0"
] | 2
|
2019-07-30T09:39:53.000Z
|
2019-07-30T09:40:06.000Z
|
deep_learn/dataset/sampler/__init__.py
|
ImbesatRizvi/Accio
|
b0ad2d245f4f7c42d85b722db9fad435c0d06a99
|
[
"Apache-2.0"
] | null | null | null |
deep_learn/dataset/sampler/__init__.py
|
ImbesatRizvi/Accio
|
b0ad2d245f4f7c42d85b722db9fad435c0d06a99
|
[
"Apache-2.0"
] | 2
|
2018-11-07T22:45:29.000Z
|
2019-10-24T09:53:41.000Z
|
from .BinaryPairedWindowSampler import BinaryPairedWindowSampler
| 64
| 64
| 0.9375
| 4
| 64
| 15
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 64
| 1
| 64
| 64
| 0.983607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1a6160c3c3343d98680dd31a86c1e0535bddbf51
| 7,996
|
py
|
Python
|
test/test_utilities.py
|
2b-t/stereo-matching
|
6c2e6944a2859763f4110f3e90e99f9267e97e78
|
[
"MIT"
] | 1
|
2022-03-21T04:33:30.000Z
|
2022-03-21T04:33:30.000Z
|
test/test_utilities.py
|
2b-t/stereo-matching
|
6c2e6944a2859763f4110f3e90e99f9267e97e78
|
[
"MIT"
] | null | null | null |
test/test_utilities.py
|
2b-t/stereo-matching
|
6c2e6944a2859763f4110f3e90e99f9267e97e78
|
[
"MIT"
] | null | null | null |
# Tobit Flatscher - github.com/2b-t (2022)
# @file utilities_test.py
# @brief Different testing routines for utility functions for accuracy calculation and file import and export
import numpy as np
from parameterized import parameterized
from typing import Tuple
import unittest
from src.utilities import AccX, IO
class TestAccX(unittest.TestCase):
    """Parameterised unit tests for the AccX accuracy measure."""

    _shape = (10, 20)
    _disparities = [["disparity = 1", 1],
                    ["disparity = 2", 2],
                    ["disparity = 3", 3]]

    def _check_accx(self, prediction_image, groundtruth_image, mask_image,
                    threshold_disparity, expected_accuracy):
        # Shared assertion: AccX of (prediction, groundtruth, mask) at the given
        # threshold disparity must equal the expected accuracy to 7 places.
        accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
        self.assertAlmostEqual(accx, expected_accuracy, places=7)

    @parameterized.expand(_disparities)
    def test_same_image(self, name: str, threshold_disparity: int) -> None:
        # Two identical images must yield an accuracy measure of unity.
        groundtruth_image = (threshold_disparity*10)*np.ones(self._shape)
        prediction_image = (threshold_disparity*10)*np.ones(groundtruth_image.shape)
        self._check_accx(prediction_image, groundtruth_image,
                         np.ones(groundtruth_image.shape), threshold_disparity, 1.0)

    @parameterized.expand(_disparities)
    def test_slightly_shifted_image(self, name: str, threshold_disparity: int) -> None:
        # An image shifted by less than the threshold must still score unity.
        mag = threshold_disparity*10
        groundtruth_image = mag*np.ones(self._shape)
        prediction_image = (mag+threshold_disparity-1)*np.ones(groundtruth_image.shape)
        self._check_accx(prediction_image, groundtruth_image,
                         np.ones(groundtruth_image.shape), threshold_disparity, 1.0)

    @parameterized.expand(_disparities)
    def test_no_mask(self, name: str, threshold_disparity: int) -> None:
        # Two identical images with no mask supplied must still score unity.
        groundtruth_image = (threshold_disparity*10)*np.ones(self._shape)
        prediction_image = (threshold_disparity*10)*np.ones(groundtruth_image.shape)
        self._check_accx(prediction_image, groundtruth_image, None, threshold_disparity, 1.0)

    @parameterized.expand(_disparities)
    def test_inverse_image(self, name: str, threshold_disparity: int) -> None:
        # A constant image compared to an all-zero prediction must score zero.
        groundtruth_image = (threshold_disparity*10)*np.ones(self._shape)
        prediction_image = np.zeros(groundtruth_image.shape)
        self._check_accx(prediction_image, groundtruth_image,
                         np.ones(groundtruth_image.shape), threshold_disparity, 0.0)

    @parameterized.expand(_disparities)
    def test_significantly_shifted_image(self, name: str, threshold_disparity: int) -> None:
        # An image shifted by more than the threshold must score zero.
        mag = threshold_disparity*10
        groundtruth_image = mag*np.ones(self._shape)
        prediction_image = (mag+threshold_disparity+1)*np.ones(groundtruth_image.shape)
        self._check_accx(prediction_image, groundtruth_image,
                         np.ones(groundtruth_image.shape), threshold_disparity, 0.0)

    @parameterized.expand(_disparities)
    def test_zero_mask(self, name: str, threshold_disparity: int) -> None:
        # Identical images under an all-zero mask must score zero.
        groundtruth_image = (threshold_disparity*10)*np.ones(self._shape)
        self._check_accx(groundtruth_image, groundtruth_image,
                         np.zeros(groundtruth_image.shape), threshold_disparity, 0.0)
class TestIO(unittest.TestCase):
    """Unit tests for the IO utility helpers (string formatting and image normalisation)."""

    _resolutions = [["resolution = (10, 20)", (10, 20)],
                    ["resolution = (30, 4)", (30, 4)],
                    ["resolution = (65, 24)", (65, 24)]]

    def _assert_unit_range(self, image) -> None:
        # Shared assertion: every pixel of a normalised image lies in [0, 1].
        self.assertGreaterEqual(np.min(image), 0.0)
        self.assertLessEqual(np.max(image), 1.0)

    def test_import_image(self) -> None:
        # TODO(tobit): Implement
        pass

    def test_export_image(self) -> None:
        # TODO(tobit): Implement
        pass

    def test_str_comma(self) -> None:
        # Numbers must be rendered with a decimal comma, truncated to the
        # requested number of decimal places.
        for value, expected in ((10, "10"), (9.3, "9,3"), (1.234, "1,23")):
            self.assertEqual(IO._str_comma(value, 2), expected)

    @parameterized.expand(_resolutions)
    def test_normalise_positive_image_no_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
        # Normalising a positive image without a ground truth must give values in [0, 1].
        image = 13*np.ones(shape)
        self._assert_unit_range(IO.normalise_image(image, None))

    @parameterized.expand(_resolutions)
    def test_normalise_positive_image_positive_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
        # Normalising against a positive ground truth must give values in [0, 1].
        image = 13*np.ones(shape)
        self._assert_unit_range(IO.normalise_image(image, 2*image))

    @parameterized.expand(_resolutions)
    def test_normalise_negative_image_positive_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
        # A negative input image must be rejected with a ValueError.
        groundtruth_image = 13*np.ones(shape)
        self.assertRaises(ValueError, IO.normalise_image, -2*groundtruth_image, groundtruth_image)

    @parameterized.expand(_resolutions)
    def test_normalise_positive_image_negative_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
        # A negative ground truth must be rejected with a ValueError.
        image = 13*np.ones(shape)
        self.assertRaises(ValueError, IO.normalise_image, image, -2*image)
if __name__ == '__main__':
unittest.main()
| 43.693989
| 133
| 0.728239
| 1,066
| 7,996
| 5.312383
| 0.124765
| 0.101713
| 0.02119
| 0.029666
| 0.86244
| 0.837365
| 0.830479
| 0.829419
| 0.823062
| 0.78492
| 0
| 0.016258
| 0.184592
| 7,996
| 183
| 134
| 43.693989
| 0.852301
| 0.336043
| 0
| 0.60177
| 0
| 0
| 0.022568
| 0
| 0
| 0
| 0
| 0.005464
| 0.132743
| 1
| 0.115044
| false
| 0.017699
| 0.053097
| 0
| 0.309735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
46c92af4cb0ca06bb4b92f96e9c5695f2ec78d14
| 76
|
py
|
Python
|
utils.py
|
valmsmith39a/u-capstone-casting
|
0476e167a2ef051abe36cba6e27e4788ee7571e3
|
[
"MIT"
] | null | null | null |
utils.py
|
valmsmith39a/u-capstone-casting
|
0476e167a2ef051abe36cba6e27e4788ee7571e3
|
[
"MIT"
] | null | null | null |
utils.py
|
valmsmith39a/u-capstone-casting
|
0476e167a2ef051abe36cba6e27e4788ee7571e3
|
[
"MIT"
] | null | null | null |
import json
def format(data):
return [item.format() for item in data]
| 12.666667
| 43
| 0.684211
| 12
| 76
| 4.333333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 76
| 5
| 44
| 15.2
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
20174ac43ede89aa030ccb34cb375f352725b586
| 66
|
py
|
Python
|
python/import.py
|
mkanenobu/trashbox
|
c691dbf9a07991fd42304020c8aac58e1e4b9644
|
[
"WTFPL"
] | 2
|
2020-05-11T13:43:27.000Z
|
2020-07-31T11:57:19.000Z
|
python/import.py
|
mkanenobu/trashbox
|
c691dbf9a07991fd42304020c8aac58e1e4b9644
|
[
"WTFPL"
] | 2
|
2020-09-27T02:35:38.000Z
|
2021-03-08T08:33:02.000Z
|
python/import.py
|
mkanenobu/trashbox
|
c691dbf9a07991fd42304020c8aac58e1e4b9644
|
[
"WTFPL"
] | 1
|
2020-05-11T13:44:04.000Z
|
2020-05-11T13:44:04.000Z
|
#!/usr/bin/python3
# name_main.pyをモジュールとして読み込む
import name_main
| 11
| 27
| 0.787879
| 9
| 66
| 5.555556
| 0.777778
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0.106061
| 66
| 5
| 28
| 13.2
| 0.830508
| 0.651515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2044a3a29392441fe29e2517a107e3103b1538b5
| 25
|
py
|
Python
|
actionrules/reduction/__init__.py
|
KIZI/actionrules
|
227e021fa60ce40a1492322fe9bec35f0469e19c
|
[
"MIT"
] | 8
|
2019-10-11T09:49:20.000Z
|
2022-03-21T23:23:55.000Z
|
actionrules/reduction/__init__.py
|
hhl60492/actionrules
|
cdd1f58b44278e033d2eed7c603938e29368c9fa
|
[
"MIT"
] | 15
|
2019-12-29T20:14:36.000Z
|
2021-12-10T13:16:00.000Z
|
actionrules/reduction/__init__.py
|
KIZI/actionrules
|
227e021fa60ce40a1492322fe9bec35f0469e19c
|
[
"MIT"
] | 7
|
2019-10-10T15:51:36.000Z
|
2022-03-23T00:33:30.000Z
|
from .reduction import *
| 12.5
| 24
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2049894227d714c669e2ce487707b8fdeb950613
| 28
|
py
|
Python
|
grafana_client/__init__.py
|
peekjef72/grafana-client
|
25470ae6a567c92ccccf9c8fcdbe9db71194a544
|
[
"MIT"
] | 11
|
2022-02-07T03:37:40.000Z
|
2022-03-31T17:39:02.000Z
|
grafana_client/__init__.py
|
peekjef72/grafana-client
|
25470ae6a567c92ccccf9c8fcdbe9db71194a544
|
[
"MIT"
] | 8
|
2022-02-02T02:39:12.000Z
|
2022-03-16T22:15:01.000Z
|
grafana_client/__init__.py
|
peekjef72/grafana-client
|
25470ae6a567c92ccccf9c8fcdbe9db71194a544
|
[
"MIT"
] | 3
|
2022-02-05T17:09:35.000Z
|
2022-02-11T09:35:54.000Z
|
from .api import GrafanaApi
| 14
| 27
| 0.821429
| 4
| 28
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
204d55c7599404e4839c555a1ce0b95b5bbac6ab
| 136
|
py
|
Python
|
app/controller/report/__init__.py
|
bhzunami/reanalytics
|
2ea4396b81529057765d2f95cea8168cacf7f0d6
|
[
"Apache-2.0"
] | null | null | null |
app/controller/report/__init__.py
|
bhzunami/reanalytics
|
2ea4396b81529057765d2f95cea8168cacf7f0d6
|
[
"Apache-2.0"
] | null | null | null |
app/controller/report/__init__.py
|
bhzunami/reanalytics
|
2ea4396b81529057765d2f95cea8168cacf7f0d6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Blueprint
report = Blueprint('report', __name__)
from . import views
| 17
| 38
| 0.691176
| 18
| 136
| 5
| 0.777778
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017391
| 0.154412
| 136
| 8
| 39
| 17
| 0.765217
| 0.316176
| 0
| 0
| 0
| 0
| 0.065217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
20501d5c85614c98f0bfe369ce264014188672ca
| 29
|
py
|
Python
|
polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/models/__init__.py
|
nishaq503/polus-plugins-dl
|
511689e82eb29a84761538144277d1be1af7aa44
|
[
"MIT"
] | null | null | null |
polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/models/__init__.py
|
nishaq503/polus-plugins-dl
|
511689e82eb29a84761538144277d1be1af7aa44
|
[
"MIT"
] | 1
|
2021-09-09T23:22:16.000Z
|
2021-09-09T23:22:16.000Z
|
polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/models/__init__.py
|
nishaq503/polus-plugins-dl
|
511689e82eb29a84761538144277d1be1af7aa44
|
[
"MIT"
] | 4
|
2021-06-22T13:54:52.000Z
|
2022-01-26T19:23:39.000Z
|
from .wider_resnet import *
| 14.5
| 28
| 0.758621
| 4
| 29
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 29
| 1
| 29
| 29
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2054ad941170e51f0dc019b3102806130ecc66fd
| 3,109
|
py
|
Python
|
capiq/tests/unit/test_capiq_client_gdsp.py
|
vy-labs/capiq-python
|
19ab17b3dc9354d0112f69f640a87cfd5d469047
|
[
"MIT"
] | 29
|
2017-03-13T19:03:18.000Z
|
2022-01-23T21:00:19.000Z
|
capiq/tests/unit/test_capiq_client_gdsp.py
|
vy-labs/capiq-python
|
19ab17b3dc9354d0112f69f640a87cfd5d469047
|
[
"MIT"
] | 13
|
2016-10-15T07:52:48.000Z
|
2022-01-24T11:35:25.000Z
|
capiq/tests/unit/test_capiq_client_gdsp.py
|
vy-labs/capiq-python
|
19ab17b3dc9354d0112f69f640a87cfd5d469047
|
[
"MIT"
] | 20
|
2017-03-13T04:24:24.000Z
|
2021-09-10T17:02:07.000Z
|
import unittest
from mock import mock
from capiq.capiq_client import CapIQClient
def mocked_gdsp_data_requests_post(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
if args[0] is not None:
return MockResponse({"GDSSDKResponse": [{
"Headers": ["IQ_CLOSEPRICE"],
"Rows": [{"Row": ["46.80"]}],
"NumCols": 1,
"Seniority": "",
"Mnemonic": "IQ_CLOSEPRICE",
"Function": "GDSP",
"ErrMsg": None,
"Properties": {},
"StartDate": "",
"NumRows": 1,
"CacheExpiryTime": "0",
"SnapType": "",
"Frequency": "",
"Identifier": "TRIP:",
"Limit": ""
}]}, 200)
def mocked_gdsp_no_data_requests_post(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
"""
if args[0] == 'http://someurl.com/test.json':
return MockResponse({"key1": "value1"}, 200)
elif args[0] == 'http://someotherurl.com/anothertest.json':
"""
if args[0] is not None:
return MockResponse(
{
"GDSSDKResponse":
[
{
"Headers": ["IQ_CLOSEPRICE"],
"Rows": [{"Row": ["46.80"]}],
"NumCols": 1,
"Seniority": "",
"Mnemonic": "IQ_CLOSEPRICE",
"Function": "GDSP",
"ErrMsg": "SOME ERROR",
"Properties": {},
"StartDate": "",
"NumRows": 1,
"CacheExpiryTime": "0",
"SnapType": "",
"Frequency": "",
"Identifier": "TRIP:",
"Limit": ""
}
]
}, 200)
class TestCapiqClientGdsp(unittest.TestCase):
@mock.patch('capiq.capiq_client.requests.post', side_effect=mocked_gdsp_data_requests_post)
def test_gdsp_data(self, mocked_post):
ciq_client = CapIQClient("username", "password")
return_value = ciq_client.gdsp(["TRIP"], ["IQ_CLOSEPRICE"], ["close_price"], [{}])
self.assertEqual(return_value, {'TRIP:': {'close_price': '46.80'}})
@mock.patch('capiq.capiq_client.requests.post', side_effect=mocked_gdsp_no_data_requests_post)
def test_gdsp_no_data(self, mocked_post):
ciq_client = CapIQClient("username", "password")
return_value = ciq_client.gdsp(["TRIP"], ["IQ_CLOSEPRICE"], ["close_price"], [{}])
self.assertEqual(return_value, {'TRIP:': {'close_price': None}})
| 36.151163
| 98
| 0.486008
| 274
| 3,109
| 5.259124
| 0.277372
| 0.044414
| 0.049965
| 0.030534
| 0.837613
| 0.830673
| 0.783484
| 0.783484
| 0.783484
| 0.783484
| 0
| 0.016932
| 0.37311
| 3,109
| 86
| 99
| 36.151163
| 0.722422
| 0
| 0
| 0.657143
| 0
| 0
| 0.190655
| 0.021828
| 0
| 0
| 0
| 0
| 0.028571
| 1
| 0.114286
| false
| 0.028571
| 0.042857
| 0.028571
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
64504bd9c428b293a712da981c34aadb0ef817c1
| 85
|
py
|
Python
|
test_work/tree_views/core/views.py
|
Netromnik/python
|
630a9df63b1cade9af38de07bb9cd0c3b8694c93
|
[
"Apache-2.0"
] | null | null | null |
test_work/tree_views/core/views.py
|
Netromnik/python
|
630a9df63b1cade9af38de07bb9cd0c3b8694c93
|
[
"Apache-2.0"
] | null | null | null |
test_work/tree_views/core/views.py
|
Netromnik/python
|
630a9df63b1cade9af38de07bb9cd0c3b8694c93
|
[
"Apache-2.0"
] | null | null | null |
from django.views.generic import TemplateView
class Slide(TemplateView):
pass
| 12.142857
| 45
| 0.776471
| 10
| 85
| 6.6
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164706
| 85
| 6
| 46
| 14.166667
| 0.929577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
645a5603b589906d8995d0677c70a7c59af8de5a
| 1,036
|
py
|
Python
|
tests/unit/test_std_stream_replacer.py
|
matthewgdv/miscutils
|
f605ded914e355214533b06e7a768272409769c0
|
[
"MIT"
] | null | null | null |
tests/unit/test_std_stream_replacer.py
|
matthewgdv/miscutils
|
f605ded914e355214533b06e7a768272409769c0
|
[
"MIT"
] | null | null | null |
tests/unit/test_std_stream_replacer.py
|
matthewgdv/miscutils
|
f605ded914e355214533b06e7a768272409769c0
|
[
"MIT"
] | null | null | null |
# import pytest
class TestBaseReplacerMixin:
def test_target(self): # synced
assert True
def test_write(self): # synced
assert True
def test_flush(self): # synced
assert True
def test_close(self): # synced
assert True
class TestStdOutReplacerMixin:
def test_target(self): # synced
assert True
class TestStdErrReplacerMixin:
def test_target(self): # synced
assert True
class TestStdOutFileRedirector:
def test___str__(self): # synced
assert True
def test_write(self): # synced
assert True
class TestBaseStreamRedirector:
def test___str__(self): # synced
assert True
def test_write(self): # synced
assert True
def test_flush(self): # synced
assert True
def test_close(self): # synced
assert True
class TestStdOutStreamRedirector:
pass
class TestStdErrStreamRedirector:
pass
class TestSupressor:
def test_write(self): # synced
assert True
| 16.983607
| 37
| 0.65251
| 111
| 1,036
| 5.900901
| 0.207207
| 0.138931
| 0.317557
| 0.396947
| 0.671756
| 0.664122
| 0.664122
| 0.59542
| 0.479389
| 0.479389
| 0
| 0
| 0.28861
| 1,036
| 60
| 38
| 17.266667
| 0.888738
| 0.100386
| 0
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.361111
| 1
| 0.361111
| false
| 0.055556
| 0
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
649d4a739de60a36d880cb409f0614c96f63248f
| 26
|
py
|
Python
|
accountifie/common/apiv1/__init__.py
|
imcallister/accountifie
|
094834c9d632e0353e3baf8d924eeb10cba0add4
|
[
"MIT",
"Unlicense"
] | 4
|
2017-06-02T08:48:48.000Z
|
2021-11-21T23:57:15.000Z
|
accountifie/common/apiv1/__init__.py
|
imcallister/accountifie
|
094834c9d632e0353e3baf8d924eeb10cba0add4
|
[
"MIT",
"Unlicense"
] | 3
|
2020-06-05T16:55:42.000Z
|
2021-06-10T17:43:12.000Z
|
accountifie/common/apiv1/__init__.py
|
imcallister/accountifie
|
094834c9d632e0353e3baf8d924eeb10cba0add4
|
[
"MIT",
"Unlicense"
] | 4
|
2015-12-15T14:27:51.000Z
|
2017-04-21T21:42:27.000Z
|
from .server_info import *
| 26
| 26
| 0.807692
| 4
| 26
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
64c500a680157947850aad8d4222911d4d998581
| 13,111
|
py
|
Python
|
task_manager.py
|
technetbytes/Nested-Object-Serialization
|
86dc7812c2002010247af9f4edabaf29c78c3be9
|
[
"MIT"
] | null | null | null |
task_manager.py
|
technetbytes/Nested-Object-Serialization
|
86dc7812c2002010247af9f4edabaf29c78c3be9
|
[
"MIT"
] | null | null | null |
task_manager.py
|
technetbytes/Nested-Object-Serialization
|
86dc7812c2002010247af9f4edabaf29c78c3be9
|
[
"MIT"
] | null | null | null |
from task import Task
from tasks import Tasks
from status import Status
import redis
import datetime
import json
from json_extension import check_update_list
from converter import datetime_converter
class TaskManager:
_redis = None
_task_management_key = None
def __redis():
server = "localhost"
port = 6379
db = 0
TaskManager._redis = redis.Redis(server, port, db)
TaskManager._task_management_key = "object-serial"
@staticmethod
def __find_task_object(json_object, name):
for dict in json_object:
x = json.loads(dict)
if x['task_id'] == name:
return x
@staticmethod
def __find_task(json_object, name):
task = [obj for obj in json_object if obj['task_id']==name]
if len(task) > 1 and task is not None:
return task[0]
return None
@staticmethod
def clear_task_tasks_obj_as_dict():
# check and then create redis server object
if TaskManager._redis is None:
TaskManager.__redis()
TaskManager._redis.delete(TaskManager._task_management_key)
def get_task_management():
# check and then create redis server object
if TaskManager._redis is None:
TaskManager.__redis()
tasks_data_as_bytes = TaskManager._redis.get(TaskManager._task_management_key)
if tasks_data_as_bytes is not None:
tasks_data_as_str = tasks_data_as_bytes.decode("utf-8")
tasks_obj_as_dict = json.loads(tasks_data_as_str)
return tasks_obj_as_dict
else:
return None
@staticmethod
def __update_json_object(tasks_obj_as_dict, replace_obj):
for task in tasks_obj_as_dict:
if json.loads(task)['task_id'] == replace_obj['task_id']:
task = json.dumps(replace_obj)
break
return tasks_obj_as_dict
@staticmethod
def update_task_management_ext(event, name, status, id):
tasks_obj_as_dict = TaskManager.get_task_management()
if tasks_obj_as_dict is not None:
for element in tasks_obj_as_dict:
#print("@@@@@",tasks_obj_as_dict[element])
for elt in tasks_obj_as_dict[element]:
#print(";;;;;;;;;;;",elt['task_id'])
if elt['task_id'] == id:
new_status = Status(id, name, str(datetime.datetime.now()), status)
elt['conditions'].append(new_status)
print("***->",elt)
# print("!!!!!!!->",tasks_obj_as_dict['conditions'])
tasks = Tasks(tasks_obj_as_dict['conditions'])
TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
# #Iterating all the fields of the JSON
# for element in tasks_obj_as_dict:
# #If Json Field value is a list
# if (isinstance(tasks_obj_as_dict[element], list)):
# # add new status in the task_conditions list
# new_status = Status(id, name, datetime.datetime.now(), status)
# check_update_list(tasks_obj_as_dict[element], element, new_status)
# print(tasks_obj_as_dict['conditions'])
# tasks = Tasks(tasks_obj_as_dict['conditions'])
# #TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
@staticmethod
def update_task_management(event, name, status, id):
tasks_obj_as_dict = TaskManager.get_task_management()
if tasks_obj_as_dict is not None:
#convert dict into json object called cache_object and add new item in the existing collection
cache_data = json.loads(json.dumps(tasks_obj_as_dict))
if cache_data is not None:
current_task = TaskManager.__find_task_object(cache_data['conditions'], id)
if current_task is not None:
# get task status list
current_task_conditions = current_task['conditions']
# add new status in the task_conditions list
new_status = Status(id, name, datetime.datetime.now(), status)
current_task_conditions.append(new_status)
# update object
update_json_obj = TaskManager.__update_json_object(cache_data['conditions'], current_task)
@staticmethod
def create_new_task(message_type, task):
# check and then create redis server object
if TaskManager._redis is None:
TaskManager.__redis()
_conditions = []
_tasks = []
tasks_obj_as_dict = TaskManager.get_task_management()
if tasks_obj_as_dict is None:
#first time creating task in the redis
if task is not None:
new_task = Task(message_type, task['id'], "init", _conditions)
_tasks.append(new_task)
tasks = Tasks(_tasks)
TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
else:
new_task = Task(message_type, task['id'], "init", _conditions)
# #convert dict into json object called cache_object and add new item in the existing collection
cache_data = json.loads(json.dumps(tasks_obj_as_dict))
# # print(cache_data)
cache_data['conditions'].append(new_task)
#print("---->",type(*cache_data.values()))
#print(len(*cache_data.values()))
# # prefixed by an asterisk operator to unpack the values in order to create a typename tuple subclass
tasks = Tasks(*cache_data.values())
TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
# return tasks_obj_as_dict
# new_task = Task(message_type, task['id'], "init", _conditions)
# #convert dict into json object called cache_object and add new item in the existing collection
# cache_data = json.loads(json.dumps(tasks_obj_as_dict))
# # print(cache_data)
# cache_data['conditions'].append(new_task)
# # prefixed by an asterisk operator to unpack the values in order to create a typename tuple subclass
# tasks = Tasks(*cache_data.values())
# TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
# return tasks_obj_as_dict
@staticmethod
def testing_create_new_task(task):
# check and then create redis server object
if TaskManager._redis is None:
TaskManager.__redis()
tasks_obj_as_dict = TaskManager.get_task_management()
if tasks_obj_as_dict is None:
TaskManager._redis.set(TaskManager._task_management_key, json.dumps(task))
else:
return tasks_obj_as_dict
# from task_store.task import Task
# from task_store.tasks import Tasks
# from task_store.status import Status
# import redis
# import datetime
# from config.setting import Config
# import json
# from utilities.json_extension import check_update_list
# from task_store.converter import datetime_converter
# class TaskManager:
# _redis = None
# _task_management_key = None
# def __redis():
# server = Config.get_complete_property('redis','server')
# port = Config.get_complete_property('redis','port')
# db = Config.get_complete_property('redis','db')
# TaskManager._redis = redis.Redis(server, port, db)
# TaskManager._task_management_key = Config.get_complete_property('redis','task_management_key')
# @staticmethod
# def __find_task_object(json_object, name):
# for dict in json_object:
# x = json.loads(dict)
# if x['task_id'] == name:
# return x
# @staticmethod
# def __find_task(json_object, name):
# task = [obj for obj in json_object if obj['task_id']==name]
# if len(task) > 1 and task is not None:
# return task[0]
# return None
# @staticmethod
# def clear_task_tasks_obj_as_dict():
# # check and then create redis server object
# if TaskManager._redis is None:
# TaskManager.__redis()
# TaskManager._redis.delete(TaskManager._task_management_key)
# def get_task_management():
# # check and then create redis server object
# if TaskManager._redis is None:
# TaskManager.__redis()
# tasks_data_as_bytes = TaskManager._redis.get(TaskManager._task_management_key)
# if tasks_data_as_bytes is not None:
# tasks_data_as_str = tasks_data_as_bytes.decode("utf-8")
# tasks_obj_as_dict = json.loads(tasks_data_as_str)
# return tasks_obj_as_dict
# else:
# return None
# @staticmethod
# def __update_json_object(tasks_obj_as_dict, replace_obj):
# for task in tasks_obj_as_dict:
# if json.loads(task)['task_id'] == replace_obj['task_id']:
# task = json.dumps(replace_obj)
# break
# return tasks_obj_as_dict
# @staticmethod
# def update_task_management_ext(event, name, status, id):
# tasks_obj_as_dict = TaskManager.get_task_management()
# if tasks_obj_as_dict is not None:
# #Iterating all the fields of the JSON
# for element in tasks_obj_as_dict:
# #If Json Field value is a list
# if (isinstance(tasks_obj_as_dict[element], list)):
# # add new status in the task_conditions list
# new_status = Status(id, name, datetime.datetime.now(), status)
# check_update_list(tasks_obj_as_dict[element], element, new_status)
# tasks = Tasks(tasks_obj_as_dict['conditions'])
# TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
# @staticmethod
# def update_task_management(event, name, status, id):
# tasks_obj_as_dict = TaskManager.get_task_management()
# if tasks_obj_as_dict is not None:
# #convert dict into json object called cache_object and add new item in the existing collection
# cache_data = json.loads(json.dumps(tasks_obj_as_dict))
# if cache_data is not None:
# current_task = TaskManager.__find_task_object(cache_data['conditions'], id)
# if current_task is not None:
# # get task status list
# current_task_conditions = current_task['conditions']
# # add new status in the task_conditions list
# new_status = Status(id, name, datetime.datetime.now(), status)
# current_task_conditions.append(new_status)
# # update object
# update_json_obj = TaskManager.__update_json_object(cache_data['conditions'], current_task)
# @staticmethod
# def create_new_task(message_type, task):
# # check and then create redis server object
# if TaskManager._redis is None:
# TaskManager.__redis()
# _conditions = []
# _tasks = []
# tasks_obj_as_dict = TaskManager.get_task_management()
# if tasks_obj_as_dict is None:
# #first time creating task in the redis
# if task is not None:
# new_task = Task(message_type, task.id, "init", _conditions)
# _tasks.append(new_task)
# tasks = Tasks(_tasks)
# TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
# else:
# new_task = Task(message_type, task.id, "init", _conditions)
# #convert dict into json object called cache_object and add new item in the existing collection
# cache_data = json.loads(json.dumps(tasks_obj_as_dict))
# # print(cache_data)
# cache_data['conditions'].append(new_task)
# # prefixed by an asterisk operator to unpack the values in order to create a typename tuple subclass
# tasks = Tasks(*cache_data.values())
# TaskManager._redis.set(TaskManager._task_management_key, json.dumps(tasks.to_json()))
# return tasks_obj_as_dict
# @staticmethod
# def testing_create_new_task(task):
# # check and then create redis server object
# if TaskManager._redis is None:
# TaskManager.__redis()
# tasks_obj_as_dict = TaskManager.get_task_management()
# if tasks_obj_as_dict is None:
# TaskManager._redis.set(TaskManager._task_management_key, json.dumps(task))
# else:
# return tasks_obj_as_dict
| 44.74744
| 114
| 0.617039
| 1,580
| 13,111
| 4.793038
| 0.071519
| 0.054932
| 0.068665
| 0.096131
| 0.947577
| 0.922488
| 0.922488
| 0.897663
| 0.897663
| 0.897663
| 0
| 0.001195
| 0.297765
| 13,111
| 292
| 115
| 44.900685
| 0.821332
| 0.583098
| 0
| 0.378641
| 0
| 0
| 0.026301
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097087
| false
| 0
| 0.07767
| 0
| 0.271845
| 0.009709
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b3833539bfa8d99aaa58dc88e1bed22ac78ca1b8
| 231
|
py
|
Python
|
models/modelzoo/__init__.py
|
naivelamb/kaggle-cloud-organization
|
08750c89d56235eee68c8827afb075610e53569d
|
[
"BSD-2-Clause"
] | 30
|
2019-12-23T01:38:23.000Z
|
2021-06-29T19:40:39.000Z
|
models/modelzoo/__init__.py
|
naivelamb/kaggle-cloud-organization
|
08750c89d56235eee68c8827afb075610e53569d
|
[
"BSD-2-Clause"
] | 8
|
2020-03-24T17:58:50.000Z
|
2022-01-13T02:00:44.000Z
|
models/modelzoo/__init__.py
|
naivelamb/kaggle-cloud-organization
|
08750c89d56235eee68c8827afb075610e53569d
|
[
"BSD-2-Clause"
] | 6
|
2019-12-23T01:38:32.000Z
|
2020-10-22T09:06:07.000Z
|
from .dpn import *
from .inceptionV4 import *
from .inceptionresnetv2 import *
from .resnet import *
from .senet import *
from .xception import *
from .senet2 import seresnext26_32x4d
from .efficientNet import EfficientNet
| 25.666667
| 39
| 0.766234
| 27
| 231
| 6.518519
| 0.444444
| 0.340909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041885
| 0.17316
| 231
| 8
| 40
| 28.875
| 0.879581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b38e6344faa8d39059e97bf8ad200ef2ff0477df
| 1,626
|
py
|
Python
|
trdg/labels_csv.py
|
BismarckBamfo/ocr-paper
|
56a60486fb25613fc18ac984c6ca22a4475b3c4b
|
[
"MIT"
] | 1
|
2022-01-21T06:31:16.000Z
|
2022-01-21T06:31:16.000Z
|
trdg/labels_csv.py
|
BismarckBamfo/ocr-paper
|
56a60486fb25613fc18ac984c6ca22a4475b3c4b
|
[
"MIT"
] | null | null | null |
trdg/labels_csv.py
|
BismarckBamfo/ocr-paper
|
56a60486fb25613fc18ac984c6ca22a4475b3c4b
|
[
"MIT"
] | 1
|
2022-01-18T21:54:04.000Z
|
2022-01-18T21:54:04.000Z
|
import pandas as pd
from fire import Fire
def make_train_csv(path):
filename = []
words = []
with open(f'{path}/train/labels.txt', 'r') as f:
train_text = f.readlines()
for idx, x in enumerate(train_text):
split_line = x.split('\t')
filename.append(split_line[0])
words.append(split_line[1].rstrip('\n').lstrip())
df = pd.DataFrame(list(zip(filename, words)), columns=['filename', 'words'])
df.to_csv(f'{path}/train/labels.csv', sep=';', encoding='utf-8', index=False)
def make_val_csv(path):
filename = []
words = []
with open(f'{path}/val/labels.txt', 'r') as f:
train_text = f.readlines()
for idx, x in enumerate(train_text):
split_line = x.split('\t')
filename.append(split_line[0])
words.append(split_line[1].rstrip('\n').lstrip())
df = pd.DataFrame(list(zip(filename, words)), columns=['filename', 'words'])
df.to_csv(f'{path}/val/labels.csv', sep=';', encoding='utf-8', index=False)
def make_test_csv(path):
filename = []
words = []
with open(f'{path}/test/labels.txt', 'r') as f:
train_text = f.readlines()
for idx, x in enumerate(train_text):
split_line = x.split('\t')
filename.append(split_line[0])
words.append(split_line[1].rstrip('\n').lstrip())
df = pd.DataFrame(list(zip(filename, words)), columns=['filename', 'words'])
df.to_csv(f'{path}/test/labels.csv', sep=';', encoding='utf-8', index=False)
def main(path):
make_train_csv(path)
make_val_csv(path)
make_test_csv(path)
if __name__ == '__main__':
Fire(main)
| 29.035714
| 81
| 0.613776
| 238
| 1,626
| 4.033613
| 0.218487
| 0.121875
| 0.09375
| 0.0625
| 0.845833
| 0.845833
| 0.845833
| 0.845833
| 0.742708
| 0.704167
| 0
| 0.006912
| 0.199262
| 1,626
| 55
| 82
| 29.563636
| 0.730415
| 0
| 0
| 0.585366
| 0
| 0
| 0.130381
| 0.081181
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.04878
| 0
| 0.146341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b3ae6dfb9d2e837a5f53b26250dfbac00873f765
| 105
|
py
|
Python
|
leapp/cli/__main__.py
|
dhodovsk/leapp
|
bcd6580a19dabd132b3da8bcf2ed61fa8864ef18
|
[
"Apache-2.0"
] | 29
|
2019-05-29T05:34:52.000Z
|
2022-03-14T19:09:34.000Z
|
leapp/cli/__main__.py
|
dhodovsk/leapp
|
bcd6580a19dabd132b3da8bcf2ed61fa8864ef18
|
[
"Apache-2.0"
] | 373
|
2018-11-21T11:41:49.000Z
|
2022-03-31T11:40:56.000Z
|
leapp/cli/__main__.py
|
dhodovsk/leapp
|
bcd6580a19dabd132b3da8bcf2ed61fa8864ef18
|
[
"Apache-2.0"
] | 27
|
2018-11-26T17:14:15.000Z
|
2022-03-10T13:30:50.000Z
|
from leapp.cli import main
import leapp.utils.i18n # noqa: F401; pylint: disable=unused-import
main()
| 17.5
| 68
| 0.752381
| 16
| 105
| 4.9375
| 0.75
| 0.253165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.142857
| 105
| 5
| 69
| 21
| 0.822222
| 0.390476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b3e37e88982699bfc3eed5ddb39ee2cb55eef201
| 163
|
py
|
Python
|
frappe/patches/v12_0/copy_to_parent_for_tags.py
|
ektai/frappe3
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
[
"MIT"
] | null | null | null |
frappe/patches/v12_0/copy_to_parent_for_tags.py
|
ektai/frappe3
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
[
"MIT"
] | null | null | null |
frappe/patches/v12_0/copy_to_parent_for_tags.py
|
ektai/frappe3
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
[
"MIT"
] | null | null | null |
import frappe
def execute():
frappe.db.sql("UPDATE `tabTag Link` SET parenttype=document_type")
frappe.db.sql("UPDATE `tabTag Link` SET parent=document_name")
| 23.285714
| 67
| 0.760736
| 24
| 163
| 5.083333
| 0.625
| 0.131148
| 0.180328
| 0.278689
| 0.491803
| 0.491803
| 0.491803
| 0
| 0
| 0
| 0
| 0
| 0.110429
| 163
| 6
| 68
| 27.166667
| 0.841379
| 0
| 0
| 0
| 0
| 0
| 0.576687
| 0.147239
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3736f996efbaf73fa665ceb73ccaf2cfc50c07fa
| 133
|
py
|
Python
|
webpages/admin.py
|
18praneeth/udayagiri-scl-maxo
|
67ac939265d7837e39329162d7dd935a52130978
|
[
"MIT"
] | 8
|
2021-01-01T17:04:45.000Z
|
2021-06-24T05:53:13.000Z
|
webpages/admin.py
|
18praneeth/udayagiri-scl-maxo
|
67ac939265d7837e39329162d7dd935a52130978
|
[
"MIT"
] | 11
|
2021-01-01T15:04:04.000Z
|
2021-01-10T07:47:12.000Z
|
webpages/admin.py
|
18praneeth/udayagiri-scl-maxo
|
67ac939265d7837e39329162d7dd935a52130978
|
[
"MIT"
] | 7
|
2020-12-14T12:44:17.000Z
|
2021-01-15T14:29:13.000Z
|
from django.contrib import admin
from .models import Contact
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
pass
| 22.166667
| 37
| 0.804511
| 17
| 133
| 6.294118
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120301
| 133
| 6
| 38
| 22.166667
| 0.91453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
3750e70df091403cf10f1666d19111c3cde5abaf
| 141
|
py
|
Python
|
blog/admin.py
|
MysteryCoder456/Blog-App
|
56d28c1b93c113487a36df265ecc677e426b1c62
|
[
"MIT"
] | 3
|
2020-06-17T07:35:17.000Z
|
2020-06-17T07:45:15.000Z
|
blog/admin.py
|
MysteryCoder456/Blog-App
|
56d28c1b93c113487a36df265ecc677e426b1c62
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
MysteryCoder456/Blog-App
|
56d28c1b93c113487a36df265ecc677e426b1c62
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(BlogList)
admin.site.register(Blog)
admin.site.register(Comment)
| 20.142857
| 32
| 0.808511
| 20
| 141
| 5.7
| 0.55
| 0.236842
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 141
| 6
| 33
| 23.5
| 0.883721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3775b2330eebd91d9b1fa6ec7d79e297b1c8cc13
| 10,559
|
py
|
Python
|
tests/unittests/commands/test_cmd_cs_beacon.py
|
f5devcentral/f5-cli
|
22a5c6902e3f78a969a86116a73fcad817f220be
|
[
"Apache-2.0"
] | 13
|
2020-03-06T22:35:47.000Z
|
2021-06-28T23:08:46.000Z
|
tests/unittests/commands/test_cmd_cs_beacon.py
|
f5devcentral/f5-cli
|
22a5c6902e3f78a969a86116a73fcad817f220be
|
[
"Apache-2.0"
] | 19
|
2020-03-11T15:14:06.000Z
|
2022-01-26T23:56:56.000Z
|
tests/unittests/commands/test_cmd_cs_beacon.py
|
f5devcentral/f5-cli
|
22a5c6902e3f78a969a86116a73fcad817f220be
|
[
"Apache-2.0"
] | 1
|
2020-03-24T13:29:30.000Z
|
2020-03-24T13:29:30.000Z
|
""" Test Beacon command """
import json
from f5sdk.cs import ManagementClient
from f5sdk.cs.beacon.insights import InsightsClient
from f5sdk.cs.beacon.declare import DeclareClient
from f5sdk.cs.beacon.token import TokenClient
from f5cli.config import AuthConfigurationClient
from f5cli.commands.cmd_cs import cli
from ...global_test_imports import pytest, CliRunner
# Test Constants
MOCK_CONFIG_CLIENT_READ_AUTH_RETURN_VALUE = {
'user': 'test_user',
'password': 'test_password'
}
class TestCommandBeacon(object):
""" Test Class: command beacon """
@classmethod
def setup_class(cls):
""" Setup func """
cls.runner = CliRunner()
@classmethod
def teardown_class(cls):
""" Teardown func """
@staticmethod
@pytest.fixture
def config_client_read_auth_fixture(mocker):
""" PyTest fixture mocking AuthConfigurationClient's read_auth method """
mock_config_client_read_auth = mocker.patch.object(
AuthConfigurationClient, "read_auth")
mock_config_client_read_auth.return_value = MOCK_CONFIG_CLIENT_READ_AUTH_RETURN_VALUE
return mock_config_client_read_auth
@staticmethod
@pytest.fixture
def mgmt_client_fixture(mocker):
""" PyTest fixture returning mocked Cloud Services Management Client """
mock_management_client = mocker.patch.object(ManagementClient, '__init__')
mock_management_client.return_value = None
return mock_management_client
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_list(self, mocker):
""" List all configured beacon insights
Given
- The Insights Client returns a successful response
When
- User executes a 'list'
Then
- The 'list' command returns a successful response
"""
mock_response = {
'foo': 'bar'
}
mocker.patch.object(
InsightsClient, "list", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'insights', 'list'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_create(self, mocker):
""" Creating a beacon insight
Given
- The Insights Client returns a successful response
When
- User executes a 'create' with a declaration
Then
- The 'create' command returns a successful response
and creates an insight
"""
mock_response = {
'title': 'foo',
'description': 'blah'
}
mocker.patch.object(
InsightsClient, "create", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'insights', 'create',
'--declaration', './test/fake_declaration.json'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_update(self, mocker):
""" Updating a beacon insight
Given
- The Insights Client returns a successful response
When
- User executes a 'update' with a declaration with the same name
Then
- The 'update' command returns a successful response
and updates the specified insight
"""
mock_response = {
'title': 'foo',
'description': 'blah2'
}
mocker.patch.object(
InsightsClient, "create", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'insights', 'update',
'--declaration', './test/fake_declaration.json'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_delete(self, mocker):
""" Deleting a beacon insight
Given
- The Insights Client returns a successful response
When
- User executes a 'delete' with the name of the insight to be deleted
Then
- The 'delete' command returns a successful response
and delete the specified insight
"""
mocker.patch.object(
InsightsClient, "delete", return_value={})
result = self.runner.invoke(cli, [
'beacon', 'insights', 'delete', '--name', 'foo', '--auto-approve'])
assert result.output == json.dumps(
{'message': 'Insight deleted successfully'},
indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_show(self, mocker):
""" Show a beacon insight
Given
- The Insights Client returns a successful response
When
- User executes a 'show' with a name of the insight
Then
- The 'show' command returns requested insight
"""
mock_response = {
'title': 'foo',
'description': 'blah'
}
mocker.patch.object(
InsightsClient, "show", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'insights', 'show', '--name', 'foo'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_declare_show(self, mocker):
""" Show a beacon declaration
Given
- The Declare Client returns a mocked response
When
- User executes a 'show'
Then
- The 'show' command returns the mocked response
"""
mock_response = {'foo': 'bar'}
mocker.patch.object(
DeclareClient, "create", return_value=mock_response
)
result = self.runner.invoke(cli, ['beacon', 'declare', 'show'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_declare_create(self, mocker):
""" Create/update a beacon declaration
Given
- The Declare Client returns a mocked response
When
- User executes a 'create'
Then
- The 'create' command returns the mocked response
"""
mock_response = {'foo': 'bar'}
mocker.patch.object(
DeclareClient, "create", return_value=mock_response
)
result = self.runner.invoke(
cli, ['beacon', 'declare', 'create', '--declaration', './foo.json']
)
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_token_create(self, mocker):
""" Creating a beacon token
Given
- The Token Client returns a successful response
When
- User executes a 'create' with a declaration
Then
- The 'create' command returns a successful response
and creates an token
"""
mock_response = {
'title': 'foo',
'description': 'blah'
}
mocker.patch.object(
TokenClient, "create", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'token', 'create',
'--declaration', './test/fake_declaration.json'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_token_delete(self, mocker):
""" Deleting a beacon token
Given
- The Token Client returns a successful response
When
- User executes a 'delete' with the name of the token to be deleted
Then
- The 'delete' command returns a successful response
and delete the specified token
"""
mocker.patch.object(
TokenClient, "delete", return_value={})
result = self.runner.invoke(cli, [
'beacon', 'token', 'delete', '--name', 'foo', '--auto-approve'])
assert result.output == json.dumps(
{'message': 'Token deleted successfully'},
indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_token_show(self, mocker):
""" Show a beacon token
Given
- The Token Client returns a successful response
When
- User executes a 'show' with a name of the token
Then
- The 'show' command returns requested token
"""
mock_response = {
'title': 'foo',
'description': 'blah'
}
mocker.patch.object(
TokenClient, "show", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'token', 'show', '--name', 'foo'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_token_list(self, mocker):
""" List all configured beacon token
Given
- The Token Client returns a successful response
When
- User executes a 'list'
Then
- The 'list' command returns a successful response
"""
mock_response = {
'foo': 'bar'
}
mocker.patch.object(
TokenClient, "list", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'token', 'list'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
| 32.389571
| 93
| 0.623733
| 1,162
| 10,559
| 5.484509
| 0.101549
| 0.050839
| 0.072493
| 0.05335
| 0.817511
| 0.801977
| 0.75459
| 0.725404
| 0.725404
| 0.710341
| 0
| 0.002342
| 0.272185
| 10,559
| 325
| 94
| 32.489231
| 0.826936
| 0.235912
| 0
| 0.543624
| 0
| 0
| 0.177446
| 0.058235
| 0
| 0
| 0
| 0
| 0.073826
| 1
| 0.100671
| false
| 0.006711
| 0.053691
| 0
| 0.174497
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
37afe5413eb087c8018083e5be2bc3959be0c131
| 33
|
py
|
Python
|
util/__init__.py
|
Str4thus/BraiNN
|
3f015dbbac4011798a7557cd45329854b1015804
|
[
"MIT"
] | null | null | null |
util/__init__.py
|
Str4thus/BraiNN
|
3f015dbbac4011798a7557cd45329854b1015804
|
[
"MIT"
] | null | null | null |
util/__init__.py
|
Str4thus/BraiNN
|
3f015dbbac4011798a7557cd45329854b1015804
|
[
"MIT"
] | null | null | null |
from .managers import HtmlManager
| 33
| 33
| 0.878788
| 4
| 33
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 1
| 33
| 33
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
80652051dadf9734681491e7eab4c53d2d52bd4d
| 4,526
|
py
|
Python
|
cottonformation/res/ram.py
|
gitter-badger/cottonformation-project
|
354f1dce7ea106e209af2d5d818b6033a27c193c
|
[
"BSD-2-Clause"
] | 5
|
2021-07-22T03:45:59.000Z
|
2021-12-17T21:07:14.000Z
|
cottonformation/res/ram.py
|
gitter-badger/cottonformation-project
|
354f1dce7ea106e209af2d5d818b6033a27c193c
|
[
"BSD-2-Clause"
] | 1
|
2021-06-25T18:01:31.000Z
|
2021-06-25T18:01:31.000Z
|
cottonformation/res/ram.py
|
gitter-badger/cottonformation-project
|
354f1dce7ea106e209af2d5d818b6033a27c193c
|
[
"BSD-2-Clause"
] | 2
|
2021-06-27T03:08:21.000Z
|
2021-06-28T22:15:51.000Z
|
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
#--- Resource declaration ---
@attr.s
class ResourceShare(Resource):
"""
AWS Object Type = "AWS::RAM::ResourceShare"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html
Property Document:
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-name
- ``p_AllowExternalPrincipals``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-allowexternalprincipals
- ``p_PermissionArns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-permissionarns
- ``p_Principals``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-principals
- ``p_ResourceArns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-resourcearns
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-tags
"""
AWS_OBJECT_TYPE = "AWS::RAM::ResourceShare"
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-name"""
p_AllowExternalPrincipals: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "AllowExternalPrincipals"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-allowexternalprincipals"""
p_PermissionArns: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "PermissionArns"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-permissionarns"""
p_Principals: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Principals"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-principals"""
p_ResourceArns: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "ResourceArns"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-resourcearns"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#aws-resource-ram-resourceshare-return-values"""
return GetAtt(resource=self, attr_name="Arn")
| 54.53012
| 204
| 0.752541
| 527
| 4,526
| 6.354839
| 0.134725
| 0.138549
| 0.062705
| 0.120932
| 0.834279
| 0.834279
| 0.815169
| 0.815169
| 0.815169
| 0.804718
| 0
| 0.000247
| 0.107159
| 4,526
| 82
| 205
| 55.195122
| 0.828508
| 0.296509
| 0
| 0.209302
| 0
| 0
| 0.04106
| 0.020309
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.093023
| 0
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
80b536638811e8b11941942b46f25f99ef6bdac3
| 130
|
py
|
Python
|
transcrypt/development/automated_tests/relimport/rimport.py
|
kochelmonster/Transcrypt
|
ab150cdea872e945950d53f1d276ce76e42619ce
|
[
"Apache-2.0"
] | null | null | null |
transcrypt/development/automated_tests/relimport/rimport.py
|
kochelmonster/Transcrypt
|
ab150cdea872e945950d53f1d276ce76e42619ce
|
[
"Apache-2.0"
] | null | null | null |
transcrypt/development/automated_tests/relimport/rimport.py
|
kochelmonster/Transcrypt
|
ab150cdea872e945950d53f1d276ce76e42619ce
|
[
"Apache-2.0"
] | null | null | null |
import tpackage
def run(test):
test.check(type(tpackage.peer2.func).__name__)
test.check(type(tpackage.func1).__name__)
| 18.571429
| 50
| 0.738462
| 18
| 130
| 4.888889
| 0.611111
| 0.204545
| 0.295455
| 0.477273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 0.123077
| 130
| 6
| 51
| 21.666667
| 0.754386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
03b1bcdadbbbc5fe5dc1e9fe8c2869aa9ba48609
| 260
|
py
|
Python
|
authlib/django/client/__init__.py
|
bobh66/authlib
|
e3e18da74d689b61a8dc8db46775ff77a57c6c2a
|
[
"BSD-3-Clause"
] | 1
|
2021-12-09T07:11:05.000Z
|
2021-12-09T07:11:05.000Z
|
authlib/django/client/__init__.py
|
bobh66/authlib
|
e3e18da74d689b61a8dc8db46775ff77a57c6c2a
|
[
"BSD-3-Clause"
] | 4
|
2021-03-19T08:17:59.000Z
|
2021-06-10T19:34:36.000Z
|
authlib/django/client/__init__.py
|
bobh66/authlib
|
e3e18da74d689b61a8dc8db46775ff77a57c6c2a
|
[
"BSD-3-Clause"
] | 2
|
2021-05-24T20:34:12.000Z
|
2022-03-26T07:46:17.000Z
|
# flake8: noqa
from authlib.deprecate import deprecate
from authlib.integrations.django_client import OAuth, DjangoRemoteApp as RemoteApp
deprecate('Deprecate "authlib.django.client", USE "authlib.integrations.django_client" instead.', '1.0', 'Jeclj', 'rn')
| 37.142857
| 119
| 0.792308
| 32
| 260
| 6.375
| 0.59375
| 0.176471
| 0.245098
| 0.303922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012712
| 0.092308
| 260
| 6
| 120
| 43.333333
| 0.851695
| 0.046154
| 0
| 0
| 0
| 0
| 0.382114
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
03ccf27a360ba3b6f27c457158ea0174f834eb17
| 20
|
py
|
Python
|
constant/__init__.py
|
Naopil/EldenBot
|
2b6f4e98dcfdf3720a6c4add4f694d0e15cd575a
|
[
"MIT"
] | null | null | null |
constant/__init__.py
|
Naopil/EldenBot
|
2b6f4e98dcfdf3720a6c4add4f694d0e15cd575a
|
[
"MIT"
] | 1
|
2019-11-16T19:01:01.000Z
|
2019-11-16T19:01:01.000Z
|
constant/__init__.py
|
Naopil/EldenBot
|
2b6f4e98dcfdf3720a6c4add4f694d0e15cd575a
|
[
"MIT"
] | 4
|
2018-07-22T23:13:26.000Z
|
2022-03-29T17:06:50.000Z
|
from .rgapi import *
| 20
| 20
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 20
| 1
| 20
| 20
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2076ab8123a0a4df3d62bd7bbcb3e7948cc3d940
| 27
|
py
|
Python
|
src/euler_python_package/euler_python/medium/p374.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p374.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p374.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
def problem374():
pass
| 9
| 17
| 0.62963
| 3
| 27
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.259259
| 27
| 2
| 18
| 13.5
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
207911daed740dde0bdd5ae6e05869fc91703e7a
| 6,845
|
py
|
Python
|
test/test_model.py
|
vkazei/deepwave
|
032bb06328673f4f824fbca20f09ba7bb277c8d1
|
[
"MIT"
] | 73
|
2018-07-16T13:57:09.000Z
|
2022-03-24T04:08:27.000Z
|
test/test_model.py
|
vkazei/deepwave
|
032bb06328673f4f824fbca20f09ba7bb277c8d1
|
[
"MIT"
] | 41
|
2018-07-14T15:44:13.000Z
|
2022-03-25T09:35:08.000Z
|
test/test_model.py
|
vkazei/deepwave
|
032bb06328673f4f824fbca20f09ba7bb277c8d1
|
[
"MIT"
] | 20
|
2018-12-02T14:42:59.000Z
|
2022-03-21T15:52:52.000Z
|
import torch
import pytest
import deepwave.base.model
def test_init_scalar():
"""Init model with scalars"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
dx = 5.0
model = deepwave.base.model.Model(properties, dx, pad_width=1, origin=2.0)
assert model.properties == properties
assert model.device == properties['a'].device
assert model.ndim == 2
assert (model.shape == torch.Tensor([3, 4, 1]).long()).all()
assert (model.dx == dx * torch.ones(2)).all()
assert (model.pad_width == torch.Tensor([1, 1, 1, 1, 0, 0]).long()).all()
assert (model.origin == torch.Tensor([2.0, 2.0])).all()
assert model.interior == [slice(1, 2), slice(1, 3)]
def test_init_list():
"""Init model with lists"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
dx = [5.0, 5.0]
pad_width = [1, 1, 1, 1, 0, 0]
origin = [2.0, 2.0]
model = deepwave.base.model.Model(properties, dx, pad_width=pad_width,
origin=origin)
assert model.properties == properties
assert model.device == properties['a'].device
assert model.ndim == 2
assert (model.shape == torch.Tensor([3, 4, 1]).long()).all()
assert (model.dx == torch.Tensor(dx)).all()
assert (model.pad_width == torch.Tensor([1, 1, 1, 1, 0, 0]).long()).all()
assert (model.origin == torch.Tensor([2.0, 2.0])).all()
assert model.interior == [slice(1, 2), slice(1, 3)]
def test_not_tensor():
"""One of the properties is not a Tensor"""
properties = {'a': torch.ones(3, 4),
'b': [0, 1]}
with pytest.raises(TypeError):
deepwave.base.model.Model(properties, 5.0, pad_width=1,
origin=2.0)
def test_different_types():
"""Properties have different types"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4, dtype=torch.double)}
with pytest.raises(RuntimeError):
deepwave.base.model.Model(properties, 5.0, pad_width=1,
origin=2.0)
def test_different_sizes1():
"""Properties have different sizes (same ndim)"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 5)}
with pytest.raises(RuntimeError):
deepwave.base.model.Model(properties, 5.0, pad_width=1,
origin=2.0)
def test_different_sizes2():
"""Properties have different sizes (different ndim)"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4, 1)}
with pytest.raises(RuntimeError):
deepwave.base.model.Model(properties, 5.0, pad_width=1,
origin=2.0)
def test_nonpositive_dx1():
"""Nonpositive dx (scalar)"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
with pytest.raises(RuntimeError):
deepwave.base.model.Model(properties, -5.0, pad_width=1,
origin=2.0)
def test_nonpositive_dx2():
"""Nonpositive dx (list)"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
dx = [5.0, 0.0]
with pytest.raises(RuntimeError):
deepwave.base.model.Model(properties, dx, pad_width=1,
origin=2.0)
def test_negative_pad1():
"""Negative pad (scalar)"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
with pytest.raises(RuntimeError):
deepwave.base.model.Model(properties, 5.0, pad_width=-1,
origin=2.0)
def test_negative_pad2():
"""Negative pad (list)"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
pad_width = [1, 1, -1, 1, 0, 0]
with pytest.raises(RuntimeError):
deepwave.base.model.Model(properties, 5.0, pad_width=pad_width,
origin=2.0)
def test_integer_origin():
"""Origin is int instead of float"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
with pytest.raises(TypeError):
deepwave.base.model.Model(properties, 5.0, pad_width=1,
origin=2)
def test_extract():
"""Extract portion of model"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
model = deepwave.base.model.Model(properties, 5.0, pad_width=1, origin=2.0)
model_extract = model[:, 1:2]
assert (model_extract.shape == torch.Tensor([3, 3, 1]).long()).all()
assert model_extract.properties['a'].shape == torch.Size([3, 3])
assert model_extract.properties['b'].shape == torch.Size([3, 3])
assert model_extract.ndim == 2
assert (model_extract.pad_width ==
torch.Tensor([1, 1, 1, 1, 0, 0]).long()).all()
assert (model_extract.origin == torch.Tensor([2.0, 7.0])).all()
assert model_extract.interior == [slice(1, 2), slice(1, 2)]
def test_pad1():
"""Change pad_width from 1 to 2"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
model = deepwave.base.model.Model(properties, 5.0, pad_width=1, origin=2.0)
model_pad = model.pad(2)
assert (model_pad.shape == torch.Tensor([5, 6, 1]).long()).all()
assert model_pad.properties['a'].shape == torch.Size([5, 6])
assert model_pad.properties['b'].shape == torch.Size([5, 6])
assert model_pad.ndim == 2
assert (model_pad.pad_width ==
torch.Tensor([2, 2, 2, 2, 0, 0]).long()).all()
assert (model_pad.origin == torch.Tensor([2.0, 2.0])).all()
assert model_pad.interior == [slice(2, 3), slice(2, 4)]
def test_pad2():
"""Add two pad_widths"""
properties = {'a': torch.ones(3, 4),
'b': torch.zeros(3, 4)}
model = deepwave.base.model.Model(properties, 5.0, pad_width=1, origin=2.0)
model_pad = model.pad(1, 1)
assert (model_pad.shape == torch.Tensor([5, 6, 1]).long()).all()
assert model_pad.properties['a'].shape == torch.Size([5, 6])
assert model_pad.properties['b'].shape == torch.Size([5, 6])
assert model_pad.ndim == 2
assert (model_pad.pad_width ==
torch.Tensor([2, 2, 2, 2, 0, 0]).long()).all()
assert (model_pad.origin == torch.Tensor([2.0, 2.0])).all()
assert model_pad.interior == [slice(2, 3), slice(2, 4)]
def test_pad3():
"""Verify that padded model has correct values"""
properties = {'a': torch.arange(6).float().reshape(2, 3)}
model = deepwave.base.model.Model(properties, 5.0)
model_pad = model.pad([1,0,0,0,0,0])
assert (model_pad.properties['a'] == torch.tensor([[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0]])).all()
| 38.027778
| 79
| 0.559825
| 960
| 6,845
| 3.911458
| 0.088542
| 0.111318
| 0.063382
| 0.087883
| 0.810652
| 0.773901
| 0.76032
| 0.754194
| 0.718775
| 0.71265
| 0
| 0.057448
| 0.262527
| 6,845
| 179
| 80
| 38.240223
| 0.68641
| 0.064865
| 0
| 0.589552
| 0
| 0
| 0.006008
| 0
| 0
| 0
| 0
| 0
| 0.283582
| 1
| 0.11194
| false
| 0
| 0.022388
| 0
| 0.134328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
207e3bec45ee694017b38934e74b53df272205d2
| 91
|
py
|
Python
|
code/message/image_to_text_message.py
|
ITE-5th/skill-socket
|
3255a07369568283be844ca1551975b1e73a23ce
|
[
"MIT"
] | 1
|
2019-07-08T09:45:02.000Z
|
2019-07-08T09:45:02.000Z
|
code/message/image_to_text_message.py
|
ITE-5th/skill-image-caption
|
1a77d27b4fbadd89a6390e8707d4a7975b1edb8d
|
[
"MIT"
] | null | null | null |
code/message/image_to_text_message.py
|
ITE-5th/skill-image-caption
|
1a77d27b4fbadd89a6390e8707d4a7975b1edb8d
|
[
"MIT"
] | null | null | null |
from .image_message import ImageMessage
class ImageToTextMessage(ImageMessage):
pass
| 15.166667
| 39
| 0.813187
| 9
| 91
| 8.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 91
| 5
| 40
| 18.2
| 0.935897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
20ccf8de54dd6dfa4114b226ebab81963bbf2e81
| 132
|
py
|
Python
|
src/Engine/Trajectory/__init__.py
|
MiguelReuter/Volley-ball-game
|
67d830cc528f3540b236d8191f582adb1827dbde
|
[
"MIT"
] | 4
|
2019-04-15T20:39:29.000Z
|
2022-02-04T10:51:37.000Z
|
src/Engine/Trajectory/__init__.py
|
MiguelReuter/Volley-ball-game
|
67d830cc528f3540b236d8191f582adb1827dbde
|
[
"MIT"
] | null | null | null |
src/Engine/Trajectory/__init__.py
|
MiguelReuter/Volley-ball-game
|
67d830cc528f3540b236d8191f582adb1827dbde
|
[
"MIT"
] | 1
|
2019-11-30T01:05:29.000Z
|
2019-11-30T01:05:29.000Z
|
# encoding : UTF-8
from .trajectory_solver import *
from .thrower_manager import ThrowerManager
from .trajectory import Trajectory
| 22
| 43
| 0.818182
| 16
| 132
| 6.625
| 0.625
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008696
| 0.128788
| 132
| 5
| 44
| 26.4
| 0.913043
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4597e474798d4f6819a4fb3d0ebf5f2e86ec6c57
| 200,304
|
py
|
Python
|
python/examples/kaitai/icc_4.py
|
carsonharmon/binaryninja-api
|
f7ad332ad69d370aa29cd54f4c7307da4d9173e2
|
[
"MIT"
] | 20
|
2019-09-28T01:44:58.000Z
|
2022-03-09T08:35:56.000Z
|
python/examples/kaitai/icc_4.py
|
carsonharmon/binaryninja-api
|
f7ad332ad69d370aa29cd54f4c7307da4d9173e2
|
[
"MIT"
] | 4
|
2020-12-23T01:51:26.000Z
|
2021-12-15T14:41:50.000Z
|
python/examples/kaitai/icc_4.py
|
carsonharmon/binaryninja-api
|
f7ad332ad69d370aa29cd54f4c7307da4d9173e2
|
[
"MIT"
] | 4
|
2020-02-20T18:47:27.000Z
|
2021-06-17T01:24:09.000Z
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
import collections
from enum import Enum
# Generated parsers need Kaitai Struct Python runtime 0.7+; fail fast otherwise.
if parse_version(ks_version) < parse_version('0.7'):
    raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class Icc4(KaitaiStruct):
    """Top-level ICC v4 colour-profile parser (kaitai-struct-compiler output).

    A profile is parsed as a fixed-layout header followed by a tag table.
    Generated code: edit the source .ksy, not this file.
    """
    SEQ_FIELDS = ["header", "tag_table"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field {'start': pos, 'end': pos} stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)

    def _read(self):
        """Read the profile header, then the tag table, recording offsets."""
        self._debug['header']['start'] = self._io.pos()
        self.header = self._root.ProfileHeader(self._io, self, self._root)
        self.header._read()
        self._debug['header']['end'] = self._io.pos()
        self._debug['tag_table']['start'] = self._io.pos()
        self.tag_table = self._root.TagTable(self._io, self, self._root)
        self.tag_table._read()
        self._debug['tag_table']['end'] = self._io.pos()
    class U8Fixed8Number(KaitaiStruct):
        """ICC u8Fixed8Number: stored raw as 2 bytes (8.8 fixed point, not decoded here)."""
        SEQ_FIELDS = ["number"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['number']['start'] = self._io.pos()
            self.number = self._io.read_bytes(2)
            self._debug['number']['end'] = self._io.pos()
    class U16Fixed16Number(KaitaiStruct):
        """ICC u16Fixed16Number: stored raw as 4 bytes (16.16 fixed point, not decoded here)."""
        SEQ_FIELDS = ["number"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['number']['start'] = self._io.pos()
            self.number = self._io.read_bytes(4)
            self._debug['number']['end'] = self._io.pos()
    class StandardIlluminantEncoding(KaitaiStruct):
        """A 4-byte big-endian code naming a standard illuminant (D50, D65, ...)."""
        class StandardIlluminantEncodings(Enum):
            unknown = 0
            d50 = 1
            d65 = 2
            d93 = 3
            f2 = 4
            d55 = 5
            a = 6
            equi_power = 7
            f8 = 8
        SEQ_FIELDS = ["standard_illuminant_encoding"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['standard_illuminant_encoding']['start'] = self._io.pos()
            self.standard_illuminant_encoding = KaitaiStream.resolve_enum(self._root.StandardIlluminantEncoding.StandardIlluminantEncodings, self._io.read_u4be())
            self._debug['standard_illuminant_encoding']['end'] = self._io.pos()
    class ProfileHeader(KaitaiStruct):
        """Fixed-layout ICC profile header: sizes, signatures, platform, flags,
        device info, rendering intent, PCS illuminant, creator and identifier.
        Enum values are the ASCII 4-char signatures read as big-endian u4.
        """
        class CmmSignatures(Enum):
            the_imaging_factory_cmm = 858931796
            agfa_cmm = 1094929747
            adobe_cmm = 1094992453
            color_gear_cmm = 1128484179
            logosync_cmm = 1147629395
            efi_cmm = 1162234144
            exact_scan_cmm = 1163411779
            fuji_film_cmm = 1179000864
            harlequin_rip_cmm = 1212370253
            heidelberg_cmm = 1212435744
            kodak_cmm = 1262701907
            konica_minolta_cmm = 1296256324
            device_link_cmm = 1380404563
            sample_icc_cmm = 1397310275
            mutoh_cmm = 1397311310
            toshiba_cmm = 1413696845
            color_gear_cmm_lite = 1430471501
            color_gear_cmm_c = 1430474067
            windows_color_system_cmm = 1464029984
            ware_to_go_cmm = 1465141024
            apple_cmm = 1634758764
            argyll_cms_cmm = 1634887532
            little_cms_cmm = 1818455411
            zoran_cmm = 2053320752
        class PrimaryPlatforms(Enum):
            apple_computer_inc = 1095782476
            microsoft_corporation = 1297303124
            silicon_graphics_inc = 1397180704
            sun_microsystems = 1398099543
        class ProfileClasses(Enum):
            abstract_profile = 1633842036
            device_link_profile = 1818848875
            display_device_profile = 1835955314
            named_color_profile = 1852662636
            output_device_profile = 1886549106
            input_device_profile = 1935896178
            color_space_profile = 1936744803
        class RenderingIntents(Enum):
            perceptual = 0
            media_relative_colorimetric = 1
            saturation = 2
            icc_absolute_colorimetric = 3
        class DataColourSpaces(Enum):
            two_colour = 843271250
            three_colour = 860048466
            four_colour = 876825682
            five_colour = 893602898
            six_colour = 910380114
            seven_colour = 927157330
            eight_colour = 943934546
            nine_colour = 960711762
            ten_colour = 1094929490
            eleven_colour = 1111706706
            twelve_colour = 1128483922
            cmy = 1129142560
            cmyk = 1129142603
            thirteen_colour = 1145261138
            fourteen_colour = 1162038354
            fifteen_colour = 1178815570
            gray = 1196573017
            hls = 1212961568
            hsv = 1213421088
            cielab_or_pcslab = 1281450528
            cieluv = 1282766368
            rgb = 1380401696
            nciexyz_or_pcsxyz = 1482250784
            ycbcr = 1497588338
            cieyxy = 1501067552
        SEQ_FIELDS = ["size", "preferred_cmm_type", "version", "device_class", "color_space", "pcs", "creation_date_time", "file_signature", "primary_platform", "profile_flags", "device_manufacturer", "device_model", "device_attributes", "rendering_intent", "nciexyz_values_of_illuminant_of_pcs", "creator", "identifier", "reserved_data"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            """Read all header fields in fixed order, recording stream offsets."""
            self._debug['size']['start'] = self._io.pos()
            self.size = self._io.read_u4be()
            self._debug['size']['end'] = self._io.pos()
            self._debug['preferred_cmm_type']['start'] = self._io.pos()
            self.preferred_cmm_type = KaitaiStream.resolve_enum(self._root.ProfileHeader.CmmSignatures, self._io.read_u4be())
            self._debug['preferred_cmm_type']['end'] = self._io.pos()
            self._debug['version']['start'] = self._io.pos()
            self.version = self._root.ProfileHeader.VersionField(self._io, self, self._root)
            self.version._read()
            self._debug['version']['end'] = self._io.pos()
            self._debug['device_class']['start'] = self._io.pos()
            self.device_class = KaitaiStream.resolve_enum(self._root.ProfileHeader.ProfileClasses, self._io.read_u4be())
            self._debug['device_class']['end'] = self._io.pos()
            self._debug['color_space']['start'] = self._io.pos()
            self.color_space = KaitaiStream.resolve_enum(self._root.ProfileHeader.DataColourSpaces, self._io.read_u4be())
            self._debug['color_space']['end'] = self._io.pos()
            self._debug['pcs']['start'] = self._io.pos()
            self.pcs = (self._io.read_bytes(4)).decode(u"ASCII")
            self._debug['pcs']['end'] = self._io.pos()
            self._debug['creation_date_time']['start'] = self._io.pos()
            self.creation_date_time = self._root.DateTimeNumber(self._io, self, self._root)
            self.creation_date_time._read()
            self._debug['creation_date_time']['end'] = self._io.pos()
            self._debug['file_signature']['start'] = self._io.pos()
            # Must be the literal bytes "acsp"; raises if the magic does not match.
            self.file_signature = self._io.ensure_fixed_contents(b"\x61\x63\x73\x70")
            self._debug['file_signature']['end'] = self._io.pos()
            self._debug['primary_platform']['start'] = self._io.pos()
            self.primary_platform = KaitaiStream.resolve_enum(self._root.ProfileHeader.PrimaryPlatforms, self._io.read_u4be())
            self._debug['primary_platform']['end'] = self._io.pos()
            self._debug['profile_flags']['start'] = self._io.pos()
            self.profile_flags = self._root.ProfileHeader.ProfileFlags(self._io, self, self._root)
            self.profile_flags._read()
            self._debug['profile_flags']['end'] = self._io.pos()
            self._debug['device_manufacturer']['start'] = self._io.pos()
            self.device_manufacturer = self._root.DeviceManufacturer(self._io, self, self._root)
            self.device_manufacturer._read()
            self._debug['device_manufacturer']['end'] = self._io.pos()
            self._debug['device_model']['start'] = self._io.pos()
            self.device_model = (self._io.read_bytes(4)).decode(u"ASCII")
            self._debug['device_model']['end'] = self._io.pos()
            self._debug['device_attributes']['start'] = self._io.pos()
            self.device_attributes = self._root.DeviceAttributes(self._io, self, self._root)
            self.device_attributes._read()
            self._debug['device_attributes']['end'] = self._io.pos()
            self._debug['rendering_intent']['start'] = self._io.pos()
            self.rendering_intent = KaitaiStream.resolve_enum(self._root.ProfileHeader.RenderingIntents, self._io.read_u4be())
            self._debug['rendering_intent']['end'] = self._io.pos()
            self._debug['nciexyz_values_of_illuminant_of_pcs']['start'] = self._io.pos()
            self.nciexyz_values_of_illuminant_of_pcs = self._root.XyzNumber(self._io, self, self._root)
            self.nciexyz_values_of_illuminant_of_pcs._read()
            self._debug['nciexyz_values_of_illuminant_of_pcs']['end'] = self._io.pos()
            self._debug['creator']['start'] = self._io.pos()
            self.creator = self._root.DeviceManufacturer(self._io, self, self._root)
            self.creator._read()
            self._debug['creator']['end'] = self._io.pos()
            self._debug['identifier']['start'] = self._io.pos()
            # 16-byte profile ID field.
            self.identifier = self._io.read_bytes(16)
            self._debug['identifier']['end'] = self._io.pos()
            self._debug['reserved_data']['start'] = self._io.pos()
            self.reserved_data = self._io.read_bytes(28)
            self._debug['reserved_data']['end'] = self._io.pos()

        class VersionField(KaitaiStruct):
            """Profile version: fixed major byte 0x04, then 4-bit minor and
            bug-fix nibbles, then two fixed zero reserved bytes."""
            SEQ_FIELDS = ["major", "minor", "bug_fix_level", "reserved"]

            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)

            def _read(self):
                self._debug['major']['start'] = self._io.pos()
                self.major = self._io.ensure_fixed_contents(b"\x04")
                self._debug['major']['end'] = self._io.pos()
                self._debug['minor']['start'] = self._io.pos()
                self.minor = self._io.read_bits_int(4)
                self._debug['minor']['end'] = self._io.pos()
                self._debug['bug_fix_level']['start'] = self._io.pos()
                self.bug_fix_level = self._io.read_bits_int(4)
                self._debug['bug_fix_level']['end'] = self._io.pos()
                # Re-sync to a byte boundary before reading byte-aligned fields.
                self._io.align_to_byte()
                self._debug['reserved']['start'] = self._io.pos()
                self.reserved = self._io.ensure_fixed_contents(b"\x00\x00")
                self._debug['reserved']['end'] = self._io.pos()

        class ProfileFlags(KaitaiStruct):
            """32-bit flag word: two leading bit flags plus 30 bits of other flags."""
            SEQ_FIELDS = ["embedded_profile", "profile_can_be_used_independently_of_embedded_colour_data", "other_flags"]

            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)

            def _read(self):
                self._debug['embedded_profile']['start'] = self._io.pos()
                self.embedded_profile = self._io.read_bits_int(1) != 0
                self._debug['embedded_profile']['end'] = self._io.pos()
                self._debug['profile_can_be_used_independently_of_embedded_colour_data']['start'] = self._io.pos()
                self.profile_can_be_used_independently_of_embedded_colour_data = self._io.read_bits_int(1) != 0
                self._debug['profile_can_be_used_independently_of_embedded_colour_data']['end'] = self._io.pos()
                self._debug['other_flags']['start'] = self._io.pos()
                self.other_flags = self._io.read_bits_int(30)
                self._debug['other_flags']['end'] = self._io.pos()
    class XyzNumber(KaitaiStruct):
        """ICC XYZNumber: three 4-byte values (raw bytes; s15Fixed16 not decoded here)."""
        SEQ_FIELDS = ["x", "y", "z"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['x']['start'] = self._io.pos()
            self.x = self._io.read_bytes(4)
            self._debug['x']['end'] = self._io.pos()
            self._debug['y']['start'] = self._io.pos()
            self.y = self._io.read_bytes(4)
            self._debug['y']['end'] = self._io.pos()
            self._debug['z']['start'] = self._io.pos()
            self.z = self._io.read_bytes(4)
            self._debug['z']['end'] = self._io.pos()
    class DateTimeNumber(KaitaiStruct):
        """ICC dateTimeNumber: six big-endian u2 fields (year..second)."""
        SEQ_FIELDS = ["year", "month", "day", "hour", "minute", "second"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['year']['start'] = self._io.pos()
            self.year = self._io.read_u2be()
            self._debug['year']['end'] = self._io.pos()
            self._debug['month']['start'] = self._io.pos()
            self.month = self._io.read_u2be()
            self._debug['month']['end'] = self._io.pos()
            self._debug['day']['start'] = self._io.pos()
            self.day = self._io.read_u2be()
            self._debug['day']['end'] = self._io.pos()
            self._debug['hour']['start'] = self._io.pos()
            self.hour = self._io.read_u2be()
            self._debug['hour']['end'] = self._io.pos()
            self._debug['minute']['start'] = self._io.pos()
            self.minute = self._io.read_u2be()
            self._debug['minute']['end'] = self._io.pos()
            self._debug['second']['start'] = self._io.pos()
            self.second = self._io.read_u2be()
            self._debug['second']['end'] = self._io.pos()
    class Response16Number(KaitaiStruct):
        """ICC response16Number: u4 device value, two reserved zero bytes,
        then an s15Fixed16 measurement value."""
        SEQ_FIELDS = ["number", "reserved", "measurement_value"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['number']['start'] = self._io.pos()
            self.number = self._io.read_u4be()
            self._debug['number']['end'] = self._io.pos()
            self._debug['reserved']['start'] = self._io.pos()
            self.reserved = self._io.ensure_fixed_contents(b"\x00\x00")
            self._debug['reserved']['end'] = self._io.pos()
            self._debug['measurement_value']['start'] = self._io.pos()
            self.measurement_value = self._root.S15Fixed16Number(self._io, self, self._root)
            self.measurement_value._read()
            self._debug['measurement_value']['end'] = self._io.pos()
    class U1Fixed15Number(KaitaiStruct):
        """ICC u1Fixed15Number: stored raw as 2 bytes (1.15 fixed point, not decoded here)."""
        SEQ_FIELDS = ["number"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['number']['start'] = self._io.pos()
            self.number = self._io.read_bytes(2)
            self._debug['number']['end'] = self._io.pos()
    class TagTable(KaitaiStruct):
        """Tag table: a u4 tag count followed by that many 12-byte tag definitions."""
        SEQ_FIELDS = ["tag_count", "tags"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['tag_count']['start'] = self._io.pos()
            self.tag_count = self._io.read_u4be()
            self._debug['tag_count']['end'] = self._io.pos()
            self._debug['tags']['start'] = self._io.pos()
            # Pre-size the list, then fill by index; per-element offsets go in debug['tags']['arr'].
            self.tags = [None] * (self.tag_count)
            for i in range(self.tag_count):
                if not 'arr' in self._debug['tags']:
                    self._debug['tags']['arr'] = []
                self._debug['tags']['arr'].append({'start': self._io.pos()})
                _t_tags = self._root.TagTable.TagDefinition(self._io, self, self._root)
                _t_tags._read()
                self.tags[i] = _t_tags
                self._debug['tags']['arr'][i]['end'] = self._io.pos()
            self._debug['tags']['end'] = self._io.pos()
        class TagDefinition(KaitaiStruct):
            """One tag-table entry: signature, offset and size of the tag's data element.
            Enum values are ASCII 4-char signatures read as big-endian u4."""
            class TagSignatures(Enum):
                a_to_b_0 = 1093812784
                a_to_b_1 = 1093812785
                a_to_b_2 = 1093812786
                b_to_a_0 = 1110589744
                b_to_a_1 = 1110589745
                b_to_a_2 = 1110589746
                b_to_d_0 = 1110590512
                b_to_d_1 = 1110590513
                b_to_d_2 = 1110590514
                b_to_d_3 = 1110590515
                d_to_b_0 = 1144144432
                d_to_b_1 = 1144144433
                d_to_b_2 = 1144144434
                d_to_b_3 = 1144144435
                blue_trc = 1649693251
                blue_matrix_column = 1649957210
                calibration_date_time = 1667329140
                chromatic_adaptation = 1667785060
                chromaticity = 1667789421
                colorimetric_intent_image_state = 1667852659
                colorant_table_out = 1668050804
                colorant_order = 1668051567
                colorant_table = 1668051572
                copyright = 1668313716
                profile_description = 1684370275
                device_model_desc = 1684890724
                device_mfg_desc = 1684893284
                green_trc = 1733579331
                green_matrix_column = 1733843290
                gamut = 1734438260
                gray_trc = 1800688195
                luminance = 1819635049
                measurement = 1835360627
                named_color_2 = 1852009522
                preview_0 = 1886545200
                preview_1 = 1886545201
                preview_2 = 1886545202
                profile_sequence = 1886610801
                profile_sequence_identifier = 1886611812
                red_trc = 1918128707
                red_matrix_column = 1918392666
                output_response = 1919251312
                perceptual_rendering_intent_gamut = 1919510320
                saturation_rendering_intent_gamut = 1919510322
                char_target = 1952543335
                technology = 1952801640
                viewing_conditions = 1986618743
                viewing_cond_desc = 1987405156
                media_white_point = 2004119668
            class TagTypeSignatures(Enum):
                xyz_type = 1482250784
                colorant_table_type = 1668051572
                curve_type = 1668641398
                data_type = 1684108385
                date_time_type = 1685350765
                multi_function_a_to_b_table_type = 1832993312
                multi_function_b_to_a_table_type = 1833058592
                measurement_type = 1835360627
                multi_function_table_with_one_byte_precision_type = 1835430961
                multi_function_table_with_two_byte_precision_type = 1835430962
                multi_localized_unicode_type = 1835824483
                multi_process_elements_type = 1836082548
                named_color_2_type = 1852009522
                parametric_curve_type = 1885434465
                profile_sequence_desc_type = 1886610801
                profile_sequence_identifier_type = 1886611812
                response_curve_set_16_type = 1919120178
                s_15_fixed_16_array_type = 1936077618
                signature_type = 1936287520
                text_type = 1952807028
                u_16_fixed_16_array_type = 1969632050
                u_int_8_array_type = 1969827896
                u_int_16_array_type = 1969828150
                u_int_32_array_type = 1969828658
                u_int_64_array_type = 1969829428
                viewing_conditions_type = 1986618743
            class MultiProcessElementsTypes(Enum):
                bacs_element_type = 1648444243
                clut_element_type = 1668052340
                one_dimensional_curves_type = 1668641382
                eacs_element_type = 1698775891
                matrix_element_type = 1835103334
                curve_set_element_table_type = 1835428980
                formula_curve_segments_type = 1885434470
                sampled_curve_segment_type = 1935764838
            SEQ_FIELDS = ["tag_signature", "offset_to_data_element", "size_of_data_element"]

            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)

            def _read(self):
                self._debug['tag_signature']['start'] = self._io.pos()
                self.tag_signature = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagSignatures, self._io.read_u4be())
                self._debug['tag_signature']['end'] = self._io.pos()
                self._debug['offset_to_data_element']['start'] = self._io.pos()
                # Offset is relative to the start of the profile, per tag-table layout.
                self.offset_to_data_element = self._io.read_u4be()
                self._debug['offset_to_data_element']['end'] = self._io.pos()
                self._debug['size_of_data_element']['start'] = self._io.pos()
                self.size_of_data_element = self._io.read_u4be()
                self._debug['size_of_data_element']['end'] = self._io.pos()
            class BlueMatrixColumnTag(KaitaiStruct):
                """blueMatrixColumn tag body: type signature then, for xyz_type only,
                the decoded XYZ data (other types leave tag_data unset)."""
                SEQ_FIELDS = ["tag_type", "tag_data"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['tag_type']['start'] = self._io.pos()
                    self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
                    self._debug['tag_type']['end'] = self._io.pos()
                    self._debug['tag_data']['start'] = self._io.pos()
                    _on = self.tag_type
                    if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
                        self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
                        self.tag_data._read()
                    self._debug['tag_data']['end'] = self._io.pos()
            class DeviceMfgDescTag(KaitaiStruct):
                """deviceMfgDesc tag body: type signature then, for
                multi_localized_unicode_type only, the decoded description."""
                SEQ_FIELDS = ["tag_type", "tag_data"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['tag_type']['start'] = self._io.pos()
                    self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
                    self._debug['tag_type']['end'] = self._io.pos()
                    self._debug['tag_data']['start'] = self._io.pos()
                    _on = self.tag_type
                    if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
                        self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
                        self.tag_data._read()
                    self._debug['tag_data']['end'] = self._io.pos()
            class NamedColor2Type(KaitaiStruct):
                """namedColor2Type body: counts, 32-byte zero-padded prefix/suffix
                name fields, then the named-colour records."""
                SEQ_FIELDS = ["reserved", "vendor_specific_flag", "count_of_named_colours", "number_of_device_coordinates_for_each_named_colour", "prefix_for_each_colour_name", "prefix_for_each_colour_name_padding", "suffix_for_each_colour_name", "suffix_for_each_colour_name_padding", "named_colour_definitions"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['reserved']['start'] = self._io.pos()
                    self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
                    self._debug['reserved']['end'] = self._io.pos()
                    self._debug['vendor_specific_flag']['start'] = self._io.pos()
                    self.vendor_specific_flag = self._io.read_u4be()
                    self._debug['vendor_specific_flag']['end'] = self._io.pos()
                    self._debug['count_of_named_colours']['start'] = self._io.pos()
                    self.count_of_named_colours = self._io.read_u4be()
                    self._debug['count_of_named_colours']['end'] = self._io.pos()
                    self._debug['number_of_device_coordinates_for_each_named_colour']['start'] = self._io.pos()
                    self.number_of_device_coordinates_for_each_named_colour = self._io.read_u4be()
                    self._debug['number_of_device_coordinates_for_each_named_colour']['end'] = self._io.pos()
                    self._debug['prefix_for_each_colour_name']['start'] = self._io.pos()
                    self.prefix_for_each_colour_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
                    self._debug['prefix_for_each_colour_name']['end'] = self._io.pos()
                    self._debug['prefix_for_each_colour_name_padding']['start'] = self._io.pos()
                    # NOTE(review): the list built here is overwritten by the loop body
                    # below, which rebinds the attribute to the last padding byte read —
                    # looks like a quirk of the generated code; fix belongs in the .ksy.
                    self.prefix_for_each_colour_name_padding = [None] * ((32 - len(self.prefix_for_each_colour_name)))
                    for i in range((32 - len(self.prefix_for_each_colour_name))):
                        if not 'arr' in self._debug['prefix_for_each_colour_name_padding']:
                            self._debug['prefix_for_each_colour_name_padding']['arr'] = []
                        self._debug['prefix_for_each_colour_name_padding']['arr'].append({'start': self._io.pos()})
                        self.prefix_for_each_colour_name_padding = self._io.ensure_fixed_contents(b"\x00")
                        self._debug['prefix_for_each_colour_name_padding']['arr'][i]['end'] = self._io.pos()
                    self._debug['prefix_for_each_colour_name_padding']['end'] = self._io.pos()
                    self._debug['suffix_for_each_colour_name']['start'] = self._io.pos()
                    self.suffix_for_each_colour_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
                    self._debug['suffix_for_each_colour_name']['end'] = self._io.pos()
                    self._debug['suffix_for_each_colour_name_padding']['start'] = self._io.pos()
                    # Same rebind-in-loop quirk as the prefix padding above.
                    self.suffix_for_each_colour_name_padding = [None] * ((32 - len(self.suffix_for_each_colour_name)))
                    for i in range((32 - len(self.suffix_for_each_colour_name))):
                        if not 'arr' in self._debug['suffix_for_each_colour_name_padding']:
                            self._debug['suffix_for_each_colour_name_padding']['arr'] = []
                        self._debug['suffix_for_each_colour_name_padding']['arr'].append({'start': self._io.pos()})
                        self.suffix_for_each_colour_name_padding = self._io.ensure_fixed_contents(b"\x00")
                        self._debug['suffix_for_each_colour_name_padding']['arr'][i]['end'] = self._io.pos()
                    self._debug['suffix_for_each_colour_name_padding']['end'] = self._io.pos()
                    self._debug['named_colour_definitions']['start'] = self._io.pos()
                    self.named_colour_definitions = [None] * (self.count_of_named_colours)
                    for i in range(self.count_of_named_colours):
                        if not 'arr' in self._debug['named_colour_definitions']:
                            self._debug['named_colour_definitions']['arr'] = []
                        self._debug['named_colour_definitions']['arr'].append({'start': self._io.pos()})
                        _t_named_colour_definitions = self._root.TagTable.TagDefinition.NamedColor2Type.NamedColourDefinition(self._io, self, self._root)
                        _t_named_colour_definitions._read()
                        self.named_colour_definitions[i] = _t_named_colour_definitions
                        self._debug['named_colour_definitions']['arr'][i]['end'] = self._io.pos()
                    self._debug['named_colour_definitions']['end'] = self._io.pos()

                class NamedColourDefinition(KaitaiStruct):
                    """One named-colour record: 32-byte zero-padded root name,
                    6 bytes of PCS coordinates, optional device coordinates."""
                    SEQ_FIELDS = ["root_name", "root_name_padding", "pcs_coordinates", "device_coordinates"]

                    def __init__(self, _io, _parent=None, _root=None):
                        self._io = _io
                        self._parent = _parent
                        self._root = _root if _root else self
                        self._debug = collections.defaultdict(dict)

                    def _read(self):
                        self._debug['root_name']['start'] = self._io.pos()
                        self.root_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
                        self._debug['root_name']['end'] = self._io.pos()
                        self._debug['root_name_padding']['start'] = self._io.pos()
                        # Rebind-in-loop quirk: the attribute ends up holding the last padding byte.
                        self.root_name_padding = [None] * ((32 - len(self.root_name)))
                        for i in range((32 - len(self.root_name))):
                            if not 'arr' in self._debug['root_name_padding']:
                                self._debug['root_name_padding']['arr'] = []
                            self._debug['root_name_padding']['arr'].append({'start': self._io.pos()})
                            self.root_name_padding = self._io.ensure_fixed_contents(b"\x00")
                            self._debug['root_name_padding']['arr'][i]['end'] = self._io.pos()
                        self._debug['root_name_padding']['end'] = self._io.pos()
                        self._debug['pcs_coordinates']['start'] = self._io.pos()
                        self.pcs_coordinates = self._io.read_bytes(6)
                        self._debug['pcs_coordinates']['end'] = self._io.pos()
                        if self._parent.number_of_device_coordinates_for_each_named_colour > 0:
                            self._debug['device_coordinates']['start'] = self._io.pos()
                            # NOTE(review): this loops count_of_named_colours times, but the
                            # ICC spec sizes device coordinates per record by
                            # number_of_device_coordinates_for_each_named_colour — looks
                            # wrong; confirm against the source .ksy before relying on it.
                            self.device_coordinates = [None] * (self._parent.count_of_named_colours)
                            for i in range(self._parent.count_of_named_colours):
                                if not 'arr' in self._debug['device_coordinates']:
                                    self._debug['device_coordinates']['arr'] = []
                                self._debug['device_coordinates']['arr'].append({'start': self._io.pos()})
                                self.device_coordinates[i] = self._io.read_u2be()
                                self._debug['device_coordinates']['arr'][i]['end'] = self._io.pos()
                            self._debug['device_coordinates']['end'] = self._io.pos()
            class ViewingConditionsTag(KaitaiStruct):
                """viewingConditions tag body: type signature then, for
                viewing_conditions_type only, the decoded data."""
                SEQ_FIELDS = ["tag_type", "tag_data"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['tag_type']['start'] = self._io.pos()
                    self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
                    self._debug['tag_type']['end'] = self._io.pos()
                    self._debug['tag_data']['start'] = self._io.pos()
                    _on = self.tag_type
                    if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.viewing_conditions_type:
                        self.tag_data = self._root.TagTable.TagDefinition.ViewingConditionsType(self._io, self, self._root)
                        self.tag_data._read()
                    self._debug['tag_data']['end'] = self._io.pos()
            class BlueTrcTag(KaitaiStruct):
                """blueTRC tag body: type signature then either a curveType or a
                parametricCurveType payload (other types leave tag_data unset)."""
                SEQ_FIELDS = ["tag_type", "tag_data"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['tag_type']['start'] = self._io.pos()
                    self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
                    self._debug['tag_type']['end'] = self._io.pos()
                    self._debug['tag_data']['start'] = self._io.pos()
                    _on = self.tag_type
                    if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.curve_type:
                        self.tag_data = self._root.TagTable.TagDefinition.CurveType(self._io, self, self._root)
                        self.tag_data._read()
                    elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.parametric_curve_type:
                        self.tag_data = self._root.TagTable.TagDefinition.ParametricCurveType(self._io, self, self._root)
                        self.tag_data._read()
                    self._debug['tag_data']['end'] = self._io.pos()
            class ResponseCurveSet16Type(KaitaiStruct):
                """responseCurveSet16Type: channel/measurement counts, per-measurement
                structure offsets, then the raw remainder of the stream."""
                SEQ_FIELDS = ["reserved", "number_of_channels", "count_of_measurement_types", "response_curve_structure_offsets", "response_curve_structures"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['reserved']['start'] = self._io.pos()
                    self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
                    self._debug['reserved']['end'] = self._io.pos()
                    self._debug['number_of_channels']['start'] = self._io.pos()
                    self.number_of_channels = self._io.read_u2be()
                    self._debug['number_of_channels']['end'] = self._io.pos()
                    self._debug['count_of_measurement_types']['start'] = self._io.pos()
                    self.count_of_measurement_types = self._io.read_u2be()
                    self._debug['count_of_measurement_types']['end'] = self._io.pos()
                    self._debug['response_curve_structure_offsets']['start'] = self._io.pos()
                    self.response_curve_structure_offsets = [None] * (self.count_of_measurement_types)
                    for i in range(self.count_of_measurement_types):
                        if not 'arr' in self._debug['response_curve_structure_offsets']:
                            self._debug['response_curve_structure_offsets']['arr'] = []
                        self._debug['response_curve_structure_offsets']['arr'].append({'start': self._io.pos()})
                        self.response_curve_structure_offsets[i] = self._io.read_u4be()
                        self._debug['response_curve_structure_offsets']['arr'][i]['end'] = self._io.pos()
                    self._debug['response_curve_structure_offsets']['end'] = self._io.pos()
                    self._debug['response_curve_structures']['start'] = self._io.pos()
                    # Structures are not decoded further; the rest of the substream is kept raw.
                    self.response_curve_structures = self._io.read_bytes_full()
                    self._debug['response_curve_structures']['end'] = self._io.pos()
            class CurveType(KaitaiStruct):
                """curveType: entry count, then either a list of values (count > 1)
                or a single value (count == 1); count == 0 reads nothing."""
                SEQ_FIELDS = ["reserved", "number_of_entries", "curve_values", "curve_value"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['reserved']['start'] = self._io.pos()
                    self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
                    self._debug['reserved']['end'] = self._io.pos()
                    self._debug['number_of_entries']['start'] = self._io.pos()
                    self.number_of_entries = self._io.read_u4be()
                    self._debug['number_of_entries']['end'] = self._io.pos()
                    if self.number_of_entries > 1:
                        self._debug['curve_values']['start'] = self._io.pos()
                        self.curve_values = [None] * (self.number_of_entries)
                        for i in range(self.number_of_entries):
                            if not 'arr' in self._debug['curve_values']:
                                self._debug['curve_values']['arr'] = []
                            self._debug['curve_values']['arr'].append({'start': self._io.pos()})
                            # NOTE(review): entries read as u4 here, single entry as u1 below —
                            # widths differ from the spec's uInt16Number; confirm against the .ksy.
                            self.curve_values[i] = self._io.read_u4be()
                            self._debug['curve_values']['arr'][i]['end'] = self._io.pos()
                        self._debug['curve_values']['end'] = self._io.pos()
                    if self.number_of_entries == 1:
                        self._debug['curve_value']['start'] = self._io.pos()
                        self.curve_value = self._io.read_u1()
                        self._debug['curve_value']['end'] = self._io.pos()
            class SaturationRenderingIntentGamutTag(KaitaiStruct):
                """saturationRenderingIntentGamut tag body: type signature then, for
                signature_type only, the decoded signature."""
                SEQ_FIELDS = ["tag_type", "tag_data"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['tag_type']['start'] = self._io.pos()
                    self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
                    self._debug['tag_type']['end'] = self._io.pos()
                    self._debug['tag_data']['start'] = self._io.pos()
                    _on = self.tag_type
                    if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
                        self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
                        self.tag_data._read()
                    self._debug['tag_data']['end'] = self._io.pos()
            class XyzType(KaitaiStruct):
                """XYZType: reserved zeros, then XYZNumbers repeated until end of stream."""
                SEQ_FIELDS = ["reserved", "values"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['reserved']['start'] = self._io.pos()
                    self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
                    self._debug['reserved']['end'] = self._io.pos()
                    self._debug['values']['start'] = self._io.pos()
                    self.values = []
                    i = 0
                    # repeat: eos — keep reading XYZNumbers until the substream is exhausted.
                    while not self._io.is_eof():
                        if not 'arr' in self._debug['values']:
                            self._debug['values']['arr'] = []
                        self._debug['values']['arr'].append({'start': self._io.pos()})
                        _t_values = self._root.XyzNumber(self._io, self, self._root)
                        _t_values._read()
                        self.values.append(_t_values)
                        self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
                        i += 1
                    self._debug['values']['end'] = self._io.pos()
class Lut8Type(KaitaiStruct):
    """lut8-style multi-function table: channel/grid counts, nine encoded
    parameters, then 1-byte-precision input tables, CLUT, and output
    tables.

    Fix over the generated code: the CLUT byte count now uses ``**``
    (exponentiation) instead of ``^`` (bitwise XOR in Python).  The CLUT
    holds grid_points ** input_channels nodes, one byte per output
    channel (ICC.1 lut8Type), so XOR consumed far too few bytes and left
    the stream misaligned for the following fields.
    """
    SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "number_of_clut_grid_points", "padding", "encoded_e_parameters", "number_of_input_table_entries", "number_of_output_table_entries", "input_tables", "clut_values", "output_tables"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field start/end stream offsets, populated by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_input_channels']['start'] = self._io.pos()
        self.number_of_input_channels = self._io.read_u1()
        self._debug['number_of_input_channels']['end'] = self._io.pos()
        self._debug['number_of_output_channels']['start'] = self._io.pos()
        self.number_of_output_channels = self._io.read_u1()
        self._debug['number_of_output_channels']['end'] = self._io.pos()
        self._debug['number_of_clut_grid_points']['start'] = self._io.pos()
        self.number_of_clut_grid_points = self._io.read_u1()
        self._debug['number_of_clut_grid_points']['end'] = self._io.pos()
        self._debug['padding']['start'] = self._io.pos()
        self.padding = self._io.ensure_fixed_contents(b"\x00")
        self._debug['padding']['end'] = self._io.pos()
        self._debug['encoded_e_parameters']['start'] = self._io.pos()
        # Nine raw signed 32-bit big-endian values (presumably the 3x3
        # e-matrix of the ICC spec, stored unconverted — confirm).
        self.encoded_e_parameters = [None] * (9)
        for i in range(9):
            if not 'arr' in self._debug['encoded_e_parameters']:
                self._debug['encoded_e_parameters']['arr'] = []
            self._debug['encoded_e_parameters']['arr'].append({'start': self._io.pos()})
            self.encoded_e_parameters[i] = self._io.read_s4be()
            self._debug['encoded_e_parameters']['arr'][i]['end'] = self._io.pos()
        self._debug['encoded_e_parameters']['end'] = self._io.pos()
        # NOTE(review): ICC.1 lut8Type has fixed 256-entry tables and no
        # entry-count fields; these two u4be reads look suspect against
        # the spec — confirm against the source .ksy.
        self._debug['number_of_input_table_entries']['start'] = self._io.pos()
        self.number_of_input_table_entries = self._io.read_u4be()
        self._debug['number_of_input_table_entries']['end'] = self._io.pos()
        self._debug['number_of_output_table_entries']['start'] = self._io.pos()
        self.number_of_output_table_entries = self._io.read_u4be()
        self._debug['number_of_output_table_entries']['end'] = self._io.pos()
        self._debug['input_tables']['start'] = self._io.pos()
        # 256 one-byte entries per input channel.
        self.input_tables = self._io.read_bytes((256 * self.number_of_input_channels))
        self._debug['input_tables']['end'] = self._io.pos()
        self._debug['clut_values']['start'] = self._io.pos()
        # BUGFIX: was `grid_points ^ input_channels` (bitwise XOR); the
        # CLUT is grid_points ** input_channels nodes, one byte per
        # output channel.
        self.clut_values = self._io.read_bytes(((self.number_of_clut_grid_points ** self.number_of_input_channels) * self.number_of_output_channels))
        self._debug['clut_values']['end'] = self._io.pos()
        self._debug['output_tables']['start'] = self._io.pos()
        # 256 one-byte entries per output channel.
        self.output_tables = self._io.read_bytes((256 * self.number_of_output_channels))
        self._debug['output_tables']['end'] = self._io.pos()
class BToA2Tag(KaitaiStruct):
    """'B2A2' tag wrapper: a 4-byte tag-type signature, then a lut8,
    lut16, or lutBToA payload; tag_data stays unset for any other
    signature."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        sig = defs.TagTypeSignatures
        # Dispatch table replaces the generated if/elif chain.
        payload_types = {
            sig.multi_function_table_with_one_byte_precision_type: defs.Lut8Type,
            sig.multi_function_table_with_two_byte_precision_type: defs.Lut16Type,
            sig.multi_function_b_to_a_table_type: defs.LutBToAType,
        }
        payload_cls = payload_types.get(self.tag_type)
        if payload_cls is not None:
            self.tag_data = payload_cls(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class LutAToBType(KaitaiStruct):
    """lutAToB-style table header: channel counts plus five offsets to the
    B-curves, matrix, M-curves, CLUT and A-curves, followed by the rest of
    the stream as an opaque blob (the offsets index into that blob; the
    sub-structures are not parsed here)."""
    SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "padding", "offset_to_first_b_curve", "offset_to_matrix", "offset_to_first_m_curve", "offset_to_clut", "offset_to_first_a_curve", "data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field start/end stream offsets, populated by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        # Reserved field must be four zero bytes; raises on mismatch.
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_input_channels']['start'] = self._io.pos()
        self.number_of_input_channels = self._io.read_u1()
        self._debug['number_of_input_channels']['end'] = self._io.pos()
        self._debug['number_of_output_channels']['start'] = self._io.pos()
        self.number_of_output_channels = self._io.read_u1()
        self._debug['number_of_output_channels']['end'] = self._io.pos()
        self._debug['padding']['start'] = self._io.pos()
        self.padding = self._io.ensure_fixed_contents(b"\x00\x00")
        self._debug['padding']['end'] = self._io.pos()
        self._debug['offset_to_first_b_curve']['start'] = self._io.pos()
        self.offset_to_first_b_curve = self._io.read_u4be()
        self._debug['offset_to_first_b_curve']['end'] = self._io.pos()
        self._debug['offset_to_matrix']['start'] = self._io.pos()
        self.offset_to_matrix = self._io.read_u4be()
        self._debug['offset_to_matrix']['end'] = self._io.pos()
        self._debug['offset_to_first_m_curve']['start'] = self._io.pos()
        self.offset_to_first_m_curve = self._io.read_u4be()
        self._debug['offset_to_first_m_curve']['end'] = self._io.pos()
        self._debug['offset_to_clut']['start'] = self._io.pos()
        self.offset_to_clut = self._io.read_u4be()
        self._debug['offset_to_clut']['end'] = self._io.pos()
        self._debug['offset_to_first_a_curve']['start'] = self._io.pos()
        self.offset_to_first_a_curve = self._io.read_u4be()
        self._debug['offset_to_first_a_curve']['end'] = self._io.pos()
        self._debug['data']['start'] = self._io.pos()
        # Everything remaining in the stream, unparsed.
        self.data = self._io.read_bytes_full()
        self._debug['data']['end'] = self._io.pos()
class BToA0Tag(KaitaiStruct):
    """'B2A0' tag wrapper: a 4-byte tag-type signature, then a lut8,
    lut16, or lutBToA payload; tag_data stays unset for any other
    signature."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        sig = defs.TagTypeSignatures
        # Dispatch table replaces the generated if/elif chain.
        payload_types = {
            sig.multi_function_table_with_one_byte_precision_type: defs.Lut8Type,
            sig.multi_function_table_with_two_byte_precision_type: defs.Lut16Type,
            sig.multi_function_b_to_a_table_type: defs.LutBToAType,
        }
        payload_cls = payload_types.get(self.tag_type)
        if payload_cls is not None:
            self.tag_data = payload_cls(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class MediaWhitePointTag(KaitaiStruct):
    """Media white point tag wrapper: a 4-byte tag-type signature, then
    an XYZ payload when the signature is xyz_type; otherwise tag_data
    stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.xyz_type:
            self.tag_data = defs.XyzType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class Lut16Type(KaitaiStruct):
    """lut16-style multi-function table: channel/grid counts, nine encoded
    parameters, then 2-byte-precision input tables, CLUT, and output
    tables.

    Fix over the generated code: the CLUT byte count now uses ``**``
    (exponentiation) instead of ``^`` (bitwise XOR in Python).  The CLUT
    holds grid_points ** input_channels nodes, two bytes per output
    channel (ICC.1 lut16Type), so XOR consumed far too few bytes and left
    the stream misaligned for the following fields.
    """
    SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "number_of_clut_grid_points", "padding", "encoded_e_parameters", "number_of_input_table_entries", "number_of_output_table_entries", "input_tables", "clut_values", "output_tables"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field start/end stream offsets, populated by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_input_channels']['start'] = self._io.pos()
        self.number_of_input_channels = self._io.read_u1()
        self._debug['number_of_input_channels']['end'] = self._io.pos()
        self._debug['number_of_output_channels']['start'] = self._io.pos()
        self.number_of_output_channels = self._io.read_u1()
        self._debug['number_of_output_channels']['end'] = self._io.pos()
        self._debug['number_of_clut_grid_points']['start'] = self._io.pos()
        self.number_of_clut_grid_points = self._io.read_u1()
        self._debug['number_of_clut_grid_points']['end'] = self._io.pos()
        self._debug['padding']['start'] = self._io.pos()
        self.padding = self._io.ensure_fixed_contents(b"\x00")
        self._debug['padding']['end'] = self._io.pos()
        self._debug['encoded_e_parameters']['start'] = self._io.pos()
        # Nine raw signed 32-bit big-endian values (presumably the 3x3
        # e-matrix of the ICC spec, stored unconverted — confirm).
        self.encoded_e_parameters = [None] * (9)
        for i in range(9):
            if not 'arr' in self._debug['encoded_e_parameters']:
                self._debug['encoded_e_parameters']['arr'] = []
            self._debug['encoded_e_parameters']['arr'].append({'start': self._io.pos()})
            self.encoded_e_parameters[i] = self._io.read_s4be()
            self._debug['encoded_e_parameters']['arr'][i]['end'] = self._io.pos()
        self._debug['encoded_e_parameters']['end'] = self._io.pos()
        # NOTE(review): ICC.1 lut16Type stores these two counts as u2be,
        # not u4be — confirm against the source .ksy.
        self._debug['number_of_input_table_entries']['start'] = self._io.pos()
        self.number_of_input_table_entries = self._io.read_u4be()
        self._debug['number_of_input_table_entries']['end'] = self._io.pos()
        self._debug['number_of_output_table_entries']['start'] = self._io.pos()
        self.number_of_output_table_entries = self._io.read_u4be()
        self._debug['number_of_output_table_entries']['end'] = self._io.pos()
        self._debug['input_tables']['start'] = self._io.pos()
        # Two bytes per entry, number_of_input_table_entries entries per
        # input channel.
        self.input_tables = self._io.read_bytes(((2 * self.number_of_input_channels) * self.number_of_input_table_entries))
        self._debug['input_tables']['end'] = self._io.pos()
        self._debug['clut_values']['start'] = self._io.pos()
        # BUGFIX: was `grid_points ^ input_channels` (bitwise XOR); the
        # CLUT is grid_points ** input_channels nodes, two bytes per
        # output channel.
        self.clut_values = self._io.read_bytes(((2 * (self.number_of_clut_grid_points ** self.number_of_input_channels)) * self.number_of_output_channels))
        self._debug['clut_values']['end'] = self._io.pos()
        self._debug['output_tables']['start'] = self._io.pos()
        # Two bytes per entry, number_of_output_table_entries entries per
        # output channel.
        self.output_tables = self._io.read_bytes(((2 * self.number_of_output_channels) * self.number_of_output_table_entries))
        self._debug['output_tables']['end'] = self._io.pos()
class PerceptualRenderingIntentGamutTag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a SignatureType
    payload when the signature is signature_type; otherwise tag_data
    stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.signature_type:
            self.tag_data = defs.SignatureType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class U16Fixed16ArrayType(KaitaiStruct):
    """u16Fixed16-array tag body: a 4-byte reserved field followed by
    U16Fixed16Number values repeated until the end of the (sub)stream."""
    SEQ_FIELDS = ["reserved", "values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field start/end stream offsets, populated by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        # Reserved field must be four zero bytes; raises on mismatch.
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['values']['start'] = self._io.pos()
        self.values = []
        i = 0
        # Repeat-until-EOF: one U16Fixed16Number per iteration, with
        # per-element offset bookkeeping in _debug['values']['arr'].
        while not self._io.is_eof():
            if not 'arr' in self._debug['values']:
                self._debug['values']['arr'] = []
            self._debug['values']['arr'].append({'start': self._io.pos()})
            _t_values = self._root.U16Fixed16Number(self._io, self, self._root)
            _t_values._read()
            self.values.append(_t_values)
            self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
            i += 1
        self._debug['values']['end'] = self._io.pos()
class ColorantTableOutTag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a ColorantTableType
    payload when the signature is colorant_table_type; otherwise tag_data
    stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.colorant_table_type:
            self.tag_data = defs.ColorantTableType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class MeasurementTag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a MeasurementType
    payload when the signature is measurement_type; otherwise tag_data
    stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.measurement_type:
            self.tag_data = defs.MeasurementType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class ProfileSequenceTag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a
    ProfileSequenceDescType payload when the signature is
    profile_sequence_desc_type; otherwise tag_data stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.profile_sequence_desc_type:
            self.tag_data = defs.ProfileSequenceDescType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class TechnologyTag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a SignatureType
    payload when the signature is signature_type; otherwise tag_data
    stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.signature_type:
            self.tag_data = defs.SignatureType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class AToB0Tag(KaitaiStruct):
    """'A2B0' tag wrapper: a 4-byte tag-type signature, then a lut8,
    lut16, or lutAToB payload; tag_data stays unset for any other
    signature."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        sig = defs.TagTypeSignatures
        # Dispatch table replaces the generated if/elif chain.
        payload_types = {
            sig.multi_function_table_with_one_byte_precision_type: defs.Lut8Type,
            sig.multi_function_table_with_two_byte_precision_type: defs.Lut16Type,
            sig.multi_function_a_to_b_table_type: defs.LutAToBType,
        }
        payload_cls = payload_types.get(self.tag_type)
        if payload_cls is not None:
            self.tag_data = payload_cls(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class DToB0Tag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a
    MultiProcessElementsType payload when the signature is
    multi_process_elements_type; otherwise tag_data stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.multi_process_elements_type:
            self.tag_data = defs.MultiProcessElementsType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class OutputResponseTag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a
    ResponseCurveSet16Type payload when the signature is
    response_curve_set_16_type; otherwise tag_data stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.response_curve_set_16_type:
            self.tag_data = defs.ResponseCurveSet16Type(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class GreenMatrixColumnTag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then an XYZ payload when
    the signature is xyz_type; otherwise tag_data stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.xyz_type:
            self.tag_data = defs.XyzType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class ProfileDescriptionTag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a
    MultiLocalizedUnicodeType payload when the signature is
    multi_localized_unicode_type; otherwise tag_data stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.multi_localized_unicode_type:
            self.tag_data = defs.MultiLocalizedUnicodeType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class Preview1Tag(KaitaiStruct):
    """'pre1' tag wrapper: a 4-byte tag-type signature, then a lut8,
    lut16, or lutBToA payload; tag_data stays unset for any other
    signature."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        sig = defs.TagTypeSignatures
        # Dispatch table replaces the generated if/elif chain.
        payload_types = {
            sig.multi_function_table_with_one_byte_precision_type: defs.Lut8Type,
            sig.multi_function_table_with_two_byte_precision_type: defs.Lut16Type,
            sig.multi_function_b_to_a_table_type: defs.LutBToAType,
        }
        payload_cls = payload_types.get(self.tag_type)
        if payload_cls is not None:
            self.tag_data = payload_cls(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class RedTrcTag(KaitaiStruct):
    """Red tone-reproduction-curve tag wrapper: a 4-byte tag-type
    signature, then a curve or parametric-curve payload; tag_data stays
    unset for any other signature."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        sig = defs.TagTypeSignatures
        # Dispatch table replaces the generated if/elif chain.
        payload_types = {
            sig.curve_type: defs.CurveType,
            sig.parametric_curve_type: defs.ParametricCurveType,
        }
        payload_cls = payload_types.get(self.tag_type)
        if payload_cls is not None:
            self.tag_data = payload_cls(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class BToD0Tag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a
    MultiProcessElementsType payload when the signature is
    multi_process_elements_type; otherwise tag_data stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.multi_process_elements_type:
            self.tag_data = defs.MultiProcessElementsType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class DToB1Tag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a
    MultiProcessElementsType payload when the signature is
    multi_process_elements_type; otherwise tag_data stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.multi_process_elements_type:
            self.tag_data = defs.MultiProcessElementsType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class BToA1Tag(KaitaiStruct):
    """'B2A1' tag wrapper: a 4-byte tag-type signature, then a lut8,
    lut16, or lutBToA payload; tag_data stays unset for any other
    signature."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        sig = defs.TagTypeSignatures
        # Dispatch table replaces the generated if/elif chain.
        payload_types = {
            sig.multi_function_table_with_one_byte_precision_type: defs.Lut8Type,
            sig.multi_function_table_with_two_byte_precision_type: defs.Lut16Type,
            sig.multi_function_b_to_a_table_type: defs.LutBToAType,
        }
        payload_cls = payload_types.get(self.tag_type)
        if payload_cls is not None:
            self.tag_data = payload_cls(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class ParametricCurveType(KaitaiStruct):
    """parametricCurveType tag body: a function selector followed by that
    function's parameter set.

    All parameter values are read as raw signed 32-bit big-endian ints
    (presumably s15Fixed16 fixed-point per the ICC spec; stored
    unconverted — confirm before interpreting).
    """
    class ParametricCurveTypeFunctions(Enum):
        # Function selector: which curve formula the parameters describe.
        y_equals_x_to_power_of_g = 0
        cie_122_1996 = 1
        iec_61966_3 = 2
        iec_61966_2_1 = 3
        y_equals_ob_ax_plus_b_cb_to_power_of_g_plus_c = 4
    SEQ_FIELDS = ["reserved", "function_type", "reserved_2", "parameters"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field start/end stream offsets, populated by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        # Reserved field must be four zero bytes; raises on mismatch.
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['function_type']['start'] = self._io.pos()
        self.function_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions, self._io.read_u2be())
        self._debug['function_type']['end'] = self._io.pos()
        self._debug['reserved_2']['start'] = self._io.pos()
        self.reserved_2 = self._io.ensure_fixed_contents(b"\x00\x00")
        self._debug['reserved_2']['end'] = self._io.pos()
        self._debug['parameters']['start'] = self._io.pos()
        # Each function variant carries a different number of parameters;
        # an unrecognized selector leaves `parameters` unset.
        _on = self.function_type
        if _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.cie_122_1996:
            self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsCie1221996(self._io, self, self._root)
            self.parameters._read()
        elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.iec_61966_3:
            self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsIec619663(self._io, self, self._root)
            self.parameters._read()
        elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.iec_61966_2_1:
            self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsIec6196621(self._io, self, self._root)
            self.parameters._read()
        elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.y_equals_ob_ax_plus_b_cb_to_power_of_g_plus_c:
            self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsYEqualsObAxPlusBCbToPowerOfGPlusC(self._io, self, self._root)
            self.parameters._read()
        elif _on == self._root.TagTable.TagDefinition.ParametricCurveType.ParametricCurveTypeFunctions.y_equals_x_to_power_of_g:
            self.parameters = self._root.TagTable.TagDefinition.ParametricCurveType.ParamsYEqualsXToPowerOfG(self._io, self, self._root)
            self.parameters._read()
        self._debug['parameters']['end'] = self._io.pos()
    class ParamsIec619663(KaitaiStruct):
        """Four parameters (g, a, b, c) for the IEC 61966-3 curve."""
        SEQ_FIELDS = ["g", "a", "b", "c"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['g']['start'] = self._io.pos()
            self.g = self._io.read_s4be()
            self._debug['g']['end'] = self._io.pos()
            self._debug['a']['start'] = self._io.pos()
            self.a = self._io.read_s4be()
            self._debug['a']['end'] = self._io.pos()
            self._debug['b']['start'] = self._io.pos()
            self.b = self._io.read_s4be()
            self._debug['b']['end'] = self._io.pos()
            self._debug['c']['start'] = self._io.pos()
            self.c = self._io.read_s4be()
            self._debug['c']['end'] = self._io.pos()
    class ParamsIec6196621(KaitaiStruct):
        """Five parameters (g, a, b, c, d) for the IEC 61966-2-1 curve."""
        SEQ_FIELDS = ["g", "a", "b", "c", "d"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['g']['start'] = self._io.pos()
            self.g = self._io.read_s4be()
            self._debug['g']['end'] = self._io.pos()
            self._debug['a']['start'] = self._io.pos()
            self.a = self._io.read_s4be()
            self._debug['a']['end'] = self._io.pos()
            self._debug['b']['start'] = self._io.pos()
            self.b = self._io.read_s4be()
            self._debug['b']['end'] = self._io.pos()
            self._debug['c']['start'] = self._io.pos()
            self.c = self._io.read_s4be()
            self._debug['c']['end'] = self._io.pos()
            self._debug['d']['start'] = self._io.pos()
            self.d = self._io.read_s4be()
            self._debug['d']['end'] = self._io.pos()
    class ParamsYEqualsXToPowerOfG(KaitaiStruct):
        """Single parameter (g) for the Y = X**g curve."""
        SEQ_FIELDS = ["g"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['g']['start'] = self._io.pos()
            self.g = self._io.read_s4be()
            self._debug['g']['end'] = self._io.pos()
    class ParamsYEqualsObAxPlusBCbToPowerOfGPlusC(KaitaiStruct):
        """Seven parameters (g, a, b, c, d, e, f) for the
        Y = (a*X + b)**g + c family of curves."""
        SEQ_FIELDS = ["g", "a", "b", "c", "d", "e", "f"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['g']['start'] = self._io.pos()
            self.g = self._io.read_s4be()
            self._debug['g']['end'] = self._io.pos()
            self._debug['a']['start'] = self._io.pos()
            self.a = self._io.read_s4be()
            self._debug['a']['end'] = self._io.pos()
            self._debug['b']['start'] = self._io.pos()
            self.b = self._io.read_s4be()
            self._debug['b']['end'] = self._io.pos()
            self._debug['c']['start'] = self._io.pos()
            self.c = self._io.read_s4be()
            self._debug['c']['end'] = self._io.pos()
            self._debug['d']['start'] = self._io.pos()
            self.d = self._io.read_s4be()
            self._debug['d']['end'] = self._io.pos()
            self._debug['e']['start'] = self._io.pos()
            self.e = self._io.read_s4be()
            self._debug['e']['end'] = self._io.pos()
            self._debug['f']['start'] = self._io.pos()
            self.f = self._io.read_s4be()
            self._debug['f']['end'] = self._io.pos()
    class ParamsCie1221996(KaitaiStruct):
        """Three parameters (g, a, b) for the CIE 122-1996 curve."""
        SEQ_FIELDS = ["g", "a", "b"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['g']['start'] = self._io.pos()
            self.g = self._io.read_s4be()
            self._debug['g']['end'] = self._io.pos()
            self._debug['a']['start'] = self._io.pos()
            self.a = self._io.read_s4be()
            self._debug['a']['end'] = self._io.pos()
            self._debug['b']['start'] = self._io.pos()
            self.b = self._io.read_s4be()
            self._debug['b']['end'] = self._io.pos()
class ChromaticityTag(KaitaiStruct):
    """Tag wrapper: a 4-byte tag-type signature, then a ChromaticityType
    payload when the signature is chromaticity_type; otherwise tag_data
    stays unset."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field stream offsets, filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        if self.tag_type == defs.TagTypeSignatures.chromaticity_type:
            self.tag_data = defs.ChromaticityType(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class ChromaticAdaptationTag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is s15Fixed16ArrayType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.s_15_fixed_16_array_type:
            self.tag_data = self._root.TagTable.TagDefinition.S15Fixed16ArrayType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class MeasurementType(KaitaiStruct):
    """measurementType payload: standard observer, backing XYZ tristimulus,
    measurement geometry, flare and standard-illuminant encodings."""
    class StandardObserverEncodings(Enum):
        unknown = 0
        cie_1931_standard_colorimetric_observer = 1
        cie_1964_standard_colorimetric_observer = 2
    class MeasurementGeometryEncodings(Enum):
        unknown = 0
        zero_degrees_to_45_degrees_or_45_degrees_to_zero_degrees = 1
        zero_degrees_to_d_degrees_or_d_degrees_to_zero_degrees = 2
    class MeasurementFlareEncodings(Enum):
        zero_percent = 0
        # 65536 == 0x10000, i.e. 1.0 (100 %) in u16Fixed16 representation.
        one_hundred_percent = 65536
    SEQ_FIELDS = ["reserved", "standard_observer_encoding", "nciexyz_tristimulus_values_for_measurement_backing", "measurement_geometry_encoding", "measurement_flare_encoding", "standard_illuminant_encoding"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field byte ranges, populated by _read() for debug consumers.
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        # Reserved word must be all zeros; raises if the stream disagrees.
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['standard_observer_encoding']['start'] = self._io.pos()
        self.standard_observer_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.MeasurementType.StandardObserverEncodings, self._io.read_u4be())
        self._debug['standard_observer_encoding']['end'] = self._io.pos()
        self._debug['nciexyz_tristimulus_values_for_measurement_backing']['start'] = self._io.pos()
        self.nciexyz_tristimulus_values_for_measurement_backing = self._root.XyzNumber(self._io, self, self._root)
        self.nciexyz_tristimulus_values_for_measurement_backing._read()
        self._debug['nciexyz_tristimulus_values_for_measurement_backing']['end'] = self._io.pos()
        self._debug['measurement_geometry_encoding']['start'] = self._io.pos()
        self.measurement_geometry_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.MeasurementType.MeasurementGeometryEncodings, self._io.read_u4be())
        self._debug['measurement_geometry_encoding']['end'] = self._io.pos()
        self._debug['measurement_flare_encoding']['start'] = self._io.pos()
        self.measurement_flare_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.MeasurementType.MeasurementFlareEncodings, self._io.read_u4be())
        self._debug['measurement_flare_encoding']['end'] = self._io.pos()
        self._debug['standard_illuminant_encoding']['start'] = self._io.pos()
        self.standard_illuminant_encoding = self._root.StandardIlluminantEncoding(self._io, self, self._root)
        self.standard_illuminant_encoding._read()
        self._debug['standard_illuminant_encoding']['end'] = self._io.pos()
class TextType(KaitaiStruct):
    """textType payload: reserved word, then an ASCII string spanning the
    rest of the substream, truncated at the first NUL byte."""
    SEQ_FIELDS = ["reserved", "value"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['value']['start'] = self._io.pos()
        # Consumes to end of stream; terminator-and-beyond is dropped from
        # the decoded value but still consumed from the stream.
        self.value = (KaitaiStream.bytes_terminate(self._io.read_bytes_full(), 0, False)).decode(u"ASCII")
        self._debug['value']['end'] = self._io.pos()
class ProfileSequenceIdentifierType(KaitaiStruct):
    """profileSequenceIdentifierType payload: a count, a positions table,
    then that many (profile ID + description) structures."""
    SEQ_FIELDS = ["reserved", "number_of_structures", "positions_table", "profile_identifiers"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field byte ranges, populated by _read() for debug consumers.
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_structures']['start'] = self._io.pos()
        self.number_of_structures = self._io.read_u4be()
        self._debug['number_of_structures']['end'] = self._io.pos()
        self._debug['positions_table']['start'] = self._io.pos()
        # One PositionNumber (offset/size pair) per structure.
        self.positions_table = [None] * (self.number_of_structures)
        for i in range(self.number_of_structures):
            if not 'arr' in self._debug['positions_table']:
                self._debug['positions_table']['arr'] = []
            self._debug['positions_table']['arr'].append({'start': self._io.pos()})
            _t_positions_table = self._root.PositionNumber(self._io, self, self._root)
            _t_positions_table._read()
            self.positions_table[i] = _t_positions_table
            self._debug['positions_table']['arr'][i]['end'] = self._io.pos()
        self._debug['positions_table']['end'] = self._io.pos()
        self._debug['profile_identifiers']['start'] = self._io.pos()
        self.profile_identifiers = [None] * (self.number_of_structures)
        for i in range(self.number_of_structures):
            if not 'arr' in self._debug['profile_identifiers']:
                self._debug['profile_identifiers']['arr'] = []
            self._debug['profile_identifiers']['arr'].append({'start': self._io.pos()})
            _t_profile_identifiers = self._root.TagTable.TagDefinition.ProfileSequenceIdentifierType.ProfileIdentifier(self._io, self, self._root)
            _t_profile_identifiers._read()
            self.profile_identifiers[i] = _t_profile_identifiers
            self._debug['profile_identifiers']['arr'][i]['end'] = self._io.pos()
        self._debug['profile_identifiers']['end'] = self._io.pos()
    class ProfileIdentifier(KaitaiStruct):
        """One entry: a 16-byte profile ID followed by a
        multiLocalizedUnicodeType profile description."""
        SEQ_FIELDS = ["profile_id", "profile_description"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['profile_id']['start'] = self._io.pos()
            self.profile_id = self._io.read_bytes(16)
            self._debug['profile_id']['end'] = self._io.pos()
            self._debug['profile_description']['start'] = self._io.pos()
            self.profile_description = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
            self.profile_description._read()
            self._debug['profile_description']['end'] = self._io.pos()
class ColorantTableType(KaitaiStruct):
    """colorantTableType payload: reserved word, colorant count, then that
    many colorant entries (32-byte name field + 6 bytes of PCS values)."""
    SEQ_FIELDS = ["reserved", "count_of_colorants", "colorants"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field byte ranges, populated by _read() for debug consumers.
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['count_of_colorants']['start'] = self._io.pos()
        self.count_of_colorants = self._io.read_u4be()
        self._debug['count_of_colorants']['end'] = self._io.pos()
        self._debug['colorants']['start'] = self._io.pos()
        self.colorants = [None] * (self.count_of_colorants)
        for i in range(self.count_of_colorants):
            if not 'arr' in self._debug['colorants']:
                self._debug['colorants']['arr'] = []
            self._debug['colorants']['arr'].append({'start': self._io.pos()})
            _t_colorants = self._root.TagTable.TagDefinition.ColorantTableType.Colorant(self._io, self, self._root)
            _t_colorants._read()
            self.colorants[i] = _t_colorants
            self._debug['colorants']['arr'][i]['end'] = self._io.pos()
        self._debug['colorants']['end'] = self._io.pos()
    class Colorant(KaitaiStruct):
        """One colorant: NUL-terminated ASCII name, zero padding filling the
        remainder of the 32-byte name field, then 6 bytes of PCS values."""
        SEQ_FIELDS = ["name", "padding", "pcs_values"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['name']['start'] = self._io.pos()
            # Terminator byte is consumed but excluded from the value.
            self.name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
            self._debug['name']['end'] = self._io.pos()
            self._debug['padding']['start'] = self._io.pos()
            self.padding = [None] * ((32 - len(self.name)))
            for i in range((32 - len(self.name))):
                if not 'arr' in self._debug['padding']:
                    self._debug['padding']['arr'] = []
                self._debug['padding']['arr'].append({'start': self._io.pos()})
                # BUG FIX: the original rebound self.padding to the single
                # byte read here, clobbering the list built above on every
                # iteration; store each validated zero byte at index i so
                # padding keeps its declared list shape. Stream consumption
                # is unchanged.
                self.padding[i] = self._io.ensure_fixed_contents(b"\x00")
                self._debug['padding']['arr'][i]['end'] = self._io.pos()
            self._debug['padding']['end'] = self._io.pos()
            self._debug['pcs_values']['start'] = self._io.pos()
            self.pcs_values = self._io.read_bytes(6)
            self._debug['pcs_values']['end'] = self._io.pos()
class SignatureType(KaitaiStruct):
    """signatureType payload: reserved word, then a 4-character ASCII
    signature."""
    SEQ_FIELDS = ["reserved", "signature"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['signature']['start'] = self._io.pos()
        self.signature = (self._io.read_bytes(4)).decode(u"ASCII")
        self._debug['signature']['end'] = self._io.pos()
class CopyrightTag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is multiLocalizedUnicodeType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
            self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class Preview0Tag(KaitaiStruct):
    """Tag wrapper: dispatches the payload parse over the four LUT type
    signatures (lut8, lut16, lutAtoB, lutBtoA)."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class DateTimeType(KaitaiStruct):
    """dateTimeType payload: reserved word, then a DateTimeNumber
    structure."""
    SEQ_FIELDS = ["reserved", "date_and_time"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['date_and_time']['start'] = self._io.pos()
        self.date_and_time = self._root.DateTimeNumber(self._io, self, self._root)
        self.date_and_time._read()
        self._debug['date_and_time']['end'] = self._io.pos()
class DToB3Tag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is multiProcessElementsType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_process_elements_type:
            self.tag_data = self._root.TagTable.TagDefinition.MultiProcessElementsType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class Preview2Tag(KaitaiStruct):
    """Tag wrapper: dispatches the payload parse over lut8, lut16 and
    lutBtoA type signatures."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_b_to_a_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutBToAType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class DeviceModelDescTag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is multiLocalizedUnicodeType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_localized_unicode_type:
            self.tag_data = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class MultiProcessElementsType(KaitaiStruct):
    """multiProcessElementsType payload: channel counts, a positions table
    for the processing elements, then the raw element data to end of
    stream."""
    SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "number_of_processing_elements", "process_element_positions_table", "data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field byte ranges, populated by _read() for debug consumers.
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_input_channels']['start'] = self._io.pos()
        self.number_of_input_channels = self._io.read_u2be()
        self._debug['number_of_input_channels']['end'] = self._io.pos()
        self._debug['number_of_output_channels']['start'] = self._io.pos()
        self.number_of_output_channels = self._io.read_u2be()
        self._debug['number_of_output_channels']['end'] = self._io.pos()
        self._debug['number_of_processing_elements']['start'] = self._io.pos()
        self.number_of_processing_elements = self._io.read_u4be()
        self._debug['number_of_processing_elements']['end'] = self._io.pos()
        self._debug['process_element_positions_table']['start'] = self._io.pos()
        # One PositionNumber (offset/size pair) per processing element.
        self.process_element_positions_table = [None] * (self.number_of_processing_elements)
        for i in range(self.number_of_processing_elements):
            if not 'arr' in self._debug['process_element_positions_table']:
                self._debug['process_element_positions_table']['arr'] = []
            self._debug['process_element_positions_table']['arr'].append({'start': self._io.pos()})
            _t_process_element_positions_table = self._root.PositionNumber(self._io, self, self._root)
            _t_process_element_positions_table._read()
            self.process_element_positions_table[i] = _t_process_element_positions_table
            self._debug['process_element_positions_table']['arr'][i]['end'] = self._io.pos()
        self._debug['process_element_positions_table']['end'] = self._io.pos()
        self._debug['data']['start'] = self._io.pos()
        # Element bodies are kept opaque; consumers slice them using the
        # positions table above.
        self.data = self._io.read_bytes_full()
        self._debug['data']['end'] = self._io.pos()
class UInt16ArrayType(KaitaiStruct):
    """uInt16ArrayType payload: reserved word, then u2be values until end
    of stream."""
    SEQ_FIELDS = ["reserved", "values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['values']['start'] = self._io.pos()
        self.values = []
        i = 0
        while not self._io.is_eof():
            if not 'arr' in self._debug['values']:
                self._debug['values']['arr'] = []
            self._debug['values']['arr'].append({'start': self._io.pos()})
            self.values.append(self._io.read_u2be())
            self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
            i += 1
        self._debug['values']['end'] = self._io.pos()
class ColorantOrderTag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is colorantOrderType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.colorant_order_type:
            self.tag_data = self._root.TagTable.TagDefinition.ColorantOrderType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class DataType(KaitaiStruct):
    """dataType payload: a single u4be flag distinguishing ASCII from
    binary data."""
    class DataTypes(Enum):
        ascii_data = 0
        binary_data = 1
    SEQ_FIELDS = ["data_flag"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['data_flag']['start'] = self._io.pos()
        self.data_flag = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.DataType.DataTypes, self._io.read_u4be())
        self._debug['data_flag']['end'] = self._io.pos()
class ChromaticityType(KaitaiStruct):
    """chromaticityType payload: channel count, colorant/phosphor encoding,
    then one CIE xy coordinate pair per device channel."""
    class ColorantAndPhosphorEncodings(Enum):
        unknown = 0
        itu_r_bt_709_2 = 1
        smpte_rp145 = 2
        ebu_tech_3213_e = 3
        p22 = 4
    SEQ_FIELDS = ["reserved", "number_of_device_channels", "colorant_and_phosphor_encoding", "ciexy_coordinates_per_channel"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field byte ranges, populated by _read() for debug consumers.
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_device_channels']['start'] = self._io.pos()
        self.number_of_device_channels = self._io.read_u2be()
        self._debug['number_of_device_channels']['end'] = self._io.pos()
        self._debug['colorant_and_phosphor_encoding']['start'] = self._io.pos()
        self.colorant_and_phosphor_encoding = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.ChromaticityType.ColorantAndPhosphorEncodings, self._io.read_u2be())
        self._debug['colorant_and_phosphor_encoding']['end'] = self._io.pos()
        self._debug['ciexy_coordinates_per_channel']['start'] = self._io.pos()
        self.ciexy_coordinates_per_channel = [None] * (self.number_of_device_channels)
        for i in range(self.number_of_device_channels):
            if not 'arr' in self._debug['ciexy_coordinates_per_channel']:
                self._debug['ciexy_coordinates_per_channel']['arr'] = []
            self._debug['ciexy_coordinates_per_channel']['arr'].append({'start': self._io.pos()})
            _t_ciexy_coordinates_per_channel = self._root.TagTable.TagDefinition.ChromaticityType.CiexyCoordinateValues(self._io, self, self._root)
            _t_ciexy_coordinates_per_channel._read()
            self.ciexy_coordinates_per_channel[i] = _t_ciexy_coordinates_per_channel
            self._debug['ciexy_coordinates_per_channel']['arr'][i]['end'] = self._io.pos()
        self._debug['ciexy_coordinates_per_channel']['end'] = self._io.pos()
    class CiexyCoordinateValues(KaitaiStruct):
        """One xy pair, each stored as a raw u2be (presumably u16Fixed16
        halves per ICC encoding — confirm against the spec)."""
        SEQ_FIELDS = ["x_coordinate", "y_coordinate"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['x_coordinate']['start'] = self._io.pos()
            self.x_coordinate = self._io.read_u2be()
            self._debug['x_coordinate']['end'] = self._io.pos()
            self._debug['y_coordinate']['start'] = self._io.pos()
            self.y_coordinate = self._io.read_u2be()
            self._debug['y_coordinate']['end'] = self._io.pos()
class LuminanceTag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is XYZType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.xyz_type:
            self.tag_data = self._root.TagTable.TagDefinition.XyzType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class S15Fixed16ArrayType(KaitaiStruct):
    """s15Fixed16ArrayType payload: reserved word, then S15Fixed16Number
    values until end of stream."""
    SEQ_FIELDS = ["reserved", "values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['values']['start'] = self._io.pos()
        self.values = []
        i = 0
        while not self._io.is_eof():
            if not 'arr' in self._debug['values']:
                self._debug['values']['arr'] = []
            self._debug['values']['arr'].append({'start': self._io.pos()})
            _t_values = self._root.S15Fixed16Number(self._io, self, self._root)
            _t_values._read()
            self.values.append(_t_values)
            self._debug['values']['arr'][len(self.values) - 1]['end'] = self._io.pos()
            i += 1
        self._debug['values']['end'] = self._io.pos()
class MultiLocalizedUnicodeType(KaitaiStruct):
    """multiLocalizedUnicodeType payload: record count and size, then
    per-locale records pointing at UTF-16BE string data."""
    SEQ_FIELDS = ["reserved", "number_of_records", "record_size", "records"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field byte ranges, populated by _read() for debug consumers.
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_records']['start'] = self._io.pos()
        self.number_of_records = self._io.read_u4be()
        self._debug['number_of_records']['end'] = self._io.pos()
        self._debug['record_size']['start'] = self._io.pos()
        self.record_size = self._io.read_u4be()
        self._debug['record_size']['end'] = self._io.pos()
        self._debug['records']['start'] = self._io.pos()
        self.records = [None] * (self.number_of_records)
        for i in range(self.number_of_records):
            if not 'arr' in self._debug['records']:
                self._debug['records']['arr'] = []
            self._debug['records']['arr'].append({'start': self._io.pos()})
            _t_records = self._root.TagTable.TagDefinition.MultiLocalizedUnicodeType.Record(self._io, self, self._root)
            _t_records._read()
            self.records[i] = _t_records
            self._debug['records']['arr'][i]['end'] = self._io.pos()
        self._debug['records']['end'] = self._io.pos()
    class Record(KaitaiStruct):
        """One locale record: language/country codes plus the length and
        offset of its UTF-16BE string within the tag's substream."""
        SEQ_FIELDS = ["language_code", "country_code", "string_length", "string_offset"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
        def _read(self):
            self._debug['language_code']['start'] = self._io.pos()
            self.language_code = self._io.read_u2be()
            self._debug['language_code']['end'] = self._io.pos()
            self._debug['country_code']['start'] = self._io.pos()
            self.country_code = self._io.read_u2be()
            self._debug['country_code']['end'] = self._io.pos()
            self._debug['string_length']['start'] = self._io.pos()
            self.string_length = self._io.read_u4be()
            self._debug['string_length']['end'] = self._io.pos()
            self._debug['string_offset']['start'] = self._io.pos()
            self.string_offset = self._io.read_u4be()
            self._debug['string_offset']['end'] = self._io.pos()
        @property
        def string_data(self):
            # Lazy instance: computed once on first access, cached in
            # _m_string_data. Seeks to string_offset within this record's
            # stream, decodes string_length bytes as UTF-16BE, then
            # restores the original stream position.
            if hasattr(self, '_m_string_data'):
                return self._m_string_data if hasattr(self, '_m_string_data') else None
            _pos = self._io.pos()
            self._io.seek(self.string_offset)
            self._debug['_m_string_data']['start'] = self._io.pos()
            self._m_string_data = (self._io.read_bytes(self.string_length)).decode(u"UTF-16BE")
            self._debug['_m_string_data']['end'] = self._io.pos()
            self._io.seek(_pos)
            return self._m_string_data if hasattr(self, '_m_string_data') else None
class AToB2Tag(KaitaiStruct):
    """Tag wrapper: dispatches the payload parse over lut8, lut16 and
    lutAtoB type signatures."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class AToB1Tag(KaitaiStruct):
    """Tag wrapper: dispatches the payload parse over lut8, lut16 and
    lutAtoB type signatures."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_one_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut8Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_table_with_two_byte_precision_type:
            self.tag_data = self._root.TagTable.TagDefinition.Lut16Type(self._io, self, self._root)
            self.tag_data._read()
        elif _on == self._root.TagTable.TagDefinition.TagTypeSignatures.multi_function_a_to_b_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.LutAToBType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class ColorimetricIntentImageStateTag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is signatureType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.signature_type:
            self.tag_data = self._root.TagTable.TagDefinition.SignatureType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class CharTargetTag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is textType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.text_type:
            self.tag_data = self._root.TagTable.TagDefinition.TextType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class ColorantTableTag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is colorantTableType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.colorant_table_type:
            self.tag_data = self._root.TagTable.TagDefinition.ColorantTableType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class CalibrationDateTimeTag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is dateTimeType."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.date_time_type:
            self.tag_data = self._root.TagTable.TagDefinition.DateTimeType(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class NamedColor2Tag(KaitaiStruct):
    """Tag wrapper: reads the 4-byte type signature, then parses the payload
    only when it is namedColor2Type."""
    SEQ_FIELDS = ["tag_type", "tag_data"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(self._root.TagTable.TagDefinition.TagTypeSignatures, self._io.read_u4be())
        self._debug['tag_type']['end'] = self._io.pos()
        self._debug['tag_data']['start'] = self._io.pos()
        _on = self.tag_type
        if _on == self._root.TagTable.TagDefinition.TagTypeSignatures.named_color_2_type:
            self.tag_data = self._root.TagTable.TagDefinition.NamedColor2Type(self._io, self, self._root)
            self.tag_data._read()
        # NOTE(review): on any other signature tag_data is left unset.
        self._debug['tag_data']['end'] = self._io.pos()
class ViewingCondDescTag(KaitaiStruct):
    """Tag wrapper: a 4-byte type signature followed, when it matches
    ``multi_localized_unicode_type``, by a MultiLocalizedUnicodeType payload.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Payload is parsed only for the expected signature; otherwise
        # tag_data stays unset, matching the generated reader's behavior.
        if self.tag_type == defs.TagTypeSignatures.multi_localized_unicode_type:
            payload = defs.MultiLocalizedUnicodeType(self._io, self, self._root)
            payload._read()
            self.tag_data = payload
        dbg['tag_data']['end'] = self._io.pos()
class BToD3Tag(KaitaiStruct):
    """Tag wrapper: a 4-byte type signature followed, when it matches
    ``multi_process_elements_type``, by a MultiProcessElementsType payload.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Payload is parsed only for the expected signature; otherwise
        # tag_data stays unset, matching the generated reader's behavior.
        if self.tag_type == defs.TagTypeSignatures.multi_process_elements_type:
            payload = defs.MultiProcessElementsType(self._io, self, self._root)
            payload._read()
            self.tag_data = payload
        dbg['tag_data']['end'] = self._io.pos()
class ProfileSequenceDescType(KaitaiStruct):
    """profileSequenceDescType payload: 4 reserved zero bytes, a u4be count,
    then that many ProfileDescription records read back to back.
    """
    SEQ_FIELDS = ["reserved", "number_of_description_structures", "profile_descriptions"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)

    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['number_of_description_structures']['start'] = self._io.pos()
        self.number_of_description_structures = self._io.read_u4be()
        self._debug['number_of_description_structures']['end'] = self._io.pos()
        self._debug['profile_descriptions']['start'] = self._io.pos()
        # Build by appending instead of preallocating [None] * n and filling
        # by index; also uses the idiomatic "'x' not in d" membership test.
        self.profile_descriptions = []
        for _ in range(self.number_of_description_structures):
            # The per-element debug list is created lazily on the first item,
            # preserving the generated reader's debug-dict shape.
            if 'arr' not in self._debug['profile_descriptions']:
                self._debug['profile_descriptions']['arr'] = []
            self._debug['profile_descriptions']['arr'].append({'start': self._io.pos()})
            desc = self._root.TagTable.TagDefinition.ProfileSequenceDescType.ProfileDescription(self._io, self, self._root)
            desc._read()
            self.profile_descriptions.append(desc)
            self._debug['profile_descriptions']['arr'][-1]['end'] = self._io.pos()
        self._debug['profile_descriptions']['end'] = self._io.pos()

    class ProfileDescription(KaitaiStruct):
        """One profile-sequence entry: device manufacturer/model signatures,
        device attributes and technology, plus the two textual descriptions.
        """
        SEQ_FIELDS = ["device_manufacturer", "device_model", "device_attributes", "device_technology", "description_of_device_manufacturer", "description_of_device_model"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['device_manufacturer']['start'] = self._io.pos()
            self.device_manufacturer = self._root.DeviceManufacturer(self._io, self, self._root)
            self.device_manufacturer._read()
            self._debug['device_manufacturer']['end'] = self._io.pos()
            self._debug['device_model']['start'] = self._io.pos()
            # 4-byte ASCII signature identifying the device model.
            self.device_model = (self._io.read_bytes(4)).decode(u"ASCII")
            self._debug['device_model']['end'] = self._io.pos()
            self._debug['device_attributes']['start'] = self._io.pos()
            self.device_attributes = self._root.DeviceAttributes(self._io, self, self._root)
            self.device_attributes._read()
            self._debug['device_attributes']['end'] = self._io.pos()
            self._debug['device_technology']['start'] = self._io.pos()
            self.device_technology = self._root.TagTable.TagDefinition.TechnologyTag(self._io, self, self._root)
            self.device_technology._read()
            self._debug['device_technology']['end'] = self._io.pos()
            self._debug['description_of_device_manufacturer']['start'] = self._io.pos()
            self.description_of_device_manufacturer = self._root.TagTable.TagDefinition.DeviceMfgDescTag(self._io, self, self._root)
            self.description_of_device_manufacturer._read()
            self._debug['description_of_device_manufacturer']['end'] = self._io.pos()
            self._debug['description_of_device_model']['start'] = self._io.pos()
            self.description_of_device_model = self._root.TagTable.TagDefinition.DeviceModelDescTag(self._io, self, self._root)
            self.description_of_device_model._read()
            self._debug['description_of_device_model']['end'] = self._io.pos()
class ProfileSequenceIdentifierTag(KaitaiStruct):
    """Tag wrapper: a 4-byte type signature followed, when it matches
    ``profile_sequence_identifier_type``, by that payload type.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Payload is parsed only for the expected signature; otherwise
        # tag_data stays unset, matching the generated reader's behavior.
        if self.tag_type == defs.TagTypeSignatures.profile_sequence_identifier_type:
            payload = defs.ProfileSequenceIdentifierType(self._io, self, self._root)
            payload._read()
            self.tag_data = payload
        dbg['tag_data']['end'] = self._io.pos()
class BToD1Tag(KaitaiStruct):
    """Tag wrapper: a 4-byte type signature followed, when it matches
    ``multi_process_elements_type``, by a MultiProcessElementsType payload.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Payload is parsed only for the expected signature; otherwise
        # tag_data stays unset, matching the generated reader's behavior.
        if self.tag_type == defs.TagTypeSignatures.multi_process_elements_type:
            payload = defs.MultiProcessElementsType(self._io, self, self._root)
            payload._read()
            self.tag_data = payload
        dbg['tag_data']['end'] = self._io.pos()
class ColorantOrderType(KaitaiStruct):
    """colorantOrderType payload: 4 reserved zero bytes, a u4be count of
    colorants, then one u1 per colorant giving its printing order.
    """
    SEQ_FIELDS = ["reserved", "count_of_colorants", "numbers_of_colorants_in_order_of_printing"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)

    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['count_of_colorants']['start'] = self._io.pos()
        self.count_of_colorants = self._io.read_u4be()
        self._debug['count_of_colorants']['end'] = self._io.pos()
        self._debug['numbers_of_colorants_in_order_of_printing']['start'] = self._io.pos()
        # Build by appending instead of preallocating [None] * n and filling
        # by index; also uses the idiomatic "'x' not in d" membership test.
        self.numbers_of_colorants_in_order_of_printing = []
        for _ in range(self.count_of_colorants):
            # The per-element debug list is created lazily on the first item.
            if 'arr' not in self._debug['numbers_of_colorants_in_order_of_printing']:
                self._debug['numbers_of_colorants_in_order_of_printing']['arr'] = []
            self._debug['numbers_of_colorants_in_order_of_printing']['arr'].append({'start': self._io.pos()})
            self.numbers_of_colorants_in_order_of_printing.append(self._io.read_u1())
            self._debug['numbers_of_colorants_in_order_of_printing']['arr'][-1]['end'] = self._io.pos()
        self._debug['numbers_of_colorants_in_order_of_printing']['end'] = self._io.pos()
class DToB2Tag(KaitaiStruct):
    """Tag wrapper: a 4-byte type signature followed, when it matches
    ``multi_process_elements_type``, by a MultiProcessElementsType payload.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Payload is parsed only for the expected signature; otherwise
        # tag_data stays unset, matching the generated reader's behavior.
        if self.tag_type == defs.TagTypeSignatures.multi_process_elements_type:
            payload = defs.MultiProcessElementsType(self._io, self, self._root)
            payload._read()
            self.tag_data = payload
        dbg['tag_data']['end'] = self._io.pos()
class GrayTrcTag(KaitaiStruct):
    """Tag wrapper whose payload is either a CurveType or a
    ParametricCurveType, selected by the 4-byte type signature.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Signature-to-payload dispatch; an unknown signature leaves
        # tag_data unset, matching the generated reader's behavior.
        dispatch = {
            defs.TagTypeSignatures.curve_type: defs.CurveType,
            defs.TagTypeSignatures.parametric_curve_type: defs.ParametricCurveType,
        }
        payload_cls = dispatch.get(self.tag_type)
        if payload_cls is not None:
            self.tag_data = payload_cls(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class ViewingConditionsType(KaitaiStruct):
    """viewingConditionsType payload: 4 reserved zero bytes, two
    un-normalized CIEXYZ triples (illuminant then surround), and an
    illuminant-type encoding record.
    """
    SEQ_FIELDS = ["reserved", "un_normalized_ciexyz_values_for_illuminant", "un_normalized_ciexyz_values_for_surround", "illuminant_type"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        dbg = self._debug
        dbg['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        dbg['reserved']['end'] = self._io.pos()
        # The three substructures are read identically: mark start, parse
        # the sub-struct in place, mark end.
        for field_name, struct_cls in (
            ("un_normalized_ciexyz_values_for_illuminant", self._root.XyzNumber),
            ("un_normalized_ciexyz_values_for_surround", self._root.XyzNumber),
            ("illuminant_type", self._root.StandardIlluminantEncoding),
        ):
            dbg[field_name]['start'] = self._io.pos()
            member = struct_cls(self._io, self, self._root)
            member._read()
            setattr(self, field_name, member)
            dbg[field_name]['end'] = self._io.pos()
class LutBToAType(KaitaiStruct):
    """lutBToAType payload: reserved zeros, input/output channel counts,
    2 bytes of zero padding, five u4be offsets (B curves, matrix, M curves,
    CLUT, A curves), then the remainder of the stream as raw data.
    """
    SEQ_FIELDS = ["reserved", "number_of_input_channels", "number_of_output_channels", "padding", "offset_to_first_b_curve", "offset_to_matrix", "offset_to_first_m_curve", "offset_to_clut", "offset_to_first_a_curve", "data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        dbg = self._debug
        dbg['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        dbg['reserved']['end'] = self._io.pos()
        dbg['number_of_input_channels']['start'] = self._io.pos()
        self.number_of_input_channels = self._io.read_u1()
        dbg['number_of_input_channels']['end'] = self._io.pos()
        dbg['number_of_output_channels']['start'] = self._io.pos()
        self.number_of_output_channels = self._io.read_u1()
        dbg['number_of_output_channels']['end'] = self._io.pos()
        dbg['padding']['start'] = self._io.pos()
        self.padding = self._io.ensure_fixed_contents(b"\x00\x00")
        dbg['padding']['end'] = self._io.pos()
        # The five offset fields are all u4be and read back to back.
        for offset_field in (
            "offset_to_first_b_curve",
            "offset_to_matrix",
            "offset_to_first_m_curve",
            "offset_to_clut",
            "offset_to_first_a_curve",
        ):
            dbg[offset_field]['start'] = self._io.pos()
            setattr(self, offset_field, self._io.read_u4be())
            dbg[offset_field]['end'] = self._io.pos()
        dbg['data']['start'] = self._io.pos()
        # Everything remaining in the (sub)stream is kept as raw bytes.
        self.data = self._io.read_bytes_full()
        dbg['data']['end'] = self._io.pos()
class GreenTrcTag(KaitaiStruct):
    """Tag wrapper whose payload is either a CurveType or a
    ParametricCurveType, selected by the 4-byte type signature.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Signature-to-payload dispatch; an unknown signature leaves
        # tag_data unset, matching the generated reader's behavior.
        dispatch = {
            defs.TagTypeSignatures.curve_type: defs.CurveType,
            defs.TagTypeSignatures.parametric_curve_type: defs.ParametricCurveType,
        }
        payload_cls = dispatch.get(self.tag_type)
        if payload_cls is not None:
            self.tag_data = payload_cls(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class UInt32ArrayType(KaitaiStruct):
    """uInt32ArrayType payload: 4 reserved zero bytes, then u4be values
    until the end of the (sub)stream.
    """
    SEQ_FIELDS = ["reserved", "values"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)

    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['values']['start'] = self._io.pos()
        self.values = []
        # The generated loop counter `i` was never used (debug entries are
        # indexed off the values list) and has been removed; the membership
        # test uses the idiomatic "'x' not in d" form.
        while not self._io.is_eof():
            # The per-element debug list is created lazily on the first item.
            if 'arr' not in self._debug['values']:
                self._debug['values']['arr'] = []
            self._debug['values']['arr'].append({'start': self._io.pos()})
            self.values.append(self._io.read_u4be())
            self._debug['values']['arr'][-1]['end'] = self._io.pos()
        self._debug['values']['end'] = self._io.pos()
class GamutTag(KaitaiStruct):
    """Tag wrapper whose payload is a Lut8Type, Lut16Type or LutBToAType,
    selected by the 4-byte type signature.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Signature-to-payload dispatch; an unknown signature leaves
        # tag_data unset, matching the generated reader's behavior.
        dispatch = {
            defs.TagTypeSignatures.multi_function_table_with_one_byte_precision_type: defs.Lut8Type,
            defs.TagTypeSignatures.multi_function_table_with_two_byte_precision_type: defs.Lut16Type,
            defs.TagTypeSignatures.multi_function_b_to_a_table_type: defs.LutBToAType,
        }
        payload_cls = dispatch.get(self.tag_type)
        if payload_cls is not None:
            self.tag_data = payload_cls(self._io, self, self._root)
            self.tag_data._read()
        dbg['tag_data']['end'] = self._io.pos()
class UInt8ArrayType(KaitaiStruct):
    """uInt8ArrayType payload: 4 reserved zero bytes, then u1 values
    until the end of the (sub)stream.
    """
    SEQ_FIELDS = ["reserved", "values"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)

    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['values']['start'] = self._io.pos()
        self.values = []
        # The generated loop counter `i` was never used (debug entries are
        # indexed off the values list) and has been removed; the membership
        # test uses the idiomatic "'x' not in d" form.
        while not self._io.is_eof():
            # The per-element debug list is created lazily on the first item.
            if 'arr' not in self._debug['values']:
                self._debug['values']['arr'] = []
            self._debug['values']['arr'].append({'start': self._io.pos()})
            self.values.append(self._io.read_u1())
            self._debug['values']['arr'][-1]['end'] = self._io.pos()
        self._debug['values']['end'] = self._io.pos()
class RedMatrixColumnTag(KaitaiStruct):
    """Tag wrapper: a 4-byte type signature followed, when it matches
    ``xyz_type``, by an XyzType payload.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Payload is parsed only for the expected signature; otherwise
        # tag_data stays unset, matching the generated reader's behavior.
        if self.tag_type == defs.TagTypeSignatures.xyz_type:
            payload = defs.XyzType(self._io, self, self._root)
            payload._read()
            self.tag_data = payload
        dbg['tag_data']['end'] = self._io.pos()
class UInt64ArrayType(KaitaiStruct):
    """uInt64ArrayType payload: 4 reserved zero bytes, then u8be values
    until the end of the (sub)stream.
    """
    SEQ_FIELDS = ["reserved", "values"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)

    def _read(self):
        self._debug['reserved']['start'] = self._io.pos()
        self.reserved = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['values']['start'] = self._io.pos()
        self.values = []
        # The generated loop counter `i` was never used (debug entries are
        # indexed off the values list) and has been removed; the membership
        # test uses the idiomatic "'x' not in d" form.
        while not self._io.is_eof():
            # The per-element debug list is created lazily on the first item.
            if 'arr' not in self._debug['values']:
                self._debug['values']['arr'] = []
            self._debug['values']['arr'].append({'start': self._io.pos()})
            self.values.append(self._io.read_u8be())
            self._debug['values']['arr'][-1]['end'] = self._io.pos()
        self._debug['values']['end'] = self._io.pos()
class BToD2Tag(KaitaiStruct):
    """Tag wrapper: a 4-byte type signature followed, when it matches
    ``multi_process_elements_type``, by a MultiProcessElementsType payload.
    """
    SEQ_FIELDS = ["tag_type", "tag_data"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # With no explicit root, this struct acts as its own root.
        self._root = self if not _root else _root
        self._debug = collections.defaultdict(dict)

    def _read(self):
        defs = self._root.TagTable.TagDefinition
        dbg = self._debug
        dbg['tag_type']['start'] = self._io.pos()
        self.tag_type = KaitaiStream.resolve_enum(defs.TagTypeSignatures, self._io.read_u4be())
        dbg['tag_type']['end'] = self._io.pos()
        dbg['tag_data']['start'] = self._io.pos()
        # Payload is parsed only for the expected signature; otherwise
        # tag_data stays unset, matching the generated reader's behavior.
        if self.tag_type == defs.TagTypeSignatures.multi_process_elements_type:
            payload = defs.MultiProcessElementsType(self._io, self, self._root)
            payload._read()
            self.tag_data = payload
        dbg['tag_data']['end'] = self._io.pos()
@property
def tag_data_element(self):
if hasattr(self, '_m_tag_data_element'):
return self._m_tag_data_element if hasattr(self, '_m_tag_data_element') else None
_pos = self._io.pos()
self._io.seek(self.offset_to_data_element)
self._debug['_m_tag_data_element']['start'] = self._io.pos()
_on = self.tag_signature
if _on == self._root.TagTable.TagDefinition.TagSignatures.colorant_order:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorantOrderTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_a_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToA2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.media_white_point:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.MediaWhitePointTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_3:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD3Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.colorimetric_intent_image_state:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorimetricIntentImageStateTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.viewing_cond_desc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ViewingCondDescTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.preview_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.Preview1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.device_model_desc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DeviceModelDescTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.chromaticity:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ChromaticityTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.preview_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.Preview0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.saturation_rendering_intent_gamut:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.SaturationRenderingIntentGamutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_a_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToA0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.green_matrix_column:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GreenMatrixColumnTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.copyright:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.CopyrightTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.blue_matrix_column:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BlueMatrixColumnTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.chromatic_adaptation:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ChromaticAdaptationTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.a_to_b_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.AToB1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.output_response:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.OutputResponseTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.profile_sequence:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ProfileSequenceTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.char_target:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.CharTargetTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.red_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.RedTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.gamut:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GamutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.device_mfg_desc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DeviceMfgDescTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.measurement:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.MeasurementTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.green_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GreenTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_3:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB3Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.colorant_table:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorantTableTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.profile_description:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ProfileDescriptionTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.profile_sequence_identifier:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ProfileSequenceIdentifierTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.gray_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.GrayTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.perceptual_rendering_intent_gamut:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.PerceptualRenderingIntentGamutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.blue_trc:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BlueTrcTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.d_to_b_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.DToB0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.a_to_b_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.AToB2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.calibration_date_time:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.CalibrationDateTimeTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.colorant_table_out:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ColorantTableOutTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.red_matrix_column:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.RedMatrixColumnTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.preview_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.Preview2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.a_to_b_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.AToB0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.luminance:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.LuminanceTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.named_color_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.NamedColor2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_2:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD2Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_0:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD0Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_a_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToA1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.b_to_d_1:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.BToD1Tag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.viewing_conditions:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.ViewingConditionsTag(io, self, self._root)
self._m_tag_data_element._read()
elif _on == self._root.TagTable.TagDefinition.TagSignatures.technology:
self._raw__m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
io = KaitaiStream(BytesIO(self._raw__m_tag_data_element))
self._m_tag_data_element = self._root.TagTable.TagDefinition.TechnologyTag(io, self, self._root)
self._m_tag_data_element._read()
else:
self._m_tag_data_element = self._io.read_bytes(self.size_of_data_element)
self._debug['_m_tag_data_element']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_tag_data_element if hasattr(self, '_m_tag_data_element') else None
class DeviceAttributes(KaitaiStruct):
    """ICC profile header "device attributes" field.

    Reads four 1-bit flags, 28 reserved bits and a 32-bit vendor-specific
    area (64 bits total). Generated Kaitai Struct parser: the read order and
    the per-field ``_debug`` offset bookkeeping must not be altered.
    """
    class DeviceAttributesReflectiveOrTransparency(Enum):
        reflective = 0
        transparency = 1
    class DeviceAttributesGlossyOrMatte(Enum):
        glossy = 0
        matte = 1
    class DeviceAttributesPositiveOrNegativeMediaPolarity(Enum):
        positive_media_polarity = 0
        negative_media_polarity = 1
    class DeviceAttributesColourOrBlackAndWhiteMedia(Enum):
        colour_media = 0
        black_and_white_media = 1
    # Field names in on-stream order, as declared in the .ksy spec.
    SEQ_FIELDS = ["reflective_or_transparency", "glossy_or_matte", "positive_or_negative_media_polarity", "colour_or_black_and_white_media", "reserved", "vendor_specific"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Maps field name -> {'start': pos, 'end': pos}; filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        """Read all six fields in sequence, recording stream offsets in _debug."""
        self._debug['reflective_or_transparency']['start'] = self._io.pos()
        self.reflective_or_transparency = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesReflectiveOrTransparency, self._io.read_bits_int(1))
        self._debug['reflective_or_transparency']['end'] = self._io.pos()
        self._debug['glossy_or_matte']['start'] = self._io.pos()
        self.glossy_or_matte = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesGlossyOrMatte, self._io.read_bits_int(1))
        self._debug['glossy_or_matte']['end'] = self._io.pos()
        self._debug['positive_or_negative_media_polarity']['start'] = self._io.pos()
        self.positive_or_negative_media_polarity = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesPositiveOrNegativeMediaPolarity, self._io.read_bits_int(1))
        self._debug['positive_or_negative_media_polarity']['end'] = self._io.pos()
        self._debug['colour_or_black_and_white_media']['start'] = self._io.pos()
        self.colour_or_black_and_white_media = KaitaiStream.resolve_enum(self._root.DeviceAttributes.DeviceAttributesColourOrBlackAndWhiteMedia, self._io.read_bits_int(1))
        self._debug['colour_or_black_and_white_media']['end'] = self._io.pos()
        self._debug['reserved']['start'] = self._io.pos()
        # 28 bits reserved by the ICC spec; kept as a raw integer.
        self.reserved = self._io.read_bits_int(28)
        self._debug['reserved']['end'] = self._io.pos()
        self._debug['vendor_specific']['start'] = self._io.pos()
        self.vendor_specific = self._io.read_bits_int(32)
        self._debug['vendor_specific']['end'] = self._io.pos()
class DeviceManufacturer(KaitaiStruct):
    """ICC "device manufacturer" signature field.

    Reads one big-endian u4 and resolves it against the registry of known
    manufacturer signatures below. Generated Kaitai Struct parser — the enum
    values are the 32-bit big-endian integer form of the four-character
    registry codes and must not be edited by hand.
    """
    class DeviceManufacturers(Enum):
        erdt_systems_gmbh_and_co_kg = 878981744
        aamazing_technologies_inc = 1094798657
        acer_peripherals = 1094927698
        acolyte_color_research = 1094929492
        actix_sytems_inc = 1094931529
        adara_technology_inc = 1094992210
        adobe_systems_incorporated = 1094992453
        adi_systems_inc = 1094994208
        agfa_graphics_nv = 1095190081
        alps_electric_usa_inc = 1095519556
        alps_electric_usa_inc_2 = 1095520339
        alwan_color_expertise = 1095522126
        amiable_technologies_inc = 1095586889
        aoc_international_usa_ltd = 1095713568
        apago = 1095778631
        apple_computer_inc = 1095782476
        ast = 1095980064
        atandt_computer_systems = 1096033876
        barbieri_electronic = 1111573836
        barco_nv = 1112687439
        breakpoint_pty_limited = 1112689488
        brother_industries_ltd = 1112690516
        bull = 1112886348
        bus_computer_systems = 1112888096
        c_itoh = 1127041364
        intel_corporation = 1128353106
        canon_inc_canon_development_americas_inc = 1128353359
        carroll_touch = 1128354386
        casio_computer_co_ltd = 1128354633
        colorbus_pl = 1128420691
        crossfield = 1128614944
        crossfield_2 = 1128615032
        cgs_publishing_technologies_international_gmbh = 1128747808
        rochester_robotics = 1128811808
        colour_imaging_group_london = 1128875852
        citizen = 1128879177
        candela_ltd = 1129066544
        color_iq = 1129072977
        chromaco_inc = 1129136975
        chromix = 1129146712
        colorgraphic_communications_corporation = 1129270351
        compaq_computer_corporation = 1129270608
        compeq_usa_focus_technology = 1129270640
        conrac_display_products = 1129270866
        cordata_technologies_inc = 1129271876
        compaq_computer_corporation_2 = 1129337120
        colorpro = 1129337423
        cornerstone = 1129467424
        ctx_international_inc = 1129601056
        colorvision = 1129728339
        fujitsu_laboratories_ltd = 1129792288
        darius_technology_ltd = 1145131593
        dataproducts = 1145132097
        dry_creek_photo = 1145262112
        digital_contents_resource_center_chung_ang_university = 1145262659
        dell_computer_corporation = 1145392204
        dainippon_ink_and_chemicals = 1145652000
        diconix = 1145652047
        digital = 1145653065
        digital_light_and_color = 1145841219
        doppelganger_llc = 1146113095
        dainippon_screen = 1146298400
        doosol = 1146310476
        dupont = 1146441806
        epson = 1162892111
        esko_graphics = 1163086671
        electronics_and_telecommunications_research_institute = 1163153993
        everex_systems_inc = 1163281746
        exactcode_gmbh = 1163411779
        eizo_nanao_corporation = 1164540527
        falco_data_products_inc = 1178684483
        fuji_photo_film_coltd = 1179000864
        fujifilm_electronic_imaging_ltd = 1179010377
        fnord_software = 1179537988
        fora_inc = 1179603521
        forefront_technology_corporation = 1179603525
        fujitsu = 1179658794
        waytech_development_inc = 1179664672
        fujitsu_2 = 1179994697
        fuji_xerox_co_ltd = 1180180512
        gcc_technologies_inc = 1195590432
        global_graphics_software_limited = 1195856716
        gretagmacbeth = 1196245536
        gmg_gmbh_and_co_kg = 1196246816
        goldstar_technology_inc = 1196379204
        giantprint_pty_ltd = 1196446292
        gretagmacbeth_2 = 1196707138
        waytech_development_inc_2 = 1196835616
        sony_corporation = 1196896843
        hci = 1212369184
        heidelberger_druckmaschinen_ag = 1212435744
        hermes = 1212502605
        hitachi_america_ltd = 1212765249
        hewlett_packard = 1213210656
        hitachi_ltd = 1213481760
        hiti_digital_inc = 1214862441
        ibm_corporation = 1229081888
        scitex_corporation_ltd = 1229213268
        hewlett_packard_2 = 1229275936
        iiyama_north_america_inc = 1229543745
        ikegami_electronics_inc = 1229669703
        image_systems_corporation = 1229799751
        ingram_micro_inc = 1229801760
        intel_corporation_2 = 1229870147
        intl = 1229870156
        intra_electronics_usa_inc = 1229870162
        iocomm_international_technology_corporation = 1229931343
        infoprint_solutions_company = 1230000928
        scitex_corporation_ltd_3 = 1230129491
        ichikawa_soft_laboratory = 1230195744
        itnl = 1230261836
        ivm = 1230392608
        iwatsu_electric_co_ltd = 1230455124
        scitex_corporation_ltd_2 = 1231318644
        inca_digital_printers_ltd = 1231971169
        scitex_corporation_ltd_4 = 1232234867
        jetsoft_development = 1246971476
        jvc_information_products_co = 1247167264
        scitex_corporation_ltd_6 = 1262572116
        kfc_computek_components_corporation = 1262895904
        klh_computers = 1263290400
        konica_minolta_holdings_inc = 1263355972
        konica_corporation = 1263420225
        kodak = 1263486017
        kyocera = 1264144195
        scitex_corporation_ltd_7 = 1264677492
        leica_camera_ag = 1279476039
        leeds_colour = 1279476548
        left_dakota = 1279541579
        leading_technology_inc = 1279607108
        lexmark_international_inc = 1279613005
        link_computer_inc = 1279872587
        linotronic = 1279872591
        lite_on_inc = 1279874117
        mag_computronic_usa_inc = 1296123715
        mag_innovision_inc = 1296123721
        mannesmann = 1296125518
        micron_technology_inc = 1296646990
        microtek = 1296646994
        microvitec_inc = 1296646998
        minolta = 1296649807
        mitsubishi_electronics_america_inc = 1296651347
        mitsuba_corporation = 1296651379
        minolta_2 = 1296976980
        modgraph_inc = 1297040455
        monitronix_inc = 1297043017
        monaco_systems_inc = 1297043027
        morse_technology_inc = 1297044051
        motive_systems = 1297044553
        microsoft_corporation = 1297303124
        mutoh_industries_ltd = 1297437775
        mitsubishi_electric_corporation_kyoto_works = 1298756723
        nanao_usa_corporation = 1312902721
        nec_corporation = 1313162016
        nexpress_solutions_llc = 1313167440
        nissei_sangyo_america_ltd = 1313428307
        nikon_corporation = 1313558350
        oce_technologies_bv = 1329808672
        ocecolor = 1329808707
        oki = 1330333984
        okidata = 1330334020
        okidata_2 = 1330334032
        olivetti = 1330399574
        olympus_optical_co_ltd = 1330403661
        onyx_graphics = 1330534744
        optiquest = 1330664521
        packard_bell = 1346454347
        matsushita_electric_industrial_co_ltd = 1346457153
        pantone_inc = 1346457172
        packard_bell_2 = 1346522656
        pfu_limited = 1346786592
        philips_consumer_electronics_co = 1346914636
        hoya_corporation_pentax_imaging_systems_division = 1347310680
        phase_one_a_s = 1347382885
        premier_computer_innovations = 1347568973
        princeton_graphic_systems = 1347569998
        princeton_publishing_labs = 1347570000
        qlux = 1363957080
        qms_inc = 1364022048
        qpcard_ab = 1364214596
        quadlaser = 1364541764
        qume_corporation = 1364544837
        radius_inc = 1380009033
        integrated_color_solutions_inc_2 = 1380205688
        roland_dg_corporation = 1380206368
        redms_group_inc = 1380271181
        relisys = 1380273225
        rolf_gierling_multitools = 1380404563
        ricoh_corporation = 1380533071
        edmund_ronald = 1380863044
        royal = 1380931905
        ricoh_printing_systemsltd = 1380991776
        royal_information_electronics_co_ltd = 1381256224
        sampo_corporation_of_america = 1396788560
        samsung_inc = 1396788563
        jaime_santana_pomares = 1396788820
        scitex_corporation_ltd_9 = 1396918612
        dainippon_screen_3 = 1396920910
        scitex_corporation_ltd_12 = 1396985888
        samsung_electronics_coltd = 1397048096
        seiko_instruments_usa_inc = 1397049675
        seikosha = 1397049707
        scanguycom = 1397183833
        sharp_laboratories = 1397244242
        international_color_consortium = 1397310275
        sony_corporation_2 = 1397706329
        spectracal = 1397769036
        star = 1398030674
        sampo_technology_corporation = 1398031136
        scitex_corporation_ltd_10 = 1399023988
        scitex_corporation_ltd_13 = 1399091232
        sony_corporation_3 = 1399811705
        talon_technology_corporation = 1413565519
        tandy = 1413566020
        tatung_co_of_america_inc = 1413567573
        taxan_america_inc = 1413568577
        tokyo_denshi_sekei_kk = 1413763872
        teco_information_systems_inc = 1413825359
        tegra = 1413826386
        tektronix_inc = 1413827412
        texas_instruments = 1414078496
        typemaker_ltd = 1414351698
        toshiba_corp = 1414484802
        toshiba_inc = 1414484808
        totoku_electric_co_ltd = 1414485067
        triumph = 1414678869
        toshiba_tec_corporation = 1414742612
        ttx_computer_products_inc = 1414813728
        tvm_professional_monitor_corporation = 1414941984
        tw_casper_corporation = 1414996000
        ulead_systems = 1431065432
        unisys = 1431193939
        utz_fehlau_and_sohn = 1431591494
        varityper = 1447121481
        viewsonic = 1447642455
        visual_communication = 1447646028
        wang = 1463897671
        wilbur_imaging = 1464615506
        ware_to_go = 1465141042
        wyse_technology = 1465471813
        xerox_corporation = 1480938072
        x_rite = 1481787732
        lavanyas_test_company = 1513173555
        zoran_corporation = 1515340110
        zebra_technologies_inc = 1516593778
        basiccolor_gmbh = 1648968515
        bergdesign_incorporated = 1650815591
        integrated_color_solutions_inc = 1667594596
        macdermid_colorspan_inc = 1668051824
        dainippon_screen_2 = 1685266464
        dupont_2 = 1685418094
        fujifilm_electronic_imaging_ltd_2 = 1717986665
        fluxdata_corporation = 1718383992
        scitex_corporation_ltd_5 = 1769105779
        scitex_corporation_ltd_8 = 1801548404
        erdt_systems_gmbh_and_co_kg_2 = 1868706916
        medigraph_gmbh = 1868720483
        qubyx_sarl = 1903518329
        scitex_corporation_ltd_11 = 1935894900
        dainippon_screen_4 = 1935897198
        scitex_corporation_ltd_14 = 1935962144
        siwi_grafika_corporation = 1936291689
        yxymaster_gmbh = 2037938541
    # Single on-stream field, as declared in the .ksy spec.
    SEQ_FIELDS = ["device_manufacturer"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Maps field name -> {'start': pos, 'end': pos}; filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        """Read the 4-byte big-endian signature and resolve it to the enum.

        resolve_enum leaves the raw integer in place when the value is not a
        registered manufacturer signature.
        """
        self._debug['device_manufacturer']['start'] = self._io.pos()
        self.device_manufacturer = KaitaiStream.resolve_enum(self._root.DeviceManufacturer.DeviceManufacturers, self._io.read_u4be())
        self._debug['device_manufacturer']['end'] = self._io.pos()
class S15Fixed16Number(KaitaiStruct):
    """ICC s15Fixed16Number field (presumably a signed 15.16 fixed-point
    value, per the type name — the spec defines it as 4 bytes).

    NOTE(review): the value is kept as the raw 4 bytes (``read_bytes(4)``),
    not converted to a number; interpretation is left to the caller.
    """
    SEQ_FIELDS = ["number"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Maps field name -> {'start': pos, 'end': pos}; filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        """Read the 4 raw bytes of the fixed-point value."""
        self._debug['number']['start'] = self._io.pos()
        self.number = self._io.read_bytes(4)
        self._debug['number']['end'] = self._io.pos()
class PositionNumber(KaitaiStruct):
    """ICC positionNumber: locates a data element inside the profile.

    Two big-endian u4 values: the element's byte offset (relative to the
    profile start, per the ICC spec — TODO confirm) and its byte size.
    """
    SEQ_FIELDS = ["offset_to_data_element", "size_of_data_element"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Maps field name -> {'start': pos, 'end': pos}; filled in by _read().
        self._debug = collections.defaultdict(dict)
    def _read(self):
        """Read offset then size, recording stream offsets in _debug."""
        self._debug['offset_to_data_element']['start'] = self._io.pos()
        self.offset_to_data_element = self._io.read_u4be()
        self._debug['offset_to_data_element']['end'] = self._io.pos()
        self._debug['size_of_data_element']['start'] = self._io.pos()
        self.size_of_data_element = self._io.read_u4be()
        self._debug['size_of_data_element']['end'] = self._io.pos()
| 59.437389
| 338
| 0.568151
| 21,327
| 200,304
| 4.889108
| 0.058424
| 0.075784
| 0.060075
| 0.064582
| 0.817742
| 0.788175
| 0.748576
| 0.710693
| 0.683495
| 0.665695
| 0
| 0.036423
| 0.322065
| 200,304
| 3,369
| 339
| 59.455031
| 0.731437
| 0.000479
| 0
| 0.591122
| 1
| 0
| 0.087606
| 0.028976
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070761
| false
| 0
| 0.001335
| 0
| 0.115487
| 0.004339
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
45d9f20c37e133fdde36fa4f1ffbcbd4ce272eee
| 9,623
|
py
|
Python
|
src/marion/marion/tests/test_views.py
|
OmenApps/marion
|
f501674cafbd91f0bbad7454e4dcf3527cf4445e
|
[
"MIT"
] | 7
|
2021-04-06T20:33:31.000Z
|
2021-09-30T23:29:24.000Z
|
src/marion/marion/tests/test_views.py
|
OmenApps/marion
|
f501674cafbd91f0bbad7454e4dcf3527cf4445e
|
[
"MIT"
] | 23
|
2020-09-09T15:01:50.000Z
|
2022-01-03T08:58:36.000Z
|
src/marion/marion/tests/test_views.py
|
OmenApps/marion
|
f501674cafbd91f0bbad7454e4dcf3527cf4445e
|
[
"MIT"
] | 2
|
2020-12-14T10:07:07.000Z
|
2021-06-29T00:20:43.000Z
|
"""Tests for the marion application views"""
import json
import tempfile
from pathlib import Path
from django.urls import reverse
import pytest
from pytest_django import asserts as django_assertions
from rest_framework import exceptions as drf_exceptions
from rest_framework import status
from rest_framework.test import APIClient
from marion import defaults, models
from marion.issuers import DummyDocument
client = APIClient()
def count_documents(root):
    """Count the PDF files sitting directly under ``root``."""
    pdf_entries = root.glob("*.pdf")
    return sum(1 for _ in pdf_entries)
@pytest.mark.django_db
def test_document_request_viewset_post(monkeypatch):
    """Test the DocumentRequestViewSet create view.

    Covers three cases: missing required payload fields, an unknown issuer,
    and a valid request that creates one DocumentRequest and one PDF.
    """
    # Generate documents in a throw-away directory so the test leaves no files behind.
    monkeypatch.setattr(defaults, "DOCUMENTS_ROOT", Path(tempfile.mkdtemp()))
    url = reverse("documentrequest-list")
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Missing required payload parameters: both fields should be flagged,
    # and nothing should be persisted or generated.
    data = {}
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert isinstance(response.data.get("context_query")[0], drf_exceptions.ErrorDetail)
    assert response.data.get("context_query")[0].code == "required"
    assert isinstance(response.data.get("issuer")[0], drf_exceptions.ErrorDetail)
    assert response.data.get("issuer")[0].code == "required"
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Invalid issuer ("DumberDocument" is not a registered issuer class)
    data = {
        "issuer": "marion.issuers.DumberDocument",
        "context_query": json.dumps({"fullname": "Richie Cunningham"}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert response.data.get("issuer")[0].code == "invalid_choice"
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Perform standard request: one DocumentRequest row and one PDF expected
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "Richie Cunningham"}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_201_CREATED
    assert models.DocumentRequest.objects.count() == 1
    assert (
        models.DocumentRequest.objects.get().context.get("fullname")
        == "Richie Cunningham"
    )
    assert count_documents(defaults.DOCUMENTS_ROOT) == 1
@pytest.mark.django_db
def test_document_request_viewset_post_context_query_pydantic_model_validation(
    monkeypatch,
):
    """Test the DocumentRequestViewSet create view context_query pydantic model
    validation.

    Each invalid context_query must yield a 400 response whose "error" field
    carries the pydantic validation message, with no document created.
    """
    # Generate documents in a throw-away directory so the test leaves no files behind.
    monkeypatch.setattr(defaults, "DOCUMENTS_ROOT", Path(tempfile.mkdtemp()))
    url = reverse("documentrequest-list")
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Refuse extra fields in context query
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "Richie Cunningham", "friends": 2}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "extra fields not permitted" in str(response.data.get("error"))
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Input types checking (fullname must not be null)
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": None}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "none is not an allowed value" in str(response.data.get("error"))
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Input constraints checking (short fullname)
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "D"}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "ensure this value has at least 2 characters" in str(
        response.data.get("error")
    )
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Input constraints checking (too long fullname)
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "F" * 256}),
    }
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "ensure this value has at most 255 characters" in str(
        response.data.get("error")
    )
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
@pytest.mark.django_db
def test_document_request_viewset_post_context_pydantic_model_validation(
    monkeypatch,
):
    """Test the DocumentRequestViewSet create view context pydantic model
    validation.

    Here the *context_query* is always valid; invalid data is injected by
    monkeypatching DummyDocument.fetch_context, so the 400 responses exercise
    validation of the fetched *context* instead.
    """
    # pylint: disable=unused-argument,function-redefined
    # Generate documents in a throw-away directory so the test leaves no files behind.
    monkeypatch.setattr(defaults, "DOCUMENTS_ROOT", Path(tempfile.mkdtemp()))
    url = reverse("documentrequest-list")
    data = {
        "issuer": "marion.issuers.DummyDocument",
        "context_query": json.dumps({"fullname": "Richie Cunningham"}),
    }
    # Refuse extra fields in context
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {
            "fullname": "Richie Cunningham",
            "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1",
            "friends": 2,
        }
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "extra fields not permitted" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Types checking (fullname must not be null)
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {"fullname": None, "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1"}
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "none is not an allowed value" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Missing identifier
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {"fullname": "Richie Cunningham"}
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "identifier\n field required" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Constraints checking (short fullname)
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {"fullname": "D", "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1"}
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "ensure this value has at least 2 characters" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
    # Constraints checking (too long fullname)
    def mock_fetch_context(*args, **kwargs):
        """A mock that returns invalid context"""
        return {
            "fullname": "F" * 256,
            "identifier": "0a1c3ccf-c67d-4071-ab1f-3b27628db9b1",
        }
    monkeypatch.setattr(DummyDocument, "fetch_context", mock_fetch_context)
    response = client.post(url, data, format="json")
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert "ensure this value has at most 255 characters" in response.data.get("error")
    assert models.DocumentRequest.objects.count() == 0
    assert count_documents(defaults.DOCUMENTS_ROOT) == 0
def test_document_template_debug_view_is_only_active_in_debug_mode(settings):
    """Test that the document_template_debug view is forbidden when DEBUG is off."""
    settings.DEBUG = False
    url = reverse("documents-template-debug")
    response = client.get(url)
    # Named DRF constant instead of a bare 403, consistent with every other
    # status assertion in this module.
    assert response.status_code == status.HTTP_403_FORBIDDEN
def test_document_template_debug_view(settings):
    """Test the document_template_debug view happy and error paths.

    Checks, in order: missing issuer, unknown issuer, then a successful
    render of the DummyDocument template.
    """
    settings.DEBUG = True
    # NOTE(review): this path reads "marion.default..." while the module
    # imported above is "marion.defaults" — confirm the settings value.
    settings.MARION_DOCUMENT_ISSUER_CHOICES_CLASS = (
        "marion.default.DocumentIssuerChoices"
    )
    url = reverse("documents-template-debug")
    response = client.get(url)
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert b"You should provide an issuer." in response.content
    response = client.get(url, {"issuer": "foo.bar.baz"})
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert b"Unknown issuer foo.bar.baz" in response.content
    response = client.get(url, {"issuer": "marion.issuers.DummyDocument"})
    # Named DRF constant instead of a bare 200, consistent with the rest of
    # the module.
    assert response.status_code == status.HTTP_200_OK
    # pylint: disable=no-member
    django_assertions.assertContains(response, "<h1>Dummy document</h1>")
| 37.589844
| 88
| 0.70903
| 1,146
| 9,623
| 5.796684
| 0.156195
| 0.040042
| 0.053741
| 0.057805
| 0.815294
| 0.78308
| 0.742436
| 0.736866
| 0.710071
| 0.710071
| 0
| 0.021322
| 0.176348
| 9,623
| 255
| 89
| 37.737255
| 0.816805
| 0.108698
| 0
| 0.566474
| 0
| 0
| 0.190617
| 0.053401
| 0
| 0
| 0
| 0
| 0.352601
| 1
| 0.063584
| false
| 0
| 0.063584
| 0
| 0.16185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
afdb8993a89b7a69ee6383729d8cc25851f08233
| 28
|
py
|
Python
|
services/__init__.py
|
S4CH/discord-bot
|
9cea7e62da17199b1e8d8bb4c67f62e71fbc4539
|
[
"Apache-2.0"
] | 1
|
2021-02-18T03:39:15.000Z
|
2021-02-18T03:39:15.000Z
|
services/__init__.py
|
S4CH/discord-bot
|
9cea7e62da17199b1e8d8bb4c67f62e71fbc4539
|
[
"Apache-2.0"
] | null | null | null |
services/__init__.py
|
S4CH/discord-bot
|
9cea7e62da17199b1e8d8bb4c67f62e71fbc4539
|
[
"Apache-2.0"
] | null | null | null |
from .group import GroupMeet
| 28
| 28
| 0.857143
| 4
| 28
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aff4ae7bfda34662882c07b022867d50e133a037
| 44
|
py
|
Python
|
creational/monostate/logic/__init__.py
|
Kozak24/Patterns
|
351d5c11f7c64ce5d58db37b6715fc8f7d31945a
|
[
"MIT"
] | null | null | null |
creational/monostate/logic/__init__.py
|
Kozak24/Patterns
|
351d5c11f7c64ce5d58db37b6715fc8f7d31945a
|
[
"MIT"
] | null | null | null |
creational/monostate/logic/__init__.py
|
Kozak24/Patterns
|
351d5c11f7c64ce5d58db37b6715fc8f7d31945a
|
[
"MIT"
] | null | null | null |
from .character import Character, Archetype
| 22
| 43
| 0.840909
| 5
| 44
| 7.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 1
| 44
| 44
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2fef1c29a49d52b89d4976195a425ff3ee623553
| 2,940
|
py
|
Python
|
dragonite/constants.py
|
neuroticnerd/dragoncon-bot
|
44c4d96743cf11ea0e8eaa567100e42afa4de565
|
[
"Apache-2.0"
] | 2
|
2015-12-18T05:28:02.000Z
|
2018-05-24T04:18:26.000Z
|
dragonite/constants.py
|
neuroticnerd/dragoncon-bot
|
44c4d96743cf11ea0e8eaa567100e42afa4de565
|
[
"Apache-2.0"
] | 11
|
2016-08-27T22:05:18.000Z
|
2021-12-13T19:41:44.000Z
|
dragonite/constants.py
|
neuroticnerd/dragoncon-bot
|
44c4d96743cf11ea0e8eaa567100e42afa4de565
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from __future__ import division, print_function
# ASCII-art rendering of Dragonite (presumably displayed by the dragoncon
# bot -- no usage is visible in this module).  Built from adjacent string
# literals, one per art row, each ending in an explicit "\n"; the alignment
# is whitespace-sensitive, so edit with care.
DRAGONITE_ASCII = (
" ..\n"
" ''\n"
" .;' .. .';.\n"
" .,' .,''`..';;;;,,;,',;;,.\n"
" ''. ...... ....... ':''.\n"
" ':;;;;,.....,:. '. ..\n"
" .. . .\n"
" ., ;:.. .. '.'...\n"
" '. ;,'' . ' ;...,,.\n"
" ' .. ..'.' .','.\n"
" '. ''. . .'' '. .','\n"
" .,, . ',, ' .,' .. ' .',.\n"
" ,.'... ','.'''.....';'.'. .' .:'. . ' .,.\n"
" :,;.. .''. .''. .. ::;,'..':. ,..,:'. .. ' '..\n"
" ; ..''.....:. ... ' ,;. .. ' ''\n"
" ' ,.........' .. .'.... ' .... ...'..\n"
" .. .;.........,. ,. ..',.. .,..\n"
" '....__....'. ' . .. .;\n"
" .;............, .'' .. '.\n"
" '. ' '. ...\n"
" ,'.............., .' .. .'\n" # NOQA
" ' ' , . .. |\n" # NOQA
" '. .. ' , .' '\n" # NOQA
" .';...... ......., ..'.,.,.' ... '\n" # NOQA
" '. ' ```` .. .. ,. __.. ..\n" # NOQA
" . ,........ .....,... '. __..... '.\n" # NOQA
" ' .. '''''` '. ,..... ',\n"
" .' ',... ..., ' .,;.\n"
" ' .. ........... .' . .';:.\n"
" .. .;,.... ...., ' ..',:'.\n"
" '.. .... ....... '. .. ...'':''.\n"
" ... ..,'............'. .:,.;. ,'..\n"
" ..',. .''............';. .:;.''..\n"
" .'... .. . .......''... '.\n"
" ':',..' . .. .. '\n"
" ,..,;,:;.''.... ' ..\n"
" ' ..\n"
" .............'\n"
" .'. ., .c. '\n"
" ...'.'....\n"
" .\n"
)
| 61.25
| 90
| 0.065306
| 66
| 2,940
| 2.636364
| 0.257576
| 0.37931
| 0.517241
| 0.62069
| 0.356322
| 0.356322
| 0.356322
| 0.356322
| 0.356322
| 0.356322
| 0
| 0.000939
| 0.637755
| 2,940
| 47
| 91
| 62.553191
| 0.162441
| 0.018027
| 0
| 0
| 0
| 0
| 0.851337
| 0.008336
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044444
| 0
| 0.044444
| 0.022222
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6410a9c0c1f943bb018ea14084aa6f19f0acf484
| 5,910
|
py
|
Python
|
python-code/ml/ml.py
|
Edward-Son/yhack2017
|
80e85820dcd9580278f20585eef97e311381dad6
|
[
"MIT"
] | null | null | null |
python-code/ml/ml.py
|
Edward-Son/yhack2017
|
80e85820dcd9580278f20585eef97e311381dad6
|
[
"MIT"
] | null | null | null |
python-code/ml/ml.py
|
Edward-Son/yhack2017
|
80e85820dcd9580278f20585eef97e311381dad6
|
[
"MIT"
] | 1
|
2020-06-16T21:37:20.000Z
|
2020-06-16T21:37:20.000Z
|
import xml.etree.ElementTree as ET
import numpy as np
from sklearn.externals import joblib
from sklearn import svm

# Root directory of the FINRA individual-adviser XML feeds used below.
FEED_DIR = "./FINRAChallengeData/IAPD/IA_INDVL_Feed_10_11_2017.xml"


def _load_bad_indices(path):
    """Return the set of flagged ('bad people') row indices listed in *path*.

    Each line of the file ends with the row index as its last
    space-separated token.  Using a set makes the per-row membership test
    O(1) instead of the original O(n) ``list.count``.
    """
    bad = set()
    with open(path) as f:
        for line in f:
            bad.add(int(line.strip('\n').split(' ')[-1]))
    return bad


def _intern(dic, key, next_code):
    """Assign *key* a numeric code in *dic* if unseen; return next free code.

    Replaces the Python-2-only ``dic.has_key(...)`` calls of the original,
    which raise AttributeError on Python 3.
    """
    if key not in dic:
        dic[key] = next_code
        next_code += 1
    return next_code


def _extract_features(person, dic, next_code):
    """Extract the five categorical features for one individual element.

    Missing sub-elements or attributes fall back to 'NA...' sentinel
    values, matching the original best-effort behaviour.  Returns the
    feature row (list of five numeric codes) and the updated next free
    code.
    """
    # feature 1 : city
    try:
        city = person[2][0].attrib['city']
    except (IndexError, KeyError):
        city = 'NAcity'
    next_code = _intern(dic, city, next_code)
    # feature 2 : organisation name
    try:
        org_name = person[2][0].attrib['orgNm']
    except (IndexError, KeyError):
        org_name = 'NAorg'
    next_code = _intern(dic, org_name, next_code)
    # feature 3 : branch location
    try:
        branch_city = person[2][0][1][0].attrib['city']
    except (IndexError, KeyError):
        branch_city = 'NAbranchcity'
    next_code = _intern(dic, branch_city, next_code)
    # feature 4 : number of exams taken (len() == number of child elements)
    try:
        exams = str(len(person[3])) + 'examsTaken'
    except IndexError:
        exams = 'NAexams'
    next_code = _intern(dic, exams, next_code)
    # feature 5 : has other business?  (the attribute access is only a
    # probe -- its value was discarded in the original too)
    try:
        person[7][0].attrib['desc']
        other_bus = 'NAOtherBusiness' if False else 'YesOtherBusiness'
    except (IndexError, KeyError):
        other_bus = 'NAOtherBusiness'
    next_code = _intern(dic, other_bus, next_code)
    row = [dic[city], dic[org_name], dic[branch_city], dic[exams], dic[other_bus]]
    return row, next_code


def _build_dataset(root_elem, dic, next_code, bad=None):
    """Vectorise every person under *root_elem*.

    Returns ``(data, target, next_code)``; *target* stays all zeros when
    *bad* is None (prediction-only data).
    """
    people = list(root_elem[0])
    data = np.zeros(shape=(len(people), 5))
    target = np.zeros(shape=(len(people),))
    for row_idx, person in enumerate(people):
        data[row_idx], next_code = _extract_features(person, dic, next_code)
        if bad is not None and row_idx in bad:
            target[row_idx] = 1
    return data, target, next_code


# shared categorical-value -> numeric-code table, grown across all feeds
dic = {}
next_code = 0

# first labelled data set
bad1 = _load_bad_indices("./found-bad-people/1-common-people")
root = ET.parse(FEED_DIR + "/IA_Indvl_Feeds1.xml").getroot()
data, target, next_code = _build_dataset(root, dic, next_code, bad1)

# second labelled data set
bad2 = _load_bad_indices("./found-bad-people/2-common-people")
root2 = ET.parse(FEED_DIR + "/IA_Indvl_Feeds2.xml").getroot()
data2, target2, next_code = _build_dataset(root2, dic, next_code, bad2)

# NOTE(review): calling fit() twice does NOT train incrementally -- the
# second call discards the first model.  Preserved as-is to keep behaviour;
# to actually train on both feeds, fit once on the concatenation of
# (data, data2) and (target, target2).
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(data, target)
clf.fit(data2, target2)

# predict on the third (unlabelled) feed
root3 = ET.parse(FEED_DIR + "/IA_Indvl_Feeds3.xml").getroot()
# BUG FIX: the original sized data3 with the feed-2 row count ('l') instead
# of the feed-3 row count, which zero-padded or overflowed the array when
# the feeds differ in length; _build_dataset sizes it correctly.
data3, _unused_target, next_code = _build_dataset(root3, dic, next_code)

print("length: " + str(len(data3)))
with open("resultsFinal", 'w') as f:
    results = clf.predict(data3)
    for k in results:
        f.write(str(k) + "\n")
joblib.dump(clf, "finalPersistence.pkl")
# clf = joblib.load('finalPersistence.pkl')
| 19.966216
| 104
| 0.670558
| 936
| 5,910
| 4.195513
| 0.163462
| 0.019099
| 0.030558
| 0.053476
| 0.799593
| 0.787369
| 0.764197
| 0.745862
| 0.730074
| 0.730074
| 0
| 0.032629
| 0.191032
| 5,910
| 295
| 105
| 20.033898
| 0.788747
| 0.25736
| 0
| 0.801105
| 0
| 0
| 0.138876
| 0.06679
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022099
| 0
| 0.022099
| 0.005525
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6426ae79342b2e8f1461de9a701298078c413258
| 35
|
py
|
Python
|
dask/dataframe/io/orc/__init__.py
|
Juanlu001/dask
|
ba29ba377ae71e5a90fa5ef5198c7d317b45c06a
|
[
"BSD-3-Clause"
] | 9,684
|
2016-02-12T16:09:21.000Z
|
2022-03-31T19:38:26.000Z
|
dask/dataframe/io/orc/__init__.py
|
Juanlu001/dask
|
ba29ba377ae71e5a90fa5ef5198c7d317b45c06a
|
[
"BSD-3-Clause"
] | 7,059
|
2016-02-11T18:32:45.000Z
|
2022-03-31T22:12:40.000Z
|
dask/dataframe/io/orc/__init__.py
|
Juanlu001/dask
|
ba29ba377ae71e5a90fa5ef5198c7d317b45c06a
|
[
"BSD-3-Clause"
] | 1,794
|
2016-02-13T23:28:39.000Z
|
2022-03-30T14:33:19.000Z
|
from .core import read_orc, to_orc
| 17.5
| 34
| 0.8
| 7
| 35
| 3.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 35
| 1
| 35
| 35
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ff91c2830b5a723ae8ceaa113b2c1146b19c95f7
| 65
|
py
|
Python
|
inheritance/zoo/reptile.py
|
ivan-yosifov88/python_oop_june_2021
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
[
"MIT"
] | 1
|
2021-08-03T19:14:24.000Z
|
2021-08-03T19:14:24.000Z
|
inheritance/zoo/reptile.py
|
ivan-yosifov88/python_oop_june_2021
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
[
"MIT"
] | null | null | null |
inheritance/zoo/reptile.py
|
ivan-yosifov88/python_oop_june_2021
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
[
"MIT"
] | null | null | null |
from zoo.animal import Animal
class Reptile(Animal):
    """Marker subclass of Animal representing a reptile; its body is empty,
    so all behavior comes from Animal unchanged."""
    pass
| 9.285714
| 29
| 0.723077
| 9
| 65
| 5.222222
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215385
| 65
| 6
| 30
| 10.833333
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ff9c433f186bc1eb384d68ef84ff4bba8d0574a0
| 9,934
|
py
|
Python
|
openarticlegauge/plugins/oup.py
|
CottageLabs/OpenArticleGauge
|
58d29b4209a7b59041d61326ffe1cf03f98f3cff
|
[
"BSD-3-Clause"
] | 1
|
2016-04-07T18:29:27.000Z
|
2016-04-07T18:29:27.000Z
|
openarticlegauge/plugins/oup.py
|
CottageLabs/OpenArticleGauge
|
58d29b4209a7b59041d61326ffe1cf03f98f3cff
|
[
"BSD-3-Clause"
] | 11
|
2015-01-06T15:53:09.000Z
|
2022-03-01T01:46:14.000Z
|
openarticlegauge/plugins/oup.py
|
CottageLabs/OpenArticleGauge
|
58d29b4209a7b59041d61326ffe1cf03f98f3cff
|
[
"BSD-3-Clause"
] | null | null | null |
from openarticlegauge import plugin
import re
class OUPPlugin(plugin.Plugin):
    """Detect the license of articles published by Oxford University Press.

    Matches known Creative Commons license statements embedded verbatim in
    article pages served from *.oxfordjournals.org and maps each statement
    to a license record via ``simple_extract`` (inherited from the base
    plugin).
    """
    _short_name = __name__.split('.')[-1]
    __version__ = '0.1'  # consider incrementing or at least adding a minor version
    # e.g. "0.1.1" if you change this plugin
    __desc__ = "Handles articles from the Oxford University Press"
    # BUG FIX: the dot between "oxfordjournals" and "org" was previously
    # unescaped, so the pattern also matched hosts such as
    # "oxfordjournalsXorg".  A raw string with both dots escaped pins the
    # domain and avoids the invalid "\." escape warning.
    supported_url_format = r'(http|https){0,1}://.+?\.oxfordjournals\.org/.+'

    # licensing statements found on this publisher's pages, in the form
    # {statement: meaning}, where meaning['type'] identifies the license
    # (see the licenses module) and meaning['version'] its version.  The
    # statement strings must match the page text exactly, including the
    # embedded newline + 21-space indentation.
    _license_mappings = [
        {"This is an Open Access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/3.0/),"
            + "\n" + ' '*21 + "which permits unrestricted reuse, distribution, and reproduction in any medium, provided the original work is properly cited.":
            {'type': 'cc-by', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by/3.0/'}
        },
        # same, but note "re-use" vs "reuse"
        {"This is an Open Access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/3.0/),"
            + "\n" + ' '*21 + "which permits unrestricted re-use, distribution, and reproduction in any medium, provided the original work is properly cited.":
            {'type': 'cc-by', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by/3.0/'}
        },
        {  # Same as above but without the trailing slash in the URL in the license statement and 'use' rather than 'reuse'
            "This is an Open Access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/3.0),"
            + "\n" + ' '*21 + "which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.":
            {'type': 'cc-by', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by/3.0/'}
        },
        {  # Same as above but without the trailing slash in the URL and 'reuse' rather than 'use'
            "This is an Open Access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/3.0),"
            + "\n" + ' '*21 + "which permits unrestricted reuse, distribution, and reproduction in any medium, provided the original work is properly cited.":
            {'type': 'cc-by', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by/3.0/'}
        },
        {  # this license statement is the same as the one above, but somebody's missed out the "reuse" word after unrestricted
            "This is an Open Access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/3.0/),"
            + "\n" + ' '*21 + "which permits unrestricted, distribution, and reproduction in any medium, provided the original work is properly cited.":
            {'type': 'cc-by', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by/3.0/'}
        },
        {"This is an Open Access article distributed under the terms of the Creative Commons Attribution Non-Commercial License (http://creativecommons.org/licenses/by-nc/3.0),"
            + "\n" + ' '*21 + "which permits unrestricted non-commercial use, distribution, and reproduction in any medium, provided the original work is"
            + "\n" + ' '*21 + "properly cited.":
            {'type': 'cc-nc', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by-nc/3.0/'}
        },
        {  # Same as above but with the trailing slash in the URL in the license statement
            "This is an Open Access article distributed under the terms of the Creative Commons Attribution Non-Commercial License (http://creativecommons.org/licenses/by-nc/3.0/),"
            + "\n" + ' '*21 + "which permits unrestricted non-commercial use, distribution, and reproduction in any medium, provided the original work is"
            + "\n" + ' '*21 + "properly cited.":
            {'type': 'cc-nc', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by-nc/3.0/'}
        },
        {  # Subtly different text
            "This is an Open Access article distributed under the terms of the Creative Commons Attribution Non-Commercial License (http://creativecommons.org/licenses/by-nc/3.0/),"
            + "\n" + ' '*21 + "which permits unrestricted non-commercial use, distribution, and reproduction in any medium, provided the original work is properly"
            + "\n" + ' '*21 + "and fully attributed":
            {'type': 'cc-nc', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by-nc/3.0/'}
        },
        # Yet another subtly different case - note "reuse" immediately after unrestricted
        {
            "This is an Open Access article distributed under the terms of the Creative Commons Attribution Non-Commercial License (http://creativecommons.org/licenses/by-nc/3.0),"
            + "\n" + ' '*21 + "which permits unrestricted reuse, distribution, and reproduction in any medium, provided the original work is properly cited.":
            {'type': 'cc-nc', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by-nc/3.0/'}
        },
        # Variation on the above with a trailing slash in the license URL
        {
            "This is an Open Access article distributed under the terms of the Creative Commons Attribution Non-Commercial License (http://creativecommons.org/licenses/by-nc/3.0/),"
            + "\n" + ' '*21 + "which permits unrestricted reuse, distribution, and reproduction in any medium, provided the original work is properly cited.":
            {'type': 'cc-nc', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by-nc/3.0/'}
        },
        {  # Yet another case at eg: http://cardiovascres.oxfordjournals.org/content/98/2/286
            "This is an Open Access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by-nc/3.0/),"
            + "\n" + ' '*21 + "which permits non-commercial use, distribution, and reproduction in any medium, provided that the original authorship is properly"
            + "\n" + ' '*21 + "and fully attributed":
            {'type': 'cc-nc', 'version': '3.0',
             # also declare some properties which override info about this license in the licenses list (see licenses module)
             'url': 'http://creativecommons.org/licenses/by-nc/3.0/'}
        }
    ]

    def capabilities(self):
        """Declare which plugin capabilities this class implements."""
        return {
            "type_detect_verify": False,
            "canonicalise": [],
            "detect_provider": [],
            "license_detect": True
        }

    def supports(self, provider):
        """
        Does the page_license plugin support this provider
        """
        for url in provider.get("url", []):
            if self.supports_url(url):
                return True
        return False

    def supports_url(self, url):
        """Return True if *url* matches the supported OUP URL pattern."""
        if re.match(self.supported_url_format, url):
            return True
        return False

    def license_detect(self, record):
        """
        To respond to the provider identifier: *.oxfordjournals.org

        This should determine the licence conditions of the OUP article and populate
        the record['bibjson']['license'] (note the US spelling) field.
        """
        # licensing statements to look for on this publisher's pages
        # take the form of {statement: meaning}
        # where meaning['type'] identifies the license (see licenses.py)
        # and meaning['version'] identifies the license version (if available)
        lic_statements = self._license_mappings
        for url in record.provider_urls:
            if self.supports_url(url):
                self.simple_extract(lic_statements, record, url)
        return (self._short_name, self.__version__)

    def get_description(self, plugin_name):
        """Extend the base plugin description with this plugin's URL-support note."""
        pd = super(OUPPlugin, self).get_description(plugin_name)
        pd.provider_support = "Supports urls which match the regular expression: " + self.supported_url_format
        return pd
| 66.671141
| 182
| 0.615663
| 1,202
| 9,934
| 5.048253
| 0.152246
| 0.010877
| 0.079763
| 0.108767
| 0.756922
| 0.750824
| 0.739618
| 0.739618
| 0.739618
| 0.739618
| 0
| 0.015421
| 0.281961
| 9,934
| 148
| 183
| 67.121622
| 0.835273
| 0.248641
| 0
| 0.441176
| 0
| 0.205882
| 0.564851
| 0.006247
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04902
| false
| 0
| 0.019608
| 0.009804
| 0.196078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
440f965af7b346c54b4fbe18ee8e15ee65bf6d0a
| 223
|
py
|
Python
|
polyaxon_client/tracking/__init__.py
|
yu-iskw/polyaxon-client
|
af72f30af218a8a027fea1ad966b543c900e0444
|
[
"MIT"
] | null | null | null |
polyaxon_client/tracking/__init__.py
|
yu-iskw/polyaxon-client
|
af72f30af218a8a027fea1ad966b543c900e0444
|
[
"MIT"
] | null | null | null |
polyaxon_client/tracking/__init__.py
|
yu-iskw/polyaxon-client
|
af72f30af218a8a027fea1ad966b543c900e0444
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from polyaxon_client.tracking.experiment import Experiment
from polyaxon_client.tracking.group import Group
from polyaxon_client.tracking.job import Job
from polyaxon_client.tracking.paths import *
| 31.857143
| 58
| 0.820628
| 30
| 223
| 5.966667
| 0.4
| 0.268156
| 0.402235
| 0.581006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004975
| 0.098655
| 223
| 6
| 59
| 37.166667
| 0.885572
| 0.09417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
441bb897dd52545f78cb38db6b09f33c7eaf7a91
| 14
|
py
|
Python
|
pypackage_scripts/__init__.py
|
marisalim/stonybrook_juypterworkflow
|
41a985be595c76e6f1257cd65c097f654a3c779e
|
[
"MIT"
] | null | null | null |
pypackage_scripts/__init__.py
|
marisalim/stonybrook_juypterworkflow
|
41a985be595c76e6f1257cd65c097f654a3c779e
|
[
"MIT"
] | 1
|
2018-10-09T17:59:53.000Z
|
2018-10-09T17:59:53.000Z
|
pypackage_scripts/__init__.py
|
marisalim/stonybrook_juypterworkflow
|
41a985be595c76e6f1257cd65c097f654a3c779e
|
[
"MIT"
] | 3
|
2018-10-09T17:08:46.000Z
|
2018-10-09T17:38:28.000Z
|
# Module-level sample values; nothing in this file reads them, so they are
# presumably exercised by the package's tests or tutorial -- confirm at
# the call sites.
x, y = 5.9, 6
| 4.666667
| 7
| 0.357143
| 5
| 14
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.375
| 0.428571
| 14
| 2
| 8
| 7
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
92ab04d9c4e5ed2dc30d7e736c6bb4a8b5abecb8
| 16,060
|
py
|
Python
|
uranium_quantum/circuit_exporter/cirq-exporter.py
|
radumarg/uranium_quantum
|
e9e62046a2b2e2f31bcab661d48d4bd721ca111a
|
[
"MIT"
] | null | null | null |
uranium_quantum/circuit_exporter/cirq-exporter.py
|
radumarg/uranium_quantum
|
e9e62046a2b2e2f31bcab661d48d4bd721ca111a
|
[
"MIT"
] | null | null | null |
uranium_quantum/circuit_exporter/cirq-exporter.py
|
radumarg/uranium_quantum
|
e9e62046a2b2e2f31bcab661d48d4bd721ca111a
|
[
"MIT"
] | null | null | null |
import importlib
BaseExporter = importlib.import_module("uranium_quantum.circuit_exporter.base-exporter")
class Exporter(BaseExporter.BaseExporter):
def _define_import_code_section(self):
return f"\
import cirq\n\
import numpy as np\n\
\n\
q = [cirq.NamedQubit('q' + str(i)) for i in range({self._qubits})]\n\
\n"
def _define_u3_gates_code_section(self):
return "\
# define the u3 gate\n\
def u3(theta_radians, phi_radians, lambda_radians):\n\
return cirq.MatrixGate(np.array([[np.cos(theta_radians/2), -np.exp(1j * lambda_radians) * np.sin(theta_radians/2)], [np.exp(1j * phi_radians) * np.sin(theta_radians/2), np.exp(1j * lambda_radians+1j * phi_radians) * np.cos(theta_radians/2)]]))\n\
\n"
def _define_u2_gates_code_section(self):
return "\
# define the u2 gate\n\
def u2(phi_radians, lambda_radians):\n\
return cirq.MatrixGate(np.array([[1/np.sqrt(2), -np.exp(1j * lambda_radians) * 1/np.sqrt(2)], [np.exp(1j * phi_radians) * 1/np.sqrt(2), np.exp(1j * lambda_radians + 1j * phi_radians) * 1/np.sqrt(2)]]))\n\
\n"
def _define_u1_gates_code_section(self):
return "\
def u1(lambda_radians):\n\
return cirq.MatrixGate(np.array([[1, 0], [0, np.exp(1j * lambda_radians)]]))\n\
\n"
def _define_crtl_u1(self):
return "\
# define ctrl-u1 gate\n\
def cu1(lambda_radians):\n\
return cirq.MatrixGate(np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, np.exp(1j * lambda_radians)]]))\n\
\n"
def _define_crtl_u2(self):
return "\
# define ctrl-u2 gate\n\
def cu2(phi_radians, lambda_radians):\n\
return cirq.MatrixGate(np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1/np.sqrt(2), -np.exp(1j * lambda_radians) * 1/np.sqrt(2)], [0, 0, np.exp(1j * phi_radians) * 1/np.sqrt(2), np.exp(1j * lambda_radians + 1j * phi_radians) * 1/np.sqrt(2)]]))\n\
\n"
def _define_crtl_u3(self):
return "\
# define ctrl-u3 gate\n\
def cu3(theta_radians, phi_radians, lambda_radians):\n\
return cirq.MatrixGate(np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.cos(theta_radians/2), -np.exp(1j * lambda_radians) * np.sin(theta_radians/2)], [0, 0, np.exp(1j * phi_radians) * np.sin(theta_radians/2), np.exp(1j * lambda_radians+1j * phi_radians) * np.cos(theta_radians/2)]]))\n\
\n"
def start_code(self):
return (
self._define_import_code_section()
+ "\n"
+ self._define_u3_gates_code_section()
+ "\n"
+ self._define_u2_gates_code_section()
+ "\n"
+ self._define_u1_gates_code_section()
+ "\n"
+ self._define_crtl_u1()
+ "\n"
+ self._define_crtl_u2()
+ "\n"
+ self._define_crtl_u3()
+ "\n"
+ "circuit = cirq.Circuit(\n\n"
)
def end_code(self):
return f"\
)\n\
\n\
simulator = cirq.Simulator()\n\
simulator.run(circuit, repetitions=1000)\n"
@staticmethod
def _gate_u3(
target, theta_radians, phi_radians, lambda_radians, add_comments=True
):
out = " # u3 gate\n" if add_comments else ""
out += (
f" u3({theta_radians}, {phi_radians}, {lambda_radians})(q[{target}]),\n"
)
return out
@staticmethod
def _gate_u2(target, phi_radians, lambda_radians, add_comments=True):
out = " # u2 gate\n" if add_comments else ""
out += f" u2({phi_radians}, {lambda_radians})(q[{target}]),\n"
return out
@staticmethod
def _gate_u1(target, lambda_radians, add_comments=True):
out = " # u1 gate\n" if add_comments else ""
out += f" u1({lambda_radians})(q[{target}]),\n"
return out
@staticmethod
def _gate_identity(target, add_comments=True):
out = " # identity gate\n" if add_comments else ""
out += f" cirq.I(q[{target}]),\n"
return out
@staticmethod
def _gate_hadamard(target, add_comments=True):
out = " # hadamard gate\n" if add_comments else ""
out += f" cirq.H(q[{target}]),\n"
return out
@staticmethod
def _gate_pauli_x(target, add_comments=True):
out = " # pauli-x gate\n" if add_comments else ""
out += f" cirq.X(q[{target}]),\n"
return out
@staticmethod
def _gate_pauli_y(target, add_comments=True):
out = " # pauli-y gate\n" if add_comments else ""
out += f" cirq.Y(q[{target}]),\n"
return out
@staticmethod
def _gate_pauli_z(target, add_comments=True):
out = " # pauli-z gate\n" if add_comments else ""
out += f" cirq.Z(q[{target}]),\n"
return out
@staticmethod
def _gate_pauli_x_root(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-x-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_y_root(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-y-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_z_root(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-z-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_x_root_dagger(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-x-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_y_root_dagger(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-y-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_z_root_dagger(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-z-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_sqrt_not(target, add_comments=True):
out = " # sqrt-not gate\n" if add_comments else ""
out += f" (cirq.X**(1/2))(q[{target}]),\n"
return out
@staticmethod
def _gate_t(target, add_comments=True):
out = " # t gate\n" if add_comments else ""
out += f" cirq.T(q[{target}]),\n"
return out
@staticmethod
def _gate_t_dagger(target, add_comments=True):
out = " # t-dagger gate\n" if add_comments else ""
out += f" u1(-np.pi / 4)(q[{target}]),\n"
return out
@staticmethod
def _gate_rx_theta(target, theta, add_comments=True):
out = " # rx-theta gate\n" if add_comments else ""
out += f" cirq.rx(0)(q[{target}]),\n"
return out
@staticmethod
def _gate_ry_theta(target, theta, add_comments=True):
out = " # ry-theta gate\n" if add_comments else ""
out += f" cirq.ry(0)(q[{target}]),\n"
return out
@staticmethod
def _gate_rz_theta(target, theta, add_comments=True):
out = " # rz-theta gate\n" if add_comments else ""
out += f" cirq.rz(0)(q[{target}]),\n"
return out
@staticmethod
def _gate_s(target, add_comments=True):
out = " # s gate\n" if add_comments else ""
out += f" cirq.S(q[{target}]),\n"
return out
@staticmethod
def _gate_s_dagger(target, add_comments=True):
out = " # s-dagger gate\n" if add_comments else ""
out += f" u1(-np.pi / 2)(q[{target}]),\n"
return out
@staticmethod
def _gate_swap(target, target2, add_comments=True): ##
out = " # swap gate\n" if add_comments else ""
out += f" cirq.SWAP(q[{target}], q[{target2}]),\n"
return out
@staticmethod
def _gate_iswap(target, target2, add_comments=True):
out = " # iswap gate\n" if add_comments else ""
out += f" cirq.ISWAP(q[{target}], q[{target2}]),\n"
return out
@staticmethod
def _gate_swap_phi(target, target2, phi, add_comments=True):
raise BaseExporter.ExportException("The swap-phi gate is not implemented.")
@staticmethod
def _gate_sqrt_swap(target, target2, add_comments=True):
out = " # sqrt-swap gate\n" if add_comments else ""
out += f" (cirq.SWAP**(1/2))(q[{target}], q[{target2}]),\n"
return out
@staticmethod
def _gate_xx(target, target2, theta, add_comments=True):
out = "# xx gate\n" if add_comments else ""
return out
@staticmethod
def _gate_yy(target, target2, theta, add_comments=True):
out = "# yy gate\n" if add_comments else ""
return out
@staticmethod
def _gate_zz(target, target2, theta, add_comments=True):
out = "# zz gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_hadamard(control, target, controlstate, add_comments=True):
out = " # ctrl-hadamard gate\n" if add_comments else ""
out += f" cirq.H.controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_u3(
control,
target,
controlstate,
theta_radians,
phi_radians,
lambda_radians,
add_comments=True,
):
out = " # ctrl-u3 gate\n" if add_comments else ""
out += f" cu3({theta_radians}, {phi_radians}, {lambda_radians})(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_u2(
control, target, controlstate, phi_radians, lambda_radians, add_comments=True
):
out = " # ctrl-u2 gate\n" if add_comments else ""
out += f" cu2({phi_radians}, {lambda_radians})(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_u1(
control, target, controlstate, lambda_radians, add_comments=True
):
out = " # ctrl-u1 gate\n" if add_comments else ""
out += f" cu1({lambda_radians})(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_t(control, target, controlstate, add_comments=True):
out = " # ctrl-t gate\n" if add_comments else ""
out += f" cu1(np.pi / 4)(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_t_dagger(control, target, controlstate, add_comments=True):
out = " # ctrl-t-dagger gate\n" if add_comments else ""
out += f" cu1(-np.pi / 4)(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_pauli_x(control, target, controlstate, add_comments=True):
out = " # ctrl-pauli-x gate\n" if add_comments else ""
out += f" cirq.CNOT(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_pauli_y(control, target, controlstate, add_comments=True):
out = " # ctrl-pauli-y gate\n" if add_comments else ""
out += f" cirq.Y.controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_pauli_z(control, target, controlstate, add_comments=True):
out = " # ctrl-pauli-z gate\n" if add_comments else ""
out += f" cirq.CZ(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_pauli_x_root(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-x-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_y_root(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-y-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_z_root(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-z-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_x_root_dagger(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-x-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_y_root_dagger(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-y-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_z_root_dagger(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-z-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_sqrt_not(control, target, controlstate, add_comments=True):
out = " # ctrl-sqrt-not gate\n" if add_comments else ""
out += f" (cirq.X**(1/2)).controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_rx_theta(
control, target, controlstate, theta_radians, add_comments=True
):
out = " # ctrl-rx-theta gate\n" if add_comments else ""
out += f" cirq.rx({theta_radians}).controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_ry_theta(
control, target, controlstate, theta_radians, add_comments=True
):
out = " # ctrl-ry-theta gate\n" if add_comments else ""
out += f" cirq.ry({theta_radians}).controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_rz_theta(
control, target, controlstate, theta_radians, add_comments=True
):
out = " # ctrl-rz-theta gate\n" if add_comments else ""
out += f" cirq.rz({theta_radians}).controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_s(control, target, controlstate, add_comments=True):
out = " # ctrl-s gate\n" if add_comments else ""
out += f" cu1(np.pi / 2)(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_s_dagger(control, target, controlstate, add_comments=True):
out = " # ctrl-s-dagger gate\n" if add_comments else ""
out += f" cu1(-np.pi / 2)(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_toffoli(
control, control2, target, controlstate, controlstate2, add_comments=True
):
out = " # toffoli gate\n" if add_comments else ""
out += f" cirq.CSWAP(q[{control}], q[{control2}], q[{target}]),\n"
return out
@staticmethod
def _gate_fredkin(control, target, target2, controlstate, add_comments=True):
out = " # fredkin gate\n" if add_comments else ""
out += f" cirq.CCX(q[{control}], q[{target}], q[{target2}]),\n"
return out
@staticmethod
def _gate_measure_x(target, classic_bit, add_comments=True):
    """X-basis measurement is not supported by this exporter; always raises."""
    raise BaseExporter.ExportException("The measure-x gate is not implemented.")
@staticmethod
def _gate_measure_y(target, classic_bit, add_comments=True):
    """Y-basis measurement is not supported by this exporter; always raises."""
    raise BaseExporter.ExportException("The measure-y gate is not implemented.")
@staticmethod
def _gate_measure_z(target, classic_bit, add_comments=True):
out = " # measure-z gate\n" if add_comments else ""
out += f" cirq.measure(q[{target}], key='c{classic_bit}'),\n"
return out
| 35.688889
| 290
| 0.591905
| 2,218
| 16,060
| 4.094229
| 0.051398
| 0.129611
| 0.115075
| 0.057262
| 0.893184
| 0.869288
| 0.830305
| 0.773703
| 0.736042
| 0.616011
| 0
| 0.016946
| 0.261457
| 16,060
| 449
| 291
| 35.768374
| 0.748672
| 0.003674
| 0
| 0.447439
| 0
| 0.021563
| 0.204179
| 0.067184
| 0
| 0
| 0
| 0.002227
| 0
| 1
| 0.172507
| false
| 0
| 0.016173
| 0.024259
| 0.371968
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2b87d675a6f442fa8fad6cb0d2e54cf6305fe641
| 35
|
py
|
Python
|
libs/yowsup/yowsup/yowsup/demos/contacts/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 22
|
2017-07-14T20:01:17.000Z
|
2022-03-08T14:22:39.000Z
|
libs/yowsup/yowsup/yowsup/demos/contacts/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 6
|
2017-07-14T21:03:50.000Z
|
2021-06-10T19:08:32.000Z
|
libs/yowsup/yowsup/yowsup/demos/contacts/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 13
|
2017-07-14T20:13:14.000Z
|
2020-11-12T08:06:05.000Z
|
from .stack import YowsupSyncStack
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2be2c8b2b924371bb312ae6447d33de45c5c44f4
| 170
|
py
|
Python
|
layers/__init__.py
|
MSU-MLSys-Lab/CATE
|
654c393d7df888d2c3f3b90f9e6752faa061157e
|
[
"Apache-2.0"
] | 15
|
2021-06-09T00:50:53.000Z
|
2022-03-15T07:01:43.000Z
|
layers/__init__.py
|
MSU-MLSys-Lab/CATE
|
654c393d7df888d2c3f3b90f9e6752faa061157e
|
[
"Apache-2.0"
] | null | null | null |
layers/__init__.py
|
MSU-MLSys-Lab/CATE
|
654c393d7df888d2c3f3b90f9e6752faa061157e
|
[
"Apache-2.0"
] | 4
|
2021-06-09T01:01:43.000Z
|
2021-11-03T06:16:50.000Z
|
from .graphEncoder import PairWiseLearning
from .graphEncoder import GraphEncoder
from .loss import KLDivLoss
__all__ = ["PairWiseLearning", "KLDivLoss", "GraphEncoder"]
| 34
| 59
| 0.817647
| 16
| 170
| 8.4375
| 0.4375
| 0.237037
| 0.325926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 170
| 5
| 59
| 34
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0.216374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a62f3dbfa49a7670d1b3d524f1aa46c427acc233
| 142
|
py
|
Python
|
munimap/model/__init__.py
|
MrSnyder/bielefeldGEOCLIENT
|
17c78b43fc2055d23a1bc4b5091da164756bf767
|
[
"Apache-2.0"
] | 2
|
2022-02-07T13:20:45.000Z
|
2022-02-14T21:40:06.000Z
|
munimap/model/__init__.py
|
MrSnyder/bielefeldGEOCLIENT
|
17c78b43fc2055d23a1bc4b5091da164756bf767
|
[
"Apache-2.0"
] | 4
|
2021-06-17T07:53:53.000Z
|
2021-12-17T10:55:48.000Z
|
munimap/model/__init__.py
|
MrSnyder/bielefeldGEOCLIENT
|
17c78b43fc2055d23a1bc4b5091da164756bf767
|
[
"Apache-2.0"
] | 2
|
2021-06-01T09:41:55.000Z
|
2022-02-14T17:33:33.000Z
|
from .mb_group import *
from .mb_user import *
from .layer import *
from .project import *
from .draw_schema import *
from .settings import *
| 20.285714
| 26
| 0.746479
| 21
| 142
| 4.904762
| 0.47619
| 0.485437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169014
| 142
| 6
| 27
| 23.666667
| 0.872881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a670ec3be4a4d71837f4d01e9fb690e0ead8b79f
| 31
|
py
|
Python
|
VideoTools/__init__.py
|
ausport/labelImg
|
8d50ea65c5e56aea801510edcde0c3b27daa2703
|
[
"MIT"
] | null | null | null |
VideoTools/__init__.py
|
ausport/labelImg
|
8d50ea65c5e56aea801510edcde0c3b27daa2703
|
[
"MIT"
] | null | null | null |
VideoTools/__init__.py
|
ausport/labelImg
|
8d50ea65c5e56aea801510edcde0c3b27daa2703
|
[
"MIT"
] | null | null | null |
from .Video import VideoObject
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a672dc4b1bad1cb9496578d59bad0ba1297ff670
| 128
|
py
|
Python
|
ssig_site/context_processors.py
|
LeoMcA/103P_2018_team51
|
cca9e022456b1e2653f0b69420ea914661c39b27
|
[
"MIT"
] | null | null | null |
ssig_site/context_processors.py
|
LeoMcA/103P_2018_team51
|
cca9e022456b1e2653f0b69420ea914661c39b27
|
[
"MIT"
] | 61
|
2018-02-22T11:10:48.000Z
|
2022-03-11T23:20:25.000Z
|
ssig_site/context_processors.py
|
LeoMcA/103P_2018_team51
|
cca9e022456b1e2653f0b69420ea914661c39b27
|
[
"MIT"
] | 2
|
2018-02-10T11:26:52.000Z
|
2018-02-21T12:14:36.000Z
|
from django.conf import settings as s
def settings(request):
return {
'GOOGLE_MAPS_KEY': s.GOOGLE_MAPS_KEY,
}
| 16
| 45
| 0.671875
| 18
| 128
| 4.555556
| 0.722222
| 0.243902
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242188
| 128
| 7
| 46
| 18.285714
| 0.845361
| 0
| 0
| 0
| 0
| 0
| 0.117188
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
a68595267eb7b6aa11b2f3e56c5415a63f468933
| 34
|
py
|
Python
|
treasure/Phase1/Basic Python1/hello.py
|
treasurechristain/python-challenge-solutions
|
a7342a2e3629d34d6eaace95cc7d08a74e5edb1e
|
[
"MIT"
] | null | null | null |
treasure/Phase1/Basic Python1/hello.py
|
treasurechristain/python-challenge-solutions
|
a7342a2e3629d34d6eaace95cc7d08a74e5edb1e
|
[
"MIT"
] | null | null | null |
treasure/Phase1/Basic Python1/hello.py
|
treasurechristain/python-challenge-solutions
|
a7342a2e3629d34d6eaace95cc7d08a74e5edb1e
|
[
"MIT"
] | null | null | null |
print('My name is Amadikwa Joy N')
| 34
| 34
| 0.735294
| 7
| 34
| 3.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 1
| 34
| 34
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
a6cefbe44a0423aa06bda7a7e2afff5e29962dd2
| 26,813
|
py
|
Python
|
model/resnet.py
|
xzgz/vehicle-reid
|
10b05bff73e6d5c2d3a60251674bfdcab744c459
|
[
"MIT"
] | 3
|
2019-05-19T12:29:14.000Z
|
2019-06-08T03:05:53.000Z
|
model/resnet.py
|
xzgz/vehicle-reid
|
10b05bff73e6d5c2d3a60251674bfdcab744c459
|
[
"MIT"
] | 2
|
2019-04-07T08:19:54.000Z
|
2019-04-11T08:39:17.000Z
|
model/resnet.py
|
xzgz/vehicle-reid
|
10b05bff73e6d5c2d3a60251674bfdcab744c459
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
import torch
import copy
from torch import nn
from torch.nn import functional as F
from torchvision.models.resnet import resnet50, Bottleneck
from .hacnn import SoftBlock, SoftHardBlock
import torchvision
class ResNet50(nn.Module):
def __init__(self, num_classes, loss_type='xent', **kwargs):
super(ResNet50, self).__init__()
self.loss_type = loss_type
resnet50 = torchvision.models.resnet50(pretrained=True)
self.base = nn.Sequential(*list(resnet50.children())[:-2])
self.classifier = nn.Linear(2048, num_classes)
def forward(self, x):
x = self.base(x)
x = F.avg_pool2d(x, x.size()[2:])
f = x.view(x.size(0), -1)
if self.loss_type == 'xent':
if self.training:
y = self.classifier(f)
return [y]
else:
feat = torch.div(f, f.norm(dim=1, keepdim=True))
return feat
elif self.loss_type in ['xent_triplet', 'xent_tripletv2', 'xent_triplet_sqrt', 'xent_triplet_squa']:
feat = torch.div(f, f.norm(dim=1, keepdim=True))
if self.training:
y = self.classifier(f)
return [y], feat
else:
return feat
else:
raise KeyError("Unsupported loss: {}".format(self.loss_type))
class MGN(nn.Module):
def __init__(self, num_classes, loss_type='xent', **kwargs):
super(MGN, self).__init__()
self.loss_type = loss_type
self.dimension_branch = 512
# self.dimension_branch = 1024
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
# nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False),
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1, # res_conv2
resnet.layer2, # res_conv3
resnet.layer3[0], # res_conv4_1
)
# res_conv4x
res_conv4 = nn.Sequential(*resnet.layer3[1:])
res_g_conv5 = resnet.layer4
res_p_conv5 = nn.Sequential(
Bottleneck(1024, 512, downsample=nn.Sequential(nn.Conv2d(1024, 2048, 1, bias=False), nn.BatchNorm2d(2048))),
Bottleneck(2048, 512),
Bottleneck(2048, 512))
res_p_conv5.load_state_dict(resnet.layer4.state_dict())
self.p1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_g_conv5))
self.p2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))
self.maxpool_zg_p1 = nn.MaxPool2d(kernel_size=(8, 8))
self.maxpool_zg_p2 = nn.MaxPool2d(kernel_size=(16, 16))
reduction_512 = nn.Sequential(nn.Conv2d(2048, self.dimension_branch, 1, bias=False),
nn.BatchNorm2d(self.dimension_branch), nn.ReLU())
self.reduction_1 = copy.deepcopy(reduction_512)
self.reduction_2 = copy.deepcopy(reduction_512)
self.fc_id_512_1 = nn.Linear(self.dimension_branch, num_classes)
self.fc_id_512_2 = nn.Linear(self.dimension_branch, num_classes)
# self.fc_id_512_1 = nn.Linear(2048, num_classes)
# self.fc_id_512_2 = nn.Linear(2048, num_classes)
def forward(self, x):
x = self.backbone(x)
p1 = self.p1(x)
p2 = self.p2(x)
zg_p1 = self.maxpool_zg_p1(p1)
zg_p2 = self.maxpool_zg_p2(p2)
fg_p1 = self.reduction_1(zg_p1).squeeze(dim=3).squeeze(dim=2)
fg_p2 = self.reduction_2(zg_p2).squeeze(dim=3).squeeze(dim=2)
l_p1 = self.fc_id_512_1(fg_p1)
l_p2 = self.fc_id_512_2(fg_p2)
# l_p1 = self.fc_id_512_1(zg_p1.squeeze(dim=3).squeeze(dim=2))
# l_p2 = self.fc_id_512_2(zg_p2.squeeze(dim=3).squeeze(dim=2))
if self.loss_type in ['xent']:
if self.training:
feat_clfy = [l_p1, l_p2]
return feat_clfy
else:
# feat_embed = torch.cat([fg_p1, fg_p2], dim=1)
# feat_embed = torch.div(feat_embed, feat_embed.norm(dim=1, keepdim=True))
# return feat_embed
# fg_p1 = torch.div(fg_p1, fg_p1.norm(dim=1, keepdim=True))
# fg_p2 = torch.div(fg_p2, fg_p2.norm(dim=1, keepdim=True))
feat_global = torch.cat([fg_p1, fg_p2], dim=1)
feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
return feat_global
elif self.loss_type in ['xent_triplet', 'xent_tripletv2', 'xent_triplet_sqrt', 'xent_triplet_squa']:
# # feat_clfy = torch.cat([l_p1, l_p2], dim=0)
# feat_clfy = [l_p1, l_p2]
# # feat_clfy = l_p1
# feat_global = torch.cat([fg_p1, fg_p2], dim=1)
# # feat_global = fg_p1
# feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
# # feat_local = torch.cat([fz_p1, fz_p2, fz_p3, fz_p4], dim=1)
# # feat_local = torch.div(feat_local, feat_local.norm(dim=1, keepdim=True))
# if self.training:
# return feat_clfy, feat_global
# else:
# return feat_global
# feat_clfy = [l_p1, l_p2]
# fg_p1 = torch.div(fg_p1, fg_p1.norm(dim=1, keepdim=True))
# fg_p2 = torch.div(fg_p2, fg_p2.norm(dim=1, keepdim=True))
# feat_global = [fg_p1, fg_p2]
# if self.training:
# return feat_clfy, feat_global
# else:
# feat_global = torch.cat([fg_p1, fg_p2], dim=1)
# return feat_global
# feat_clfy = [l_p1, l_p2]
# feat_global = [fg_p1, fg_p2]
# if self.training:
# return feat_clfy, feat_global
# else:
# # fg_p1 = torch.div(fg_p1, fg_p1.norm(dim=1, keepdim=True))
# # fg_p2 = torch.div(fg_p2, fg_p2.norm(dim=1, keepdim=True))
# feat_global = torch.cat([fg_p1, fg_p2], dim=1)
# feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
# return feat_global
feat_clfy = [l_p1, l_p2]
feat_global = torch.cat([fg_p1, fg_p2], dim=1)
feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
if self.training:
# fg_p1 = torch.div(fg_p1, fg_p1.norm(dim=1, keepdim=True))
# fg_p2 = torch.div(fg_p2, fg_p2.norm(dim=1, keepdim=True))
# feat_global = [fg_p1, fg_p2]
return feat_clfy, feat_global
else:
# feat_global = torch.cat([fg_p1, fg_p2], dim=1)
# feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
return feat_global
else:
raise KeyError("Unsupported loss: {}".format(self.loss_type))
class OriginMGN(nn.Module):
"""
@ARTICLE{2018arXiv180401438W,
author = {{Wang}, G. and {Yuan}, Y. and {Chen}, X. and {Li}, J. and {Zhou}, X.},
title = "{Learning Discriminative Features with Multiple Granularities for Person Re-Identification}",
journal = {ArXiv e-prints},
archivePrefix = "arXiv",
eprint = {1804.01438},
primaryClass = "cs.CV",
keywords = {Computer Science - Computer Vision and Pattern Recognition},
year = 2018,
month = apr,
adsurl = {http://adsabs.harvard.edu/abs/2018arXiv180401438W},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
"""
def __init__(self, num_classes, loss_type='xent', **kwargs):
super(OriginMGN, self).__init__()
self.loss_type = loss_type
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1, # res_conv2
resnet.layer2, # res_conv3
resnet.layer3[0], # res_conv4_1
)
# res_conv4x
res_conv4 = nn.Sequential(*resnet.layer3[1:])
# res_conv5 global
res_g_conv5 = resnet.layer4
# res_conv5 part
res_p_conv5 = nn.Sequential(
Bottleneck(1024, 512, downsample=nn.Sequential(nn.Conv2d(1024, 2048, 1, bias=False), nn.BatchNorm2d(2048))),
Bottleneck(2048, 512),
Bottleneck(2048, 512))
res_p_conv5.load_state_dict(resnet.layer4.state_dict())
# mgn part-1 global
self.p1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_g_conv5))
# mgn part-2
self.p2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))
# mgn part-3
self.p3 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))
# global max pooling
self.maxpool_zg_p1 = nn.MaxPool2d(kernel_size=(12, 4))
self.maxpool_zg_p2 = nn.MaxPool2d(kernel_size=(24, 8))
self.maxpool_zg_p3 = nn.MaxPool2d(kernel_size=(24, 8))
self.maxpool_zp2 = nn.MaxPool2d(kernel_size=(12, 8))
self.maxpool_zp3 = nn.MaxPool2d(kernel_size=(8, 8))
# conv1 reduce
reduction = nn.Sequential(nn.Conv2d(2048, 256, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU())
self.reduction_0 = copy.deepcopy(reduction)
self.reduction_1 = copy.deepcopy(reduction)
self.reduction_2 = copy.deepcopy(reduction)
self.reduction_3 = copy.deepcopy(reduction)
self.reduction_4 = copy.deepcopy(reduction)
self.reduction_5 = copy.deepcopy(reduction)
self.reduction_6 = copy.deepcopy(reduction)
self.reduction_7 = copy.deepcopy(reduction)
# fc softmax loss
self.fc_id_2048_0 = nn.Linear(2048, num_classes)
self.fc_id_2048_1 = nn.Linear(2048, num_classes)
self.fc_id_2048_2 = nn.Linear(2048, num_classes)
self.fc_id_256_1_0 = nn.Linear(256, num_classes)
self.fc_id_256_1_1 = nn.Linear(256, num_classes)
self.fc_id_256_2_0 = nn.Linear(256, num_classes)
self.fc_id_256_2_1 = nn.Linear(256, num_classes)
self.fc_id_256_2_2 = nn.Linear(256, num_classes)
def forward(self, x):
x = self.backbone(x)
p1 = self.p1(x)
p2 = self.p2(x)
p3 = self.p3(x)
zg_p1 = self.maxpool_zg_p1(p1) # z_g^G
zg_p2 = self.maxpool_zg_p2(p2) # z_g^P2
zg_p3 = self.maxpool_zg_p3(p3) # z_g^P3
zp2 = self.maxpool_zp2(p2)
z0_p2 = zp2[:, :, 0:1, :] # z_p0^P2
z1_p2 = zp2[:, :, 1:2, :] # z_p1^P2
zp3 = self.maxpool_zp3(p3)
z0_p3 = zp3[:, :, 0:1, :] # z_p0^P3
z1_p3 = zp3[:, :, 1:2, :] # z_p1^P3
z2_p3 = zp3[:, :, 2:3, :] # z_p2^P3
fg_p1 = self.reduction_0(zg_p1).squeeze(dim=3).squeeze(dim=2) # f_g^G, L_triplet^G
fg_p2 = self.reduction_1(zg_p2).squeeze(dim=3).squeeze(dim=2) # f_g^P2, L_triplet^P2
fg_p3 = self.reduction_2(zg_p3).squeeze(dim=3).squeeze(dim=2) # f_g^P3, L_triplet^P3
f0_p2 = self.reduction_3(z0_p2).squeeze(dim=3).squeeze(dim=2) # f_p0^P2
f1_p2 = self.reduction_4(z1_p2).squeeze(dim=3).squeeze(dim=2) # f_p1^P2
f0_p3 = self.reduction_5(z0_p3).squeeze(dim=3).squeeze(dim=2) # f_p0^P3
f1_p3 = self.reduction_6(z1_p3).squeeze(dim=3).squeeze(dim=2) # f_p1^P3
f2_p3 = self.reduction_7(z2_p3).squeeze(dim=3).squeeze(dim=2) # f_p2^P3
l_p1 = self.fc_id_2048_0(zg_p1.squeeze(dim=3).squeeze(dim=2)) # L_softmax^G
l_p2 = self.fc_id_2048_1(zg_p2.squeeze(dim=3).squeeze(dim=2)) # L_softmax^P2
l_p3 = self.fc_id_2048_2(zg_p3.squeeze(dim=3).squeeze(dim=2)) # L_softmax^P3
l0_p2 = self.fc_id_256_1_0(f0_p2) # L_softmax0^P2
l1_p2 = self.fc_id_256_1_1(f1_p2) # L_softmax1^P2
l0_p3 = self.fc_id_256_2_0(f0_p3) # L_softmax0^P3
l1_p3 = self.fc_id_256_2_1(f1_p3) # L_softmax1^P3
l2_p3 = self.fc_id_256_2_2(f2_p3) # L_softmax2^P3
if self.loss_type in ['xent_triplet', 'xent_tripletv2', 'xent_triplet_sqrt', 'xent_triplet_squa']:
if self.training:
feat_clfy = [l_p1, l_p2, l_p3, l0_p2, l1_p2, l0_p3, l1_p3, l2_p3]
feat = torch.cat([fg_p1, fg_p2, fg_p3], dim=1)
feat = torch.div(feat, feat.norm(dim=1, keepdim=True))
return feat_clfy, feat
else:
feat = torch.cat([fg_p1, fg_p2, fg_p3, f0_p2, f1_p2, f0_p3, f1_p3, f2_p3], dim=1)
feat = torch.div(feat, feat.norm(dim=1, keepdim=True))
return feat
else:
raise KeyError("Unsupported loss: {}".format(self.loss_type))
class MGNB4(nn.Module):
def __init__(self, num_classes, loss_type='xent', **kwargs):
super(MGNB4, self).__init__()
self.loss_type = loss_type
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1, # res_conv2
resnet.layer2, # res_conv3
resnet.layer3[0], # res_conv4_1
)
# res_conv4x
res_conv4 = nn.Sequential(*resnet.layer3[1:])
res_conv5 = resnet.layer4
self.b1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5))
self.b2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5))
self.b3 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5))
self.b4 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5))
self.maxpool_b1 = nn.MaxPool2d(kernel_size=(8, 8))
self.maxpool_b2 = nn.MaxPool2d(kernel_size=(8, 8))
self.maxpool_b3 = nn.MaxPool2d(kernel_size=(8, 8))
self.maxpool_b4 = nn.MaxPool2d(kernel_size=(8, 8))
reduction_512 = nn.Sequential(nn.Conv2d(2048, 512, 1, bias=False), nn.BatchNorm2d(512), nn.ReLU())
self.reduction_1 = copy.deepcopy(reduction_512)
self.reduction_2 = copy.deepcopy(reduction_512)
self.reduction_3 = copy.deepcopy(reduction_512)
self.reduction_4 = copy.deepcopy(reduction_512)
self.fc_id_512_1 = nn.Linear(512, num_classes)
self.fc_id_512_2 = nn.Linear(512, num_classes)
self.fc_id_512_3 = nn.Linear(512, num_classes)
self.fc_id_512_4 = nn.Linear(512, num_classes)
def forward(self, x):
x = self.backbone(x)
b1 = self.b1(x)
b2 = self.b2(x)
b3 = self.b3(x)
b4 = self.b4(x)
pb1 = self.maxpool_b1(b1)
pb2 = self.maxpool_b2(b2)
pb3 = self.maxpool_b3(b3)
pb4 = self.maxpool_b4(b4)
f_b1 = self.reduction_1(pb1).squeeze(dim=3).squeeze(dim=2)
f_b2 = self.reduction_2(pb2).squeeze(dim=3).squeeze(dim=2)
f_b3 = self.reduction_3(pb3).squeeze(dim=3).squeeze(dim=2)
f_b4 = self.reduction_4(pb4).squeeze(dim=3).squeeze(dim=2)
cf_b1 = self.fc_id_512_1(f_b1)
cf_b2 = self.fc_id_512_2(f_b2)
cf_b3 = self.fc_id_512_3(f_b3)
cf_b4 = self.fc_id_512_4(f_b4)
if self.loss_type in ['xent']:
if self.training:
feat_clfy = [cf_b1, cf_b2, cf_b3, cf_b4]
return feat_clfy
else:
feat_global = torch.cat([f_b1, f_b2, f_b3, f_b4], dim=1)
feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
return feat_global
elif self.loss_type in ['xent_triplet', 'xent_tripletv2']:
feat_clfy = [cf_b1, cf_b2, cf_b3, cf_b4]
feat_global = torch.cat([f_b1, f_b2, f_b3, f_b4], dim=1)
feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
if self.training:
return feat_clfy, feat_global
else:
return feat_global
else:
raise KeyError("Unsupported loss: {}".format(self.loss_type))
class MGNB2(nn.Module):
def __init__(self, num_classes, loss_type='xent', **kwargs):
super(MGNB2, self).__init__()
self.loss_type = loss_type
self.dimension_branch = 1024
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1, # res_conv2
resnet.layer2, # res_conv3
resnet.layer3[0], # res_conv4_1
)
# res_conv4x
res_conv4 = nn.Sequential(*resnet.layer3[1:])
res_conv5 = resnet.layer4
self.b1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5))
self.b2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5))
self.maxpool_b1 = nn.MaxPool2d(kernel_size=(8, 8))
self.maxpool_b2 = nn.MaxPool2d(kernel_size=(8, 8))
reduction_512 = nn.Sequential(nn.Conv2d(2048, self.dimension_branch, 1, bias=False),
nn.BatchNorm2d(self.dimension_branch), nn.ReLU())
self.reduction_1 = copy.deepcopy(reduction_512)
self.reduction_2 = copy.deepcopy(reduction_512)
self.fc_id_512_1 = nn.Linear(self.dimension_branch, num_classes)
self.fc_id_512_2 = nn.Linear(self.dimension_branch, num_classes)
def forward(self, x):
x = self.backbone(x)
b1 = self.b1(x)
b2 = self.b2(x)
pb1 = self.maxpool_b1(b1)
pb2 = self.maxpool_b2(b2)
f_b1 = self.reduction_1(pb1).squeeze(dim=3).squeeze(dim=2)
f_b2 = self.reduction_2(pb2).squeeze(dim=3).squeeze(dim=2)
cf_b1 = self.fc_id_512_1(f_b1)
cf_b2 = self.fc_id_512_2(f_b2)
if self.loss_type in ['xent']:
if self.training:
feat_clfy = [cf_b1, cf_b2]
return feat_clfy
else:
feat_global = torch.cat([f_b1, f_b2], dim=1)
feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
return feat_global
elif self.loss_type in ['xent_triplet', 'xent_tripletv2', 'xent_triplet_sqrt', 'xent_triplet_squa']:
feat_clfy = [cf_b1, cf_b2]
feat_global = torch.cat([f_b1, f_b2], dim=1)
feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
if self.training:
return feat_clfy, feat_global
else:
return feat_global
else:
raise KeyError("Unsupported loss: {}".format(self.loss_type))
class ResSoAttn(nn.Module):
def __init__(self, num_classes, loss_type='xent', nchannels=[128, 256, 384], branch_feat_dim=682, **kwargs):
super(ResSoAttn, self).__init__()
self.loss_type = loss_type
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1, # res_conv2
resnet.layer2, # res_conv3
)
self.habk1 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim),
nn.Dropout(p=0.5, inplace=True))
self.habk2 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim),
nn.Dropout(p=0.5, inplace=True))
self.habk3 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim),
nn.Dropout(p=0.5, inplace=True))
self.fc_id_1 = nn.Linear(branch_feat_dim, num_classes)
self.fc_id_2 = nn.Linear(branch_feat_dim, num_classes)
self.fc_id_3 = nn.Linear(branch_feat_dim, num_classes)
def forward(self, x):
x = self.backbone(x)
f_b1 = self.habk1(x)
f_b2 = self.habk2(x)
f_b3 = self.habk3(x)
cf_b1 = self.fc_id_1(f_b1)
cf_b2 = self.fc_id_2(f_b2)
cf_b3 = self.fc_id_3(f_b3)
if self.loss_type in ['xent']:
if self.training:
feat_clfy = [cf_b1, cf_b2, cf_b3]
return feat_clfy
else:
feat_global = torch.cat([f_b1, f_b2, f_b3], dim=1)
feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
return feat_global
elif self.loss_type in ['xent_triplet', 'xent_tripletv2']:
feat_clfy = [cf_b1, cf_b2, cf_b3]
feat_global = torch.cat([f_b1, f_b2, f_b3], dim=1)
feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
if self.training:
return feat_clfy, feat_global
else:
return feat_global
else:
raise KeyError("Unsupported loss: {}".format(self.loss_type))
class ResSoHaAttn(nn.Module):
def __init__(self, num_classes, loss_type='xent', nchannels=[128, 256, 384], branch_feat_dim=682, **kwargs):
super(ResSoHaAttn, self).__init__()
self.loss_type = loss_type
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1, # res_conv2
resnet.layer2, # res_conv3
)
self.habk1 = SoftHardBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim)
self.habk2 = SoftHardBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim)
self.habk3 = SoftHardBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim)
self.fc_id_1 = nn.Linear(branch_feat_dim, num_classes)
self.fc_id_2 = nn.Linear(branch_feat_dim, num_classes)
self.fc_id_3 = nn.Linear(branch_feat_dim, num_classes)
def forward(self, x):
x = self.backbone(x)
fg_b1, fl_b1 = self.habk1(x)
fg_b2, fl_b2 = self.habk2(x)
fg_b3, fl_b3 = self.habk3(x)
f_b1 = torch.cat([fg_b1, fl_b1], dim=1)
f_b2 = torch.cat([fg_b2, fl_b2], dim=1)
f_b3 = torch.cat([fg_b3, fl_b3], dim=1)
cf_b1 = self.fc_id_1(f_b1)
cf_b2 = self.fc_id_2(f_b2)
cf_b3 = self.fc_id_3(f_b3)
if self.loss_type in ['xent']:
if self.training:
feat_clfy = [cf_b1, cf_b2, cf_b3]
return feat_clfy
else:
feat = torch.cat([f_b1, f_b2, f_b3], dim=1)
feat = torch.div(feat, feat.norm(dim=1, keepdim=True))
return feat
elif self.loss_type in ['xent_triplet', 'xent_tripletv2']:
feat_clfy = [cf_b1, cf_b2, cf_b3]
# feat_global = torch.cat([fg_b1, fg_b2, fg_b3], dim=1)
# feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
feat = torch.cat([f_b1, f_b2, f_b3], dim=1)
feat = torch.div(feat, feat.norm(dim=1, keepdim=True))
if self.training:
# return feat_clfy, feat_global
return feat_clfy, feat
else:
# feat = torch.cat([f_b1, f_b2, f_b3], dim=1)
# feat = torch.div(feat, feat.norm(dim=1, keepdim=True))
return feat
else:
raise KeyError("Unsupported loss: {}".format(self.loss_type))
class Resv2SoAttn(nn.Module):
    """ResNet-v2 style backbone with three parallel soft-attention branches.

    The shared backbone feature map feeds three independent ``SoftBlock``
    branches (``habk1``..``habk3``), each followed by dropout and its own
    linear identity classifier.  What ``forward`` returns depends on
    ``loss_type`` — see its docstring.
    """
    def __init__(self, num_classes, loss_type='xent', nchannels=[256, 384, 512], branch_feat_dim=682, **kwargs):
        # NOTE(review): `nchannels` is a mutable default argument; it is only
        # read here so this is harmless, but a tuple default would be safer.
        super(Resv2SoAttn, self).__init__()
        self.loss_type = loss_type
        # Running channel count consumed and advanced by make_layer(); must be
        # initialised before the two make_layer calls below.
        self.inplanes = 16
        self.layer1 = self.make_layer(Bottleneck, 16, 3, stride=1)
        self.layer2 = self.make_layer(Bottleneck, 32, 4, stride=2)
        # Stem + the two residual stages.  layer1/layer2 are also direct
        # attributes, so they appear twice in the module tree but share the
        # same parameters.
        self.backbone = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            self.layer1,
            self.layer2,
        )
        # Three independent attention branches over the shared feature map.
        # assumes input_channel=128 equals layer2's output width
        # (32 * Bottleneck.expansion) — TODO confirm against Bottleneck.
        self.habk1 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=128, feat_dim=branch_feat_dim),
                                   nn.Dropout(p=0.5, inplace=True))
        self.habk2 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=128, feat_dim=branch_feat_dim),
                                   nn.Dropout(p=0.5, inplace=True))
        self.habk3 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=128, feat_dim=branch_feat_dim),
                                   nn.Dropout(p=0.5, inplace=True))
        # One identity-classification head per branch.
        self.fc_id_1 = nn.Linear(branch_feat_dim, num_classes)
        self.fc_id_2 = nn.Linear(branch_feat_dim, num_classes)
        self.fc_id_3 = nn.Linear(branch_feat_dim, num_classes)

    def make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks into one stage.

        A 1x1-conv downsample shortcut is added when the stride or channel
        width changes.  Side effect: advances ``self.inplanes`` to the
        stage's output width, so call order matters.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run the backbone and the three attention branches.

        Returns, by ``loss_type``:
          * 'xent': training -> list of three per-branch logits;
            eval -> L2-normalised concatenation of the branch features.
          * 'xent_triplet'/'xent_tripletv2': training -> (logits list,
            normalised feature); eval -> normalised feature.
        Raises KeyError for any other ``loss_type``.
        """
        x = self.backbone(x)
        f_b1 = self.habk1(x)
        f_b2 = self.habk2(x)
        f_b3 = self.habk3(x)
        cf_b1 = self.fc_id_1(f_b1)
        cf_b2 = self.fc_id_2(f_b2)
        cf_b3 = self.fc_id_3(f_b3)
        if self.loss_type in ['xent']:
            if self.training:
                feat_clfy = [cf_b1, cf_b2, cf_b3]
                return feat_clfy
            else:
                # L2-normalise the concatenated branch features for retrieval.
                feat_global = torch.cat([f_b1, f_b2, f_b3], dim=1)
                feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
                return feat_global
        elif self.loss_type in ['xent_triplet', 'xent_tripletv2']:
            feat_clfy = [cf_b1, cf_b2, cf_b3]
            feat_global = torch.cat([f_b1, f_b2, f_b3], dim=1)
            feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True))
            if self.training:
                return feat_clfy, feat_global
            else:
                return feat_global
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss_type))
| 41.895313
| 120
| 0.591168
| 3,777
| 26,813
| 3.931427
| 0.066455
| 0.055896
| 0.029093
| 0.031315
| 0.835208
| 0.805441
| 0.776012
| 0.762408
| 0.725503
| 0.675803
| 0
| 0.069181
| 0.288927
| 26,813
| 639
| 121
| 41.960876
| 0.70964
| 0.127475
| 0
| 0.660981
| 0
| 0
| 0.02429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036247
| false
| 0
| 0.01919
| 0
| 0.138593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a6d7b78ea02ffa24ebb016b8a7d036bd6219083b
| 146
|
py
|
Python
|
translations.py
|
anast20sm/Addarr
|
75b03d736478386b0e60ab5ff9132362cb3548da
|
[
"MIT"
] | null | null | null |
translations.py
|
anast20sm/Addarr
|
75b03d736478386b0e60ab5ff9132362cb3548da
|
[
"MIT"
] | null | null | null |
translations.py
|
anast20sm/Addarr
|
75b03d736478386b0e60ab5ff9132362cb3548da
|
[
"MIT"
] | null | null | null |
# Initialise the i18n translation machinery for the whole application:
# register the translations directory and select the configured locale.
import i18n
from config import config
from definitions import LANG_PATH
# LANG_PATH points at the directory holding the translation files.
i18n.load_path.append(LANG_PATH)
# assumes config["language"] is always present — TODO confirm a default
# exists in the config schema, otherwise this raises KeyError at import.
i18n.set('locale', config["language"])
| 18.25
| 38
| 0.80137
| 22
| 146
| 5.181818
| 0.545455
| 0.140351
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045802
| 0.10274
| 146
| 7
| 39
| 20.857143
| 0.824427
| 0
| 0
| 0
| 0
| 0
| 0.09589
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5b38680c2bdfab90a84ad1c547dc2a777ba0b2f1
| 4,885
|
py
|
Python
|
datahub/sql/controllers/gcp/create_dataset_controller.py
|
arpitkjain7/synapse
|
cb4cf28351bde94f4ad7ecc5df0714cfe5d616c6
|
[
"Apache-2.0"
] | 2
|
2021-08-02T07:56:38.000Z
|
2022-02-23T04:27:31.000Z
|
datahub/sql/controllers/gcp/create_dataset_controller.py
|
arpitkjain7/synapse
|
cb4cf28351bde94f4ad7ecc5df0714cfe5d616c6
|
[
"Apache-2.0"
] | null | null | null |
datahub/sql/controllers/gcp/create_dataset_controller.py
|
arpitkjain7/synapse
|
cb4cf28351bde94f4ad7ecc5df0714cfe5d616c6
|
[
"Apache-2.0"
] | null | null | null |
from commons.external_call import APIInterface
from sql import config
from sql.crud.dataset_crud import CRUDDataset
from datetime import datetime
class CreateDatasetController:
    """Create GCP AutoML datasets (text, NER, image, object detection).

    Each public ``create_*_dataset_controller`` method resolves the proper
    AutoML route from config and delegates to :meth:`_create_dataset`,
    which posts the request, records the new dataset via CRUD, and builds
    the reply.  The four original methods duplicated this entire flow;
    only the route and the ``problem_type`` tag differ.
    """

    def __init__(self):
        # The "gcp" sub-config holds the AutoML endpoint routes.
        self.gcp_config = config.get("core_engine").get("gcp")
        self.CRUDDataset = CRUDDataset()

    def _create_dataset(self, request, create_dataset_url, problem_type):
        """Shared create flow: POST to AutoML, persist metadata, build reply.

        :param request: request object exposing ``dict(exclude_none=True)``
            (pydantic-style) with at least ``display_name``.
        :param create_dataset_url: fully resolved AutoML route.
        :param problem_type: tag stored with the dataset record.
        :return: dict with ``dataset_name``/``dataset_id`` on success,
            ``{"status": "create dataset failed"}`` on any non-200 response.
        """
        # Epoch-second-derived unique id (seconds * 10000).
        uuid = str(int(datetime.now().timestamp()) * 10000)
        create_dataset_request = request.dict(exclude_none=True)
        response, status_code = APIInterface.post(
            route=create_dataset_url, data=create_dataset_request
        )
        if status_code == 200:
            crud_request = {
                "dataset_id": response.get("dataset_id"),
                "alias_name": create_dataset_request.get("display_name"),
                "UUID": uuid,
                "status": "Created",
                "problem_type": problem_type,
            }
            self.CRUDDataset.create(**crud_request)
            return {
                "dataset_name": create_dataset_request.get("display_name"),
                "dataset_id": response.get("dataset_id"),
            }
        # TODO: surface the upstream error detail instead of a generic status.
        return {"status": "create dataset failed"}

    def create_text_classification_dataset_controller(self, request):
        """Create an AutoML text-classification dataset."""
        create_dataset_url = (
            self.gcp_config.get("automl")
            .get("text")
            .get("create_classification_dataset")
        )
        return self._create_dataset(request, create_dataset_url, "text_classification")

    def create_ner_dataset_controller(self, request):
        """Create an AutoML text NER dataset."""
        create_dataset_url = (
            self.gcp_config.get("automl").get("text").get("create_ner_dataset")
        )
        return self._create_dataset(request, create_dataset_url, "text_ner")

    def create_image_classification_dataset_controller(self, request):
        """Create an AutoML image-classification dataset."""
        create_dataset_url = (
            self.gcp_config.get("automl")
            .get("image")
            .get("create_image_classification_dataset")
        )
        return self._create_dataset(request, create_dataset_url, "image_classification")

    def create_object_detection_dataset_controller(self, request):
        """Create an AutoML object-detection dataset."""
        create_dataset_url = (
            self.gcp_config.get("automl")
            .get("image")
            .get("create_object_detection_dataset")
        )
        return self._create_dataset(request, create_dataset_url, "object_detection")
| 38.464567
| 79
| 0.565814
| 474
| 4,885
| 5.523207
| 0.137131
| 0.139037
| 0.122231
| 0.061115
| 0.859053
| 0.859053
| 0.859053
| 0.859053
| 0.859053
| 0.859053
| 0
| 0.009706
| 0.325077
| 4,885
| 126
| 80
| 38.769841
| 0.784349
| 0.009621
| 0
| 0.663793
| 0
| 0
| 0.170908
| 0.019657
| 0
| 0
| 0
| 0.007937
| 0
| 1
| 0.043103
| false
| 0.034483
| 0.034483
| 0
| 0.155172
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5b4aa442f140f84c537ef5a75e54ebb00d23d20a
| 29
|
py
|
Python
|
print_text/add.py
|
ErraticO/test_github_release_pypi
|
1d0a31a19bb150178ad316dbe2be63bd49abe6d5
|
[
"MIT"
] | 2
|
2022-02-21T01:13:44.000Z
|
2022-02-21T06:31:53.000Z
|
print_text/add.py
|
ErraticO/test_github_release_pypi
|
1d0a31a19bb150178ad316dbe2be63bd49abe6d5
|
[
"MIT"
] | null | null | null |
print_text/add.py
|
ErraticO/test_github_release_pypi
|
1d0a31a19bb150178ad316dbe2be63bd49abe6d5
|
[
"MIT"
] | null | null | null |
def make():
    """Write the literal token "add" (plus a newline) to stdout."""
    message = "add"
    print(message)
| 9.666667
| 16
| 0.517241
| 4
| 29
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 29
| 2
| 17
| 14.5
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
5b6e5d2460adf57181f64ff4577e0e14e38d1666
| 602
|
py
|
Python
|
user/mixins.py
|
calumlim/talentalps
|
cc66eabf71e04b6ab5831ee15f57e771cba82279
|
[
"FSFAP"
] | null | null | null |
user/mixins.py
|
calumlim/talentalps
|
cc66eabf71e04b6ab5831ee15f57e771cba82279
|
[
"FSFAP"
] | null | null | null |
user/mixins.py
|
calumlim/talentalps
|
cc66eabf71e04b6ab5831ee15f57e771cba82279
|
[
"FSFAP"
] | null | null | null |
from django.contrib.auth.mixins import AccessMixin
class StaffAccessMixin(AccessMixin):
    """Restrict a view to authenticated staff users.

    Non-staff (or anonymous) requests are routed to AccessMixin's
    ``handle_no_permission`` instead of the view.
    """

    def dispatch(self, request, *args, **kwargs):
        user = request.user
        if user.is_authenticated and user.is_staff:
            return super().dispatch(request, *args, **kwargs)
        return self.handle_no_permission()
class EmployerAccessMixin(AccessMixin):
    """Restrict a view to authenticated users flagged as employers.

    The employer flag lives on the related ``userprofile``; anyone else is
    routed to AccessMixin's ``handle_no_permission``.
    """

    def dispatch(self, request, *args, **kwargs):
        user = request.user
        if user.is_authenticated and user.userprofile.is_employer:
            return super().dispatch(request, *args, **kwargs)
        return self.handle_no_permission()
| 46.307692
| 88
| 0.712625
| 70
| 602
| 6.014286
| 0.428571
| 0.104513
| 0.16152
| 0.123515
| 0.72209
| 0.72209
| 0.72209
| 0.72209
| 0.72209
| 0.72209
| 0
| 0
| 0.174419
| 602
| 13
| 89
| 46.307692
| 0.847082
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.818182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
5b9ba292113018443f4ebd750c382d3c206284d4
| 8,360
|
py
|
Python
|
tests/test_permissions.py
|
radiac/django-fastview
|
31b06003c303c552e9c51184656d83a30b88377f
|
[
"BSD-3-Clause"
] | 8
|
2019-12-30T19:16:30.000Z
|
2021-09-06T14:08:25.000Z
|
tests/test_permissions.py
|
radiac/django-fastview
|
31b06003c303c552e9c51184656d83a30b88377f
|
[
"BSD-3-Clause"
] | 3
|
2021-03-10T04:45:19.000Z
|
2022-02-12T07:01:55.000Z
|
tests/test_permissions.py
|
radiac/django-fastview
|
31b06003c303c552e9c51184656d83a30b88377f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test fastview/permissions.py
"""
import pytest
from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser
from .app.models import Entry
# --- Public / Login permissions -------------------------------------------

def test_public__public_can_access(test_data, request_public):
    """Public() admits anonymous users and filters nothing out."""
    perm = Public()
    assert perm.check(request_public) is True
    assert perm.filter(request_public, test_data).count() == test_data.count()


def test_login__public_cannot_access(test_data, request_public):
    """Login() rejects anonymous users and filters out every row."""
    perm = Login()
    assert perm.check(request_public) is False
    assert perm.filter(request_public, test_data).count() == 0


def test_login__authed_can_access(test_data, request_owner):
    """Login() admits any authenticated user with the full queryset."""
    perm = Login()
    assert perm.check(request_owner) is True
    assert perm.filter(request_owner, test_data).count() == test_data.count()
# --- Staff / Superuser permissions ----------------------------------------

def test_staff__public_cannot_access(test_data, request_public):
    """Staff() rejects anonymous users."""
    perm = Staff()
    assert perm.check(request_public) is False
    assert perm.filter(request_public, test_data).count() == 0


def test_staff__authed_cannot_access(test_data, request_owner):
    """Staff() rejects ordinary authenticated users."""
    perm = Staff()
    assert perm.check(request_owner) is False
    assert perm.filter(request_owner, test_data).count() == 0


def test_staff__staff_can_access(test_data, request_staff):
    """Staff() admits staff users with the full queryset."""
    perm = Staff()
    assert perm.check(request_staff) is True
    assert perm.filter(request_staff, test_data).count() == test_data.count()


def test_superuser__public_cannot_access(test_data, request_public):
    """Superuser() rejects anonymous users."""
    perm = Superuser()
    assert perm.check(request_public) is False
    assert perm.filter(request_public, test_data).count() == 0


def test_superuser__authed_cannot_access(test_data, request_owner):
    """Superuser() rejects ordinary authenticated users."""
    perm = Superuser()
    assert perm.check(request_owner) is False
    assert perm.filter(request_owner, test_data).count() == 0


def test_superuser__staff_cannot_access(test_data, request_staff):
    """Superuser() rejects staff users — staff is not enough."""
    perm = Superuser()
    assert perm.check(request_staff) is False
    assert perm.filter(request_staff, test_data).count() == 0


def test_superuser__superuser_can_access(test_data, request_superuser):
    """Superuser() admits superusers with the full queryset."""
    perm = Superuser()
    assert perm.check(request_superuser) is True
    assert perm.filter(request_superuser, test_data).count() == test_data.count()
# --- Django model-permission checks ---------------------------------------

def test_django__public_cannot_access(test_data, request_public):
    """Django("add") rejects anonymous users without the model perm."""
    perm = Django(action="add")
    assert perm.check(request_public, model=Entry) is False
    assert perm.filter(request_public, test_data).count() == 0


def test_django__authed_cannot_access(test_data, request_owner):
    """Django("add") rejects authenticated users lacking the perm."""
    perm = Django(action="add")
    assert perm.check(request_owner, model=Entry) is False
    assert perm.filter(request_owner, test_data).count() == 0


def test_django__staff_cannot_access(test_data, request_staff):
    """Django("add") rejects staff users lacking the perm."""
    perm = Django(action="add")
    assert perm.check(request_staff, model=Entry) is False
    assert perm.filter(request_staff, test_data).count() == 0


def test_django__superuser_can_access(test_data, request_superuser):
    """Django("add") admits superusers (they hold all perms)."""
    perm = Django(action="add")
    assert perm.check(request_superuser, model=Entry) is True
    assert perm.filter(request_superuser, test_data).count() == test_data.count()


@pytest.mark.django_db
def test_django__user_with_permission_can_access(
    test_data, request_other, user_other, add_entry_permission
):
    """Django("add") admits a regular user once granted the model perm."""
    user_other.user_permissions.add(add_entry_permission)
    perm = Django(action="add")
    assert perm.check(request_other, model=Entry) is True
    assert perm.filter(request_other, test_data).count() == test_data.count()
# --- Owner permission: only the row's author may access -------------------

def test_owner__public_cannot_access(test_data, request_public):
    """Owner rejects anonymous users even for owned rows."""
    perm = Owner(owner_field="author")
    # Test data is ordered, the first is owned by user_owner
    owned = test_data.first()
    assert perm.check(request_public, instance=owned) is False
    assert perm.filter(request_public, test_data).count() == 0


def test_owner__owner_can_access_theirs(test_data, request_owner, user_owner):
    """Owner admits the author; the filter yields only their 2 rows."""
    perm = Owner(owner_field="author")
    owned = test_data.first()
    assert perm.check(request_owner, instance=owned) is True
    assert perm.filter(request_owner, test_data).count() == 2
    assert perm.filter(request_owner, test_data).filter(author=user_owner).count() == 2


def test_owner__other_can_access_theirs(test_data, request_other, user_other):
    """Another user fails the check on someone else's row but keeps theirs."""
    perm = Owner(owner_field="author")
    owned = test_data.first()
    assert perm.check(request_other, instance=owned) is False
    assert perm.filter(request_other, test_data).count() == 2
    assert perm.filter(request_other, test_data).filter(author=user_other).count() == 2


def test_owner__staff_cannot_access(test_data, request_staff):
    """Staff status grants no ownership — staff see nothing they don't own."""
    perm = Owner(owner_field="author")
    owned = test_data.first()
    assert perm.check(request_staff, instance=owned) is False
    assert perm.filter(request_staff, test_data).count() == 0


def test_owner__superuser_cannot_access(test_data, request_superuser):
    """Even superusers are not treated as owners of others' rows."""
    perm = Owner(owner_field="author")
    owned = test_data.first()
    assert perm.check(request_superuser, instance=owned) is False
    assert perm.filter(request_superuser, test_data).count() == 0
# --- Combinators: AND (&) and OR (|) of permissions -----------------------

def test_and__owner_and_staff__owner_cannot_access(test_data, request_owner):
    """Owner & Staff: being the owner alone is not enough."""
    perm = Owner(owner_field="author") & Staff()
    owned = test_data.first()
    assert perm.check(request_owner, instance=owned) is False
    assert perm.filter(request_owner, test_data).count() == 0


def test_and__owner_and_staff__staff_cannot_access(test_data, request_staff):
    """Owner & Staff: being staff alone is not enough."""
    perm = Owner(owner_field="author") & Staff()
    owned = test_data.first()
    assert perm.check(request_staff, instance=owned) is False
    assert perm.filter(request_staff, test_data).count() == 0


def test_and__owner_and_staff__staff_owner_can_access(
    test_data, request_owner, user_owner
):
    """Owner & Staff: passing once the owner is also made staff."""
    perm = Owner(owner_field="author") & Staff()
    owned = test_data.first()
    user_owner.is_staff = True
    user_owner.save()
    assert perm.check(request_owner, instance=owned) is True
    assert perm.filter(request_owner, test_data).count() == 2


def test_or__owner_or_staff__owner_can_access(test_data, request_owner):
    """Owner | Staff: the owner passes and sees their 2 rows."""
    perm = Owner(owner_field="author") | Staff()
    owned = test_data.first()
    assert perm.check(request_owner, instance=owned) is True
    assert perm.filter(request_owner, test_data).count() == 2


def test_or__owner_or_staff__staff_can_access(test_data, request_staff):
    """Owner | Staff: staff pass via the Staff arm and see all 4 rows."""
    perm = Owner(owner_field="author") | Staff()
    owned = test_data.first()
    assert perm.check(request_staff, instance=owned) is True
    assert perm.filter(request_staff, test_data).count() == 4


def test_or__owner_or_staff__staff_owner_can_access(
    test_data, request_owner, user_owner
):
    """Owner | Staff: a staff owner matches both arms; filter yields all 4."""
    perm = Owner(owner_field="author") | Staff()
    owned = test_data.first()
    user_owner.is_staff = True
    user_owner.save()
    assert perm.check(request_owner, instance=owned) is True
    assert perm.filter(request_owner, test_data).count() == 4


def test_or__owner_or_staff__other_cannot_access(test_data, request_other, user_other):
    """Owner | Staff: a non-staff non-owner only keeps their own 2 rows."""
    perm = Owner(owner_field="author") | Staff()
    owned = test_data.first()
    assert perm.check(request_other, instance=owned) is False
    assert perm.filter(request_other, test_data).count() == 2
    assert perm.filter(request_other, test_data).filter(author=user_other).count() == 2
# --- NOT (~) combinator ---------------------------------------------------

def test_not__not_owner__all_can_access_all_except_own(
    test_data, request_owner, user_owner
):
    """~Owner inverts: access everything EXCEPT rows you authored."""
    perm = ~Owner(owner_field="author")
    owned = test_data.first()
    not_owned = test_data.exclude(author=user_owner).first()
    assert perm.check(request_owner, instance=owned) is False
    assert perm.check(request_owner, instance=not_owned) is True
    assert perm.filter(request_owner, test_data).count() == 2
    assert perm.filter(request_owner, test_data).filter(author=user_owner).count() == 0


def test_and_not__staff_not_owner__staff_can_access_all_except_own(
    test_data, request_owner, user_owner
):
    """Staff & ~Owner: staff see every row except the ones they authored."""
    perm = Staff() & ~Owner(owner_field="author")
    owned = test_data.first()
    not_owned = test_data.exclude(author=user_owner).first()
    user_owner.is_staff = True
    user_owner.save()
    assert perm.check(request_owner, instance=owned) is False
    assert perm.check(request_owner, instance=not_owned) is True
    assert perm.filter(request_owner, test_data).count() == 2
    assert perm.filter(request_owner, test_data).filter(author=user_owner).count() == 0
| 37.155556
| 87
| 0.754306
| 1,207
| 8,360
| 4.874896
| 0.04971
| 0.116927
| 0.077328
| 0.132903
| 0.9293
| 0.908736
| 0.859619
| 0.835826
| 0.658566
| 0.649898
| 0
| 0.003893
| 0.139713
| 8,360
| 224
| 88
| 37.321429
| 0.814238
| 0.010048
| 0
| 0.63125
| 0
| 0
| 0.011974
| 0
| 0
| 0
| 0
| 0
| 0.40625
| 1
| 0.18125
| false
| 0
| 0.01875
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5bba87b4ea8ec5a9d0d76ab7b38afa53fd52a194
| 19
|
py
|
Python
|
sddr/__init__.py
|
felixGer/PySDDR
|
a7680e7190185ba605df6ad85b4fdf19401473b3
|
[
"MIT"
] | 14
|
2021-04-07T17:33:19.000Z
|
2022-02-07T14:49:37.000Z
|
sddr/__init__.py
|
felixGer/PySDDR
|
a7680e7190185ba605df6ad85b4fdf19401473b3
|
[
"MIT"
] | 3
|
2021-11-30T15:03:32.000Z
|
2022-01-09T06:24:29.000Z
|
sddr/__init__.py
|
felixGer/PySDDR
|
a7680e7190185ba605df6ad85b4fdf19401473b3
|
[
"MIT"
] | 7
|
2021-04-20T08:48:57.000Z
|
2022-03-02T10:45:19.000Z
|
from .sddr import *
| 19
| 19
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5bc198052f44848d5622ecfd0dbb5d88d1a1fb29
| 118
|
py
|
Python
|
jentry/entry/script/__init__.py
|
HansBug/jentry
|
69817fc19df1c8b31b32a834cfe1aa93841d6022
|
[
"Apache-2.0"
] | null | null | null |
jentry/entry/script/__init__.py
|
HansBug/jentry
|
69817fc19df1c8b31b32a834cfe1aa93841d6022
|
[
"Apache-2.0"
] | 1
|
2022-03-20T01:42:56.000Z
|
2022-03-20T01:42:56.000Z
|
jentry/entry/script/__init__.py
|
HansBug/jentry
|
69817fc19df1c8b31b32a834cfe1aa93841d6022
|
[
"Apache-2.0"
] | null | null | null |
from .file import load_entries_from_file, load_entry_classes_from_code
from .project import load_entries_from_project
| 39.333333
| 70
| 0.898305
| 19
| 118
| 5.052632
| 0.473684
| 0.166667
| 0.354167
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076271
| 118
| 2
| 71
| 59
| 0.880734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5bdcf940067fb25ce5b1bb48cc7ac89d31919552
| 109
|
py
|
Python
|
package/awesome_streamlit/experiments/__init__.py
|
R-fred/awesome-streamlit
|
10f2b132bc8e61a82edfacb4b3bb36d0da6c63d3
|
[
"CC0-1.0"
] | 1,194
|
2019-10-09T06:15:27.000Z
|
2022-03-31T14:53:00.000Z
|
package/awesome_streamlit/experiments/__init__.py
|
R-fred/awesome-streamlit
|
10f2b132bc8e61a82edfacb4b3bb36d0da6c63d3
|
[
"CC0-1.0"
] | 55
|
2019-10-09T12:08:39.000Z
|
2022-02-10T00:48:53.000Z
|
package/awesome_streamlit/experiments/__init__.py
|
R-fred/awesome-streamlit
|
10f2b132bc8e61a82edfacb4b3bb36d0da6c63d3
|
[
"CC0-1.0"
] | 272
|
2019-10-09T12:04:31.000Z
|
2022-03-29T02:43:30.000Z
|
"""Imports that should be exposed outside the package"""
from .hello_world import write as write_hello_world
| 36.333333
| 56
| 0.807339
| 17
| 109
| 5
| 0.823529
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12844
| 109
| 2
| 57
| 54.5
| 0.894737
| 0.458716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
750d66e4556ce66539f01afd454bfb3180d06258
| 86
|
py
|
Python
|
tracklib/init/__init__.py
|
xueyuelei/tracklib
|
d33912baf1bebd1605d5e9c8dfc31484c96628cc
|
[
"MIT"
] | 5
|
2020-03-04T11:36:19.000Z
|
2020-06-21T16:49:45.000Z
|
tracklib/init/__init__.py
|
xueyuelei/tracklib
|
d33912baf1bebd1605d5e9c8dfc31484c96628cc
|
[
"MIT"
] | null | null | null |
tracklib/init/__init__.py
|
xueyuelei/tracklib
|
d33912baf1bebd1605d5e9c8dfc31484c96628cc
|
[
"MIT"
] | null | null | null |
from __future__ import division, absolute_import, print_function
from .init import *
| 21.5
| 64
| 0.825581
| 11
| 86
| 5.909091
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127907
| 86
| 4
| 65
| 21.5
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
752fff69deb9402c4c7a6a943a150cc6d939f410
| 117
|
py
|
Python
|
change_ab.py
|
ximury/python
|
8624464e214c74e640d01a83b21c66df8eb7ad8c
|
[
"Apache-2.0"
] | null | null | null |
change_ab.py
|
ximury/python
|
8624464e214c74e640d01a83b21c66df8eb7ad8c
|
[
"Apache-2.0"
] | null | null | null |
change_ab.py
|
ximury/python
|
8624464e214c74e640d01a83b21c66df8eb7ad8c
|
[
"Apache-2.0"
] | null | null | null |
# Demo of tuple-unpacking swap and augmented assignment.
a, b = 3, 4
print(a, b)
# Swap the two names — same effect as `a, b = b, a`.
b, a = a, b
print(a, b)
print('---------------------')
a = 1
# Spelled-out form of `a += 1`.
a = a + 1
print(a)
| 9
| 31
| 0.316239
| 21
| 117
| 1.761905
| 0.285714
| 0.216216
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050633
| 0.324786
| 117
| 12
| 32
| 9.75
| 0.417722
| 0
| 0
| 0.25
| 0
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
754965e1d467f573955a91bf60d2f23e489b0380
| 17,564
|
py
|
Python
|
models.py
|
dahe-cvl/isvc2020_overscan_detection
|
e41b85fa8d1615a0e8f19c961a59f7d703232445
|
[
"MIT"
] | null | null | null |
models.py
|
dahe-cvl/isvc2020_overscan_detection
|
e41b85fa8d1615a0e8f19c961a59f7d703232445
|
[
"MIT"
] | null | null | null |
models.py
|
dahe-cvl/isvc2020_overscan_detection
|
e41b85fa8d1615a0e8f19c961a59f7d703232445
|
[
"MIT"
] | null | null | null |
from torchvision import models
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
from torchvision.models.segmentation.fcn import FCNHead
from metrics import *
from datasets import *
from collections import OrderedDict
class CNN(nn.Module):
    """Feature-extraction wrapper around a torchvision backbone.

    With ``include_top=False`` (the default usage in this file) ``forward``
    returns an OrderedDict with 'out'/'aux' keys — the shape torchvision's
    segmentation heads expect from a backbone.
    """
    def __init__(self, model_arch="resnet50", n_classes=2, include_top=False, pretrained=False, lower_features=False):
        """Build the chosen backbone.

        model_arch: one of resnet50/resnet101/squeezenet/densenet121/
            vgg19/vgg16/mobilenet/alexnet; anything else just prints a
            warning and leaves the instance without a model.
        n_classes: width of the replaced classifier head.
        include_top: if True, forward() additionally runs ``self.classifier``.
        pretrained: despite the name, this sets ``requires_grad`` on the
            backbone weights (ImageNet weights are always downloaded).
        lower_features: if True, truncate the backbone to its early layers.
        """
        super(CNN, self).__init__()
        self.include_top = include_top
        self.pretrained = pretrained
        self.lower_features = lower_features
        self.gradients = None
        # NOTE(review): classifier stays None, but forward() calls it when
        # include_top is True — that path would raise; confirm callers only
        # use include_top=False or assign classifier externally.
        self.classifier = None
        if (model_arch == "resnet50"):
            self.model = models.resnet50(pretrained=True)
            for params in self.model.parameters():
                params.requires_grad = self.pretrained
            num_ftrs = self.model.fc.in_features
            self.model.fc = torch.nn.Linear(num_ftrs, n_classes)
            # All children except the final fc.
            self.features = nn.Sequential(*list(self.model.children())[:-1])
            #print(self.features)
            self.features_dict = OrderedDict()
        elif (model_arch == "resnet101"):
            self.model = models.resnet101(pretrained=True)
            #print(self.model)
            for params in self.model.parameters():
                params.requires_grad = self.pretrained
            num_ftrs = self.model.fc.in_features
            self.model.fc = torch.nn.Linear(num_ftrs, n_classes)
            self.features_dict = OrderedDict()
            # Truncate: first 5 children (early layers) or drop avgpool+fc.
            if (lower_features == True):
                self.model = nn.Sequential(*list(self.model.children())[:5])
            else:
                self.model = nn.Sequential(*list(self.model.children())[:-2])
        elif (model_arch == "squeezenet"):
            self.model = models.squeezenet1_1(pretrained=True)
            #print(self.model)
            #self.classifier = self.model.classifier
            for params in self.model.parameters():
                params.requires_grad = self.pretrained
            #num_ftrs = self.model.fc.in_features
            #self.model.fc = torch.nn.Linear(num_ftrs, n_classes)
            #num_ftrs = 512
            #self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
            self.features_dict = OrderedDict()
            # Keep only the feature extractor (classifier is never replaced).
            if (lower_features == True):
                self.model = nn.Sequential(self.model.features[:6])
            else:
                self.model = nn.Sequential(self.model.features)
            #print(self.model)
            #exit()
        elif (model_arch == "densenet121"):
            self.model = models.densenet121(pretrained=True)
            for params in self.model.parameters():
                params.requires_grad = self.pretrained
            num_ftrs = self.model.classifier.in_features
            self.model.classifier = torch.nn.Linear(num_ftrs, n_classes)
            self.features = nn.Sequential(*list(self.model.children())[:-1])
            # NOTE(review): debug print left in — noisy at construction time.
            print(self.model)
        elif (model_arch == "vgg19"):
            self.model = models.vgg19(pretrained=True)
            for params in self.model.parameters():
                params.requires_grad = self.pretrained
            num_ftrs = self.model.classifier[0].in_features
            self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
            self.features = nn.Sequential(*list(self.model.children())[:-1])
            #print(self.features)
            print(self.model)
        elif (model_arch == "vgg16"):
            self.model = models.vgg16(pretrained=True);
            for params in self.model.parameters():
                params.requires_grad = self.pretrained
            num_ftrs = self.model.classifier[0].in_features
            self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
            if(lower_features == True):
                self.model = nn.Sequential(self.model.features[:5])
            else:
                self.model = nn.Sequential(*list(self.model.children())[:-2])
            #print(self.features)
            #print(self.model)
            #exit()
            print(self.model)
            self.features_dict = OrderedDict()
        elif (model_arch == "mobilenet"):
            self.model = models.mobilenet_v2(pretrained=True);
            for params in self.model.parameters():
                params.requires_grad = self.pretrained
            num_ftrs = self.model.classifier[1].in_features
            self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
            if(lower_features == True):
                #self.model = nn.Sequential(self.model.features[:5])
                self.model = nn.Sequential(*list(self.model.features)[:5])
            else:
                #self.model = nn.Sequential(*list(self.model.children())[:-1])
                self.model = nn.Sequential(*list(self.model.features))
            self.features_dict = OrderedDict()
        elif (model_arch == "alexnet"):
            self.model = models.alexnet(pretrained=True)
            for params in self.model.parameters():
                params.requires_grad = self.pretrained
            num_ftrs = self.model.classifier[0].in_features
            self.model.classifier[-1] = torch.nn.Linear(num_ftrs, n_classes)
            self.features = nn.Sequential(*list(self.model.children())[:-1])
            #print(self.features)
            print(self.model)
        else:
            self.model_arch = None
            print("No valid backbone cnn network selected!")

    def activations_hook(self, grad):
        # Gradient hook target (Grad-CAM style): stash incoming gradients.
        self.gradients = grad

    def get_activations_gradient(self):
        # Return whatever activations_hook last captured (None if never run).
        return self.gradients

    def forward(self, x):
        """Run the backbone; return an OrderedDict of feature maps.

        include_top=False: 'out' and 'aux' both point at the raw feature map.
        include_top=True: flattens and runs self.classifier (see NOTE in
        __init__ — classifier may still be None here).
        """
        if(self.include_top == False):
            # extract features
            x = self.model(x)
            self.features_dict['out'] = x
            self.features_dict['aux'] = x
            return self.features_dict
        elif(self.include_top == True):
            #print(x.size())
            x = self.model(x)
            # flatten
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            self.features_dict['out'] = x
            return self.features_dict
        return x
def loadModel(model_arch="", classes=None, pre_trained_path=None, expType=None, trainable_backbone_flag=False, lower_features=False):
print("Load model architecture ... ")
if (model_arch == "deeplabv3_resnet101_orig"):
print("deeplab_resnet architecture selected ...")
model = models.segmentation.deeplabv3_resnet101(pretrained=True, progress=True)
for params in model.parameters():
params.requires_grad = trainable_backbone_flag
model.classifier[-1] = torch.nn.Conv2d(256, len(classes), kernel_size=(1, 1))
model.aux_classifier[-1] = torch.nn.Conv2d(256, len(classes), kernel_size=(1, 1))
features = model.backbone
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path) # + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "fcn_resnet101_orig"):
print("deeplab_resnet architecture selected ...")
model = models.segmentation.fcn_resnet101(pretrained=True, progress=True)
for params in model.parameters():
params.requires_grad = trainable_backbone_flag
model.classifier[-1] = torch.nn.Conv2d(512, len(classes), kernel_size=(1, 1))
model.aux_classifier[-1] = torch.nn.Conv2d(256, len(classes), kernel_size=(1, 1))
features = model.backbone
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "deeplabv3_resnet101"):
print("deeplabv3_resnet101 architecture selected ...")
backbone_net = CNN(model_arch="resnet101", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
DeepLabHead(256, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
DeepLabHead(2048, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.DeepLabV3(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "deeplabv3_vgg16"):
print("deeplabv3_vgg architecture selected ...")
# backbone_net = CNN(model_arch="resnet101", n_classes=len(classes), include_top=False)
backbone_net = CNN(model_arch="vgg16", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
DeepLabHead(64, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
DeepLabHead(512, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.DeepLabV3(backbone=backbone_net, classifier=classifier, aux_classifier=None)
#print(model)
#exit()
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path) # + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
# Find total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print("total_params:" + str(total_params))
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print("total_trainable_params: " + str(total_trainable_params))
#exit()
return model, features
elif (model_arch == "deeplabv3_mobilenet"):
print("deeplabv3_mobilenet architecture selected ...")
backbone_net = CNN(model_arch="mobilenet", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
DeepLabHead(32, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
DeepLabHead(1280, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.DeepLabV3(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "deeplabv3_squeezenet"):
print("deeplabv3_mobilenet architecture selected ...")
backbone_net = CNN(model_arch="squeezenet", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
DeepLabHead(128, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
DeepLabHead(512, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.DeepLabV3(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "fcn_vgg16"):
print("fcn_vgg16 architecture selected ...")
backbone_net = CNN(model_arch="vgg16", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if(lower_features == True):
classifier = nn.Sequential(
FCNHead(64, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
FCNHead(512, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.FCN(backbone=backbone_net, classifier=classifier, aux_classifier=None)
# print(model)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
return model, features
elif (model_arch == "fcn_resnet101"):
print("fcn_resnet101 architecture selected ...")
backbone_net = CNN(model_arch="resnet101", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
FCNHead(256, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
FCNHead(2048, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.FCN(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path) # + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
# Find total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print("total_params:" + str(total_params))
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print("total_trainable_params: " + str(total_trainable_params))
#exit()
return model, features
elif (model_arch == "fcn_squeezenet"):
print("deeplabv3_squeezenet architecture selected ...")
backbone_net = CNN(model_arch="squeezenet", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
FCNHead(128, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
FCNHead(512, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.FCN(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
# Find total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print("total_params:" + str(total_params))
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print("total_trainable_params: " + str(total_trainable_params))
# exit()
return model, features
elif (model_arch == "fcn_mobilenet"):
print("deeplabv3_mobilenet architecture selected ...")
backbone_net = CNN(model_arch="mobilenet", n_classes=len(classes), include_top=False, pretrained=trainable_backbone_flag, lower_features=lower_features)
if (lower_features == True):
classifier = nn.Sequential(
FCNHead(32, len(classes)),
# nn.Softmax()
)
else:
classifier = nn.Sequential(
FCNHead(1280, len(classes)),
# nn.Softmax()
)
features = backbone_net
model = models.segmentation.FCN(backbone=backbone_net, classifier=classifier, aux_classifier=None)
if (pre_trained_path != None):
print("load pre-trained-weights ... ")
model_dict_state = torch.load(pre_trained_path)# + "/best_model.pth")
model.load_state_dict(model_dict_state['net'])
# Find total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print("total_params:" + str(total_params))
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print("total_trainable_params: " + str(total_trainable_params))
# exit()
return model, features
else:
print("ERROR: select valid model architecture!")
exit()
| 38.858407
| 161
| 0.605443
| 1,924
| 17,564
| 5.324324
| 0.067048
| 0.061499
| 0.0287
| 0.029676
| 0.858161
| 0.842932
| 0.83278
| 0.822237
| 0.805838
| 0.7828
| 0
| 0.015199
| 0.280802
| 17,564
| 451
| 162
| 38.944568
| 0.795757
| 0.072876
| 0
| 0.661184
| 0
| 0
| 0.080434
| 0.007155
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016447
| false
| 0
| 0.019737
| 0.003289
| 0.085526
| 0.115132
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
754a65d212f34d3a028eb508e500f5bfa8ea69a1
| 131,653
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_alarmgr_server_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_alarmgr_server_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_alarmgr_server_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'TimingBucketEnum' : _MetaInfoEnum('TimingBucketEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'not-specified':'not_specified',
'fifteen-min':'fifteen_min',
'one-day':'one_day',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmSeverityEnum' : _MetaInfoEnum('AlarmSeverityEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'not-reported':'not_reported',
'not-alarmed':'not_alarmed',
'minor':'minor',
'major':'major',
'critical':'critical',
'severity-last':'severity_last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmDirectionEnum' : _MetaInfoEnum('AlarmDirectionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'not-specified':'not_specified',
'send':'send',
'receive':'receive',
'send-receive':'send_receive',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmStatusEnum' : _MetaInfoEnum('AlarmStatusEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'set':'set',
'clear':'clear',
'suppress':'suppress',
'last':'last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmServiceAffectingEnum' : _MetaInfoEnum('AlarmServiceAffectingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'not-service-affecting':'not_service_affecting',
'service-affecting':'service_affecting',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmNotificationSrcEnum' : _MetaInfoEnum('AlarmNotificationSrcEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'not-specified':'not_specified',
'near-end':'near_end',
'far-end':'far_end',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmEventEnum' : _MetaInfoEnum('AlarmEventEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'default':'default',
'notification':'notification',
'last':'last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmClientEnum' : _MetaInfoEnum('AlarmClientEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'producer':'producer',
'consumer':'consumer',
'subscriber':'subscriber',
'client-last':'client_last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmClientStateEnum' : _MetaInfoEnum('AlarmClientStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'start':'start',
'init':'init',
'connecting':'connecting',
'connected':'connected',
'registered':'registered',
'disconnected':'disconnected',
'ready':'ready',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'AlarmGroupsEnum' : _MetaInfoEnum('AlarmGroupsEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper',
{
'unknown':'unknown',
'environ':'environ',
'ethernet':'ethernet',
'fabric':'fabric',
'power':'power',
'software':'software',
'slice':'slice',
'cpu':'cpu',
'controller':'controller',
'sonet':'sonet',
'otn':'otn',
'sdh-controller':'sdh_controller',
'asic':'asic',
'fpd-infra':'fpd_infra',
'shelf':'shelf',
'mpa':'mpa',
'ots':'ots',
'last':'last',
}, 'Cisco-IOS-XR-alarmgr-server-oper', _yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper']),
'Alarms.Detail.DetailSystem.Active.AlarmInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Active.AlarmInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Active.AlarmInfo.Tca' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Active.AlarmInfo.Tca',
False,
[
_MetaInfoClassMember('bucket-type', REFERENCE_ENUM_CLASS, 'TimingBucketEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'TimingBucketEnum',
[], [],
''' Timing Bucket
''',
'bucket_type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('current-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'current_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('threshold-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'threshold_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'tca',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Active.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Active.AlarmInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Active.AlarmInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tca', REFERENCE_CLASS, 'Tca' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Active.AlarmInfo.Tca',
[], [],
''' TCA feature specific alarm attributes
''',
'tca',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmEventEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmEventEnum',
[], [],
''' alarm event type
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Active' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Active',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Active.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.History.AlarmInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.History.AlarmInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.History.AlarmInfo.Tca' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.History.AlarmInfo.Tca',
False,
[
_MetaInfoClassMember('bucket-type', REFERENCE_ENUM_CLASS, 'TimingBucketEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'TimingBucketEnum',
[], [],
''' Timing Bucket
''',
'bucket_type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('current-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'current_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('threshold-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'threshold_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'tca',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.History.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.History.AlarmInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.History.AlarmInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tca', REFERENCE_CLASS, 'Tca' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.History.AlarmInfo.Tca',
[], [],
''' TCA feature specific alarm attributes
''',
'tca',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmEventEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmEventEnum',
[], [],
''' alarm event type
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.History' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.History',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.History.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm suppressed time
''',
'suppressed_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm suppressed time(timestamp format)
''',
'suppressed_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Suppressed' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Suppressed',
False,
[
_MetaInfoClassMember('suppressed-info', REFERENCE_LIST, 'SuppressedInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo',
[], [],
''' Suppressed Alarm List
''',
'suppressed_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailSystem.Stats' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Stats',
False,
[
_MetaInfoClassMember('active', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are currently in the active state
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('cache-hit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total alarms which had the cache hit
''',
'cache_hit',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('cache-miss', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total alarms which had the cache miss
''',
'cache_miss',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that we couldn't keep track due to some
error or other
''',
'dropped',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-clear-without-set', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped clear without set
''',
'dropped_clear_without_set',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-db-error', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to db error
''',
'dropped_db_error',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-duplicate', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped which were duplicate
''',
'dropped_duplicate',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-insuff-mem', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to insufficient memory
''',
'dropped_insuff_mem',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-invalid-aid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to invalid aid
''',
'dropped_invalid_aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are cleared. This one is counted
over a long period of time
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reported', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that were in all reported to this Alarm
Mgr
''',
'reported',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are in suppressed state
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-active', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are currently in the active
state(sysadmin plane)
''',
'sysadmin_active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-history', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are cleared in sysadmin plane. This
one is counted over a long period of time
''',
'sysadmin_history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-suppressed', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are suppressed in sysadmin plane.
''',
'sysadmin_suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'stats',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG list node 'client-info' under
# Alarms.Detail.DetailSystem.Clients (module Cisco-IOS-XR-alarmgr-server-oper).
# Each _MetaInfoClassMember: (yang-name, member type, python type, module path,
# class name, range/length restrictions, patterns, description, python attr
# name, owning module, is-config). Do not hand-edit: generated from the model.
'Alarms.Detail.DetailSystem.Clients.ClientInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Clients.ClientInfo',
False,
[
_MetaInfoClassMember('connect-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent connected to the alarm
mgr
''',
'connect_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('connect-timestamp', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Agent connect timestamp
''',
'connect_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-disp', ATTRIBUTE, 'bool' , None, None,
[], [],
''' The current subscription status of the client
''',
'filter_disp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' The filter used for alarm group
''',
'filter_group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' The filter used for alarm severity
''',
'filter_severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-state', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' The filter used for alarm bi-state state+
''',
'filter_state',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('get-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent queried for alarms
''',
'get_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('handle', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' The client handle through which interface
''',
'handle',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms agent id of the client
''',
'id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' The location of this client
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm client
''',
'name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('report-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent reported alarms
''',
'report_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'AlarmClientStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmClientStateEnum',
[], [],
''' The current state of the client
''',
'state',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('subscribe-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent subscribed for alarms
''',
'subscribe_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('subscriber-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms agent subscriber id of the client
''',
'subscriber_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmClientEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmClientEnum',
[], [],
''' The type of the client
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'client-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'clients' under
# Alarms.Detail.DetailSystem: a single REFERENCE_LIST member pointing at the
# ClientInfo list entry class. Do not hand-edit: generated from the model.
'Alarms.Detail.DetailSystem.Clients' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem.Clients',
False,
[
_MetaInfoClassMember('client-info', REFERENCE_LIST, 'ClientInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Clients.ClientInfo',
[], [],
''' Client List
''',
'client_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'clients',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'detail-system' under
# Alarms.Detail: REFERENCE_CLASS members for its child containers
# (active, clients, history, stats, suppressed). Do not hand-edit.
'Alarms.Detail.DetailSystem' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailSystem',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Active',
[], [],
''' Show the active alarms at this scope.
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clients', REFERENCE_CLASS, 'Clients' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Clients',
[], [],
''' Show the clients associated with this service.
''',
'clients',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.History',
[], [],
''' Show the history alarms at this scope.
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('stats', REFERENCE_CLASS, 'Stats' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Stats',
[], [],
''' Show the service statistics.
''',
'stats',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', REFERENCE_CLASS, 'Suppressed' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem.Suppressed',
[], [],
''' Show the suppressed alarms at this scope.
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail-system',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'otn' (OTN-specific alarm
# attributes: direction, notification-source) under
# DetailLocation.Active.AlarmInfo. Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'tca' (threshold-crossing
# alert attributes: bucket-type, current-value, threshold-value) under
# DetailLocation.Active.AlarmInfo. Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Tca' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Tca',
False,
[
_MetaInfoClassMember('bucket-type', REFERENCE_ENUM_CLASS, 'TimingBucketEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'TimingBucketEnum',
[], [],
''' Timing Bucket
''',
'bucket_type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('current-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'current_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('threshold-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'threshold_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'tca',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG list node 'alarm-info' under
# DetailLocation.Active: per-alarm leaves (aid, name, times, severity, status,
# etc.) plus nested 'otn' and 'tca' containers. Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tca', REFERENCE_CLASS, 'Tca' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Tca',
[], [],
''' TCA feature specific alarm attributes
''',
'tca',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmEventEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmEventEnum',
[], [],
''' alarm event type
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'active' under
# DetailLocation: a single REFERENCE_LIST member pointing at the AlarmInfo
# list entry class. Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'otn' under
# DetailLocation.History.AlarmInfo (same member layout as the Active-scope
# Otn entry). Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'tca' under
# DetailLocation.History.AlarmInfo (same member layout as the Active-scope
# Tca entry). Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Tca' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Tca',
False,
[
_MetaInfoClassMember('bucket-type', REFERENCE_ENUM_CLASS, 'TimingBucketEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'TimingBucketEnum',
[], [],
''' Timing Bucket
''',
'bucket_type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('current-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'current_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('threshold-value', ATTRIBUTE, 'str' , None, None,
[(0, 20)], [],
''' Alarm Threshold
''',
'threshold_value',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'tca',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG list node 'alarm-info' under
# DetailLocation.History: same per-alarm leaves as the Active-scope AlarmInfo
# entry but nested under the History container. Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tca', REFERENCE_CLASS, 'Tca' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Tca',
[], [],
''' TCA feature specific alarm attributes
''',
'tca',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmEventEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmEventEnum',
[], [],
''' alarm event type
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'history' under
# DetailLocation: a single REFERENCE_LIST member pointing at the History
# AlarmInfo list entry class. Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'otn' under
# DetailLocation.Suppressed.SuppressedInfo (same member layout as the
# Active/History Otn entries). Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo.Otn' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo.Otn',
False,
[
_MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AlarmDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmDirectionEnum',
[], [],
''' Alarm direction
''',
'direction',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('notification-source', REFERENCE_ENUM_CLASS, 'AlarmNotificationSrcEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmNotificationSrcEnum',
[], [],
''' Source of Alarm
''',
'notification_source',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'otn',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG list node 'suppressed-info' under
# DetailLocation.Suppressed: like AlarmInfo but with suppressed-time /
# suppressed-timestamp leaves and no clear-time / tca members. Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo',
False,
[
_MetaInfoClassMember('aid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm aid
''',
'aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('alarm-name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm name
''',
'alarm_name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('eid', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm eid
''',
'eid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm interface name
''',
'interface',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('module', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm module description
''',
'module',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo.Otn',
[], [],
''' OTN feature specific alarm attributes
''',
'otn',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('pending-sync', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Pending async flag
''',
'pending_sync',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reporting-agent-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reporting agent id
''',
'reporting_agent_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('service-affecting', REFERENCE_ENUM_CLASS, 'AlarmServiceAffectingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmServiceAffectingEnum',
[], [],
''' Alarm service affecting
''',
'service_affecting',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' Alarm status
''',
'status',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm suppressed time
''',
'suppressed_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm suppressed time(timestamp format)
''',
'suppressed_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('tag', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm tag description
''',
'tag',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'suppressed' under
# DetailLocation: a single REFERENCE_LIST member pointing at the
# SuppressedInfo list entry class. Do not hand-edit.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed',
False,
[
_MetaInfoClassMember('suppressed-info', REFERENCE_LIST, 'SuppressedInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo',
[], [],
''' Suppressed Alarm List
''',
'suppressed_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
# Auto-generated YDK meta-info for YANG container 'stats' under
# DetailLocation: alarm-manager counters (uint64 state counters, uint32
# cache/drop counters). Do not hand-edit: generated from the model.
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Stats' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Stats',
False,
[
_MetaInfoClassMember('active', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are currently in the active state
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('cache-hit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total alarms which had the cache hit
''',
'cache_hit',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('cache-miss', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total alarms which had the cache miss
''',
'cache_miss',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that we couldn't keep track due to some
error or other
''',
'dropped',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-clear-without-set', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped clear without set
''',
'dropped_clear_without_set',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-db-error', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to db error
''',
'dropped_db_error',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-duplicate', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped which were duplicate
''',
'dropped_duplicate',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-insuff-mem', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to insufficient memory
''',
'dropped_insuff_mem',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('dropped-invalid-aid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms dropped due to invalid aid
''',
'dropped_invalid_aid',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are cleared. This one is counted
over a long period of time
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('reported', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that were in all reported to this Alarm
Mgr
''',
'reported',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are in suppressed state
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-active', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are currently in the active
state(sysadmin plane)
''',
'sysadmin_active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-history', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are cleared in sysadmin plane. This
one is counted over a long period of time
''',
'sysadmin_history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('sysadmin-suppressed', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarms that are suppressed in sysadmin plane.
''',
'sysadmin_suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'stats',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients.ClientInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients.ClientInfo',
False,
[
_MetaInfoClassMember('connect-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent connected to the alarm
mgr
''',
'connect_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('connect-timestamp', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Agent connect timestamp
''',
'connect_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-disp', ATTRIBUTE, 'bool' , None, None,
[], [],
''' The current subscription status of the client
''',
'filter_disp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' The filter used for alarm group
''',
'filter_group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' The filter used for alarm severity
''',
'filter_severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('filter-state', REFERENCE_ENUM_CLASS, 'AlarmStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmStatusEnum',
[], [],
''' The filter used for alarm bi-state state+
''',
'filter_state',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('get-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent queried for alarms
''',
'get_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('handle', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' The client handle through which interface
''',
'handle',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms agent id of the client
''',
'id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' The location of this client
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm client
''',
'name',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('report-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent reported alarms
''',
'report_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'AlarmClientStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmClientStateEnum',
[], [],
''' The current state of the client
''',
'state',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('subscribe-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of times the agent subscribed for alarms
''',
'subscribe_count',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('subscriber-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alarms agent subscriber id of the client
''',
'subscriber_id',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'AlarmClientEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmClientEnum',
[], [],
''' The type of the client
''',
'type',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'client-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients',
False,
[
_MetaInfoClassMember('client-info', REFERENCE_LIST, 'ClientInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients.ClientInfo',
[], [],
''' Client List
''',
'client_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'clients',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations.DetailLocation' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations.DetailLocation',
False,
[
_MetaInfoClassMember('node-id', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' NodeID of the Location
''',
'node_id',
'Cisco-IOS-XR-alarmgr-server-oper', True),
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active',
[], [],
''' Show the active alarms at this scope.
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clients', REFERENCE_CLASS, 'Clients' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients',
[], [],
''' Show the clients associated with this
service.
''',
'clients',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History',
[], [],
''' Show the history alarms at this scope.
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('stats', REFERENCE_CLASS, 'Stats' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Stats',
[], [],
''' Show the service statistics.
''',
'stats',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', REFERENCE_CLASS, 'Suppressed' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed',
[], [],
''' Show the suppressed alarms at this scope.
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail-location',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard.DetailLocations' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard.DetailLocations',
False,
[
_MetaInfoClassMember('detail-location', REFERENCE_LIST, 'DetailLocation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations.DetailLocation',
[], [],
''' Specify a card location for alarms.
''',
'detail_location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail-locations',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail.DetailCard' : {
'meta_info' : _MetaInfoClass('Alarms.Detail.DetailCard',
False,
[
_MetaInfoClassMember('detail-locations', REFERENCE_CLASS, 'DetailLocations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard.DetailLocations',
[], [],
''' Table of DetailLocation
''',
'detail_locations',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail-card',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Detail' : {
'meta_info' : _MetaInfoClass('Alarms.Detail',
False,
[
_MetaInfoClassMember('detail-card', REFERENCE_CLASS, 'DetailCard' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailCard',
[], [],
''' Show detail card scope alarm related data.
''',
'detail_card',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('detail-system', REFERENCE_CLASS, 'DetailSystem' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail.DetailSystem',
[], [],
''' show detail system scope alarm related data.
''',
'detail_system',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'detail',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active.AlarmInfo',
False,
[
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History.AlarmInfo',
False,
[
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed.SuppressedInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed.SuppressedInfo',
False,
[
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm suppressed time
''',
'suppressed_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm suppressed time(timestamp format)
''',
'suppressed_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed',
False,
[
_MetaInfoClassMember('suppressed-info', REFERENCE_LIST, 'SuppressedInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed.SuppressedInfo',
[], [],
''' Suppressed Alarm List
''',
'suppressed_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations.BriefLocation' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations.BriefLocation',
False,
[
_MetaInfoClassMember('node-id', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' NodeID of the Location
''',
'node_id',
'Cisco-IOS-XR-alarmgr-server-oper', True),
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active',
[], [],
''' Show the active alarms at this scope.
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History',
[], [],
''' Show the history alarms at this scope.
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', REFERENCE_CLASS, 'Suppressed' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed',
[], [],
''' Show the suppressed alarms at this scope.
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief-location',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard.BriefLocations' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard.BriefLocations',
False,
[
_MetaInfoClassMember('brief-location', REFERENCE_LIST, 'BriefLocation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations.BriefLocation',
[], [],
''' Specify a card location for alarms.
''',
'brief_location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief-locations',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefCard' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefCard',
False,
[
_MetaInfoClassMember('brief-locations', REFERENCE_CLASS, 'BriefLocations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard.BriefLocations',
[], [],
''' Table of BriefLocation
''',
'brief_locations',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief-card',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.Active.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.Active.AlarmInfo',
False,
[
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.Active' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.Active',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.Active.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'active',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.History.AlarmInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.History.AlarmInfo',
False,
[
_MetaInfoClassMember('clear-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm clear time
''',
'clear_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('clear-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm clear time(timestamp format)
''',
'clear_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarm-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.History' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.History',
False,
[
_MetaInfoClassMember('alarm-info', REFERENCE_LIST, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.History.AlarmInfo',
[], [],
''' Alarm List
''',
'alarm_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.Suppressed.SuppressedInfo' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.Suppressed.SuppressedInfo',
False,
[
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 256)], [],
''' Alarm description
''',
'description',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'AlarmGroupsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmGroupsEnum',
[], [],
''' Alarm group
''',
'group',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[(0, 128)], [],
''' Alarm location
''',
'location',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm set time
''',
'set_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('set-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm set time(timestamp format)
''',
'set_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlarmSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'AlarmSeverityEnum',
[], [],
''' Alarm severity
''',
'severity',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-time', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Alarm suppressed time
''',
'suppressed_time',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed-timestamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Alarm suppressed time(timestamp format)
''',
'suppressed_timestamp',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed-info',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem.Suppressed' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem.Suppressed',
False,
[
_MetaInfoClassMember('suppressed-info', REFERENCE_LIST, 'SuppressedInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.Suppressed.SuppressedInfo',
[], [],
''' Suppressed Alarm List
''',
'suppressed_info',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'suppressed',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief.BriefSystem' : {
'meta_info' : _MetaInfoClass('Alarms.Brief.BriefSystem',
False,
[
_MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.Active',
[], [],
''' Show the active alarms at this scope.
''',
'active',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.History',
[], [],
''' Show the history alarms at this scope.
''',
'history',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('suppressed', REFERENCE_CLASS, 'Suppressed' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem.Suppressed',
[], [],
''' Show the suppressed alarms at this scope.
''',
'suppressed',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief-system',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms.Brief' : {
'meta_info' : _MetaInfoClass('Alarms.Brief',
False,
[
_MetaInfoClassMember('brief-card', REFERENCE_CLASS, 'BriefCard' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefCard',
[], [],
''' Show brief card scope alarm related data.
''',
'brief_card',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('brief-system', REFERENCE_CLASS, 'BriefSystem' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief.BriefSystem',
[], [],
''' Show brief system scope alarm related data.
''',
'brief_system',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'brief',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
'Alarms' : {
'meta_info' : _MetaInfoClass('Alarms',
False,
[
_MetaInfoClassMember('brief', REFERENCE_CLASS, 'Brief' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Brief',
[], [],
''' A set of brief alarm commands.
''',
'brief',
'Cisco-IOS-XR-alarmgr-server-oper', False),
_MetaInfoClassMember('detail', REFERENCE_CLASS, 'Detail' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper', 'Alarms.Detail',
[], [],
''' A set of detail alarm commands.
''',
'detail',
'Cisco-IOS-XR-alarmgr-server-oper', False),
],
'Cisco-IOS-XR-alarmgr-server-oper',
'alarms',
_yang_ns._namespaces['Cisco-IOS-XR-alarmgr-server-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_alarmgr_server_oper'
),
},
}
# Wire up the parent links for the generated meta classes: each nested
# container's meta_info points at the meta_info of its enclosing container.
# For every key below, the parent key is the child key minus its final
# dotted component, so derive it with rsplit instead of spelling out both
# sides of each assignment. Order and key set are preserved exactly.
for _child_path in (
    'Alarms.Detail.DetailSystem.Active.AlarmInfo.Otn',
    'Alarms.Detail.DetailSystem.Active.AlarmInfo.Tca',
    'Alarms.Detail.DetailSystem.Active.AlarmInfo',
    'Alarms.Detail.DetailSystem.History.AlarmInfo.Otn',
    'Alarms.Detail.DetailSystem.History.AlarmInfo.Tca',
    'Alarms.Detail.DetailSystem.History.AlarmInfo',
    'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo.Otn',
    'Alarms.Detail.DetailSystem.Suppressed.SuppressedInfo',
    'Alarms.Detail.DetailSystem.Clients.ClientInfo',
    'Alarms.Detail.DetailSystem.Active',
    'Alarms.Detail.DetailSystem.History',
    'Alarms.Detail.DetailSystem.Suppressed',
    'Alarms.Detail.DetailSystem.Stats',
    'Alarms.Detail.DetailSystem.Clients',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Otn',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo.Tca',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active.AlarmInfo',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Otn',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo.Tca',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History.AlarmInfo',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo.Otn',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed.SuppressedInfo',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients.ClientInfo',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Active',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.History',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Suppressed',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Stats',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation.Clients',
    'Alarms.Detail.DetailCard.DetailLocations.DetailLocation',
    'Alarms.Detail.DetailCard.DetailLocations',
    'Alarms.Detail.DetailSystem',
    'Alarms.Detail.DetailCard',
    'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active.AlarmInfo',
    'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History.AlarmInfo',
    'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed.SuppressedInfo',
    'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Active',
    'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.History',
    'Alarms.Brief.BriefCard.BriefLocations.BriefLocation.Suppressed',
    'Alarms.Brief.BriefCard.BriefLocations.BriefLocation',
    'Alarms.Brief.BriefCard.BriefLocations',
):
    _parent_path = _child_path.rsplit('.', 1)[0]
    _meta_table[_child_path]['meta_info'].parent = _meta_table[_parent_path]['meta_info']
_meta_table['Alarms.Brief.BriefSystem.Active.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem.Active']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.History.AlarmInfo']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem.History']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.Suppressed.SuppressedInfo']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem.Suppressed']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.Active']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.History']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem']['meta_info']
_meta_table['Alarms.Brief.BriefSystem.Suppressed']['meta_info'].parent =_meta_table['Alarms.Brief.BriefSystem']['meta_info']
_meta_table['Alarms.Brief.BriefCard']['meta_info'].parent =_meta_table['Alarms.Brief']['meta_info']
_meta_table['Alarms.Brief.BriefSystem']['meta_info'].parent =_meta_table['Alarms.Brief']['meta_info']
_meta_table['Alarms.Detail']['meta_info'].parent =_meta_table['Alarms']['meta_info']
_meta_table['Alarms.Brief']['meta_info'].parent =_meta_table['Alarms']['meta_info']
| 52.28475
| 232
| 0.521735
| 11,242
| 131,653
| 5.884896
| 0.023039
| 0.093594
| 0.116993
| 0.153405
| 0.97181
| 0.968923
| 0.964373
| 0.950618
| 0.932223
| 0.907131
| 0
| 0.018193
| 0.343691
| 131,653
| 2,517
| 233
| 52.305522
| 0.74748
| 0
| 0
| 0.683493
| 0
| 0.00091
| 0.437456
| 0.322974
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003638
| 0
| 0.003638
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
75515f557f7b57d9c80729b19cdefe22380b56fa
| 37,908
|
py
|
Python
|
egret/model_library/transmission/bus.py
|
breldridge/Egret
|
8672c974d1fc7b6ce72a4f457eae5682666575e4
|
[
"BSD-3-Clause"
] | null | null | null |
egret/model_library/transmission/bus.py
|
breldridge/Egret
|
8672c974d1fc7b6ce72a4f457eae5682666575e4
|
[
"BSD-3-Clause"
] | null | null | null |
egret/model_library/transmission/bus.py
|
breldridge/Egret
|
8672c974d1fc7b6ce72a4f457eae5682666575e4
|
[
"BSD-3-Clause"
] | null | null | null |
# ___________________________________________________________________________
#
# EGRET: Electrical Grid Research and Engineering Tools
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
This module contains the declarations for the modeling components
typically used for buses (including loads and shunts)
"""
import pyomo.environ as pe
import egret.model_library.decl as decl
from pyomo.core.util import quicksum
from pyomo.core.expr.numeric_expr import LinearExpression
from egret.model_library.defn import FlowType, CoordinateType, ApproximationType
from math import tan, radians
def declare_var_vr(model, index_set, **kwargs):
    """
    Create variable for the real component of the voltage at a bus
    (rectangular coordinates); extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('vr', model=model, index_set=index_set, **kwargs)
def declare_var_vj(model, index_set, **kwargs):
    """
    Create variable for the imaginary component of the voltage at a bus
    (rectangular coordinates); extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('vj', model=model, index_set=index_set, **kwargs)
def declare_var_vm(model, index_set, **kwargs):
    """
    Create variable for the voltage magnitude of the voltage at a bus
    (polar coordinates); extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('vm', model=model, index_set=index_set, **kwargs)
def declare_var_va(model, index_set, **kwargs):
    """
    Create variable for the phase angle of the voltage at a bus
    (polar coordinates); extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('va', model=model, index_set=index_set, **kwargs)
def declare_expr_vmsq(model, index_set, coordinate_type=CoordinateType.POLAR):
    """
    Create an expression for the voltage magnitude squared at a bus.

    In rectangular coordinates vmsq = vr**2 + vj**2; in polar coordinates
    vmsq = vm**2. Raises ValueError for an unrecognized coordinate_type
    (previously the expressions were silently left empty), matching the
    behavior of declare_eq_vmsq.
    """
    m = model
    expr_set = decl.declare_set('_expr_vmsq', model, index_set)
    m.vmsq = pe.Expression(expr_set)
    if coordinate_type == CoordinateType.RECTANGULAR:
        for bus in expr_set:
            m.vmsq[bus] = m.vr[bus] ** 2 + m.vj[bus] ** 2
    elif coordinate_type == CoordinateType.POLAR:
        for bus in expr_set:
            m.vmsq[bus] = m.vm[bus] ** 2
    else:
        # consistent with declare_eq_vmsq: fail loudly instead of leaving
        # the Expression container empty
        raise ValueError('unexpected coordinate_type: {0}'.format(str(coordinate_type)))
def declare_var_vmsq(model, index_set, **kwargs):
    """
    Create auxiliary variable for the voltage magnitude squared at a bus;
    extra kwargs are forwarded to decl.declare_var (e.g. bounds, initialize).
    """
    decl.declare_var('vmsq', model=model, index_set=index_set, **kwargs)
def declare_eq_vmsq(model, index_set, coordinate_type=CoordinateType.POLAR):
    """
    Constrain the auxiliary variable vmsq to equal the squared voltage
    magnitude, written in the requested coordinate system.
    """
    m = model
    con_set = decl.declare_set('_con_eq_vmsq', model, index_set)
    m.eq_vmsq = pe.Constraint(con_set)
    if coordinate_type == CoordinateType.RECTANGULAR:
        for b in con_set:
            m.eq_vmsq[b] = m.vmsq[b] == m.vr[b] ** 2 + m.vj[b] ** 2
    elif coordinate_type == CoordinateType.POLAR:
        for b in con_set:
            m.eq_vmsq[b] = m.vmsq[b] == m.vm[b] ** 2
    else:
        raise ValueError('unexpected coordinate_type: {0}'.format(str(coordinate_type)))
def declare_var_ir_aggregation_at_bus(model, index_set, **kwargs):
    """
    Create a variable for the aggregated real current at a bus;
    extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('ir_aggregation_at_bus', model=model, index_set=index_set, **kwargs)
def declare_var_ij_aggregation_at_bus(model, index_set, **kwargs):
    """
    Create a variable for the aggregated imaginary current at a bus;
    extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('ij_aggregation_at_bus', model=model, index_set=index_set, **kwargs)
def declare_var_pl(model, index_set, **kwargs):
    """
    Create variable for the real power load at a bus;
    extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('pl', model=model, index_set=index_set, **kwargs)
def declare_var_ql(model, index_set, **kwargs):
    """
    Create variable for the reactive power load at a bus;
    extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('ql', model=model, index_set=index_set, **kwargs)
def declare_var_p_nw(model, index_set, **kwargs):
    """
    Create variable for the net real power withdrawals at a bus;
    extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('p_nw', model=model, index_set=index_set, **kwargs)
def declare_var_q_nw(model, index_set, **kwargs):
    """
    Create variable for the net reactive power withdrawals at a bus;
    extra kwargs are forwarded to decl.declare_var.
    """
    decl.declare_var('q_nw', model=model, index_set=index_set, **kwargs)
def declare_expr_shunt_power_at_bus(model, index_set, shunt_attrs,
                                    coordinate_type=CoordinateType.POLAR):
    """
    Build named expressions shunt_p / shunt_q for the fixed-shunt power at
    each bus; buses with no entry in shunt_attrs['bus'] keep the 0.0 default.
    """
    m = model
    expr_set = decl.declare_set('_expr_shunt_at_bus_set', model, index_set)
    m.shunt_p = pe.Expression(expr_set, initialize=0.0)
    m.shunt_q = pe.Expression(expr_set, initialize=0.0)
    for bus_name in expr_set:
        if bus_name not in shunt_attrs['bus']:
            continue
        if coordinate_type == CoordinateType.POLAR:
            vmsq = m.vm[bus_name] ** 2
        elif coordinate_type == CoordinateType.RECTANGULAR:
            vmsq = m.vr[bus_name] ** 2 + m.vj[bus_name] ** 2
        else:
            # unrecognized coordinate type: leave the 0.0 defaults, as before
            continue
        m.shunt_p[bus_name] = shunt_attrs['gs'][bus_name] * vmsq
        m.shunt_q[bus_name] = -shunt_attrs['bs'][bus_name] * vmsq
def _get_dc_dicts(dc_inlet_branches_by_bus, dc_outlet_branches_by_bus, con_set):
if dc_inlet_branches_by_bus is None:
assert dc_outlet_branches_by_bus is None
dc_inlet_branches_by_bus = {bn:() for bn in con_set}
if dc_outlet_branches_by_bus is None:
dc_outlet_branches_by_bus = dc_inlet_branches_by_bus
return dc_inlet_branches_by_bus, dc_outlet_branches_by_bus
def declare_expr_p_net_withdraw_at_bus(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts,
                                       dc_inlet_branches_by_bus=None, dc_outlet_branches_by_bus=None,
                                       vm_by_bus=None, **kwargs):
    """
    Create a named pyomo expression for bus net withdraw.

    p_nw[b] = fixed-shunt term + real load - real generation
    + DC power leaving the bus - DC power entering the bus.
    When kwargs contains 'linearize_shunts' and vm_by_bus is given, the
    shunt term is either a first-order linearization around vm_by_bus
    (True) or the constant vm_by_bus**2 (False); any other combination
    falls through to the constant-conductance form at the bottom.
    """
    m = model
    decl.declare_expr('p_nw', model, index_set)
    dc_inlet_branches_by_bus, dc_outlet_branches_by_bus = _get_dc_dicts(dc_inlet_branches_by_bus,
                                                                        dc_outlet_branches_by_bus,
                                                                        index_set)
    if kwargs and vm_by_bus is not None:
        for idx,val in kwargs.items():
            if idx=='linearize_shunts' and val==True:
                for b in index_set:
                    # shunt linearized around the operating point vm_by_bus[b]
                    m.p_nw[b] = ( bus_gs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
                                  + (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
                                  - sum(m.pg[g] for g in gens_by_bus[b])
                                  + sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
                                  - sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
                                  )
                return
            if idx=='linearize_shunts' and val==False:
                for b in index_set:
                    # shunt evaluated at the fixed magnitude vm_by_bus[b]
                    m.p_nw[b] = ( bus_gs_fixed_shunts[b] * vm_by_bus[b] ** 2
                                  + (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
                                  - sum(m.pg[g] for g in gens_by_bus[b])
                                  + sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
                                  - sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
                                  )
                return
    # default: constant fixed-shunt conductance term (no voltage scaling)
    for b in index_set:
        m.p_nw[b] = ( bus_gs_fixed_shunts[b]
                      + ( m.pl[b] if bus_p_loads[b] != 0.0 else 0.0 )
                      - sum( m.pg[g] for g in gens_by_bus[b] )
                      + sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
                      - sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
                      )
def declare_eq_p_net_withdraw_at_bus(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts,
                                     dc_inlet_branches_by_bus=None, dc_outlet_branches_by_bus=None,
                                     vm_by_bus=None, **kwargs):
    """
    Create a named pyomo constraint for bus net withdraw.

    p_nw[b] == fixed-shunt term + real load - real generation
    + DC power leaving the bus - DC power entering the bus.
    When kwargs contains 'linearize_shunts' and vm_by_bus is given, the
    shunt term is either a first-order linearization around vm_by_bus
    (True) or the constant vm_by_bus**2 (False).

    Fix: the default (constant-shunt) constraints were previously under an
    `else` attached to the kwargs check, so no constraints at all were
    built when kwargs was non-empty but contained no 'linearize_shunts'
    key; the default now falls through like the sibling declare_expr_* /
    declare_eq_q_* builders.
    """
    m = model
    con_set = decl.declare_set('_con_eq_p_net_withdraw_at_bus', model, index_set)
    dc_inlet_branches_by_bus, dc_outlet_branches_by_bus = _get_dc_dicts(dc_inlet_branches_by_bus,
                                                                        dc_outlet_branches_by_bus,
                                                                        index_set)
    m.eq_p_net_withdraw_at_bus = pe.Constraint(con_set)
    constr = m.eq_p_net_withdraw_at_bus
    if kwargs and vm_by_bus is not None:
        for idx,val in kwargs.items():
            if idx=='linearize_shunts' and val==True:
                for b in index_set:
                    # shunt linearized around the operating point vm_by_bus[b]
                    constr[b] = m.p_nw[b] == ( bus_gs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
                                               + (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
                                               - sum(m.pg[g] for g in gens_by_bus[b])
                                               + sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
                                               - sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
                                               )
                return
            if idx=='linearize_shunts' and val==False:
                for b in index_set:
                    # shunt evaluated at the fixed magnitude vm_by_bus[b]
                    constr[b] = m.p_nw[b] == ( bus_gs_fixed_shunts[b] * vm_by_bus[b] ** 2
                                               + (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
                                               - sum(m.pg[g] for g in gens_by_bus[b])
                                               + sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
                                               - sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
                                               )
                return
    # default: constant fixed-shunt conductance term (no voltage scaling)
    for b in index_set:
        constr[b] = m.p_nw[b] == ( bus_gs_fixed_shunts[b]
                                   + ( m.pl[b] if bus_p_loads[b] != 0.0 else 0.0 )
                                   - sum( m.pg[g] for g in gens_by_bus[b] )
                                   + sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
                                   - sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
                                   )
def declare_expr_q_net_withdraw_at_bus(model, index_set, bus_q_loads, gens_by_bus, bus_bs_fixed_shunts,
                                       vm_by_bus=None, **kwargs):
    """
    Create a named pyomo expression for bus net reactive withdraw.

    q_nw[b] = -(fixed-shunt susceptance term) + reactive load - reactive
    generation. When kwargs contains 'linearize_shunts' and vm_by_bus is
    given, the shunt term is either a first-order linearization around
    vm_by_bus (True) or the constant vm_by_bus**2 (False); any other
    combination falls through to the constant-susceptance form at the
    bottom.
    """
    m = model
    decl.declare_expr('q_nw', model, index_set)
    if kwargs and vm_by_bus is not None:
        for idx,val in kwargs.items():
            if idx=='linearize_shunts' and val==True:
                for b in index_set:
                    # shunt linearized around the operating point vm_by_bus[b]
                    m.q_nw[b] = (-bus_bs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
                                 + (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
                                 - sum(m.qg[g] for g in gens_by_bus[b])
                                 )
                return
            if idx=='linearize_shunts' and val==False:
                for b in index_set:
                    # shunt evaluated at the fixed magnitude vm_by_bus[b]
                    m.q_nw[b] = (-bus_bs_fixed_shunts[b] * vm_by_bus[b] ** 2
                                 + (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
                                 - sum(m.qg[g] for g in gens_by_bus[b])
                                 )
                return
    # default: constant fixed-shunt susceptance term (no voltage scaling)
    for b in index_set:
        m.q_nw[b] = (-bus_bs_fixed_shunts[b]
                     + ( m.ql[b] if bus_q_loads[b] != 0.0 else 0.0 )
                     - sum( m.qg[g] for g in gens_by_bus[b] )
                     )
def declare_eq_q_net_withdraw_at_bus(model, index_set, bus_q_loads, gens_by_bus, bus_bs_fixed_shunts,
                                     vm_by_bus=None, **kwargs):
    """
    Create a named pyomo constraint for bus net reactive withdraw.

    q_nw[b] == -(fixed-shunt susceptance term) + reactive load - reactive
    generation. When kwargs contains 'linearize_shunts' and vm_by_bus is
    given, the shunt term is either a first-order linearization around
    vm_by_bus (True) or the constant vm_by_bus**2 (False); any other
    combination falls through to the constant-susceptance form at the
    bottom.
    """
    m = model
    con_set = decl.declare_set('_con_eq_q_net_withdraw_at_bus', model, index_set)
    m.eq_q_net_withdraw_at_bus = pe.Constraint(con_set)
    constr = m.eq_q_net_withdraw_at_bus
    if kwargs and vm_by_bus is not None:
        for idx,val in kwargs.items():
            if idx=='linearize_shunts' and val==True:
                for b in index_set:
                    # shunt linearized around the operating point vm_by_bus[b]
                    constr[b] = m.q_nw[b] == (-bus_bs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
                                              + (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
                                              - sum(m.qg[g] for g in gens_by_bus[b])
                                              )
                return
            if idx=='linearize_shunts' and val==False:
                for b in index_set:
                    # shunt evaluated at the fixed magnitude vm_by_bus[b]
                    constr[b] = m.q_nw[b] == (-bus_bs_fixed_shunts[b] * vm_by_bus[b] ** 2
                                              + (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
                                              - sum(m.qg[g] for g in gens_by_bus[b])
                                              )
                return
    # default: constant fixed-shunt susceptance term (no voltage scaling)
    for b in index_set:
        constr[b] = m.q_nw[b] == (-bus_bs_fixed_shunts[b]
                                  + ( m.ql[b] if bus_q_loads[b] != 0.0 else 0.0 )
                                  - sum( m.qg[g] for g in gens_by_bus[b] )
                                  )
def declare_eq_ref_bus_nonzero(model, ref_angle, ref_bus):
    r"""
    Create an equality constraint enforcing tan(\theta) = vj/vr at the
    reference bus, written multiplied through by vr to avoid a division.
    """
    m = model
    # ref_angle is given in degrees; tan() expects radians
    slope = tan(radians(ref_angle))
    m.eq_ref_bus_nonzero = pe.Constraint(expr=slope * m.vr[ref_bus] == m.vj[ref_bus])
def declare_eq_i_aggregation_at_bus(model, index_set,
                                    bus_bs_fixed_shunts, bus_gs_fixed_shunts,
                                    inlet_branches_by_bus, outlet_branches_by_bus):
    """
    Create the equality constraints for the aggregated real and imaginary
    currents at the bus.

    ir/ij_aggregation_at_bus are constrained to equal the sum of incident
    branch currents plus the fixed-shunt current Y*V with Y = gs + j*bs,
    i.e. Ir = gs*vr - bs*vj and Ij = gs*vj + bs*vr.
    """
    m = model
    con_set = decl.declare_set('_con_eq_i_aggregation_at_bus_set', model, index_set)
    m.eq_ir_aggregation_at_bus = pe.Constraint(con_set)
    m.eq_ij_aggregation_at_bus = pe.Constraint(con_set)
    for bus_name in con_set:
        # incident branch currents: ifr/ifj for branches whose 'from' end is
        # presumably this bus, itr/itj for branches whose 'to' end is — the
        # from/to naming convention is defined in the branch module (verify)
        ir_expr = sum([m.ifr[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
        ir_expr += sum([m.itr[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
        ij_expr = sum([m.ifj[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
        ij_expr += sum([m.itj[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
        if bus_bs_fixed_shunts[bus_name] != 0.0:
            # susceptance part of the shunt current j*bs*(vr + j*vj)
            ir_expr -= bus_bs_fixed_shunts[bus_name] * m.vj[bus_name]
            ij_expr += bus_bs_fixed_shunts[bus_name] * m.vr[bus_name]
        if bus_gs_fixed_shunts[bus_name] != 0.0:
            # conductance part of the shunt current gs*(vr + j*vj)
            ir_expr += bus_gs_fixed_shunts[bus_name] * m.vr[bus_name]
            ij_expr += bus_gs_fixed_shunts[bus_name] * m.vj[bus_name]
        # move the aggregation variable to the LHS so the constraint is expr == 0
        ir_expr -= m.ir_aggregation_at_bus[bus_name]
        ij_expr -= m.ij_aggregation_at_bus[bus_name]
        m.eq_ir_aggregation_at_bus[bus_name] = ir_expr == 0
        m.eq_ij_aggregation_at_bus[bus_name] = ij_expr == 0
def declare_eq_p_balance_ed(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts, **rhs_kwargs):
    """
    Create the equality constraints for the system-wide real power balance.
    NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign

    rhs_kwargs options: 'include_feasibility_load_shed' /
    'include_feasibility_over_generation' name model attributes added to /
    subtracted from the balance; 'include_losses' gives an iterable of
    branch names whose pfl is subtracted; 'relax_balance' turns the
    equality into >= 0.
    """
    m = model
    p_expr = sum(m.pg[gen_name] for bus_name in index_set for gen_name in gens_by_bus[bus_name])
    p_expr -= sum(m.pl[bus_name] for bus_name in index_set if bus_p_loads[bus_name] is not None)
    p_expr -= sum(bus_gs_fixed_shunts[bus_name] for bus_name in index_set if bus_gs_fixed_shunts[bus_name] != 0.0)
    relaxed_balance = False
    if rhs_kwargs:
        for idx, val in rhs_kwargs.items():
            # val names a model attribute; getattr replaces the previous
            # eval("m." + val), which would execute arbitrary code
            if idx == 'include_feasibility_load_shed':
                p_expr += getattr(m, val)
            if idx == 'include_feasibility_over_generation':
                p_expr -= getattr(m, val)
            if idx == 'include_losses':
                p_expr -= sum(m.pfl[branch_name] for branch_name in val)
            if idx == 'relax_balance':
                relaxed_balance = True
    if relaxed_balance:
        m.eq_p_balance = pe.Constraint(expr=p_expr >= 0.0)
    else:
        m.eq_p_balance = pe.Constraint(expr=p_expr == 0.0)
def declare_eq_p_balance_lopf(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts, vm_by_bus, **rhs_kwargs):
    """
    Create the equality constraints for the system-wide real power balance.
    NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign

    rhs_kwargs options: feasibility slack attribute names, branch/system
    loss handling, 'relax_balance' (>= 0 instead of == 0), and
    'linearize_shunts' (True: first-order linearization of the shunt term
    around vm_by_bus; False: constant vm_by_bus**2).
    """
    m = model
    p_expr = sum(m.pg[gen_name] for bus_name in index_set for gen_name in gens_by_bus[bus_name])
    p_expr -= sum(m.pl[bus_name] for bus_name in index_set if bus_p_loads[bus_name] is not None)
    relaxed_balance = False
    if rhs_kwargs:
        for idx,val in rhs_kwargs.items():
            # val names a model attribute; getattr replaces the previous
            # eval("m." + val), which would execute arbitrary code
            if idx == 'include_feasibility_load_shed':
                p_expr += getattr(m, val)
            if idx == 'include_feasibility_over_generation':
                p_expr -= getattr(m, val)
            if idx == 'include_branch_losses':
                pass # branch losses are added to the constraint after updating pfl constraints
            if idx == 'include_system_losses':
                p_expr -= m.ploss
            if idx == 'relax_balance':
                relaxed_balance = True
            if idx == 'linearize_shunts':
                if val == True:
                    p_expr -= sum( bus_gs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2) \
                                   for b in index_set if bus_gs_fixed_shunts[b] != 0.0)
                elif val == False:
                    p_expr -= sum( bus_gs_fixed_shunts[b] * vm_by_bus[b] ** 2 \
                                   for b in index_set if bus_gs_fixed_shunts[b] != 0.0)
                else:
                    raise Exception('linearize_shunts option is invalid.')
    if relaxed_balance:
        m.eq_p_balance = pe.Constraint(expr = p_expr >= 0.0)
    else:
        m.eq_p_balance = pe.Constraint(expr = p_expr == 0.0)
def declare_eq_q_balance_lopf(model, index_set, bus_q_loads, gens_by_bus, bus_bs_fixed_shunts, vm_by_bus, **rhs_kwargs):
    """
    Create the equality constraints for the system-wide reactive power balance.
    NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign

    rhs_kwargs options mirror declare_eq_p_balance_lopf, with
    'include_reactive_load_shed' / 'include_reactive_over_generation'
    naming the slack attributes.
    """
    m = model
    q_expr = sum(m.qg[gen_name] for bus_name in index_set for gen_name in gens_by_bus[bus_name])
    q_expr -= sum(m.ql[bus_name] for bus_name in index_set if bus_q_loads[bus_name] is not None)
    relaxed_balance = False
    if rhs_kwargs:
        for idx,val in rhs_kwargs.items():
            # val names a model attribute; getattr replaces the previous
            # eval("m." + val), which would execute arbitrary code
            if idx == 'include_reactive_load_shed':
                q_expr += getattr(m, val)
            if idx == 'include_reactive_over_generation':
                q_expr -= getattr(m, val)
            if idx == 'include_branch_losses':
                pass # branch losses are added to the constraint after updating qfl constraints
            if idx == 'include_system_losses':
                q_expr -= m.qloss
            if idx == 'relax_balance':
                relaxed_balance = True
            if idx == 'linearize_shunts':
                if val == True:
                    q_expr -= sum( bus_bs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2) \
                                   for b in index_set if bus_bs_fixed_shunts[b] != 0.0)
                elif val == False:
                    q_expr -= sum( bus_bs_fixed_shunts[b] * vm_by_bus[b] ** 2 \
                                   for b in index_set if bus_bs_fixed_shunts[b] != 0.0)
                else:
                    raise Exception('linearize_shunts option is invalid.')
    if relaxed_balance:
        m.eq_q_balance = pe.Constraint(expr = q_expr >= 0.0)
    else:
        m.eq_q_balance = pe.Constraint(expr = q_expr == 0.0)
def declare_eq_ploss_sum_of_pfl(model, index_set):
    """
    Set total real power losses to the sum of per-branch losses m.pfl over
    index_set.

    m.ploss must already exist as either a Var (a scalar constraint
    eq_ploss is added) or an Expression (its body is assigned directly).
    """
    m=model
    ploss_is_var = isinstance(m.ploss, pe.Var)
    if ploss_is_var:
        m.eq_ploss = pe.Constraint()
    else:
        if not isinstance(m.ploss, pe.Expression):
            raise Exception("Unrecognized type for m.ploss", m.ploss.pprint())
    expr = sum(m.pfl[bn] for bn in index_set)
    if ploss_is_var:
        # assigning an expression to the declared scalar Constraint sets its
        # body (pyomo Block setattr -> set_value semantics)
        m.eq_ploss = m.ploss == expr
    else:
        m.ploss = expr
def declare_eq_qloss_sum_of_qfl(model, index_set):
    """
    Set total reactive power losses to the sum of per-branch losses m.qfl
    over index_set.

    m.qloss must already exist as either a Var (a scalar constraint
    eq_qloss is added) or an Expression (its body is assigned directly).
    """
    m=model
    qloss_is_var = isinstance(m.qloss, pe.Var)
    if qloss_is_var:
        m.eq_qloss = pe.Constraint()
    else:
        if not isinstance(m.qloss, pe.Expression):
            raise Exception("Unrecognized type for m.qloss", m.qloss.pprint())
    expr = sum(m.qfl[bn] for bn in index_set)
    if qloss_is_var:
        # assigning an expression to the declared scalar Constraint sets its
        # body (pyomo Block setattr -> set_value semantics)
        m.eq_qloss = m.qloss == expr
    else:
        m.qloss = expr
def declare_eq_ploss_ptdf_approx(model, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None, use_residuals=False):
    """
    Create the equality constraint or expression for total real power losses
    (from PTDF approximation).

    m.ploss must already exist as either a Var (a scalar constraint
    eq_ploss is added) or an Expression (assigned directly). Coefficient
    dropping tolerances default to 0, i.e. keep every loss factor.
    """
    m = model
    ploss_is_var = isinstance(m.ploss, pe.Var)
    if ploss_is_var:
        m.eq_ploss = pe.Constraint()
    else:
        if not isinstance(m.ploss, pe.Expression):
            raise Exception("Unrecognized type for m.ploss", m.ploss.pprint())
    if rel_ptdf_tol is None:
        rel_ptdf_tol = 0.
    if abs_ptdf_tol is None:
        abs_ptdf_tol = 0.
    expr = get_ploss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=abs_ptdf_tol, rel_ptdf_tol=rel_ptdf_tol, use_residuals=use_residuals)
    if ploss_is_var:
        # assigning an expression to the declared scalar Constraint sets its
        # body (pyomo Block setattr -> set_value semantics)
        m.eq_ploss = m.ploss == expr
    else:
        m.ploss = expr
def get_ploss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=None, rel_ptdf_tol=None, use_residuals=False):
    """
    Build the linear real-loss expression const + sum(lossfactor[b]*p_nw[b]),
    dropping coefficients below max(abs_ptdf_tol, rel_ptdf_tol*max|coef|).
    With use_residuals, the residual offset/factors are used and the pfl
    variables of monitored branches are added with unit coefficient.
    """
    if not use_residuals:
        const = PTDF.get_lossoffset()
        iterator = PTDF.get_lossfactor_iterator()
    else:
        const = PTDF.get_lossoffset_resid()
        iterator = PTDF.get_lossfactor_resid_iterator()
    max_coef = PTDF.get_lossfactor_abs_max()
    ptdf_tol = max(abs_ptdf_tol, rel_ptdf_tol*max_coef)
    m_p_nw = m.p_nw
    ## if model.p_nw is Var, we can use LinearExpression
    ## to build these dense constraints much faster
    coef_list = []
    var_list = []
    for bus_name, coef in iterator:
        # small coefficients are simply dropped here (contrast with
        # get_vm_expr_ptdf_approx, which folds them into the constant)
        if abs(coef) >= ptdf_tol:
            coef_list.append(coef)
            var_list.append(m_p_nw[bus_name])
    if use_residuals:
        for i in m._idx_monitored:
            # NOTE(review): uses branches_keys_masked here while the qloss
            # counterpart uses branches_keys — confirm the asymmetry is intended
            bn = PTDF.branches_keys_masked[i]
            coef_list.append(1)
            var_list.append(m.pfl[bn])
    if isinstance(m_p_nw, pe.Var):
        expr = LinearExpression(linear_vars=var_list, linear_coefs=coef_list, constant=const)
    else:
        expr = quicksum( (coef*var for coef, var in zip(coef_list, var_list)), start=const, linear=True)
    return expr
def declare_eq_qloss_ptdf_approx(model, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None, use_residuals=False):
    """
    Create the equality constraint or expression for total reactive power
    losses (from PTDF approximation).

    m.qloss must already exist as either a Var (a scalar constraint
    eq_qloss is added) or an Expression (assigned directly). Coefficient
    dropping tolerances default to 0, i.e. keep every loss factor.
    """
    m = model
    qloss_is_var = isinstance(m.qloss, pe.Var)
    if qloss_is_var:
        m.eq_qloss = pe.Constraint()
    else:
        if not isinstance(m.qloss, pe.Expression):
            raise Exception("Unrecognized type for m.qloss", m.qloss.pprint())
    if rel_ptdf_tol is None:
        rel_ptdf_tol = 0.
    if abs_ptdf_tol is None:
        abs_ptdf_tol = 0.
    expr = get_qloss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=abs_ptdf_tol, rel_ptdf_tol=rel_ptdf_tol, use_residuals=use_residuals)
    if qloss_is_var:
        # assigning an expression to the declared scalar Constraint sets its
        # body (pyomo Block setattr -> set_value semantics)
        m.eq_qloss = m.qloss == expr
    else:
        m.qloss = expr
def get_qloss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=None, rel_ptdf_tol=None, use_residuals=False):
    """
    Build the linear reactive-loss expression
    const + sum(qlossfactor[b]*q_nw[b]), dropping coefficients below
    max(abs_ptdf_tol, rel_ptdf_tol*max|coef|). With use_residuals, the
    residual offset/factors are used and the qfl variables of monitored
    branches are added with unit coefficient.
    """
    if not use_residuals:
        const = PTDF.get_qlossoffset()
        iterator = PTDF.get_qlossfactor_iterator()
    else:
        const = PTDF.get_qlossoffset_resid()
        iterator = PTDF.get_qlossfactor_resid_iterator()
    max_coef = PTDF.get_qlossfactor_abs_max()
    ptdf_tol = max(abs_ptdf_tol, rel_ptdf_tol*max_coef)
    m_q_nw = m.q_nw
    ## if model.q_nw is Var, we can use LinearExpression
    ## to build these dense constraints much faster
    coef_list = []
    var_list = []
    for bus_name, coef in iterator:
        # small coefficients are simply dropped here
        if abs(coef) >= ptdf_tol:
            coef_list.append(coef)
            var_list.append(m_q_nw[bus_name])
    if use_residuals:
        for i in m._idx_monitored:
            # NOTE(review): uses branches_keys here while the ploss
            # counterpart uses branches_keys_masked — confirm the asymmetry
            bn = PTDF.branches_keys[i]
            coef_list.append(1)
            var_list.append(m.qfl[bn])
    if isinstance(m_q_nw, pe.Var):
        expr = LinearExpression(linear_vars=var_list, linear_coefs=coef_list, constant=const)
    else:
        expr = quicksum( (coef*var for coef, var in zip(coef_list, var_list)), start=const, linear=True)
    return expr
def declare_eq_bus_vm_approx(model, index_set, PTDF=None, rel_ptdf_tol=None, abs_ptdf_tol=None):
    """
    Create the equality constraints or expressions for voltage magnitude
    (from PTDF approximation) at the bus.

    m.vm must already exist as either a Var (constraints eq_vm_bus are
    added) or an Expression (entries assigned directly). When PTDF is None
    only the (empty) constraint container is declared.
    """
    m = model
    con_set = decl.declare_set("_con_eq_bus_vm_approx_set", model, index_set)
    vm_is_var = isinstance(m.vm, pe.Var)
    if vm_is_var:
        m.eq_vm_bus = pe.Constraint(con_set)
    else:
        if not isinstance(m.vm, pe.Expression):
            raise Exception("Unrecognized type for m.vm", m.vm.pprint())
    if PTDF is None:
        return
    for bus_name in con_set:
        expr = \
            get_vm_expr_ptdf_approx(m, bus_name, PTDF, rel_ptdf_tol=rel_ptdf_tol, abs_ptdf_tol=abs_ptdf_tol)
        if vm_is_var:
            m.eq_vm_bus[bus_name] = \
                m.vm[bus_name] == expr
        else:
            m.vm[bus_name] = expr
def get_vm_expr_ptdf_approx(model, bus_name, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None):
    """
    Create a pyomo voltage-magnitude expression for bus_name from the
    voltage distribution factors: const + sum(vdf[b]*q_nw[b]).

    Coefficients below max(abs_ptdf_tol, rel_ptdf_tol*max|coef|) are not
    discarded outright; their contribution is folded into the constant
    using the current value of q_nw.
    """
    if rel_ptdf_tol is None:
        rel_ptdf_tol = 0.
    if abs_ptdf_tol is None:
        abs_ptdf_tol = 0.
    const = PTDF.get_bus_vdf_const(bus_name)
    max_coef = PTDF.get_bus_vdf_abs_max(bus_name)
    ptdf_tol = max(abs_ptdf_tol, rel_ptdf_tol*max_coef)
    ## NOTE: It would be easy to hold on to the 'ptdf' dictionary here, if we wanted to
    m_q_nw = model.q_nw
    qnw_is_var = isinstance(m_q_nw, pe.Var)
    ## if model.q_nw is Var, we can use LinearExpression
    ## to build these dense constraints much faster
    coef_list = []
    var_list = []
    for bn, coef in PTDF.get_bus_vdf_iterator(bus_name):
        if abs(coef) >= ptdf_tol:
            coef_list.append(coef)
            var_list.append(m_q_nw[bn])
        elif qnw_is_var:
            # fold the small term into the constant at the current var value
            const += coef * m_q_nw[bn].value
        else:
            # Expression entries: evaluate the stored expression instead
            const += coef * m_q_nw[bn].expr()
    if qnw_is_var:
        expr = LinearExpression(linear_vars=var_list, linear_coefs=coef_list, constant=const)
    else:
        expr = quicksum( (coef*var for coef, var in zip(coef_list, var_list)), start=const, linear=True)
    return expr
def declare_eq_p_balance_dc_approx(model, index_set,
                                   bus_p_loads,
                                   gens_by_bus,
                                   bus_gs_fixed_shunts,
                                   inlet_branches_by_bus, outlet_branches_by_bus,
                                   approximation_type=ApproximationType.BTHETA,
                                   dc_inlet_branches_by_bus=None,
                                   dc_outlet_branches_by_bus=None,
                                   **rhs_kwargs):
    """
    Create the equality constraints for the real power balance
    at a bus using the variables for real power flows, respectively.
    NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign

    rhs_kwargs values name model components; a tuple value (name, key)
    indexes the component with (key, bus_name) instead of bus_name.

    Fixes: the per-bus index `k` is now reset for every rhs_kwargs item
    (previously a tuple-valued kwarg left a stale (key, bus_name) index
    for subsequent items), and getattr replaces eval("m." + val).
    """
    m = model
    con_set = decl.declare_set('_con_eq_p_balance', model, index_set)
    m.eq_p_balance = pe.Constraint(con_set)
    for bus_name in con_set:
        if approximation_type == ApproximationType.BTHETA:
            p_expr = -sum(m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name])
            p_expr += sum(m.pf[branch_name] for branch_name in inlet_branches_by_bus[bus_name])
        elif approximation_type == ApproximationType.BTHETA_LOSSES:
            # half of each incident branch's losses is charged to this bus
            p_expr = -0.5*sum(m.pfl[branch_name] for branch_name in inlet_branches_by_bus[bus_name])
            p_expr -= 0.5*sum(m.pfl[branch_name] for branch_name in outlet_branches_by_bus[bus_name])
            p_expr -= sum(m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name])
            p_expr += sum(m.pf[branch_name] for branch_name in inlet_branches_by_bus[bus_name])
        if dc_inlet_branches_by_bus is not None:
            p_expr -= sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[bus_name])
            p_expr += sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[bus_name])
        if bus_gs_fixed_shunts[bus_name] != 0.0:
            p_expr -= bus_gs_fixed_shunts[bus_name]
        if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
            p_expr -= m.pl[bus_name]
        if rhs_kwargs:
            for idx, val in rhs_kwargs.items():
                # reset the component index for each kwarg; previously a
                # tuple-valued kwarg left a stale index for later items
                k = bus_name
                if isinstance(val, tuple):
                    val, key = val
                    k = (key, bus_name)
                # val names a model component; getattr replaces eval("m." + val)
                comp = getattr(m, val)
                if k not in comp.index_set():
                    continue
                if idx == 'include_feasibility_load_shed':
                    p_expr += comp[k]
                if idx == 'include_feasibility_over_generation':
                    p_expr -= comp[k]
        for gen_name in gens_by_bus[bus_name]:
            p_expr += m.pg[gen_name]
        m.eq_p_balance[bus_name] = \
            p_expr == 0.0
def declare_eq_p_balance(model, index_set,
                         bus_p_loads,
                         gens_by_bus,
                         bus_gs_fixed_shunts,
                         inlet_branches_by_bus, outlet_branches_by_bus,
                         **rhs_kwargs):
    """
    Create the equality constraints for the real power balance
    at a bus using the variables for real power flows, respectively.
    NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign

    rhs_kwargs values name model components indexed by bus_name.
    """
    m = model
    con_set = decl.declare_set('_con_eq_p_balance', model, index_set)
    m.eq_p_balance = pe.Constraint(con_set)
    for bus_name in con_set:
        p_expr = -sum([m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
        p_expr -= sum([m.pt[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
        if bus_gs_fixed_shunts[bus_name] != 0.0:
            # shunt consumption scales with the squared voltage magnitude
            vmsq = m.vmsq[bus_name]
            p_expr -= bus_gs_fixed_shunts[bus_name] * vmsq
        if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
            p_expr -= m.pl[bus_name]
        if rhs_kwargs:
            for idx, val in rhs_kwargs.items():
                # val names a model attribute; getattr replaces the previous
                # eval("m." + val), which would execute arbitrary code
                if idx == 'include_feasibility_load_shed':
                    p_expr += getattr(m, val)[bus_name]
                if idx == 'include_feasibility_over_generation':
                    p_expr -= getattr(m, val)[bus_name]
        for gen_name in gens_by_bus[bus_name]:
            p_expr += m.pg[gen_name]
        m.eq_p_balance[bus_name] = \
            p_expr == 0.0
def declare_eq_p_balance_with_i_aggregation(model, index_set,
                                            bus_p_loads,
                                            gens_by_bus,
                                            **rhs_kwargs):
    """
    Create the equality constraints for the real power balance
    at a bus using the variables for real power flows, respectively.
    NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign

    Uses P = Re(V * conj(I)) = -vr*ir - vj*ij for the withdrawn power,
    built from the aggregated bus current variables.
    """
    m = model
    con_set = decl.declare_set('_con_eq_p_balance', model, index_set)
    m.eq_p_balance = pe.Constraint(con_set)
    for bus_name in con_set:
        p_expr = -m.vr[bus_name] * m.ir_aggregation_at_bus[bus_name] + \
                 -m.vj[bus_name] * m.ij_aggregation_at_bus[bus_name]
        if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
            p_expr -= m.pl[bus_name]
        if rhs_kwargs:
            for idx, val in rhs_kwargs.items():
                # val names a model attribute; getattr replaces the previous
                # eval("m." + val), which would execute arbitrary code
                if idx == 'include_feasibility_load_shed':
                    p_expr += getattr(m, val)[bus_name]
                if idx == 'include_feasibility_over_generation':
                    p_expr -= getattr(m, val)[bus_name]
        for gen_name in gens_by_bus[bus_name]:
            p_expr += m.pg[gen_name]
        m.eq_p_balance[bus_name] = \
            p_expr == 0.0
def declare_eq_q_balance(model, index_set,
                         bus_q_loads,
                         gens_by_bus,
                         bus_bs_fixed_shunts,
                         inlet_branches_by_bus, outlet_branches_by_bus,
                         **rhs_kwargs):
    """
    Create the equality constraints for the reactive power balance
    at a bus using the variables for reactive power flows, respectively.
    NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign

    rhs_kwargs values name model components indexed by bus_name.
    """
    m = model
    con_set = decl.declare_set('_con_eq_q_balance', model, index_set)
    m.eq_q_balance = pe.Constraint(con_set)
    for bus_name in con_set:
        q_expr = -sum([m.qf[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
        q_expr -= sum([m.qt[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
        if bus_bs_fixed_shunts[bus_name] != 0.0:
            # shunt susceptance injects reactive power proportional to vmsq
            vmsq = m.vmsq[bus_name]
            q_expr += bus_bs_fixed_shunts[bus_name] * vmsq
        if bus_q_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
            q_expr -= m.ql[bus_name]
        if rhs_kwargs:
            for idx, val in rhs_kwargs.items():
                # val names a model attribute; getattr replaces the previous
                # eval("m." + val), which would execute arbitrary code
                if idx == 'include_feasibility_load_shed':
                    q_expr += getattr(m, val)[bus_name]
                if idx == 'include_feasibility_over_generation':
                    q_expr -= getattr(m, val)[bus_name]
        for gen_name in gens_by_bus[bus_name]:
            q_expr += m.qg[gen_name]
        m.eq_q_balance[bus_name] = \
            q_expr == 0.0
def declare_eq_q_balance_with_i_aggregation(model, index_set,
                                            bus_q_loads,
                                            gens_by_bus,
                                            **rhs_kwargs):
    """
    Create the equality constraints for the reactive power balance
    at a bus using the variables for reactive power flows, respectively.

    NOTE: Equation build orientates constants to the RHS in order to compute
    the correct dual variable sign

    Parameters
    ----------
    model : pyomo model with vr/vj, current-aggregation, ql, and qg components
    index_set : iterable of bus names to build constraints over
    bus_q_loads : dict mapping bus name -> fixed reactive load (MVar)
    gens_by_bus : dict mapping bus name -> list of generator names
    rhs_kwargs : optional feasibility-slack keywords; values name model
        attributes indexed by bus (e.g. 'include_feasibility_load_shed')
    """
    m = model
    con_set = decl.declare_set('_con_eq_q_balance', model, index_set)
    m.eq_q_balance = pe.Constraint(con_set)

    for bus_name in con_set:
        # Reactive power drawn at the bus in rectangular coordinates:
        # Q = vr * I_j_agg - vj * I_r_agg (negated net injection convention)
        q_expr = m.vr[bus_name] * m.ij_aggregation_at_bus[bus_name] + \
                 -m.vj[bus_name] * m.ir_aggregation_at_bus[bus_name]

        if bus_q_loads[bus_name] != 0.0:  # only applies to fixed loads, otherwise may cause an error
            q_expr -= m.ql[bus_name]

        if rhs_kwargs:
            for idx, val in rhs_kwargs.items():
                # getattr replaces the original eval("m." + val): same attribute
                # lookup, but safe against arbitrary-code injection and faster.
                if idx == 'include_feasibility_load_shed':
                    q_expr += getattr(m, val)[bus_name]
                if idx == 'include_feasibility_over_generation':
                    q_expr -= getattr(m, val)[bus_name]

        for gen_name in gens_by_bus[bus_name]:
            q_expr += m.qg[gen_name]

        m.eq_q_balance[bus_name] = q_expr == 0.0
def declare_ineq_vm_bus_lbub(model, index_set, buses, coordinate_type=CoordinateType.POLAR):
    """
    Create the inequalities for the voltage magnitudes from the
    voltage variables
    """
    m = model
    con_set = decl.declare_set('_con_ineq_vm_bus_lbub',
                               model=model, index_set=index_set)
    m.ineq_vm_bus_lb = pe.Constraint(con_set)
    m.ineq_vm_bus_ub = pe.Constraint(con_set)

    if coordinate_type == CoordinateType.POLAR:
        # Bounds apply directly to the magnitude variable vm.
        for bus_name in con_set:
            bus_data = buses[bus_name]
            m.ineq_vm_bus_lb[bus_name] = bus_data['v_min'] <= m.vm[bus_name]
            m.ineq_vm_bus_ub[bus_name] = m.vm[bus_name] <= bus_data['v_max']
    elif coordinate_type == CoordinateType.RECTANGULAR:
        # Bounds apply to the squared magnitude vr^2 + vj^2, so the
        # limits themselves are squared to keep the constraint equivalent.
        for bus_name in con_set:
            bus_data = buses[bus_name]
            vmsq_expr = m.vr[bus_name] ** 2 + m.vj[bus_name] ** 2
            m.ineq_vm_bus_lb[bus_name] = bus_data['v_min'] ** 2 <= vmsq_expr
            m.ineq_vm_bus_ub[bus_name] = vmsq_expr <= bus_data['v_max'] ** 2
| 40.630225
| 129
| 0.605387
| 5,597
| 37,908
| 3.756119
| 0.054315
| 0.047947
| 0.037721
| 0.026542
| 0.88798
| 0.866337
| 0.836322
| 0.80783
| 0.762165
| 0.716739
| 0
| 0.006149
| 0.300754
| 37,908
| 932
| 130
| 40.67382
| 0.786962
| 0.130817
| 0
| 0.62963
| 0
| 0
| 0.044944
| 0.024436
| 0
| 0
| 0
| 0
| 0.00161
| 1
| 0.061192
| false
| 0.003221
| 0.009662
| 0
| 0.091787
| 0.008052
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.