body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
f8104243f65d723fcb2f5decaa35d09297703269cfd4422ba3dc5021c7c41d92
def get_class_labels(y): "\n Get the class labels\n :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...]\n :return: sorted unique class labels\n " return numpy.unique(y)
Get the class labels :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...] :return: sorted unique class labels
kutilities/helpers/data_preparation.py
get_class_labels
grenwi/keras-utilities
30
python
def get_class_labels(y): "\n Get the class labels\n :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...]\n :return: sorted unique class labels\n " return numpy.unique(y)
def get_class_labels(y): "\n Get the class labels\n :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...]\n :return: sorted unique class labels\n " return numpy.unique(y)<|docstring|>Get the class labels :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...] :return: sorted unique class labels<|endoftext|>
75ce73e1d9c43b8c1b27689e0b659e0a30455ccdd02963bedfb556b955fa11cb
def labels_to_categories(y): "\n Labels to categories\n :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...]\n :return: list of categories, ex. [0, 2, 1, 2, 0, ...]\n " encoder = LabelEncoder() encoder.fit(y) y_num = encoder.transform(y) return y_num
Labels to categories :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...] :return: list of categories, ex. [0, 2, 1, 2, 0, ...]
kutilities/helpers/data_preparation.py
labels_to_categories
grenwi/keras-utilities
30
python
def labels_to_categories(y): "\n Labels to categories\n :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...]\n :return: list of categories, ex. [0, 2, 1, 2, 0, ...]\n " encoder = LabelEncoder() encoder.fit(y) y_num = encoder.transform(y) return y_num
def labels_to_categories(y): "\n Labels to categories\n :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...]\n :return: list of categories, ex. [0, 2, 1, 2, 0, ...]\n " encoder = LabelEncoder() encoder.fit(y) y_num = encoder.transform(y) return y_num<|docstring|>Labels to categories :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...] :return: list of categories, ex. [0, 2, 1, 2, 0, ...]<|endoftext|>
05e3a1184fd7a3e5e55655625e949620780af273b1a8892b3a39d397a5725ec5
def get_labels_to_categories_map(y): "\n Get the mapping of class labels to numerical categories\n :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...]\n :return: dictionary with the mapping\n " labels = get_class_labels(y) return {l: i for (i, l) in enumerate(labels)}
Get the mapping of class labels to numerical categories :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...] :return: dictionary with the mapping
kutilities/helpers/data_preparation.py
get_labels_to_categories_map
grenwi/keras-utilities
30
python
def get_labels_to_categories_map(y): "\n Get the mapping of class labels to numerical categories\n :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...]\n :return: dictionary with the mapping\n " labels = get_class_labels(y) return {l: i for (i, l) in enumerate(labels)}
def get_labels_to_categories_map(y): "\n Get the mapping of class labels to numerical categories\n :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...]\n :return: dictionary with the mapping\n " labels = get_class_labels(y) return {l: i for (i, l) in enumerate(labels)}<|docstring|>Get the mapping of class labels to numerical categories :param y: list of labels, ex. ['positive', 'negative', 'positive', 'neutral', 'positive', ...] :return: dictionary with the mapping<|endoftext|>
5937d27aa7ec3c75cccec58e1b0edcb34352787794787c7d9f66b1b0458b5a3f
def categories_to_onehot(y): '\n Transform categorical labels to one-hot vectors\n :param y: list of categories, ex. [0, 2, 1, 2, 0, ...]\n :return: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...]\n ' return np_utils.to_categorical(y)
Transform categorical labels to one-hot vectors :param y: list of categories, ex. [0, 2, 1, 2, 0, ...] :return: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...]
kutilities/helpers/data_preparation.py
categories_to_onehot
grenwi/keras-utilities
30
python
def categories_to_onehot(y): '\n Transform categorical labels to one-hot vectors\n :param y: list of categories, ex. [0, 2, 1, 2, 0, ...]\n :return: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...]\n ' return np_utils.to_categorical(y)
def categories_to_onehot(y): '\n Transform categorical labels to one-hot vectors\n :param y: list of categories, ex. [0, 2, 1, 2, 0, ...]\n :return: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...]\n ' return np_utils.to_categorical(y)<|docstring|>Transform categorical labels to one-hot vectors :param y: list of categories, ex. [0, 2, 1, 2, 0, ...] :return: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...]<|endoftext|>
30c562e35acf39b02c781a45196e811b08df8e2acb5e43aa48129b06b583ae62
def onehot_to_categories(y): '\n Transform categorical labels to one-hot vectors\n :param y: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...]\n :return: list of categories, ex. [0, 2, 1, 2, 0, ...]\n ' return numpy.asarray(y).argmax(axis=(- 1))
Transform categorical labels to one-hot vectors :param y: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...] :return: list of categories, ex. [0, 2, 1, 2, 0, ...]
kutilities/helpers/data_preparation.py
onehot_to_categories
grenwi/keras-utilities
30
python
def onehot_to_categories(y): '\n Transform categorical labels to one-hot vectors\n :param y: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...]\n :return: list of categories, ex. [0, 2, 1, 2, 0, ...]\n ' return numpy.asarray(y).argmax(axis=(- 1))
def onehot_to_categories(y): '\n Transform categorical labels to one-hot vectors\n :param y: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...]\n :return: list of categories, ex. [0, 2, 1, 2, 0, ...]\n ' return numpy.asarray(y).argmax(axis=(- 1))<|docstring|>Transform categorical labels to one-hot vectors :param y: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...] :return: list of categories, ex. [0, 2, 1, 2, 0, ...]<|endoftext|>
f33c706c1688440c8e4224ab4a24d314cea4c8530262161a61bbf45dfab14725
def get_class_weights(y): '\n Returns the normalized weights for each class based on the frequencies of the samples\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n ' weights = compute_class_weight('balanced', numpy.unique(y), y) d = {c: w for (c, w) in zip(numpy.unique(y), weights)} return d
Returns the normalized weights for each class based on the frequencies of the samples :param y: list of true labels (the labels must be hashable) :return: dictionary with the weight for each class
kutilities/helpers/data_preparation.py
get_class_weights
grenwi/keras-utilities
30
python
def get_class_weights(y): '\n Returns the normalized weights for each class based on the frequencies of the samples\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n ' weights = compute_class_weight('balanced', numpy.unique(y), y) d = {c: w for (c, w) in zip(numpy.unique(y), weights)} return d
def get_class_weights(y): '\n Returns the normalized weights for each class based on the frequencies of the samples\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n ' weights = compute_class_weight('balanced', numpy.unique(y), y) d = {c: w for (c, w) in zip(numpy.unique(y), weights)} return d<|docstring|>Returns the normalized weights for each class based on the frequencies of the samples :param y: list of true labels (the labels must be hashable) :return: dictionary with the weight for each class<|endoftext|>
ef421ae4277d08dd0c8dfc5e21ef03c7b4bcbd6482dc431b4f23d8314837637a
def get_class_weights2(y, smooth_factor=0): '\n Returns the normalized weights for each class based on the frequencies of the samples\n :param smooth_factor: factor that smooths extremely uneven weights\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n ' counter = Counter(y) if (smooth_factor > 0): p = (max(counter.values()) * smooth_factor) for k in counter.keys(): counter[k] += p majority = max(counter.values()) return {cls: float((majority / count)) for (cls, count) in counter.items()}
Returns the normalized weights for each class based on the frequencies of the samples :param smooth_factor: factor that smooths extremely uneven weights :param y: list of true labels (the labels must be hashable) :return: dictionary with the weight for each class
kutilities/helpers/data_preparation.py
get_class_weights2
grenwi/keras-utilities
30
python
def get_class_weights2(y, smooth_factor=0): '\n Returns the normalized weights for each class based on the frequencies of the samples\n :param smooth_factor: factor that smooths extremely uneven weights\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n ' counter = Counter(y) if (smooth_factor > 0): p = (max(counter.values()) * smooth_factor) for k in counter.keys(): counter[k] += p majority = max(counter.values()) return {cls: float((majority / count)) for (cls, count) in counter.items()}
def get_class_weights2(y, smooth_factor=0): '\n Returns the normalized weights for each class based on the frequencies of the samples\n :param smooth_factor: factor that smooths extremely uneven weights\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n ' counter = Counter(y) if (smooth_factor > 0): p = (max(counter.values()) * smooth_factor) for k in counter.keys(): counter[k] += p majority = max(counter.values()) return {cls: float((majority / count)) for (cls, count) in counter.items()}<|docstring|>Returns the normalized weights for each class based on the frequencies of the samples :param smooth_factor: factor that smooths extremely uneven weights :param y: list of true labels (the labels must be hashable) :return: dictionary with the weight for each class<|endoftext|>
84c4146f78f0e265d407d382d7371f8892fde5aba488426433995df254c12131
def print_dataset_statistics(y): '\n Returns the normalized weights for each class based on the frequencies of the samples\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n ' counter = Counter(y) print('Total:', len(y)) statistics = {c: (str(counter[c]) + (' (%.2f%%)' % ((counter[c] / float(len(y))) * 100.0))) for c in sorted(counter.keys())} print(statistics)
Returns the normalized weights for each class based on the frequencies of the samples :param y: list of true labels (the labels must be hashable) :return: dictionary with the weight for each class
kutilities/helpers/data_preparation.py
print_dataset_statistics
grenwi/keras-utilities
30
python
def print_dataset_statistics(y): '\n Returns the normalized weights for each class based on the frequencies of the samples\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n ' counter = Counter(y) print('Total:', len(y)) statistics = {c: (str(counter[c]) + (' (%.2f%%)' % ((counter[c] / float(len(y))) * 100.0))) for c in sorted(counter.keys())} print(statistics)
def print_dataset_statistics(y): '\n Returns the normalized weights for each class based on the frequencies of the samples\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n ' counter = Counter(y) print('Total:', len(y)) statistics = {c: (str(counter[c]) + (' (%.2f%%)' % ((counter[c] / float(len(y))) * 100.0))) for c in sorted(counter.keys())} print(statistics)<|docstring|>Returns the normalized weights for each class based on the frequencies of the samples :param y: list of true labels (the labels must be hashable) :return: dictionary with the weight for each class<|endoftext|>
674b83d89eac0a677b273b0aec73ca335e660179c32dd8ee6644a4640fa5b40b
def hydrogen_chloride_force_field(library_charge: bool, charge_increment: bool) -> ForceField: 'Returns a SMIRNOFF force field which is able to parameterize hydrogen chloride.' force_field = ForceField() vdw_handler = vdWHandler(version=0.3) vdw_handler.method = 'cutoff' vdw_handler.cutoff = (6.0 * simtk_unit.angstrom) vdw_handler.scale14 = 1.0 vdw_handler.add_parameter({'smirks': '[#1:1]', 'epsilon': (0.0 * simtk_unit.kilojoules_per_mole), 'sigma': (1.0 * simtk_unit.angstrom)}) vdw_handler.add_parameter({'smirks': '[#17:1]', 'epsilon': (2.0 * simtk_unit.kilojoules_per_mole), 'sigma': (2.0 * simtk_unit.angstrom)}) force_field.register_parameter_handler(vdw_handler) electrostatics_handler = ElectrostaticsHandler(version=0.3) electrostatics_handler.cutoff = (6.0 * simtk_unit.angstrom) electrostatics_handler.method = 'PME' force_field.register_parameter_handler(electrostatics_handler) if library_charge: library_charge_handler = LibraryChargeHandler(version=0.3) library_charge_handler.add_parameter(parameter_kwargs={'smirks': '[#1:1]', 'charge1': (1.0 * simtk_unit.elementary_charge)}) library_charge_handler.add_parameter(parameter_kwargs={'smirks': '[#17:1]', 'charge1': ((- 1.0) * simtk_unit.elementary_charge)}) force_field.register_parameter_handler(library_charge_handler) if charge_increment: charge_increment_handler = ChargeIncrementModelHandler(version=0.3) charge_increment_handler.add_parameter(parameter_kwargs={'smirks': '[#1:1]-[#17:2]', 'charge_increment1': ((- 1.0) * simtk_unit.elementary_charge), 'charge_increment2': (1.0 * simtk_unit.elementary_charge)}) force_field.register_parameter_handler(charge_increment_handler) return force_field
Returns a SMIRNOFF force field which is able to parameterize hydrogen chloride.
openff/evaluator/tests/test_utils/test_openmm.py
hydrogen_chloride_force_field
pavankum/openff-evaluator
0
python
def hydrogen_chloride_force_field(library_charge: bool, charge_increment: bool) -> ForceField: force_field = ForceField() vdw_handler = vdWHandler(version=0.3) vdw_handler.method = 'cutoff' vdw_handler.cutoff = (6.0 * simtk_unit.angstrom) vdw_handler.scale14 = 1.0 vdw_handler.add_parameter({'smirks': '[#1:1]', 'epsilon': (0.0 * simtk_unit.kilojoules_per_mole), 'sigma': (1.0 * simtk_unit.angstrom)}) vdw_handler.add_parameter({'smirks': '[#17:1]', 'epsilon': (2.0 * simtk_unit.kilojoules_per_mole), 'sigma': (2.0 * simtk_unit.angstrom)}) force_field.register_parameter_handler(vdw_handler) electrostatics_handler = ElectrostaticsHandler(version=0.3) electrostatics_handler.cutoff = (6.0 * simtk_unit.angstrom) electrostatics_handler.method = 'PME' force_field.register_parameter_handler(electrostatics_handler) if library_charge: library_charge_handler = LibraryChargeHandler(version=0.3) library_charge_handler.add_parameter(parameter_kwargs={'smirks': '[#1:1]', 'charge1': (1.0 * simtk_unit.elementary_charge)}) library_charge_handler.add_parameter(parameter_kwargs={'smirks': '[#17:1]', 'charge1': ((- 1.0) * simtk_unit.elementary_charge)}) force_field.register_parameter_handler(library_charge_handler) if charge_increment: charge_increment_handler = ChargeIncrementModelHandler(version=0.3) charge_increment_handler.add_parameter(parameter_kwargs={'smirks': '[#1:1]-[#17:2]', 'charge_increment1': ((- 1.0) * simtk_unit.elementary_charge), 'charge_increment2': (1.0 * simtk_unit.elementary_charge)}) force_field.register_parameter_handler(charge_increment_handler) return force_field
def hydrogen_chloride_force_field(library_charge: bool, charge_increment: bool) -> ForceField: force_field = ForceField() vdw_handler = vdWHandler(version=0.3) vdw_handler.method = 'cutoff' vdw_handler.cutoff = (6.0 * simtk_unit.angstrom) vdw_handler.scale14 = 1.0 vdw_handler.add_parameter({'smirks': '[#1:1]', 'epsilon': (0.0 * simtk_unit.kilojoules_per_mole), 'sigma': (1.0 * simtk_unit.angstrom)}) vdw_handler.add_parameter({'smirks': '[#17:1]', 'epsilon': (2.0 * simtk_unit.kilojoules_per_mole), 'sigma': (2.0 * simtk_unit.angstrom)}) force_field.register_parameter_handler(vdw_handler) electrostatics_handler = ElectrostaticsHandler(version=0.3) electrostatics_handler.cutoff = (6.0 * simtk_unit.angstrom) electrostatics_handler.method = 'PME' force_field.register_parameter_handler(electrostatics_handler) if library_charge: library_charge_handler = LibraryChargeHandler(version=0.3) library_charge_handler.add_parameter(parameter_kwargs={'smirks': '[#1:1]', 'charge1': (1.0 * simtk_unit.elementary_charge)}) library_charge_handler.add_parameter(parameter_kwargs={'smirks': '[#17:1]', 'charge1': ((- 1.0) * simtk_unit.elementary_charge)}) force_field.register_parameter_handler(library_charge_handler) if charge_increment: charge_increment_handler = ChargeIncrementModelHandler(version=0.3) charge_increment_handler.add_parameter(parameter_kwargs={'smirks': '[#1:1]-[#17:2]', 'charge_increment1': ((- 1.0) * simtk_unit.elementary_charge), 'charge_increment2': (1.0 * simtk_unit.elementary_charge)}) force_field.register_parameter_handler(charge_increment_handler) return force_field<|docstring|>Returns a SMIRNOFF force field which is able to parameterize hydrogen chloride.<|endoftext|>
387f90fd0e6270d2bf4f5cdedb3c959b75b139ef8b962a5a751f02daaf7be0a1
@pytest.fixture def rootdir(): 'The directory in which to search for testroots.\n\n This is used by any test using the pytest.mark.sphinx decorator. For a\n `testroot` specified in the decorator, the rootdir will be\n "./test_extension/roots/test-<testroot>".\n\n The rootdir must contain a conf.py file. All of the rootdir\'s content will\n be copied to a temporary folder, and the Sphinx builder will be invoked\n inside that folder.\n ' return sphinx_path(str((Path(__file__).with_suffix('') / 'roots')))
The directory in which to search for testroots. This is used by any test using the pytest.mark.sphinx decorator. For a `testroot` specified in the decorator, the rootdir will be "./test_extension/roots/test-<testroot>". The rootdir must contain a conf.py file. All of the rootdir's content will be copied to a temporary folder, and the Sphinx builder will be invoked inside that folder.
tests/test_extension.py
rootdir
klauer/doctr_versions_menu
0
python
@pytest.fixture def rootdir(): 'The directory in which to search for testroots.\n\n This is used by any test using the pytest.mark.sphinx decorator. For a\n `testroot` specified in the decorator, the rootdir will be\n "./test_extension/roots/test-<testroot>".\n\n The rootdir must contain a conf.py file. All of the rootdir\'s content will\n be copied to a temporary folder, and the Sphinx builder will be invoked\n inside that folder.\n ' return sphinx_path(str((Path(__file__).with_suffix() / 'roots')))
@pytest.fixture def rootdir(): 'The directory in which to search for testroots.\n\n This is used by any test using the pytest.mark.sphinx decorator. For a\n `testroot` specified in the decorator, the rootdir will be\n "./test_extension/roots/test-<testroot>".\n\n The rootdir must contain a conf.py file. All of the rootdir\'s content will\n be copied to a temporary folder, and the Sphinx builder will be invoked\n inside that folder.\n ' return sphinx_path(str((Path(__file__).with_suffix() / 'roots')))<|docstring|>The directory in which to search for testroots. This is used by any test using the pytest.mark.sphinx decorator. For a `testroot` specified in the decorator, the rootdir will be "./test_extension/roots/test-<testroot>". The rootdir must contain a conf.py file. All of the rootdir's content will be copied to a temporary folder, and the Sphinx builder will be invoked inside that folder.<|endoftext|>
9d511fc4b5fc54578e37373d99130eca3dfd0701bd464864e3743b5a1773e814
@pytest.mark.sphinx('html', testroot='basic') def test_basic(app, status, warning): 'Test building documentation with the doctr_versions_menu extension.\n\n This tests the default configuration in ./test_extension/roots/test-basic/\n ' app.build() _build = Path(app.outdir) assert (_build / 'index.html').is_file() assert ((_build / '_static') / 'doctr-versions-menu.js').is_file() assert ((_build / '_static') / 'badge_only.css').is_file() html = (_build / 'index.html').read_text() assert ('src="_static/doctr-versions-menu.js"' in html)
Test building documentation with the doctr_versions_menu extension. This tests the default configuration in ./test_extension/roots/test-basic/
tests/test_extension.py
test_basic
klauer/doctr_versions_menu
0
python
@pytest.mark.sphinx('html', testroot='basic') def test_basic(app, status, warning): 'Test building documentation with the doctr_versions_menu extension.\n\n This tests the default configuration in ./test_extension/roots/test-basic/\n ' app.build() _build = Path(app.outdir) assert (_build / 'index.html').is_file() assert ((_build / '_static') / 'doctr-versions-menu.js').is_file() assert ((_build / '_static') / 'badge_only.css').is_file() html = (_build / 'index.html').read_text() assert ('src="_static/doctr-versions-menu.js"' in html)
@pytest.mark.sphinx('html', testroot='basic') def test_basic(app, status, warning): 'Test building documentation with the doctr_versions_menu extension.\n\n This tests the default configuration in ./test_extension/roots/test-basic/\n ' app.build() _build = Path(app.outdir) assert (_build / 'index.html').is_file() assert ((_build / '_static') / 'doctr-versions-menu.js').is_file() assert ((_build / '_static') / 'badge_only.css').is_file() html = (_build / 'index.html').read_text() assert ('src="_static/doctr-versions-menu.js"' in html)<|docstring|>Test building documentation with the doctr_versions_menu extension. This tests the default configuration in ./test_extension/roots/test-basic/<|endoftext|>
be1a7e04455cc8c826061df868ba78115aecf5c762cb7f55f7e8dde6175424fc
@pytest.mark.sphinx('html', testroot='rtdtheme') def test_rtdtheme(app, status, warning): 'Test building documentation with the doctr_versions_menu extension.\n\n This tests a configuration using the RTD theme, in\n ./test_extension/roots/test-rtdtheme/\n ' app.build() _build = Path(app.outdir) assert (_build / 'index.html').is_file() assert ((_build / '_static') / 'doctr-versions-menu.js').is_file() assert (not ((_build / '_static') / 'badge_only.css').is_file()) html = (_build / 'index.html').read_text() assert ('src="_static/doctr-versions-menu.js"' in html)
Test building documentation with the doctr_versions_menu extension. This tests a configuration using the RTD theme, in ./test_extension/roots/test-rtdtheme/
tests/test_extension.py
test_rtdtheme
klauer/doctr_versions_menu
0
python
@pytest.mark.sphinx('html', testroot='rtdtheme') def test_rtdtheme(app, status, warning): 'Test building documentation with the doctr_versions_menu extension.\n\n This tests a configuration using the RTD theme, in\n ./test_extension/roots/test-rtdtheme/\n ' app.build() _build = Path(app.outdir) assert (_build / 'index.html').is_file() assert ((_build / '_static') / 'doctr-versions-menu.js').is_file() assert (not ((_build / '_static') / 'badge_only.css').is_file()) html = (_build / 'index.html').read_text() assert ('src="_static/doctr-versions-menu.js"' in html)
@pytest.mark.sphinx('html', testroot='rtdtheme') def test_rtdtheme(app, status, warning): 'Test building documentation with the doctr_versions_menu extension.\n\n This tests a configuration using the RTD theme, in\n ./test_extension/roots/test-rtdtheme/\n ' app.build() _build = Path(app.outdir) assert (_build / 'index.html').is_file() assert ((_build / '_static') / 'doctr-versions-menu.js').is_file() assert (not ((_build / '_static') / 'badge_only.css').is_file()) html = (_build / 'index.html').read_text() assert ('src="_static/doctr-versions-menu.js"' in html)<|docstring|>Test building documentation with the doctr_versions_menu extension. This tests a configuration using the RTD theme, in ./test_extension/roots/test-rtdtheme/<|endoftext|>
82bfe5c1e0667abe699dae138dbe75519e31dd30259b79616552b224c6ec2e6b
@pytest.mark.sphinx('html', testroot='custom') def test_custom(app, status, warning): 'Test building documentation with the doctr_versions_menu extension.\n\n This tests a configuration with full customization (custom template for the\n JS file, and a custom doctr_versions_menu_conf dict in conf.py;\n ./test_extension/roots/test-custom/\n ' app.build() _build = Path(app.outdir) assert (_build / 'index.html').is_file() assert ((_build / '_static') / 'doctr-versions-menu.js').is_file() assert (not ((_build / '_static') / 'badge_only.css').is_file()) html = (_build / 'index.html').read_text() assert ('src="_static/doctr-versions-menu.js"' in html) js = ((_build / '_static') / 'doctr-versions-menu.js').read_text() assert ("var my_var = 'custom variable';" in js) assert ('var current_folder = getGhPagesCurrentFolder();' in js) assert ("var github_project_url = 'https://github.com/goerz/doctr_versions_menu';" in js) assert ('var json_file = "/" + window.location.pathname.split("/")[1] + "/versions.json";' in js)
Test building documentation with the doctr_versions_menu extension. This tests a configuration with full customization (custom template for the JS file, and a custom doctr_versions_menu_conf dict in conf.py; ./test_extension/roots/test-custom/
tests/test_extension.py
test_custom
klauer/doctr_versions_menu
0
python
@pytest.mark.sphinx('html', testroot='custom') def test_custom(app, status, warning): 'Test building documentation with the doctr_versions_menu extension.\n\n This tests a configuration with full customization (custom template for the\n JS file, and a custom doctr_versions_menu_conf dict in conf.py;\n ./test_extension/roots/test-custom/\n ' app.build() _build = Path(app.outdir) assert (_build / 'index.html').is_file() assert ((_build / '_static') / 'doctr-versions-menu.js').is_file() assert (not ((_build / '_static') / 'badge_only.css').is_file()) html = (_build / 'index.html').read_text() assert ('src="_static/doctr-versions-menu.js"' in html) js = ((_build / '_static') / 'doctr-versions-menu.js').read_text() assert ("var my_var = 'custom variable';" in js) assert ('var current_folder = getGhPagesCurrentFolder();' in js) assert ("var github_project_url = 'https://github.com/goerz/doctr_versions_menu';" in js) assert ('var json_file = "/" + window.location.pathname.split("/")[1] + "/versions.json";' in js)
@pytest.mark.sphinx('html', testroot='custom') def test_custom(app, status, warning): 'Test building documentation with the doctr_versions_menu extension.\n\n This tests a configuration with full customization (custom template for the\n JS file, and a custom doctr_versions_menu_conf dict in conf.py;\n ./test_extension/roots/test-custom/\n ' app.build() _build = Path(app.outdir) assert (_build / 'index.html').is_file() assert ((_build / '_static') / 'doctr-versions-menu.js').is_file() assert (not ((_build / '_static') / 'badge_only.css').is_file()) html = (_build / 'index.html').read_text() assert ('src="_static/doctr-versions-menu.js"' in html) js = ((_build / '_static') / 'doctr-versions-menu.js').read_text() assert ("var my_var = 'custom variable';" in js) assert ('var current_folder = getGhPagesCurrentFolder();' in js) assert ("var github_project_url = 'https://github.com/goerz/doctr_versions_menu';" in js) assert ('var json_file = "/" + window.location.pathname.split("/")[1] + "/versions.json";' in js)<|docstring|>Test building documentation with the doctr_versions_menu extension. This tests a configuration with full customization (custom template for the JS file, and a custom doctr_versions_menu_conf dict in conf.py; ./test_extension/roots/test-custom/<|endoftext|>
118fb6644ed69c666e652684c7ce2f7361f7a1db2b3a5bc26445297a59819acc
@pytest.mark.usefixtures('widget_data_common', 'widget_data1') def test_ok(self): '\n 正常系\n ' cls_widget = dashboard.WidgetData() data = cls_widget.pie_graph_date_match_data(3, **{'language': 'JA', 'req_rule_ids': [999], 'count': 3}) assert (len(data['data']) == 4) assert (('[pytest]<br>pytest:aaa' in data['data']) and (data['data']['[pytest]<br>pytest:aaa'][0] == 'known1')) assert (('[pytest]<br>pytest:bbb' in data['data']) and (data['data']['[pytest]<br>pytest:bbb'][0] == 'known2')) assert (('[pytest]<br>pytest:ccc' in data['data']) and (data['data']['[pytest]<br>pytest:ccc'][0] == 'known3')) assert (('その他' in data['data']) and (data['data']['その他'][0] == 'known6'))
正常系
oase-root/tests/web_app/views/top/test_dashboard.py
test_ok
Masa-Yasuno/oase
9
python
@pytest.mark.usefixtures('widget_data_common', 'widget_data1') def test_ok(self): '\n \n ' cls_widget = dashboard.WidgetData() data = cls_widget.pie_graph_date_match_data(3, **{'language': 'JA', 'req_rule_ids': [999], 'count': 3}) assert (len(data['data']) == 4) assert (('[pytest]<br>pytest:aaa' in data['data']) and (data['data']['[pytest]<br>pytest:aaa'][0] == 'known1')) assert (('[pytest]<br>pytest:bbb' in data['data']) and (data['data']['[pytest]<br>pytest:bbb'][0] == 'known2')) assert (('[pytest]<br>pytest:ccc' in data['data']) and (data['data']['[pytest]<br>pytest:ccc'][0] == 'known3')) assert (('その他' in data['data']) and (data['data']['その他'][0] == 'known6'))
@pytest.mark.usefixtures('widget_data_common', 'widget_data1') def test_ok(self): '\n \n ' cls_widget = dashboard.WidgetData() data = cls_widget.pie_graph_date_match_data(3, **{'language': 'JA', 'req_rule_ids': [999], 'count': 3}) assert (len(data['data']) == 4) assert (('[pytest]<br>pytest:aaa' in data['data']) and (data['data']['[pytest]<br>pytest:aaa'][0] == 'known1')) assert (('[pytest]<br>pytest:bbb' in data['data']) and (data['data']['[pytest]<br>pytest:bbb'][0] == 'known2')) assert (('[pytest]<br>pytest:ccc' in data['data']) and (data['data']['[pytest]<br>pytest:ccc'][0] == 'known3')) assert (('その他' in data['data']) and (data['data']['その他'][0] == 'known6'))<|docstring|>正常系<|endoftext|>
34423518e208474ab8edfdac0dcc7d32445a1e2e9524c77b6466429a869d22b2
@pytest.mark.usefixtures('widget_data_common', 'widget_data2') def test_ok(self): '\n 正常系\n ' cls_widget = dashboard.WidgetData() data = cls_widget.pie_graph_date_unmatch_data(3, **{'language': 'JA', 'req_rule_ids': [999], 'count': 3}) assert (len(data['data']) == 4) assert (('[pytest]<br>pytest:aaa' in data['data']) and (data['data']['[pytest]<br>pytest:aaa'][0] == 'unknown1')) assert (('[pytest]<br>pytest:bbb' in data['data']) and (data['data']['[pytest]<br>pytest:bbb'][0] == 'unknown2')) assert (('[pytest]<br>pytest:ccc' in data['data']) and (data['data']['[pytest]<br>pytest:ccc'][0] == 'unknown3')) assert (('その他' in data['data']) and (data['data']['その他'][0] == 'unknown6'))
正常系
oase-root/tests/web_app/views/top/test_dashboard.py
test_ok
Masa-Yasuno/oase
9
python
@pytest.mark.usefixtures('widget_data_common', 'widget_data2') def test_ok(self): '\n \n ' cls_widget = dashboard.WidgetData() data = cls_widget.pie_graph_date_unmatch_data(3, **{'language': 'JA', 'req_rule_ids': [999], 'count': 3}) assert (len(data['data']) == 4) assert (('[pytest]<br>pytest:aaa' in data['data']) and (data['data']['[pytest]<br>pytest:aaa'][0] == 'unknown1')) assert (('[pytest]<br>pytest:bbb' in data['data']) and (data['data']['[pytest]<br>pytest:bbb'][0] == 'unknown2')) assert (('[pytest]<br>pytest:ccc' in data['data']) and (data['data']['[pytest]<br>pytest:ccc'][0] == 'unknown3')) assert (('その他' in data['data']) and (data['data']['その他'][0] == 'unknown6'))
@pytest.mark.usefixtures('widget_data_common', 'widget_data2') def test_ok(self): '\n \n ' cls_widget = dashboard.WidgetData() data = cls_widget.pie_graph_date_unmatch_data(3, **{'language': 'JA', 'req_rule_ids': [999], 'count': 3}) assert (len(data['data']) == 4) assert (('[pytest]<br>pytest:aaa' in data['data']) and (data['data']['[pytest]<br>pytest:aaa'][0] == 'unknown1')) assert (('[pytest]<br>pytest:bbb' in data['data']) and (data['data']['[pytest]<br>pytest:bbb'][0] == 'unknown2')) assert (('[pytest]<br>pytest:ccc' in data['data']) and (data['data']['[pytest]<br>pytest:ccc'][0] == 'unknown3')) assert (('その他' in data['data']) and (data['data']['その他'][0] == 'unknown6'))<|docstring|>正常系<|endoftext|>
87427643b61996ec054b3e63d454104b2b37f138c9b3a902c2d2c5545c972014
@pytest.mark.usefixtures('widget_data3') def test_ok(self): '\n 正常系\n ' cls_widget = dashboard.WidgetData() data = cls_widget.pie_graph_date_matching_data(3, **{'language': 'JA', 'req_rule_ids': [999]}) assert (len(data['data']) == 2) assert (data['data']['Match'][1] == 2) assert (data['data']['Unmatch'][1] == 1)
正常系
oase-root/tests/web_app/views/top/test_dashboard.py
test_ok
Masa-Yasuno/oase
9
python
@pytest.mark.usefixtures('widget_data3') def test_ok(self): '\n \n ' cls_widget = dashboard.WidgetData() data = cls_widget.pie_graph_date_matching_data(3, **{'language': 'JA', 'req_rule_ids': [999]}) assert (len(data['data']) == 2) assert (data['data']['Match'][1] == 2) assert (data['data']['Unmatch'][1] == 1)
@pytest.mark.usefixtures('widget_data3') def test_ok(self): '\n \n ' cls_widget = dashboard.WidgetData() data = cls_widget.pie_graph_date_matching_data(3, **{'language': 'JA', 'req_rule_ids': [999]}) assert (len(data['data']) == 2) assert (data['data']['Match'][1] == 2) assert (data['data']['Unmatch'][1] == 1)<|docstring|>正常系<|endoftext|>
556ff47e769a451ba64d35f03674ba6aa1297f5988cbe4f9957dc1d9655b1fc1
@pytest.mark.usefixtures('widget_data21') def test_ok(self): '\n 正常系\n ' param_info = {'language': 'JA', 'date_range': 30, 'req_rule_ids': [999]} cls_widget = dashboard.WidgetData() data = cls_widget.stacked_graph_hourly_matching_data(21, **param_info) assert (len(data['data']) == 24) for d in data['data']: if (d[1] == '12'): assert (d[2] == 2) assert (d[3] == 1) else: assert (d[2] == 0) assert (d[3] == 0)
正常系
oase-root/tests/web_app/views/top/test_dashboard.py
test_ok
Masa-Yasuno/oase
9
python
@pytest.mark.usefixtures('widget_data21') def test_ok(self): '\n \n ' param_info = {'language': 'JA', 'date_range': 30, 'req_rule_ids': [999]} cls_widget = dashboard.WidgetData() data = cls_widget.stacked_graph_hourly_matching_data(21, **param_info) assert (len(data['data']) == 24) for d in data['data']: if (d[1] == '12'): assert (d[2] == 2) assert (d[3] == 1) else: assert (d[2] == 0) assert (d[3] == 0)
@pytest.mark.usefixtures('widget_data21') def test_ok(self): '\n \n ' param_info = {'language': 'JA', 'date_range': 30, 'req_rule_ids': [999]} cls_widget = dashboard.WidgetData() data = cls_widget.stacked_graph_hourly_matching_data(21, **param_info) assert (len(data['data']) == 24) for d in data['data']: if (d[1] == '12'): assert (d[2] == 2) assert (d[3] == 1) else: assert (d[2] == 0) assert (d[3] == 0)<|docstring|>正常系<|endoftext|>
e533e386a4a31d6e3273489bb0e26a6cf2eb00d9244b24400473361f97f5cfa7
@pytest.mark.usefixtures('widget_data22') def test_ok(self): '\n 正常系\n ' param_info = {'language': 'JA', 'date_range': 30, 'req_rule_ids': [999]} cls_widget = dashboard.WidgetData() data = cls_widget.stacked_graph_monthly_matching_data(22, **param_info) cnt_known = 0 cnt_unknown = 0 assert (len(data['data']) == 12) for d in data['data']: cnt_known += d[2] cnt_unknown += d[3] assert (cnt_known == 2) assert (cnt_unknown == 1)
正常系
oase-root/tests/web_app/views/top/test_dashboard.py
test_ok
Masa-Yasuno/oase
9
python
@pytest.mark.usefixtures('widget_data22') def test_ok(self): '\n \n ' param_info = {'language': 'JA', 'date_range': 30, 'req_rule_ids': [999]} cls_widget = dashboard.WidgetData() data = cls_widget.stacked_graph_monthly_matching_data(22, **param_info) cnt_known = 0 cnt_unknown = 0 assert (len(data['data']) == 12) for d in data['data']: cnt_known += d[2] cnt_unknown += d[3] assert (cnt_known == 2) assert (cnt_unknown == 1)
@pytest.mark.usefixtures('widget_data22') def test_ok(self): '\n \n ' param_info = {'language': 'JA', 'date_range': 30, 'req_rule_ids': [999]} cls_widget = dashboard.WidgetData() data = cls_widget.stacked_graph_monthly_matching_data(22, **param_info) cnt_known = 0 cnt_unknown = 0 assert (len(data['data']) == 12) for d in data['data']: cnt_known += d[2] cnt_unknown += d[3] assert (cnt_known == 2) assert (cnt_unknown == 1)<|docstring|>正常系<|endoftext|>
4fc796a44334bfe8bb5a56f8a160300feffd566e0417b2aa37be97c71d393200
@classmethod def construct(cls, project, *, run=None, name=None, data=None, **desc): '\n Construct an animation, set the runner, and add in the two\n "reserved fields" `name` and `data`.\n ' from .failed import Failed exception = desc.pop('_exception', None) if exception: a = Failed(project.layout, desc, exception) else: try: a = cls(project.layout, **desc) a._set_runner((run or {})) except Exception as e: if cls.FAIL_ON_EXCEPTION: raise a = Failed(project.layout, desc, e) a.name = name a.data = data return a
Construct an animation, set the runner, and add in the two "reserved fields" `name` and `data`.
bibliopixel/animation/animation.py
construct
8cH9azbsFifZ/BiblioPixel
253
python
@classmethod def construct(cls, project, *, run=None, name=None, data=None, **desc): '\n Construct an animation, set the runner, and add in the two\n "reserved fields" `name` and `data`.\n ' from .failed import Failed exception = desc.pop('_exception', None) if exception: a = Failed(project.layout, desc, exception) else: try: a = cls(project.layout, **desc) a._set_runner((run or {})) except Exception as e: if cls.FAIL_ON_EXCEPTION: raise a = Failed(project.layout, desc, e) a.name = name a.data = data return a
@classmethod def construct(cls, project, *, run=None, name=None, data=None, **desc): '\n Construct an animation, set the runner, and add in the two\n "reserved fields" `name` and `data`.\n ' from .failed import Failed exception = desc.pop('_exception', None) if exception: a = Failed(project.layout, desc, exception) else: try: a = cls(project.layout, **desc) a._set_runner((run or {})) except Exception as e: if cls.FAIL_ON_EXCEPTION: raise a = Failed(project.layout, desc, e) a.name = name a.data = data return a<|docstring|>Construct an animation, set the runner, and add in the two "reserved fields" `name` and `data`.<|endoftext|>
33d7a375b3aaf222ef1b6a1daba4d07dd750d4c054843648b4358a09d7368140
def __init__(self, layout, *, preclear=True, fail_on_exception=None, **kwds): '\n Arguments:\n preclear: If True, clear the layout before rendering the frame;\n otherwise, the results of the previous frame are preserved\n\n fail_on_exception: If False, exceptions thrown in the animation frame are\n caught and reported;\n if True, exceptions are are raised, potentially ending the\n animation cycle and the program;\n if None or not set, the value of Animation.FAIL_ON_EXCEPTION is used\n ' self.palette = legacy_palette.pop_legacy_palette(kwds, *self.COLOR_DEFAULTS) self.palette.length = layout.numLEDs attributes.set_reserved(self, 'animation', **kwds) self.layout = layout assert layout self.internal_delay = None self.on_completion = None self.state = runner.STATE.ready self.preclear = preclear self.runner = None self.time = time.time self.sleep_time = 0 self.preframe_callbacks = [] self.fail_on_exception = (self.FAIL_ON_EXCEPTION if (fail_on_exception is None) else fail_on_exception)
Arguments: preclear: If True, clear the layout before rendering the frame; otherwise, the results of the previous frame are preserved fail_on_exception: If False, exceptions thrown in the animation frame are caught and reported; if True, exceptions are are raised, potentially ending the animation cycle and the program; if None or not set, the value of Animation.FAIL_ON_EXCEPTION is used
bibliopixel/animation/animation.py
__init__
8cH9azbsFifZ/BiblioPixel
253
python
def __init__(self, layout, *, preclear=True, fail_on_exception=None, **kwds): '\n Arguments:\n preclear: If True, clear the layout before rendering the frame;\n otherwise, the results of the previous frame are preserved\n\n fail_on_exception: If False, exceptions thrown in the animation frame are\n caught and reported;\n if True, exceptions are are raised, potentially ending the\n animation cycle and the program;\n if None or not set, the value of Animation.FAIL_ON_EXCEPTION is used\n ' self.palette = legacy_palette.pop_legacy_palette(kwds, *self.COLOR_DEFAULTS) self.palette.length = layout.numLEDs attributes.set_reserved(self, 'animation', **kwds) self.layout = layout assert layout self.internal_delay = None self.on_completion = None self.state = runner.STATE.ready self.preclear = preclear self.runner = None self.time = time.time self.sleep_time = 0 self.preframe_callbacks = [] self.fail_on_exception = (self.FAIL_ON_EXCEPTION if (fail_on_exception is None) else fail_on_exception)
def __init__(self, layout, *, preclear=True, fail_on_exception=None, **kwds): '\n Arguments:\n preclear: If True, clear the layout before rendering the frame;\n otherwise, the results of the previous frame are preserved\n\n fail_on_exception: If False, exceptions thrown in the animation frame are\n caught and reported;\n if True, exceptions are are raised, potentially ending the\n animation cycle and the program;\n if None or not set, the value of Animation.FAIL_ON_EXCEPTION is used\n ' self.palette = legacy_palette.pop_legacy_palette(kwds, *self.COLOR_DEFAULTS) self.palette.length = layout.numLEDs attributes.set_reserved(self, 'animation', **kwds) self.layout = layout assert layout self.internal_delay = None self.on_completion = None self.state = runner.STATE.ready self.preclear = preclear self.runner = None self.time = time.time self.sleep_time = 0 self.preframe_callbacks = [] self.fail_on_exception = (self.FAIL_ON_EXCEPTION if (fail_on_exception is None) else fail_on_exception)<|docstring|>Arguments: preclear: If True, clear the layout before rendering the frame; otherwise, the results of the previous frame are preserved fail_on_exception: If False, exceptions thrown in the animation frame are caught and reported; if True, exceptions are are raised, potentially ending the animation cycle and the program; if None or not set, the value of Animation.FAIL_ON_EXCEPTION is used<|endoftext|>
4ce41322c5b7f717a53f17cc7003fbaf893422d0c0d76a0d79b8252d8e327e2e
@property def _led(self): 'Many BiblioPixelAnimations use the "protected" variable _led.' return self.layout
Many BiblioPixelAnimations use the "protected" variable _led.
bibliopixel/animation/animation.py
_led
8cH9azbsFifZ/BiblioPixel
253
python
@property def _led(self): return self.layout
@property def _led(self): return self.layout<|docstring|>Many BiblioPixelAnimations use the "protected" variable _led.<|endoftext|>
c874c198463fe476c02dbf588e20ca926de34a41ad5fb605ecd1ceb06b69d493
@property def completed(self): 'Many BiblioPixelAnimations use the old `completed` variable.' return (self.state == runner.STATE.complete)
Many BiblioPixelAnimations use the old `completed` variable.
bibliopixel/animation/animation.py
completed
8cH9azbsFifZ/BiblioPixel
253
python
@property def completed(self): return (self.state == runner.STATE.complete)
@property def completed(self): return (self.state == runner.STATE.complete)<|docstring|>Many BiblioPixelAnimations use the old `completed` variable.<|endoftext|>
c838d1f7133c8ae2ac0ba4955a2609ea3ead6042ed08510976223515c7271d95
def add_preframe_callback(self, callback): '\n The preframe_callbacks are called right before the start of a\n frame rendering pass.\n\n To avoid race conditions when editing values, the ``Project``\n adds a callback here for the top-level animation, to drain the\n edit_queue at a moment where no rendering is\n happening.\n ' self.preframe_callbacks.append(callback)
The preframe_callbacks are called right before the start of a frame rendering pass. To avoid race conditions when editing values, the ``Project`` adds a callback here for the top-level animation, to drain the edit_queue at a moment where no rendering is happening.
bibliopixel/animation/animation.py
add_preframe_callback
8cH9azbsFifZ/BiblioPixel
253
python
def add_preframe_callback(self, callback): '\n The preframe_callbacks are called right before the start of a\n frame rendering pass.\n\n To avoid race conditions when editing values, the ``Project``\n adds a callback here for the top-level animation, to drain the\n edit_queue at a moment where no rendering is\n happening.\n ' self.preframe_callbacks.append(callback)
def add_preframe_callback(self, callback): '\n The preframe_callbacks are called right before the start of a\n frame rendering pass.\n\n To avoid race conditions when editing values, the ``Project``\n adds a callback here for the top-level animation, to drain the\n edit_queue at a moment where no rendering is\n happening.\n ' self.preframe_callbacks.append(callback)<|docstring|>The preframe_callbacks are called right before the start of a frame rendering pass. To avoid race conditions when editing values, the ``Project`` adds a callback here for the top-level animation, to drain the edit_queue at a moment where no rendering is happening.<|endoftext|>
eac65bdd2f130880d82bfb89f2f6039137fa04b40ca339a2a55c0df05c3e566a
def numTrees(self, n): '\n :type n: int\n :rtype: int\n ' dp = [0 for _ in range((n + 1))] dp[0] = 1 dp[1] = 1 for i in range(2, (n + 1)): for j in range(i): dp[i] += (dp[((i - j) - 1)] * dp[j]) return dp[n]
:type n: int :rtype: int
tree/bst/unique-bst.py
numTrees
windowssocket/py_leetcode
3
python
def numTrees(self, n): '\n :type n: int\n :rtype: int\n ' dp = [0 for _ in range((n + 1))] dp[0] = 1 dp[1] = 1 for i in range(2, (n + 1)): for j in range(i): dp[i] += (dp[((i - j) - 1)] * dp[j]) return dp[n]
def numTrees(self, n): '\n :type n: int\n :rtype: int\n ' dp = [0 for _ in range((n + 1))] dp[0] = 1 dp[1] = 1 for i in range(2, (n + 1)): for j in range(i): dp[i] += (dp[((i - j) - 1)] * dp[j]) return dp[n]<|docstring|>:type n: int :rtype: int<|endoftext|>
af1924003d54c21354dd70a9aaf7c5e77b77b2778d522a0430a0bb4856deb381
def h_normal_gpytorch(s): ' Entropy of a normal distribution ' return torch.log((s * (((2 * np.e) * np.pi) ** 0.5)))
Entropy of a normal distribution
excursion/active_learning/approximations.py
h_normal_gpytorch
leonoravesterbacka/excursion
2
python
def h_normal_gpytorch(s): ' ' return torch.log((s * (((2 * np.e) * np.pi) ** 0.5)))
def h_normal_gpytorch(s): ' ' return torch.log((s * (((2 * np.e) * np.pi) ** 0.5)))<|docstring|>Entropy of a normal distribution<|endoftext|>
6a8c9079bd149365a5afb3cb3a8b6f00449c1697b3b62e8fbc0016c4d9ce3f97
@staticmethod def __classcall_private__(cls, linear_extension, poset): "\n Implements the shortcut ``LinearExtensionOfPoset(linear_extension, poset)`` to ``LinearExtensionsOfPoset(poset)(linear_extension)``\n\n INPUT:\n\n - ``linear_extension`` -- a list of elements of ``poset``\n - ``poset`` -- a finite poset\n\n .. todo:: check whether this method is still useful\n\n TESTS::\n\n sage: from sage.combinat.posets.linear_extensions import LinearExtensionOfPoset\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]))\n sage: p = LinearExtensionOfPoset([1,4,2,3], P)\n sage: p.parent()\n The set of all linear extensions of Finite poset containing 4 elements\n sage: type(p)\n <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class'>\n sage: p.poset()\n Finite poset containing 4 elements\n sage: TestSuite(p).run()\n\n sage: LinearExtensionOfPoset([4,3,2,1], P)\n Traceback (most recent call last):\n ...\n ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements\n " if isinstance(linear_extension, cls): return linear_extension return LinearExtensionsOfPoset(poset)(linear_extension)
Implements the shortcut ``LinearExtensionOfPoset(linear_extension, poset)`` to ``LinearExtensionsOfPoset(poset)(linear_extension)`` INPUT: - ``linear_extension`` -- a list of elements of ``poset`` - ``poset`` -- a finite poset .. todo:: check whether this method is still useful TESTS:: sage: from sage.combinat.posets.linear_extensions import LinearExtensionOfPoset sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]])) sage: p = LinearExtensionOfPoset([1,4,2,3], P) sage: p.parent() The set of all linear extensions of Finite poset containing 4 elements sage: type(p) <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class'> sage: p.poset() Finite poset containing 4 elements sage: TestSuite(p).run() sage: LinearExtensionOfPoset([4,3,2,1], P) Traceback (most recent call last): ... ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements
src/sage/combinat/posets/linear_extensions.py
__classcall_private__
robertwb/sage
2
python
@staticmethod def __classcall_private__(cls, linear_extension, poset): "\n Implements the shortcut ``LinearExtensionOfPoset(linear_extension, poset)`` to ``LinearExtensionsOfPoset(poset)(linear_extension)``\n\n INPUT:\n\n - ``linear_extension`` -- a list of elements of ``poset``\n - ``poset`` -- a finite poset\n\n .. todo:: check whether this method is still useful\n\n TESTS::\n\n sage: from sage.combinat.posets.linear_extensions import LinearExtensionOfPoset\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]))\n sage: p = LinearExtensionOfPoset([1,4,2,3], P)\n sage: p.parent()\n The set of all linear extensions of Finite poset containing 4 elements\n sage: type(p)\n <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class'>\n sage: p.poset()\n Finite poset containing 4 elements\n sage: TestSuite(p).run()\n\n sage: LinearExtensionOfPoset([4,3,2,1], P)\n Traceback (most recent call last):\n ...\n ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements\n " if isinstance(linear_extension, cls): return linear_extension return LinearExtensionsOfPoset(poset)(linear_extension)
@staticmethod def __classcall_private__(cls, linear_extension, poset): "\n Implements the shortcut ``LinearExtensionOfPoset(linear_extension, poset)`` to ``LinearExtensionsOfPoset(poset)(linear_extension)``\n\n INPUT:\n\n - ``linear_extension`` -- a list of elements of ``poset``\n - ``poset`` -- a finite poset\n\n .. todo:: check whether this method is still useful\n\n TESTS::\n\n sage: from sage.combinat.posets.linear_extensions import LinearExtensionOfPoset\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]))\n sage: p = LinearExtensionOfPoset([1,4,2,3], P)\n sage: p.parent()\n The set of all linear extensions of Finite poset containing 4 elements\n sage: type(p)\n <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class'>\n sage: p.poset()\n Finite poset containing 4 elements\n sage: TestSuite(p).run()\n\n sage: LinearExtensionOfPoset([4,3,2,1], P)\n Traceback (most recent call last):\n ...\n ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements\n " if isinstance(linear_extension, cls): return linear_extension return LinearExtensionsOfPoset(poset)(linear_extension)<|docstring|>Implements the shortcut ``LinearExtensionOfPoset(linear_extension, poset)`` to ``LinearExtensionsOfPoset(poset)(linear_extension)`` INPUT: - ``linear_extension`` -- a list of elements of ``poset`` - ``poset`` -- a finite poset .. todo:: check whether this method is still useful TESTS:: sage: from sage.combinat.posets.linear_extensions import LinearExtensionOfPoset sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]])) sage: p = LinearExtensionOfPoset([1,4,2,3], P) sage: p.parent() The set of all linear extensions of Finite poset containing 4 elements sage: type(p) <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class'> sage: p.poset() Finite poset containing 4 elements sage: TestSuite(p).run() sage: LinearExtensionOfPoset([4,3,2,1], P) Traceback (most recent call last): ... 
ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements<|endoftext|>
dd514b83ca8664d7ccc9e6fd79a6a35f68a63954f4f70ac8d263a761cba2b363
def check(self): '\n Checks whether ``self`` is indeed a linear extension of the underlying poset.\n\n TESTS::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]))\n sage: P.linear_extension([1,4,2,3])\n [1, 4, 2, 3]\n sage: P.linear_extension([4,3,2,1])\n Traceback (most recent call last):\n ...\n ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements\n ' P = self.parent().poset() if (not P.is_linear_extension(self)): raise ValueError(('%s is not a linear extension of %s' % (self, P)))
Checks whether ``self`` is indeed a linear extension of the underlying poset. TESTS:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]])) sage: P.linear_extension([1,4,2,3]) [1, 4, 2, 3] sage: P.linear_extension([4,3,2,1]) Traceback (most recent call last): ... ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements
src/sage/combinat/posets/linear_extensions.py
check
robertwb/sage
2
python
def check(self): '\n Checks whether ``self`` is indeed a linear extension of the underlying poset.\n\n TESTS::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]))\n sage: P.linear_extension([1,4,2,3])\n [1, 4, 2, 3]\n sage: P.linear_extension([4,3,2,1])\n Traceback (most recent call last):\n ...\n ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements\n ' P = self.parent().poset() if (not P.is_linear_extension(self)): raise ValueError(('%s is not a linear extension of %s' % (self, P)))
def check(self): '\n Checks whether ``self`` is indeed a linear extension of the underlying poset.\n\n TESTS::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]))\n sage: P.linear_extension([1,4,2,3])\n [1, 4, 2, 3]\n sage: P.linear_extension([4,3,2,1])\n Traceback (most recent call last):\n ...\n ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements\n ' P = self.parent().poset() if (not P.is_linear_extension(self)): raise ValueError(('%s is not a linear extension of %s' % (self, P)))<|docstring|>Checks whether ``self`` is indeed a linear extension of the underlying poset. TESTS:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]])) sage: P.linear_extension([1,4,2,3]) [1, 4, 2, 3] sage: P.linear_extension([4,3,2,1]) Traceback (most recent call last): ... ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements<|endoftext|>
80b7bf0261c1928be3a4ef63fd0372842712ae99dbbfd8cce09fb18a06c71daa
def poset(self): '\n Returns the underlying original poset.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]]))\n sage: p = P.linear_extension([1,2,4,3])\n sage: p.poset()\n Finite poset containing 4 elements\n ' return self.parent().poset()
Returns the underlying original poset. EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]])) sage: p = P.linear_extension([1,2,4,3]) sage: p.poset() Finite poset containing 4 elements
src/sage/combinat/posets/linear_extensions.py
poset
robertwb/sage
2
python
def poset(self): '\n Returns the underlying original poset.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]]))\n sage: p = P.linear_extension([1,2,4,3])\n sage: p.poset()\n Finite poset containing 4 elements\n ' return self.parent().poset()
def poset(self): '\n Returns the underlying original poset.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]]))\n sage: p = P.linear_extension([1,2,4,3])\n sage: p.poset()\n Finite poset containing 4 elements\n ' return self.parent().poset()<|docstring|>Returns the underlying original poset. EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]])) sage: p = P.linear_extension([1,2,4,3]) sage: p.poset() Finite poset containing 4 elements<|endoftext|>
53654ddedfcb8482035676da84790f2b7e5b39490af820c9a901187b11632816
def _latex_(self): "\n Returns the latex string for ``self``.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]))\n sage: p = P.linear_extension([1,2,3,4])\n sage: p._latex_()\n '\\\\mathtt{(1, 2, 3, 4)}'\n " return (('\\mathtt{' + str(tuple(self))) + '}')
Returns the latex string for ``self``. EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]])) sage: p = P.linear_extension([1,2,3,4]) sage: p._latex_() '\\mathtt{(1, 2, 3, 4)}'
src/sage/combinat/posets/linear_extensions.py
_latex_
robertwb/sage
2
python
def _latex_(self): "\n Returns the latex string for ``self``.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]))\n sage: p = P.linear_extension([1,2,3,4])\n sage: p._latex_()\n '\\\\mathtt{(1, 2, 3, 4)}'\n " return (('\\mathtt{' + str(tuple(self))) + '}')
def _latex_(self): "\n Returns the latex string for ``self``.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]))\n sage: p = P.linear_extension([1,2,3,4])\n sage: p._latex_()\n '\\\\mathtt{(1, 2, 3, 4)}'\n " return (('\\mathtt{' + str(tuple(self))) + '}')<|docstring|>Returns the latex string for ``self``. EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]])) sage: p = P.linear_extension([1,2,3,4]) sage: p._latex_() '\\mathtt{(1, 2, 3, 4)}'<|endoftext|>
39b9198c0abef0f0c82070ca31cfb658675287eef97a32faae9b880ad0f8e35d
def to_poset(self): '\n Return the poset associated to the linear extension ``self``.\n\n This method returns the poset obtained from the original poset\n `P` by relabelling the `i`-th element of ``self`` to the\n `i`-th element of the original poset, while keeping the linear\n extension of the original poset.\n\n For a poset with default linear extension `1,\\dots,n`,\n ``self`` can be interpreted as a permutation, and the\n relabelling is done according to the inverse of this\n permutation.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[1,3],[3,4]]), linear_extension=True, facade=False)\n sage: p = P.linear_extension([1,3,4,2])\n sage: Q = p.to_poset(); Q\n Finite poset containing 4 elements with distinguished linear extension\n sage: P == Q\n False\n\n The default linear extension remains the same::\n\n sage: list(P)\n [1, 2, 3, 4]\n sage: list(Q)\n [1, 2, 3, 4]\n\n But the relabelling can be seen on cover relations::\n\n sage: P.cover_relations()\n [[1, 2], [1, 3], [3, 4]]\n sage: Q.cover_relations()\n [[1, 2], [1, 4], [2, 3]]\n\n sage: p = P.linear_extension([1,2,3,4])\n sage: Q = p.to_poset()\n sage: P == Q\n True\n ' P = self.parent().poset() old = [P.unwrap(x) for x in self] new = [P.unwrap(x) for x in P] relabelling = dict(zip(old, new)) return P.relabel(relabelling).with_linear_extension(new)
Return the poset associated to the linear extension ``self``. This method returns the poset obtained from the original poset `P` by relabelling the `i`-th element of ``self`` to the `i`-th element of the original poset, while keeping the linear extension of the original poset. For a poset with default linear extension `1,\dots,n`, ``self`` can be interpreted as a permutation, and the relabelling is done according to the inverse of this permutation. EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,2],[1,3],[3,4]]), linear_extension=True, facade=False) sage: p = P.linear_extension([1,3,4,2]) sage: Q = p.to_poset(); Q Finite poset containing 4 elements with distinguished linear extension sage: P == Q False The default linear extension remains the same:: sage: list(P) [1, 2, 3, 4] sage: list(Q) [1, 2, 3, 4] But the relabelling can be seen on cover relations:: sage: P.cover_relations() [[1, 2], [1, 3], [3, 4]] sage: Q.cover_relations() [[1, 2], [1, 4], [2, 3]] sage: p = P.linear_extension([1,2,3,4]) sage: Q = p.to_poset() sage: P == Q True
src/sage/combinat/posets/linear_extensions.py
to_poset
robertwb/sage
2
python
def to_poset(self): '\n Return the poset associated to the linear extension ``self``.\n\n This method returns the poset obtained from the original poset\n `P` by relabelling the `i`-th element of ``self`` to the\n `i`-th element of the original poset, while keeping the linear\n extension of the original poset.\n\n For a poset with default linear extension `1,\\dots,n`,\n ``self`` can be interpreted as a permutation, and the\n relabelling is done according to the inverse of this\n permutation.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[1,3],[3,4]]), linear_extension=True, facade=False)\n sage: p = P.linear_extension([1,3,4,2])\n sage: Q = p.to_poset(); Q\n Finite poset containing 4 elements with distinguished linear extension\n sage: P == Q\n False\n\n The default linear extension remains the same::\n\n sage: list(P)\n [1, 2, 3, 4]\n sage: list(Q)\n [1, 2, 3, 4]\n\n But the relabelling can be seen on cover relations::\n\n sage: P.cover_relations()\n [[1, 2], [1, 3], [3, 4]]\n sage: Q.cover_relations()\n [[1, 2], [1, 4], [2, 3]]\n\n sage: p = P.linear_extension([1,2,3,4])\n sage: Q = p.to_poset()\n sage: P == Q\n True\n ' P = self.parent().poset() old = [P.unwrap(x) for x in self] new = [P.unwrap(x) for x in P] relabelling = dict(zip(old, new)) return P.relabel(relabelling).with_linear_extension(new)
def to_poset(self): '\n Return the poset associated to the linear extension ``self``.\n\n This method returns the poset obtained from the original poset\n `P` by relabelling the `i`-th element of ``self`` to the\n `i`-th element of the original poset, while keeping the linear\n extension of the original poset.\n\n For a poset with default linear extension `1,\\dots,n`,\n ``self`` can be interpreted as a permutation, and the\n relabelling is done according to the inverse of this\n permutation.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[1,3],[3,4]]), linear_extension=True, facade=False)\n sage: p = P.linear_extension([1,3,4,2])\n sage: Q = p.to_poset(); Q\n Finite poset containing 4 elements with distinguished linear extension\n sage: P == Q\n False\n\n The default linear extension remains the same::\n\n sage: list(P)\n [1, 2, 3, 4]\n sage: list(Q)\n [1, 2, 3, 4]\n\n But the relabelling can be seen on cover relations::\n\n sage: P.cover_relations()\n [[1, 2], [1, 3], [3, 4]]\n sage: Q.cover_relations()\n [[1, 2], [1, 4], [2, 3]]\n\n sage: p = P.linear_extension([1,2,3,4])\n sage: Q = p.to_poset()\n sage: P == Q\n True\n ' P = self.parent().poset() old = [P.unwrap(x) for x in self] new = [P.unwrap(x) for x in P] relabelling = dict(zip(old, new)) return P.relabel(relabelling).with_linear_extension(new)<|docstring|>Return the poset associated to the linear extension ``self``. This method returns the poset obtained from the original poset `P` by relabelling the `i`-th element of ``self`` to the `i`-th element of the original poset, while keeping the linear extension of the original poset. For a poset with default linear extension `1,\dots,n`, ``self`` can be interpreted as a permutation, and the relabelling is done according to the inverse of this permutation. 
EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,2],[1,3],[3,4]]), linear_extension=True, facade=False) sage: p = P.linear_extension([1,3,4,2]) sage: Q = p.to_poset(); Q Finite poset containing 4 elements with distinguished linear extension sage: P == Q False The default linear extension remains the same:: sage: list(P) [1, 2, 3, 4] sage: list(Q) [1, 2, 3, 4] But the relabelling can be seen on cover relations:: sage: P.cover_relations() [[1, 2], [1, 3], [3, 4]] sage: Q.cover_relations() [[1, 2], [1, 4], [2, 3]] sage: p = P.linear_extension([1,2,3,4]) sage: Q = p.to_poset() sage: P == Q True<|endoftext|>
c6c1a83b52222308b34863a1ff14761d9a773aac3c572ceaff7bb4dd5191095a
def tau(self, i): '\n Returns the operator `\\tau_i` on linear extensions ``self`` of a poset.\n\n INPUT:\n\n - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset.\n\n The operator `\\tau_i` on a linear extension `\\pi` of a poset\n `P` interchanges positions `i` and `i+1` if the result is\n again a linear extension of `P`, and otherwise acts\n trivially. For more details, see [Stan2009]_.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension=True)\n sage: L = P.linear_extensions()\n sage: l = L.an_element(); l\n [1, 2, 3, 4]\n sage: l.tau(1)\n [2, 1, 3, 4]\n sage: for p in L:\n ....: for i in range(1,4):\n ....: print("{} {} {}".format(i, p, p.tau(i)))\n 1 [1, 2, 3, 4] [2, 1, 3, 4]\n 2 [1, 2, 3, 4] [1, 2, 3, 4]\n 3 [1, 2, 3, 4] [1, 2, 4, 3]\n 1 [1, 2, 4, 3] [2, 1, 4, 3]\n 2 [1, 2, 4, 3] [1, 4, 2, 3]\n 3 [1, 2, 4, 3] [1, 2, 3, 4]\n 1 [1, 4, 2, 3] [1, 4, 2, 3]\n 2 [1, 4, 2, 3] [1, 2, 4, 3]\n 3 [1, 4, 2, 3] [1, 4, 2, 3]\n 1 [2, 1, 3, 4] [1, 2, 3, 4]\n 2 [2, 1, 3, 4] [2, 1, 3, 4]\n 3 [2, 1, 3, 4] [2, 1, 4, 3]\n 1 [2, 1, 4, 3] [1, 2, 4, 3]\n 2 [2, 1, 4, 3] [2, 1, 4, 3]\n 3 [2, 1, 4, 3] [2, 1, 3, 4]\n\n TESTS::\n\n sage: type(l.tau(1))\n <class \'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class\'>\n sage: l.tau(2) == l\n True\n ' P = self.poset() a = self[(i - 1)] b = self[i] if (P.lt(a, b) or P.lt(b, a)): return self with self.clone() as q: q[(i - 1)] = b q[i] = a return q
Returns the operator `\tau_i` on linear extensions ``self`` of a poset. INPUT: - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset. The operator `\tau_i` on a linear extension `\pi` of a poset `P` interchanges positions `i` and `i+1` if the result is again a linear extension of `P`, and otherwise acts trivially. For more details, see [Stan2009]_. EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension=True) sage: L = P.linear_extensions() sage: l = L.an_element(); l [1, 2, 3, 4] sage: l.tau(1) [2, 1, 3, 4] sage: for p in L: ....: for i in range(1,4): ....: print("{} {} {}".format(i, p, p.tau(i))) 1 [1, 2, 3, 4] [2, 1, 3, 4] 2 [1, 2, 3, 4] [1, 2, 3, 4] 3 [1, 2, 3, 4] [1, 2, 4, 3] 1 [1, 2, 4, 3] [2, 1, 4, 3] 2 [1, 2, 4, 3] [1, 4, 2, 3] 3 [1, 2, 4, 3] [1, 2, 3, 4] 1 [1, 4, 2, 3] [1, 4, 2, 3] 2 [1, 4, 2, 3] [1, 2, 4, 3] 3 [1, 4, 2, 3] [1, 4, 2, 3] 1 [2, 1, 3, 4] [1, 2, 3, 4] 2 [2, 1, 3, 4] [2, 1, 3, 4] 3 [2, 1, 3, 4] [2, 1, 4, 3] 1 [2, 1, 4, 3] [1, 2, 4, 3] 2 [2, 1, 4, 3] [2, 1, 4, 3] 3 [2, 1, 4, 3] [2, 1, 3, 4] TESTS:: sage: type(l.tau(1)) <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class'> sage: l.tau(2) == l True
src/sage/combinat/posets/linear_extensions.py
tau
robertwb/sage
2
python
def tau(self, i): '\n Returns the operator `\\tau_i` on linear extensions ``self`` of a poset.\n\n INPUT:\n\n - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset.\n\n The operator `\\tau_i` on a linear extension `\\pi` of a poset\n `P` interchanges positions `i` and `i+1` if the result is\n again a linear extension of `P`, and otherwise acts\n trivially. For more details, see [Stan2009]_.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension=True)\n sage: L = P.linear_extensions()\n sage: l = L.an_element(); l\n [1, 2, 3, 4]\n sage: l.tau(1)\n [2, 1, 3, 4]\n sage: for p in L:\n ....: for i in range(1,4):\n ....: print("{} {} {}".format(i, p, p.tau(i)))\n 1 [1, 2, 3, 4] [2, 1, 3, 4]\n 2 [1, 2, 3, 4] [1, 2, 3, 4]\n 3 [1, 2, 3, 4] [1, 2, 4, 3]\n 1 [1, 2, 4, 3] [2, 1, 4, 3]\n 2 [1, 2, 4, 3] [1, 4, 2, 3]\n 3 [1, 2, 4, 3] [1, 2, 3, 4]\n 1 [1, 4, 2, 3] [1, 4, 2, 3]\n 2 [1, 4, 2, 3] [1, 2, 4, 3]\n 3 [1, 4, 2, 3] [1, 4, 2, 3]\n 1 [2, 1, 3, 4] [1, 2, 3, 4]\n 2 [2, 1, 3, 4] [2, 1, 3, 4]\n 3 [2, 1, 3, 4] [2, 1, 4, 3]\n 1 [2, 1, 4, 3] [1, 2, 4, 3]\n 2 [2, 1, 4, 3] [2, 1, 4, 3]\n 3 [2, 1, 4, 3] [2, 1, 3, 4]\n\n TESTS::\n\n sage: type(l.tau(1))\n <class \'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class\'>\n sage: l.tau(2) == l\n True\n ' P = self.poset() a = self[(i - 1)] b = self[i] if (P.lt(a, b) or P.lt(b, a)): return self with self.clone() as q: q[(i - 1)] = b q[i] = a return q
def tau(self, i): '\n Returns the operator `\\tau_i` on linear extensions ``self`` of a poset.\n\n INPUT:\n\n - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset.\n\n The operator `\\tau_i` on a linear extension `\\pi` of a poset\n `P` interchanges positions `i` and `i+1` if the result is\n again a linear extension of `P`, and otherwise acts\n trivially. For more details, see [Stan2009]_.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension=True)\n sage: L = P.linear_extensions()\n sage: l = L.an_element(); l\n [1, 2, 3, 4]\n sage: l.tau(1)\n [2, 1, 3, 4]\n sage: for p in L:\n ....: for i in range(1,4):\n ....: print("{} {} {}".format(i, p, p.tau(i)))\n 1 [1, 2, 3, 4] [2, 1, 3, 4]\n 2 [1, 2, 3, 4] [1, 2, 3, 4]\n 3 [1, 2, 3, 4] [1, 2, 4, 3]\n 1 [1, 2, 4, 3] [2, 1, 4, 3]\n 2 [1, 2, 4, 3] [1, 4, 2, 3]\n 3 [1, 2, 4, 3] [1, 2, 3, 4]\n 1 [1, 4, 2, 3] [1, 4, 2, 3]\n 2 [1, 4, 2, 3] [1, 2, 4, 3]\n 3 [1, 4, 2, 3] [1, 4, 2, 3]\n 1 [2, 1, 3, 4] [1, 2, 3, 4]\n 2 [2, 1, 3, 4] [2, 1, 3, 4]\n 3 [2, 1, 3, 4] [2, 1, 4, 3]\n 1 [2, 1, 4, 3] [1, 2, 4, 3]\n 2 [2, 1, 4, 3] [2, 1, 4, 3]\n 3 [2, 1, 4, 3] [2, 1, 3, 4]\n\n TESTS::\n\n sage: type(l.tau(1))\n <class \'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class\'>\n sage: l.tau(2) == l\n True\n ' P = self.poset() a = self[(i - 1)] b = self[i] if (P.lt(a, b) or P.lt(b, a)): return self with self.clone() as q: q[(i - 1)] = b q[i] = a return q<|docstring|>Returns the operator `\tau_i` on linear extensions ``self`` of a poset. INPUT: - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset. The operator `\tau_i` on a linear extension `\pi` of a poset `P` interchanges positions `i` and `i+1` if the result is again a linear extension of `P`, and otherwise acts trivially. For more details, see [Stan2009]_. 
EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension=True) sage: L = P.linear_extensions() sage: l = L.an_element(); l [1, 2, 3, 4] sage: l.tau(1) [2, 1, 3, 4] sage: for p in L: ....: for i in range(1,4): ....: print("{} {} {}".format(i, p, p.tau(i))) 1 [1, 2, 3, 4] [2, 1, 3, 4] 2 [1, 2, 3, 4] [1, 2, 3, 4] 3 [1, 2, 3, 4] [1, 2, 4, 3] 1 [1, 2, 4, 3] [2, 1, 4, 3] 2 [1, 2, 4, 3] [1, 4, 2, 3] 3 [1, 2, 4, 3] [1, 2, 3, 4] 1 [1, 4, 2, 3] [1, 4, 2, 3] 2 [1, 4, 2, 3] [1, 2, 4, 3] 3 [1, 4, 2, 3] [1, 4, 2, 3] 1 [2, 1, 3, 4] [1, 2, 3, 4] 2 [2, 1, 3, 4] [2, 1, 3, 4] 3 [2, 1, 3, 4] [2, 1, 4, 3] 1 [2, 1, 4, 3] [1, 2, 4, 3] 2 [2, 1, 4, 3] [2, 1, 4, 3] 3 [2, 1, 4, 3] [2, 1, 3, 4] TESTS:: sage: type(l.tau(1)) <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category.element_class'> sage: l.tau(2) == l True<|endoftext|>
39d7dc976a2938b7d7362972a56abe716a6410f0c269258d3b9a61601bb7719e
def promotion(self, i=1): '\n Computes the (generalized) promotion on the linear extension of a poset.\n\n INPUT:\n\n - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset (default: `1`)\n\n The `i`-th generalized promotion operator `\\partial_i` on a linear extension\n `\\pi` is defined as `\\pi \\tau_i \\tau_{i+1} \\cdots \\tau_{n-1}`, where `n` is the\n size of the linear extension (or size of the underlying poset).\n\n For more details see [Stan2009]_.\n\n .. seealso:: :meth:`tau`, :meth:`evacuation`\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]]))\n sage: p = P.linear_extension([1,2,3,4,5,6,7])\n sage: q = p.promotion(4); q\n [1, 2, 3, 5, 6, 4, 7]\n sage: p.to_poset() == q.to_poset()\n False\n sage: p.to_poset().is_isomorphic(q.to_poset())\n True\n ' for j in range(i, len(self)): self = self.tau(j) return self
Computes the (generalized) promotion on the linear extension of a poset. INPUT: - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset (default: `1`) The `i`-th generalized promotion operator `\partial_i` on a linear extension `\pi` is defined as `\pi \tau_i \tau_{i+1} \cdots \tau_{n-1}`, where `n` is the size of the linear extension (or size of the underlying poset). For more details see [Stan2009]_. .. seealso:: :meth:`tau`, :meth:`evacuation` EXAMPLES:: sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]])) sage: p = P.linear_extension([1,2,3,4,5,6,7]) sage: q = p.promotion(4); q [1, 2, 3, 5, 6, 4, 7] sage: p.to_poset() == q.to_poset() False sage: p.to_poset().is_isomorphic(q.to_poset()) True
src/sage/combinat/posets/linear_extensions.py
promotion
robertwb/sage
2
python
def promotion(self, i=1): '\n Computes the (generalized) promotion on the linear extension of a poset.\n\n INPUT:\n\n - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset (default: `1`)\n\n The `i`-th generalized promotion operator `\\partial_i` on a linear extension\n `\\pi` is defined as `\\pi \\tau_i \\tau_{i+1} \\cdots \\tau_{n-1}`, where `n` is the\n size of the linear extension (or size of the underlying poset).\n\n For more details see [Stan2009]_.\n\n .. seealso:: :meth:`tau`, :meth:`evacuation`\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]]))\n sage: p = P.linear_extension([1,2,3,4,5,6,7])\n sage: q = p.promotion(4); q\n [1, 2, 3, 5, 6, 4, 7]\n sage: p.to_poset() == q.to_poset()\n False\n sage: p.to_poset().is_isomorphic(q.to_poset())\n True\n ' for j in range(i, len(self)): self = self.tau(j) return self
def promotion(self, i=1): '\n Computes the (generalized) promotion on the linear extension of a poset.\n\n INPUT:\n\n - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset (default: `1`)\n\n The `i`-th generalized promotion operator `\\partial_i` on a linear extension\n `\\pi` is defined as `\\pi \\tau_i \\tau_{i+1} \\cdots \\tau_{n-1}`, where `n` is the\n size of the linear extension (or size of the underlying poset).\n\n For more details see [Stan2009]_.\n\n .. seealso:: :meth:`tau`, :meth:`evacuation`\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]]))\n sage: p = P.linear_extension([1,2,3,4,5,6,7])\n sage: q = p.promotion(4); q\n [1, 2, 3, 5, 6, 4, 7]\n sage: p.to_poset() == q.to_poset()\n False\n sage: p.to_poset().is_isomorphic(q.to_poset())\n True\n ' for j in range(i, len(self)): self = self.tau(j) return self<|docstring|>Computes the (generalized) promotion on the linear extension of a poset. INPUT: - `i` -- an integer between `1` and `n-1`, where `n` is the cardinality of the poset (default: `1`) The `i`-th generalized promotion operator `\partial_i` on a linear extension `\pi` is defined as `\pi \tau_i \tau_{i+1} \cdots \tau_{n-1}`, where `n` is the size of the linear extension (or size of the underlying poset). For more details see [Stan2009]_. .. seealso:: :meth:`tau`, :meth:`evacuation` EXAMPLES:: sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]])) sage: p = P.linear_extension([1,2,3,4,5,6,7]) sage: q = p.promotion(4); q [1, 2, 3, 5, 6, 4, 7] sage: p.to_poset() == q.to_poset() False sage: p.to_poset().is_isomorphic(q.to_poset()) True<|endoftext|>
93c5727403e2960c30270dd3f8ff5cfcf7e04064aff51b611a88c0c006434ac4
def evacuation(self): '\n Computes evacuation on the linear extension of a poset.\n\n Evacuation on a linear extension `\\pi` of length `n` is defined as\n `\\pi (\\tau_1 \\cdots \\tau_{n-1}) (\\tau_1 \\cdots \\tau_{n-2}) \\cdots (\\tau_1)`.\n For more details see [Stan2009]_.\n\n .. seealso:: :meth:`tau`, :meth:`promotion`\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]]))\n sage: p = P.linear_extension([1,2,3,4,5,6,7])\n sage: p.evacuation()\n [1, 4, 2, 3, 7, 5, 6]\n sage: p.evacuation().evacuation() == p\n True\n ' for i in reversed(range(1, (len(self) + 1))): for j in range(1, i): self = self.tau(j) return self
Computes evacuation on the linear extension of a poset. Evacuation on a linear extension `\pi` of length `n` is defined as `\pi (\tau_1 \cdots \tau_{n-1}) (\tau_1 \cdots \tau_{n-2}) \cdots (\tau_1)`. For more details see [Stan2009]_. .. seealso:: :meth:`tau`, :meth:`promotion` EXAMPLES:: sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]])) sage: p = P.linear_extension([1,2,3,4,5,6,7]) sage: p.evacuation() [1, 4, 2, 3, 7, 5, 6] sage: p.evacuation().evacuation() == p True
src/sage/combinat/posets/linear_extensions.py
evacuation
robertwb/sage
2
python
def evacuation(self): '\n Computes evacuation on the linear extension of a poset.\n\n Evacuation on a linear extension `\\pi` of length `n` is defined as\n `\\pi (\\tau_1 \\cdots \\tau_{n-1}) (\\tau_1 \\cdots \\tau_{n-2}) \\cdots (\\tau_1)`.\n For more details see [Stan2009]_.\n\n .. seealso:: :meth:`tau`, :meth:`promotion`\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]]))\n sage: p = P.linear_extension([1,2,3,4,5,6,7])\n sage: p.evacuation()\n [1, 4, 2, 3, 7, 5, 6]\n sage: p.evacuation().evacuation() == p\n True\n ' for i in reversed(range(1, (len(self) + 1))): for j in range(1, i): self = self.tau(j) return self
def evacuation(self): '\n Computes evacuation on the linear extension of a poset.\n\n Evacuation on a linear extension `\\pi` of length `n` is defined as\n `\\pi (\\tau_1 \\cdots \\tau_{n-1}) (\\tau_1 \\cdots \\tau_{n-2}) \\cdots (\\tau_1)`.\n For more details see [Stan2009]_.\n\n .. seealso:: :meth:`tau`, :meth:`promotion`\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]]))\n sage: p = P.linear_extension([1,2,3,4,5,6,7])\n sage: p.evacuation()\n [1, 4, 2, 3, 7, 5, 6]\n sage: p.evacuation().evacuation() == p\n True\n ' for i in reversed(range(1, (len(self) + 1))): for j in range(1, i): self = self.tau(j) return self<|docstring|>Computes evacuation on the linear extension of a poset. Evacuation on a linear extension `\pi` of length `n` is defined as `\pi (\tau_1 \cdots \tau_{n-1}) (\tau_1 \cdots \tau_{n-2}) \cdots (\tau_1)`. For more details see [Stan2009]_. .. seealso:: :meth:`tau`, :meth:`promotion` EXAMPLES:: sage: P = Poset(([1,2,3,4,5,6,7], [[1,2],[1,4],[2,3],[2,5],[3,6],[4,7],[5,6]])) sage: p = P.linear_extension([1,2,3,4,5,6,7]) sage: p.evacuation() [1, 4, 2, 3, 7, 5, 6] sage: p.evacuation().evacuation() == p True<|endoftext|>
c956decd8c2e1b786b66dd1e176aaeacf6a5b537ff65a4d928b47938e2a98bbb
@staticmethod def __classcall_private__(cls, poset, facade=False): "\n Straighten arguments before unique representation.\n\n TESTS::\n\n sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset\n sage: P = Poset(([1,2],[[1,2]]))\n sage: L = LinearExtensionsOfPoset(P)\n sage: type(L)\n <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category'>\n sage: L is LinearExtensionsOfPoset(P,facade=False)\n True\n " return super(LinearExtensionsOfPoset, cls).__classcall__(cls, poset, facade=facade)
Straighten arguments before unique representation. TESTS:: sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset sage: P = Poset(([1,2],[[1,2]])) sage: L = LinearExtensionsOfPoset(P) sage: type(L) <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category'> sage: L is LinearExtensionsOfPoset(P,facade=False) True
src/sage/combinat/posets/linear_extensions.py
__classcall_private__
robertwb/sage
2
python
@staticmethod def __classcall_private__(cls, poset, facade=False): "\n Straighten arguments before unique representation.\n\n TESTS::\n\n sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset\n sage: P = Poset(([1,2],[[1,2]]))\n sage: L = LinearExtensionsOfPoset(P)\n sage: type(L)\n <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category'>\n sage: L is LinearExtensionsOfPoset(P,facade=False)\n True\n " return super(LinearExtensionsOfPoset, cls).__classcall__(cls, poset, facade=facade)
@staticmethod def __classcall_private__(cls, poset, facade=False): "\n Straighten arguments before unique representation.\n\n TESTS::\n\n sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset\n sage: P = Poset(([1,2],[[1,2]]))\n sage: L = LinearExtensionsOfPoset(P)\n sage: type(L)\n <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category'>\n sage: L is LinearExtensionsOfPoset(P,facade=False)\n True\n " return super(LinearExtensionsOfPoset, cls).__classcall__(cls, poset, facade=facade)<|docstring|>Straighten arguments before unique representation. TESTS:: sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset sage: P = Poset(([1,2],[[1,2]])) sage: L = LinearExtensionsOfPoset(P) sage: type(L) <class 'sage.combinat.posets.linear_extensions.LinearExtensionsOfPoset_with_category'> sage: L is LinearExtensionsOfPoset(P,facade=False) True<|endoftext|>
ee4a77103697bbf4ced5a5a9fdb153b47f2ec35c0d96227ff52dcd195fab96c1
def __init__(self, poset, facade): '\n TESTS::\n\n sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset\n sage: P = Poset(([1,2,3],[[1,2],[1,3]]))\n sage: L = P.linear_extensions()\n sage: L is LinearExtensionsOfPoset(P)\n True\n sage: L._poset is P\n True\n sage: L._linear_extensions_of_hasse_diagram\n Linear extensions of Hasse diagram of a poset containing 3 elements\n sage: TestSuite(L).run()\n\n sage: P = Poset((divisors(15), attrcall("divides")))\n sage: L = P.linear_extensions()\n sage: TestSuite(L).run()\n\n sage: P = Poset((divisors(15), attrcall("divides")), facade=True)\n sage: L = P.linear_extensions()\n sage: TestSuite(L).run()\n\n sage: L = P.linear_extensions(facade = True)\n sage: TestSuite(L).run(skip="_test_an_element")\n ' self._poset = poset self._linear_extensions_of_hasse_diagram = sage.graphs.linearextensions.LinearExtensions(poset._hasse_diagram) self._is_facade = facade if facade: facade = (list,) Parent.__init__(self, category=FiniteEnumeratedSets(), facade=facade)
TESTS:: sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset sage: P = Poset(([1,2,3],[[1,2],[1,3]])) sage: L = P.linear_extensions() sage: L is LinearExtensionsOfPoset(P) True sage: L._poset is P True sage: L._linear_extensions_of_hasse_diagram Linear extensions of Hasse diagram of a poset containing 3 elements sage: TestSuite(L).run() sage: P = Poset((divisors(15), attrcall("divides"))) sage: L = P.linear_extensions() sage: TestSuite(L).run() sage: P = Poset((divisors(15), attrcall("divides")), facade=True) sage: L = P.linear_extensions() sage: TestSuite(L).run() sage: L = P.linear_extensions(facade = True) sage: TestSuite(L).run(skip="_test_an_element")
src/sage/combinat/posets/linear_extensions.py
__init__
robertwb/sage
2
python
def __init__(self, poset, facade): '\n TESTS::\n\n sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset\n sage: P = Poset(([1,2,3],[[1,2],[1,3]]))\n sage: L = P.linear_extensions()\n sage: L is LinearExtensionsOfPoset(P)\n True\n sage: L._poset is P\n True\n sage: L._linear_extensions_of_hasse_diagram\n Linear extensions of Hasse diagram of a poset containing 3 elements\n sage: TestSuite(L).run()\n\n sage: P = Poset((divisors(15), attrcall("divides")))\n sage: L = P.linear_extensions()\n sage: TestSuite(L).run()\n\n sage: P = Poset((divisors(15), attrcall("divides")), facade=True)\n sage: L = P.linear_extensions()\n sage: TestSuite(L).run()\n\n sage: L = P.linear_extensions(facade = True)\n sage: TestSuite(L).run(skip="_test_an_element")\n ' self._poset = poset self._linear_extensions_of_hasse_diagram = sage.graphs.linearextensions.LinearExtensions(poset._hasse_diagram) self._is_facade = facade if facade: facade = (list,) Parent.__init__(self, category=FiniteEnumeratedSets(), facade=facade)
def __init__(self, poset, facade): '\n TESTS::\n\n sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset\n sage: P = Poset(([1,2,3],[[1,2],[1,3]]))\n sage: L = P.linear_extensions()\n sage: L is LinearExtensionsOfPoset(P)\n True\n sage: L._poset is P\n True\n sage: L._linear_extensions_of_hasse_diagram\n Linear extensions of Hasse diagram of a poset containing 3 elements\n sage: TestSuite(L).run()\n\n sage: P = Poset((divisors(15), attrcall("divides")))\n sage: L = P.linear_extensions()\n sage: TestSuite(L).run()\n\n sage: P = Poset((divisors(15), attrcall("divides")), facade=True)\n sage: L = P.linear_extensions()\n sage: TestSuite(L).run()\n\n sage: L = P.linear_extensions(facade = True)\n sage: TestSuite(L).run(skip="_test_an_element")\n ' self._poset = poset self._linear_extensions_of_hasse_diagram = sage.graphs.linearextensions.LinearExtensions(poset._hasse_diagram) self._is_facade = facade if facade: facade = (list,) Parent.__init__(self, category=FiniteEnumeratedSets(), facade=facade)<|docstring|>TESTS:: sage: from sage.combinat.posets.linear_extensions import LinearExtensionsOfPoset sage: P = Poset(([1,2,3],[[1,2],[1,3]])) sage: L = P.linear_extensions() sage: L is LinearExtensionsOfPoset(P) True sage: L._poset is P True sage: L._linear_extensions_of_hasse_diagram Linear extensions of Hasse diagram of a poset containing 3 elements sage: TestSuite(L).run() sage: P = Poset((divisors(15), attrcall("divides"))) sage: L = P.linear_extensions() sage: TestSuite(L).run() sage: P = Poset((divisors(15), attrcall("divides")), facade=True) sage: L = P.linear_extensions() sage: TestSuite(L).run() sage: L = P.linear_extensions(facade = True) sage: TestSuite(L).run(skip="_test_an_element")<|endoftext|>
bc25d7c21ac2d1d91da314bef53cee73791cb84ef714a9a1b33861bc3dfe3936
def _repr_(self): '\n TESTS::\n\n sage: P = Poset(([1,2,3],[[1,2],[1,3]]))\n sage: P.linear_extensions()\n The set of all linear extensions of Finite poset containing 3 elements\n ' return ('The set of all linear extensions of %s' % self._poset)
TESTS:: sage: P = Poset(([1,2,3],[[1,2],[1,3]])) sage: P.linear_extensions() The set of all linear extensions of Finite poset containing 3 elements
src/sage/combinat/posets/linear_extensions.py
_repr_
robertwb/sage
2
python
def _repr_(self): '\n TESTS::\n\n sage: P = Poset(([1,2,3],[[1,2],[1,3]]))\n sage: P.linear_extensions()\n The set of all linear extensions of Finite poset containing 3 elements\n ' return ('The set of all linear extensions of %s' % self._poset)
def _repr_(self): '\n TESTS::\n\n sage: P = Poset(([1,2,3],[[1,2],[1,3]]))\n sage: P.linear_extensions()\n The set of all linear extensions of Finite poset containing 3 elements\n ' return ('The set of all linear extensions of %s' % self._poset)<|docstring|>TESTS:: sage: P = Poset(([1,2,3],[[1,2],[1,3]])) sage: P.linear_extensions() The set of all linear extensions of Finite poset containing 3 elements<|endoftext|>
87690d79bee5fc7081f108755dc0d1cdd41a198df0547185141021c950d87631
def poset(self): '\n Returns the underlying original poset.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]]))\n sage: L = P.linear_extensions()\n sage: L.poset()\n Finite poset containing 4 elements\n ' return self._poset
Returns the underlying original poset. EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]])) sage: L = P.linear_extensions() sage: L.poset() Finite poset containing 4 elements
src/sage/combinat/posets/linear_extensions.py
poset
robertwb/sage
2
python
def poset(self): '\n Returns the underlying original poset.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]]))\n sage: L = P.linear_extensions()\n sage: L.poset()\n Finite poset containing 4 elements\n ' return self._poset
def poset(self): '\n Returns the underlying original poset.\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]]))\n sage: L = P.linear_extensions()\n sage: L.poset()\n Finite poset containing 4 elements\n ' return self._poset<|docstring|>Returns the underlying original poset. EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,2],[2,3],[1,4]])) sage: L = P.linear_extensions() sage: L.poset() Finite poset containing 4 elements<|endoftext|>
8d07c44fec5582b5c5a996191db5c06227bd6530e9b918a67910494c9a170a68
def cardinality(self): '\n Return the number of linear extensions.\n\n EXAMPLES::\n\n sage: N = Poset({0: [2, 3], 1: [3]})\n sage: N.linear_extensions().cardinality()\n 5\n\n TESTS::\n\n sage: Poset().linear_extensions().cardinality()\n 1\n sage: Posets.ChainPoset(1).linear_extensions().cardinality()\n 1\n ' from sage.rings.integer import Integer H = self._poset.order_ideals_lattice(as_ideals=False)._hasse_diagram L = H.level_sets() c = ([0] * H.order()) for l in L[0]: c[l] = 1 for lev in L[1:]: for l in lev: c[l] = sum((c[i] for i in H.lower_covers_iterator(l))) return Integer(sum((c[i] for i in H.sinks())))
Return the number of linear extensions. EXAMPLES:: sage: N = Poset({0: [2, 3], 1: [3]}) sage: N.linear_extensions().cardinality() 5 TESTS:: sage: Poset().linear_extensions().cardinality() 1 sage: Posets.ChainPoset(1).linear_extensions().cardinality() 1
src/sage/combinat/posets/linear_extensions.py
cardinality
robertwb/sage
2
python
def cardinality(self): '\n Return the number of linear extensions.\n\n EXAMPLES::\n\n sage: N = Poset({0: [2, 3], 1: [3]})\n sage: N.linear_extensions().cardinality()\n 5\n\n TESTS::\n\n sage: Poset().linear_extensions().cardinality()\n 1\n sage: Posets.ChainPoset(1).linear_extensions().cardinality()\n 1\n ' from sage.rings.integer import Integer H = self._poset.order_ideals_lattice(as_ideals=False)._hasse_diagram L = H.level_sets() c = ([0] * H.order()) for l in L[0]: c[l] = 1 for lev in L[1:]: for l in lev: c[l] = sum((c[i] for i in H.lower_covers_iterator(l))) return Integer(sum((c[i] for i in H.sinks())))
def cardinality(self): '\n Return the number of linear extensions.\n\n EXAMPLES::\n\n sage: N = Poset({0: [2, 3], 1: [3]})\n sage: N.linear_extensions().cardinality()\n 5\n\n TESTS::\n\n sage: Poset().linear_extensions().cardinality()\n 1\n sage: Posets.ChainPoset(1).linear_extensions().cardinality()\n 1\n ' from sage.rings.integer import Integer H = self._poset.order_ideals_lattice(as_ideals=False)._hasse_diagram L = H.level_sets() c = ([0] * H.order()) for l in L[0]: c[l] = 1 for lev in L[1:]: for l in lev: c[l] = sum((c[i] for i in H.lower_covers_iterator(l))) return Integer(sum((c[i] for i in H.sinks())))<|docstring|>Return the number of linear extensions. EXAMPLES:: sage: N = Poset({0: [2, 3], 1: [3]}) sage: N.linear_extensions().cardinality() 5 TESTS:: sage: Poset().linear_extensions().cardinality() 1 sage: Posets.ChainPoset(1).linear_extensions().cardinality() 1<|endoftext|>
6878aac877a43c150f95dc64c8894b13fccd589b684f8d870aa406ba03f79329
def __iter__(self): '\n Iterates through the linear extensions of the underlying poset.\n\n EXAMPLES::\n\n sage: elms = [1,2,3,4]\n sage: rels = [[1,3],[1,4],[2,3]]\n sage: P = Poset((elms, rels), linear_extension=True)\n sage: L = P.linear_extensions()\n sage: list(L)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n ' vertex_to_element = self._poset._vertex_to_element for lin_ext in self._linear_extensions_of_hasse_diagram: (yield self._element_constructor_([vertex_to_element(_) for _ in lin_ext]))
Iterates through the linear extensions of the underlying poset. EXAMPLES:: sage: elms = [1,2,3,4] sage: rels = [[1,3],[1,4],[2,3]] sage: P = Poset((elms, rels), linear_extension=True) sage: L = P.linear_extensions() sage: list(L) [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]
src/sage/combinat/posets/linear_extensions.py
__iter__
robertwb/sage
2
python
def __iter__(self): '\n Iterates through the linear extensions of the underlying poset.\n\n EXAMPLES::\n\n sage: elms = [1,2,3,4]\n sage: rels = [[1,3],[1,4],[2,3]]\n sage: P = Poset((elms, rels), linear_extension=True)\n sage: L = P.linear_extensions()\n sage: list(L)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n ' vertex_to_element = self._poset._vertex_to_element for lin_ext in self._linear_extensions_of_hasse_diagram: (yield self._element_constructor_([vertex_to_element(_) for _ in lin_ext]))
def __iter__(self): '\n Iterates through the linear extensions of the underlying poset.\n\n EXAMPLES::\n\n sage: elms = [1,2,3,4]\n sage: rels = [[1,3],[1,4],[2,3]]\n sage: P = Poset((elms, rels), linear_extension=True)\n sage: L = P.linear_extensions()\n sage: list(L)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n ' vertex_to_element = self._poset._vertex_to_element for lin_ext in self._linear_extensions_of_hasse_diagram: (yield self._element_constructor_([vertex_to_element(_) for _ in lin_ext]))<|docstring|>Iterates through the linear extensions of the underlying poset. EXAMPLES:: sage: elms = [1,2,3,4] sage: rels = [[1,3],[1,4],[2,3]] sage: P = Poset((elms, rels), linear_extension=True) sage: L = P.linear_extensions() sage: list(L) [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]<|endoftext|>
64e8252b650f2bbbc9413ceb06de19ea44d57bcc2cbb19eb60c1d38d941ee08c
def __contains__(self, obj): '\n Membership testing\n\n EXAMPLES::\n\n sage: P = Poset((divisors(12), attrcall("divides")), facade=True, linear_extension=True)\n sage: P.list()\n [1, 2, 3, 4, 6, 12]\n sage: L = P.linear_extensions()\n sage: L([1, 2, 4, 3, 6, 12]) in L\n True\n sage: [1, 2, 4, 3, 6, 12] in L\n False\n\n sage: L = P.linear_extensions(facade=True)\n sage: [1, 2, 4, 3, 6, 12] in L\n True\n sage: [1, 3, 2, 6, 4, 12] in L\n True\n sage: [1, 3, 6, 2, 4, 12] in L\n False\n\n sage: [p for p in Permutations(list(P)) if list(p) in L]\n [[1, 2, 3, 4, 6, 12], [1, 2, 3, 6, 4, 12], [1, 2, 4, 3, 6, 12], [1, 3, 2, 4, 6, 12], [1, 3, 2, 6, 4, 12]]\n\n ' if (not self._is_facade): return super(LinearExtensionsOfPoset, self).__contains__(obj) return (isinstance(obj, (list, tuple)) and self.poset().is_linear_extension(obj))
Membership testing EXAMPLES:: sage: P = Poset((divisors(12), attrcall("divides")), facade=True, linear_extension=True) sage: P.list() [1, 2, 3, 4, 6, 12] sage: L = P.linear_extensions() sage: L([1, 2, 4, 3, 6, 12]) in L True sage: [1, 2, 4, 3, 6, 12] in L False sage: L = P.linear_extensions(facade=True) sage: [1, 2, 4, 3, 6, 12] in L True sage: [1, 3, 2, 6, 4, 12] in L True sage: [1, 3, 6, 2, 4, 12] in L False sage: [p for p in Permutations(list(P)) if list(p) in L] [[1, 2, 3, 4, 6, 12], [1, 2, 3, 6, 4, 12], [1, 2, 4, 3, 6, 12], [1, 3, 2, 4, 6, 12], [1, 3, 2, 6, 4, 12]]
src/sage/combinat/posets/linear_extensions.py
__contains__
robertwb/sage
2
python
def __contains__(self, obj): '\n Membership testing\n\n EXAMPLES::\n\n sage: P = Poset((divisors(12), attrcall("divides")), facade=True, linear_extension=True)\n sage: P.list()\n [1, 2, 3, 4, 6, 12]\n sage: L = P.linear_extensions()\n sage: L([1, 2, 4, 3, 6, 12]) in L\n True\n sage: [1, 2, 4, 3, 6, 12] in L\n False\n\n sage: L = P.linear_extensions(facade=True)\n sage: [1, 2, 4, 3, 6, 12] in L\n True\n sage: [1, 3, 2, 6, 4, 12] in L\n True\n sage: [1, 3, 6, 2, 4, 12] in L\n False\n\n sage: [p for p in Permutations(list(P)) if list(p) in L]\n [[1, 2, 3, 4, 6, 12], [1, 2, 3, 6, 4, 12], [1, 2, 4, 3, 6, 12], [1, 3, 2, 4, 6, 12], [1, 3, 2, 6, 4, 12]]\n\n ' if (not self._is_facade): return super(LinearExtensionsOfPoset, self).__contains__(obj) return (isinstance(obj, (list, tuple)) and self.poset().is_linear_extension(obj))
def __contains__(self, obj): '\n Membership testing\n\n EXAMPLES::\n\n sage: P = Poset((divisors(12), attrcall("divides")), facade=True, linear_extension=True)\n sage: P.list()\n [1, 2, 3, 4, 6, 12]\n sage: L = P.linear_extensions()\n sage: L([1, 2, 4, 3, 6, 12]) in L\n True\n sage: [1, 2, 4, 3, 6, 12] in L\n False\n\n sage: L = P.linear_extensions(facade=True)\n sage: [1, 2, 4, 3, 6, 12] in L\n True\n sage: [1, 3, 2, 6, 4, 12] in L\n True\n sage: [1, 3, 6, 2, 4, 12] in L\n False\n\n sage: [p for p in Permutations(list(P)) if list(p) in L]\n [[1, 2, 3, 4, 6, 12], [1, 2, 3, 6, 4, 12], [1, 2, 4, 3, 6, 12], [1, 3, 2, 4, 6, 12], [1, 3, 2, 6, 4, 12]]\n\n ' if (not self._is_facade): return super(LinearExtensionsOfPoset, self).__contains__(obj) return (isinstance(obj, (list, tuple)) and self.poset().is_linear_extension(obj))<|docstring|>Membership testing EXAMPLES:: sage: P = Poset((divisors(12), attrcall("divides")), facade=True, linear_extension=True) sage: P.list() [1, 2, 3, 4, 6, 12] sage: L = P.linear_extensions() sage: L([1, 2, 4, 3, 6, 12]) in L True sage: [1, 2, 4, 3, 6, 12] in L False sage: L = P.linear_extensions(facade=True) sage: [1, 2, 4, 3, 6, 12] in L True sage: [1, 3, 2, 6, 4, 12] in L True sage: [1, 3, 6, 2, 4, 12] in L False sage: [p for p in Permutations(list(P)) if list(p) in L] [[1, 2, 3, 4, 6, 12], [1, 2, 3, 6, 4, 12], [1, 2, 4, 3, 6, 12], [1, 3, 2, 4, 6, 12], [1, 3, 2, 6, 4, 12]]<|endoftext|>
6d1cf269b77a00e6c8c7ef74f6998b071e4dcfa751a51fcb632d468b0f4cec11
def markov_chain_digraph(self, action='promotion', labeling='identity'): "\n Returns the digraph of the action of generalized promotion or tau on ``self``\n\n INPUT:\n\n - ``action`` -- 'promotion' or 'tau' (default: 'promotion')\n - ``labeling`` -- 'identity' or 'source' (default: 'identity')\n\n .. todo::\n\n - generalize this feature by accepting a family of operators as input\n - move up in some appropriate category\n\n This method creates a graph with vertices being the linear extensions of a given finite\n poset and an edge from `\\pi` to `\\pi'` if `\\pi' = \\pi \\partial_i` where `\\partial_i` is\n the promotion operator (see :meth:`promotion`) if ``action`` is set to ``promotion``\n and `\\tau_i` (see :meth:`tau`) if ``action`` is set to ``tau``. The label of the edge\n is `i` (resp. `\\pi_i`) if ``labeling`` is set to ``identity`` (resp. ``source``).\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True)\n sage: L = P.linear_extensions()\n sage: G = L.markov_chain_digraph(); G\n Looped multi-digraph on 5 vertices\n sage: sorted(G.vertices(), key = repr)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n sage: sorted(G.edges(), key = repr)\n [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3),\n ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 2, 4, 3], 4),\n ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1),\n ([1, 4, 2, 3], [1, 2, 3, 4], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3), ([1, 4, 2, 3], [1, 4, 2, 3], 4),\n ([2, 1, 3, 4], [1, 2, 4, 3], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 2),\n ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 2),\n ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 4)]\n\n sage: G = L.markov_chain_digraph(labeling = 'source')\n sage: sorted(G.vertices(), key = 
repr)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n sage: sorted(G.edges(), key = repr)\n [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3),\n ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 4), ([1, 2, 4, 3], [1, 2, 4, 3], 3),\n ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1),\n ([1, 4, 2, 3], [1, 2, 3, 4], 4), ([1, 4, 2, 3], [1, 4, 2, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3),\n ([2, 1, 3, 4], [1, 2, 4, 3], 2), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 1),\n ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 2), ([2, 1, 4, 3], [2, 1, 3, 4], 1),\n ([2, 1, 4, 3], [2, 1, 3, 4], 4), ([2, 1, 4, 3], [2, 1, 4, 3], 3)]\n\n The edges of the graph are by default colored using blue for\n edge 1, red for edge 2, green for edge 3, and yellow for edge 0000:0000:0000:0000:0000:0000:0000:0000\n\n sage: view(G) # optional - dot2tex graphviz, not tested (opens external window)\n\n Alternatively, one may get the graph of the action of the ``tau`` operator::\n\n sage: G = L.markov_chain_digraph(action='tau'); G\n Looped multi-digraph on 5 vertices\n sage: sorted(G.vertices(), key = repr)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n sage: sorted(G.edges(), key = repr)\n [([1, 2, 3, 4], [1, 2, 3, 4], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3), ([1, 2, 3, 4], [2, 1, 3, 4], 1),\n ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 4, 3], 1),\n ([1, 4, 2, 3], [1, 2, 4, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 1), ([1, 4, 2, 3], [1, 4, 2, 3], 3),\n ([2, 1, 3, 4], [1, 2, 3, 4], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 2), ([2, 1, 3, 4], [2, 1, 4, 3], 3),\n ([2, 1, 4, 3], [1, 2, 4, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 2)]\n sage: view(G) # optional - dot2tex graphviz, not tested (opens external window)\n\n .. 
seealso:: :meth:`markov_chain_transition_matrix`, :meth:`promotion`, :meth:`tau`\n\n TESTS::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True, facade = True)\n sage: L = P.linear_extensions()\n sage: G = L.markov_chain_digraph(labeling = 'source'); G\n Looped multi-digraph on 5 vertices\n " d = dict(([x, dict(([y, []] for y in self))] for x in self)) if (action == 'promotion'): R = list(range(self.poset().cardinality())) else: R = list(range((self.poset().cardinality() - 1))) if (labeling == 'source'): for x in self: for i in R: child = getattr(x, action)((i + 1)) d[x][child] += [self.poset().unwrap(x[i])] else: for x in self: for i in R: child = getattr(x, action)((i + 1)) d[x][child] += [(i + 1)] G = DiGraph(d, format='dict_of_dicts') if have_dot2tex(): G.set_latex_options(format='dot2tex', edge_labels=True, color_by_label={1: 'blue', 2: 'red', 3: 'green', 4: 'yellow'}) return G
Returns the digraph of the action of generalized promotion or tau on ``self`` INPUT: - ``action`` -- 'promotion' or 'tau' (default: 'promotion') - ``labeling`` -- 'identity' or 'source' (default: 'identity') .. todo:: - generalize this feature by accepting a family of operators as input - move up in some appropriate category This method creates a graph with vertices being the linear extensions of a given finite poset and an edge from `\pi` to `\pi'` if `\pi' = \pi \partial_i` where `\partial_i` is the promotion operator (see :meth:`promotion`) if ``action`` is set to ``promotion`` and `\tau_i` (see :meth:`tau`) if ``action`` is set to ``tau``. The label of the edge is `i` (resp. `\pi_i`) if ``labeling`` is set to ``identity`` (resp. ``source``). EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True) sage: L = P.linear_extensions() sage: G = L.markov_chain_digraph(); G Looped multi-digraph on 5 vertices sage: sorted(G.vertices(), key = repr) [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]] sage: sorted(G.edges(), key = repr) [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3), ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 2, 4, 3], 4), ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3), ([1, 4, 2, 3], [1, 4, 2, 3], 4), ([2, 1, 3, 4], [1, 2, 4, 3], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 2), ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 2), ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 4)] sage: G = L.markov_chain_digraph(labeling = 'source') sage: sorted(G.vertices(), key = repr) [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]] sage: sorted(G.edges(), key = repr) [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 
4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3), ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 4), ([1, 2, 4, 3], [1, 2, 4, 3], 3), ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 4), ([1, 4, 2, 3], [1, 4, 2, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3), ([2, 1, 3, 4], [1, 2, 4, 3], 2), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 1), ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 2), ([2, 1, 4, 3], [2, 1, 3, 4], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 4), ([2, 1, 4, 3], [2, 1, 4, 3], 3)] The edges of the graph are by default colored using blue for edge 1, red for edge 2, green for edge 3, and yellow for edge 0000:0000:0000:0000:0000:0000:0000:0000 sage: view(G) # optional - dot2tex graphviz, not tested (opens external window) Alternatively, one may get the graph of the action of the ``tau`` operator:: sage: G = L.markov_chain_digraph(action='tau'); G Looped multi-digraph on 5 vertices sage: sorted(G.vertices(), key = repr) [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]] sage: sorted(G.edges(), key = repr) [([1, 2, 3, 4], [1, 2, 3, 4], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3), ([1, 2, 3, 4], [2, 1, 3, 4], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 4, 3], 1), ([1, 4, 2, 3], [1, 2, 4, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 1), ([1, 4, 2, 3], [1, 4, 2, 3], 3), ([2, 1, 3, 4], [1, 2, 3, 4], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 2), ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 2, 4, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 2)] sage: view(G) # optional - dot2tex graphviz, not tested (opens external window) .. 
seealso:: :meth:`markov_chain_transition_matrix`, :meth:`promotion`, :meth:`tau` TESTS:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True, facade = True) sage: L = P.linear_extensions() sage: G = L.markov_chain_digraph(labeling = 'source'); G Looped multi-digraph on 5 vertices
src/sage/combinat/posets/linear_extensions.py
markov_chain_digraph
robertwb/sage
2
python
def markov_chain_digraph(self, action='promotion', labeling='identity'): "\n Returns the digraph of the action of generalized promotion or tau on ``self``\n\n INPUT:\n\n - ``action`` -- 'promotion' or 'tau' (default: 'promotion')\n - ``labeling`` -- 'identity' or 'source' (default: 'identity')\n\n .. todo::\n\n - generalize this feature by accepting a family of operators as input\n - move up in some appropriate category\n\n This method creates a graph with vertices being the linear extensions of a given finite\n poset and an edge from `\\pi` to `\\pi'` if `\\pi' = \\pi \\partial_i` where `\\partial_i` is\n the promotion operator (see :meth:`promotion`) if ``action`` is set to ``promotion``\n and `\\tau_i` (see :meth:`tau`) if ``action`` is set to ``tau``. The label of the edge\n is `i` (resp. `\\pi_i`) if ``labeling`` is set to ``identity`` (resp. ``source``).\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True)\n sage: L = P.linear_extensions()\n sage: G = L.markov_chain_digraph(); G\n Looped multi-digraph on 5 vertices\n sage: sorted(G.vertices(), key = repr)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n sage: sorted(G.edges(), key = repr)\n [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3),\n ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 2, 4, 3], 4),\n ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1),\n ([1, 4, 2, 3], [1, 2, 3, 4], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3), ([1, 4, 2, 3], [1, 4, 2, 3], 4),\n ([2, 1, 3, 4], [1, 2, 4, 3], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 2),\n ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 2),\n ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 4)]\n\n sage: G = L.markov_chain_digraph(labeling = 'source')\n sage: sorted(G.vertices(), key = 
repr)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n sage: sorted(G.edges(), key = repr)\n [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3),\n ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 4), ([1, 2, 4, 3], [1, 2, 4, 3], 3),\n ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1),\n ([1, 4, 2, 3], [1, 2, 3, 4], 4), ([1, 4, 2, 3], [1, 4, 2, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3),\n ([2, 1, 3, 4], [1, 2, 4, 3], 2), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 1),\n ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 2), ([2, 1, 4, 3], [2, 1, 3, 4], 1),\n ([2, 1, 4, 3], [2, 1, 3, 4], 4), ([2, 1, 4, 3], [2, 1, 4, 3], 3)]\n\n The edges of the graph are by default colored using blue for\n edge 1, red for edge 2, green for edge 3, and yellow for edge 0000:0000:0000:0000:0000:0000:0000:0000\n\n sage: view(G) # optional - dot2tex graphviz, not tested (opens external window)\n\n Alternatively, one may get the graph of the action of the ``tau`` operator::\n\n sage: G = L.markov_chain_digraph(action='tau'); G\n Looped multi-digraph on 5 vertices\n sage: sorted(G.vertices(), key = repr)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n sage: sorted(G.edges(), key = repr)\n [([1, 2, 3, 4], [1, 2, 3, 4], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3), ([1, 2, 3, 4], [2, 1, 3, 4], 1),\n ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 4, 3], 1),\n ([1, 4, 2, 3], [1, 2, 4, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 1), ([1, 4, 2, 3], [1, 4, 2, 3], 3),\n ([2, 1, 3, 4], [1, 2, 3, 4], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 2), ([2, 1, 3, 4], [2, 1, 4, 3], 3),\n ([2, 1, 4, 3], [1, 2, 4, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 2)]\n sage: view(G) # optional - dot2tex graphviz, not tested (opens external window)\n\n .. 
seealso:: :meth:`markov_chain_transition_matrix`, :meth:`promotion`, :meth:`tau`\n\n TESTS::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True, facade = True)\n sage: L = P.linear_extensions()\n sage: G = L.markov_chain_digraph(labeling = 'source'); G\n Looped multi-digraph on 5 vertices\n " d = dict(([x, dict(([y, []] for y in self))] for x in self)) if (action == 'promotion'): R = list(range(self.poset().cardinality())) else: R = list(range((self.poset().cardinality() - 1))) if (labeling == 'source'): for x in self: for i in R: child = getattr(x, action)((i + 1)) d[x][child] += [self.poset().unwrap(x[i])] else: for x in self: for i in R: child = getattr(x, action)((i + 1)) d[x][child] += [(i + 1)] G = DiGraph(d, format='dict_of_dicts') if have_dot2tex(): G.set_latex_options(format='dot2tex', edge_labels=True, color_by_label={1: 'blue', 2: 'red', 3: 'green', 4: 'yellow'}) return G
def markov_chain_digraph(self, action='promotion', labeling='identity'): "\n Returns the digraph of the action of generalized promotion or tau on ``self``\n\n INPUT:\n\n - ``action`` -- 'promotion' or 'tau' (default: 'promotion')\n - ``labeling`` -- 'identity' or 'source' (default: 'identity')\n\n .. todo::\n\n - generalize this feature by accepting a family of operators as input\n - move up in some appropriate category\n\n This method creates a graph with vertices being the linear extensions of a given finite\n poset and an edge from `\\pi` to `\\pi'` if `\\pi' = \\pi \\partial_i` where `\\partial_i` is\n the promotion operator (see :meth:`promotion`) if ``action`` is set to ``promotion``\n and `\\tau_i` (see :meth:`tau`) if ``action`` is set to ``tau``. The label of the edge\n is `i` (resp. `\\pi_i`) if ``labeling`` is set to ``identity`` (resp. ``source``).\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True)\n sage: L = P.linear_extensions()\n sage: G = L.markov_chain_digraph(); G\n Looped multi-digraph on 5 vertices\n sage: sorted(G.vertices(), key = repr)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n sage: sorted(G.edges(), key = repr)\n [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3),\n ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 2, 4, 3], 4),\n ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1),\n ([1, 4, 2, 3], [1, 2, 3, 4], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3), ([1, 4, 2, 3], [1, 4, 2, 3], 4),\n ([2, 1, 3, 4], [1, 2, 4, 3], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 2),\n ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 2),\n ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 4)]\n\n sage: G = L.markov_chain_digraph(labeling = 'source')\n sage: sorted(G.vertices(), key = 
repr)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n sage: sorted(G.edges(), key = repr)\n [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3),\n ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 4), ([1, 2, 4, 3], [1, 2, 4, 3], 3),\n ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1),\n ([1, 4, 2, 3], [1, 2, 3, 4], 4), ([1, 4, 2, 3], [1, 4, 2, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3),\n ([2, 1, 3, 4], [1, 2, 4, 3], 2), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 1),\n ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 2), ([2, 1, 4, 3], [2, 1, 3, 4], 1),\n ([2, 1, 4, 3], [2, 1, 3, 4], 4), ([2, 1, 4, 3], [2, 1, 4, 3], 3)]\n\n The edges of the graph are by default colored using blue for\n edge 1, red for edge 2, green for edge 3, and yellow for edge 0000:0000:0000:0000:0000:0000:0000:0000\n\n sage: view(G) # optional - dot2tex graphviz, not tested (opens external window)\n\n Alternatively, one may get the graph of the action of the ``tau`` operator::\n\n sage: G = L.markov_chain_digraph(action='tau'); G\n Looped multi-digraph on 5 vertices\n sage: sorted(G.vertices(), key = repr)\n [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]]\n sage: sorted(G.edges(), key = repr)\n [([1, 2, 3, 4], [1, 2, 3, 4], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3), ([1, 2, 3, 4], [2, 1, 3, 4], 1),\n ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 4, 3], 1),\n ([1, 4, 2, 3], [1, 2, 4, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 1), ([1, 4, 2, 3], [1, 4, 2, 3], 3),\n ([2, 1, 3, 4], [1, 2, 3, 4], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 2), ([2, 1, 3, 4], [2, 1, 4, 3], 3),\n ([2, 1, 4, 3], [1, 2, 4, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 2)]\n sage: view(G) # optional - dot2tex graphviz, not tested (opens external window)\n\n .. 
seealso:: :meth:`markov_chain_transition_matrix`, :meth:`promotion`, :meth:`tau`\n\n TESTS::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True, facade = True)\n sage: L = P.linear_extensions()\n sage: G = L.markov_chain_digraph(labeling = 'source'); G\n Looped multi-digraph on 5 vertices\n " d = dict(([x, dict(([y, []] for y in self))] for x in self)) if (action == 'promotion'): R = list(range(self.poset().cardinality())) else: R = list(range((self.poset().cardinality() - 1))) if (labeling == 'source'): for x in self: for i in R: child = getattr(x, action)((i + 1)) d[x][child] += [self.poset().unwrap(x[i])] else: for x in self: for i in R: child = getattr(x, action)((i + 1)) d[x][child] += [(i + 1)] G = DiGraph(d, format='dict_of_dicts') if have_dot2tex(): G.set_latex_options(format='dot2tex', edge_labels=True, color_by_label={1: 'blue', 2: 'red', 3: 'green', 4: 'yellow'}) return G<|docstring|>Returns the digraph of the action of generalized promotion or tau on ``self`` INPUT: - ``action`` -- 'promotion' or 'tau' (default: 'promotion') - ``labeling`` -- 'identity' or 'source' (default: 'identity') .. todo:: - generalize this feature by accepting a family of operators as input - move up in some appropriate category This method creates a graph with vertices being the linear extensions of a given finite poset and an edge from `\pi` to `\pi'` if `\pi' = \pi \partial_i` where `\partial_i` is the promotion operator (see :meth:`promotion`) if ``action`` is set to ``promotion`` and `\tau_i` (see :meth:`tau`) if ``action`` is set to ``tau``. The label of the edge is `i` (resp. `\pi_i`) if ``labeling`` is set to ``identity`` (resp. ``source``). 
EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True) sage: L = P.linear_extensions() sage: G = L.markov_chain_digraph(); G Looped multi-digraph on 5 vertices sage: sorted(G.vertices(), key = repr) [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]] sage: sorted(G.edges(), key = repr) [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3), ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 2, 4, 3], 4), ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3), ([1, 4, 2, 3], [1, 4, 2, 3], 4), ([2, 1, 3, 4], [1, 2, 4, 3], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 2), ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 2), ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 4)] sage: G = L.markov_chain_digraph(labeling = 'source') sage: sorted(G.vertices(), key = repr) [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]] sage: sorted(G.edges(), key = repr) [([1, 2, 3, 4], [1, 2, 3, 4], 4), ([1, 2, 3, 4], [1, 2, 4, 3], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3), ([1, 2, 3, 4], [2, 1, 4, 3], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 4), ([1, 2, 4, 3], [1, 2, 4, 3], 3), ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 1), ([1, 4, 2, 3], [1, 2, 3, 4], 4), ([1, 4, 2, 3], [1, 4, 2, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 3), ([2, 1, 3, 4], [1, 2, 4, 3], 2), ([2, 1, 3, 4], [2, 1, 3, 4], 4), ([2, 1, 3, 4], [2, 1, 4, 3], 1), ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 4, 2, 3], 2), ([2, 1, 4, 3], [2, 1, 3, 4], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 4), ([2, 1, 4, 3], [2, 1, 4, 3], 3)] The edges of the graph are by default colored using blue for edge 1, red for edge 2, green for edge 3, and yellow for edge 
0000:0000:0000:0000:0000:0000:0000:0000 sage: view(G) # optional - dot2tex graphviz, not tested (opens external window) Alternatively, one may get the graph of the action of the ``tau`` operator:: sage: G = L.markov_chain_digraph(action='tau'); G Looped multi-digraph on 5 vertices sage: sorted(G.vertices(), key = repr) [[1, 2, 3, 4], [1, 2, 4, 3], [1, 4, 2, 3], [2, 1, 3, 4], [2, 1, 4, 3]] sage: sorted(G.edges(), key = repr) [([1, 2, 3, 4], [1, 2, 3, 4], 2), ([1, 2, 3, 4], [1, 2, 4, 3], 3), ([1, 2, 3, 4], [2, 1, 3, 4], 1), ([1, 2, 4, 3], [1, 2, 3, 4], 3), ([1, 2, 4, 3], [1, 4, 2, 3], 2), ([1, 2, 4, 3], [2, 1, 4, 3], 1), ([1, 4, 2, 3], [1, 2, 4, 3], 2), ([1, 4, 2, 3], [1, 4, 2, 3], 1), ([1, 4, 2, 3], [1, 4, 2, 3], 3), ([2, 1, 3, 4], [1, 2, 3, 4], 1), ([2, 1, 3, 4], [2, 1, 3, 4], 2), ([2, 1, 3, 4], [2, 1, 4, 3], 3), ([2, 1, 4, 3], [1, 2, 4, 3], 1), ([2, 1, 4, 3], [2, 1, 3, 4], 3), ([2, 1, 4, 3], [2, 1, 4, 3], 2)] sage: view(G) # optional - dot2tex graphviz, not tested (opens external window) .. seealso:: :meth:`markov_chain_transition_matrix`, :meth:`promotion`, :meth:`tau` TESTS:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True, facade = True) sage: L = P.linear_extensions() sage: G = L.markov_chain_digraph(labeling = 'source'); G Looped multi-digraph on 5 vertices<|endoftext|>
4770b09c3a0e1850f13bc377ae948b8b312519316b2f4eb0312eee3dd3ddb6bb
def markov_chain_transition_matrix(self, action='promotion', labeling='identity'): "\n Returns the transition matrix of the Markov chain for the action of generalized promotion or tau on ``self``\n\n INPUT:\n\n - ``action`` -- 'promotion' or 'tau' (default: 'promotion')\n - ``labeling`` -- 'identity' or 'source' (default: 'identity')\n\n This method yields the transition matrix of the Markov chain defined by the action of the generalized\n promotion operator `\\partial_i` (resp. `\\tau_i`) on the set of linear extensions of a finite poset.\n Here the transition from the linear extension `\\pi` to `\\pi'`, where `\\pi' = \\pi \\partial_i`\n (resp. `\\pi'= \\pi \\tau_i`) is counted with weight `x_i` (resp. `x_{\\pi_i}` if ``labeling`` is set to ``source``).\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True)\n sage: L = P.linear_extensions()\n sage: L.markov_chain_transition_matrix()\n [-x0 - x1 - x2 x2 x0 + x1 0 0]\n [ x1 + x2 -x0 - x1 - x2 0 x0 0]\n [ 0 x1 -x0 - x1 0 x0]\n [ 0 x0 0 -x0 - x1 - x2 x1 + x2]\n [ x0 0 0 x1 + x2 -x0 - x1 - x2]\n\n sage: L.markov_chain_transition_matrix(labeling = 'source')\n [-x0 - x1 - x2 x3 x0 + x3 0 0]\n [ x1 + x2 -x0 - x1 - x3 0 x1 0]\n [ 0 x1 -x0 - x3 0 x1]\n [ 0 x0 0 -x0 - x1 - x2 x0 + x3]\n [ x0 0 0 x0 + x2 -x0 - x1 - x3]\n\n sage: L.markov_chain_transition_matrix(action = 'tau')\n [ -x0 - x2 x2 0 x0 0]\n [ x2 -x0 - x1 - x2 x1 0 x0]\n [ 0 x1 -x1 0 0]\n [ x0 0 0 -x0 - x2 x2]\n [ 0 x0 0 x2 -x0 - x2]\n\n sage: L.markov_chain_transition_matrix(action = 'tau', labeling = 'source')\n [ -x0 - x2 x3 0 x1 0]\n [ x2 -x0 - x1 - x3 x3 0 x1]\n [ 0 x1 -x3 0 0]\n [ x0 0 0 -x1 - x2 x3]\n [ 0 x0 0 x2 -x1 - x3]\n\n .. 
seealso:: :meth:`markov_chain_digraph`, :meth:`promotion`, :meth:`tau`\n\n " from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing from sage.matrix.constructor import matrix L = self.list() n = self.poset().cardinality() R = PolynomialRing(QQ, 'x', n) x = [R.gen(i) for i in range(n)] l = self.cardinality() M = dict(([(i, j), 0] for i in range(l) for j in range(l))) if (labeling == 'source'): for i in range(l): perm = [self.poset().unwrap(k) for k in L[i]] for j in range((n - 1)): p = getattr(L[i], action)((j + 1)) M[(L.index(p), i)] += x[(perm[j] - 1)] else: for i in range(l): for j in range((n - 1)): p = getattr(L[i], action)((j + 1)) M[(L.index(p), i)] += x[j] for i in range(l): M[(i, i)] += (- sum((M[(j, i)] for j in range(l)))) return matrix(l, l, (lambda x, y: M[(x, y)]))
Returns the transition matrix of the Markov chain for the action of generalized promotion or tau on ``self`` INPUT: - ``action`` -- 'promotion' or 'tau' (default: 'promotion') - ``labeling`` -- 'identity' or 'source' (default: 'identity') This method yields the transition matrix of the Markov chain defined by the action of the generalized promotion operator `\partial_i` (resp. `\tau_i`) on the set of linear extensions of a finite poset. Here the transition from the linear extension `\pi` to `\pi'`, where `\pi' = \pi \partial_i` (resp. `\pi'= \pi \tau_i`) is counted with weight `x_i` (resp. `x_{\pi_i}` if ``labeling`` is set to ``source``). EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True) sage: L = P.linear_extensions() sage: L.markov_chain_transition_matrix() [-x0 - x1 - x2 x2 x0 + x1 0 0] [ x1 + x2 -x0 - x1 - x2 0 x0 0] [ 0 x1 -x0 - x1 0 x0] [ 0 x0 0 -x0 - x1 - x2 x1 + x2] [ x0 0 0 x1 + x2 -x0 - x1 - x2] sage: L.markov_chain_transition_matrix(labeling = 'source') [-x0 - x1 - x2 x3 x0 + x3 0 0] [ x1 + x2 -x0 - x1 - x3 0 x1 0] [ 0 x1 -x0 - x3 0 x1] [ 0 x0 0 -x0 - x1 - x2 x0 + x3] [ x0 0 0 x0 + x2 -x0 - x1 - x3] sage: L.markov_chain_transition_matrix(action = 'tau') [ -x0 - x2 x2 0 x0 0] [ x2 -x0 - x1 - x2 x1 0 x0] [ 0 x1 -x1 0 0] [ x0 0 0 -x0 - x2 x2] [ 0 x0 0 x2 -x0 - x2] sage: L.markov_chain_transition_matrix(action = 'tau', labeling = 'source') [ -x0 - x2 x3 0 x1 0] [ x2 -x0 - x1 - x3 x3 0 x1] [ 0 x1 -x3 0 0] [ x0 0 0 -x1 - x2 x3] [ 0 x0 0 x2 -x1 - x3] .. seealso:: :meth:`markov_chain_digraph`, :meth:`promotion`, :meth:`tau`
src/sage/combinat/posets/linear_extensions.py
markov_chain_transition_matrix
robertwb/sage
2
python
def markov_chain_transition_matrix(self, action='promotion', labeling='identity'): "\n Returns the transition matrix of the Markov chain for the action of generalized promotion or tau on ``self``\n\n INPUT:\n\n - ``action`` -- 'promotion' or 'tau' (default: 'promotion')\n - ``labeling`` -- 'identity' or 'source' (default: 'identity')\n\n This method yields the transition matrix of the Markov chain defined by the action of the generalized\n promotion operator `\\partial_i` (resp. `\\tau_i`) on the set of linear extensions of a finite poset.\n Here the transition from the linear extension `\\pi` to `\\pi'`, where `\\pi' = \\pi \\partial_i`\n (resp. `\\pi'= \\pi \\tau_i`) is counted with weight `x_i` (resp. `x_{\\pi_i}` if ``labeling`` is set to ``source``).\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True)\n sage: L = P.linear_extensions()\n sage: L.markov_chain_transition_matrix()\n [-x0 - x1 - x2 x2 x0 + x1 0 0]\n [ x1 + x2 -x0 - x1 - x2 0 x0 0]\n [ 0 x1 -x0 - x1 0 x0]\n [ 0 x0 0 -x0 - x1 - x2 x1 + x2]\n [ x0 0 0 x1 + x2 -x0 - x1 - x2]\n\n sage: L.markov_chain_transition_matrix(labeling = 'source')\n [-x0 - x1 - x2 x3 x0 + x3 0 0]\n [ x1 + x2 -x0 - x1 - x3 0 x1 0]\n [ 0 x1 -x0 - x3 0 x1]\n [ 0 x0 0 -x0 - x1 - x2 x0 + x3]\n [ x0 0 0 x0 + x2 -x0 - x1 - x3]\n\n sage: L.markov_chain_transition_matrix(action = 'tau')\n [ -x0 - x2 x2 0 x0 0]\n [ x2 -x0 - x1 - x2 x1 0 x0]\n [ 0 x1 -x1 0 0]\n [ x0 0 0 -x0 - x2 x2]\n [ 0 x0 0 x2 -x0 - x2]\n\n sage: L.markov_chain_transition_matrix(action = 'tau', labeling = 'source')\n [ -x0 - x2 x3 0 x1 0]\n [ x2 -x0 - x1 - x3 x3 0 x1]\n [ 0 x1 -x3 0 0]\n [ x0 0 0 -x1 - x2 x3]\n [ 0 x0 0 x2 -x1 - x3]\n\n .. 
seealso:: :meth:`markov_chain_digraph`, :meth:`promotion`, :meth:`tau`\n\n " from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing from sage.matrix.constructor import matrix L = self.list() n = self.poset().cardinality() R = PolynomialRing(QQ, 'x', n) x = [R.gen(i) for i in range(n)] l = self.cardinality() M = dict(([(i, j), 0] for i in range(l) for j in range(l))) if (labeling == 'source'): for i in range(l): perm = [self.poset().unwrap(k) for k in L[i]] for j in range((n - 1)): p = getattr(L[i], action)((j + 1)) M[(L.index(p), i)] += x[(perm[j] - 1)] else: for i in range(l): for j in range((n - 1)): p = getattr(L[i], action)((j + 1)) M[(L.index(p), i)] += x[j] for i in range(l): M[(i, i)] += (- sum((M[(j, i)] for j in range(l)))) return matrix(l, l, (lambda x, y: M[(x, y)]))
def markov_chain_transition_matrix(self, action='promotion', labeling='identity'): "\n Returns the transition matrix of the Markov chain for the action of generalized promotion or tau on ``self``\n\n INPUT:\n\n - ``action`` -- 'promotion' or 'tau' (default: 'promotion')\n - ``labeling`` -- 'identity' or 'source' (default: 'identity')\n\n This method yields the transition matrix of the Markov chain defined by the action of the generalized\n promotion operator `\\partial_i` (resp. `\\tau_i`) on the set of linear extensions of a finite poset.\n Here the transition from the linear extension `\\pi` to `\\pi'`, where `\\pi' = \\pi \\partial_i`\n (resp. `\\pi'= \\pi \\tau_i`) is counted with weight `x_i` (resp. `x_{\\pi_i}` if ``labeling`` is set to ``source``).\n\n EXAMPLES::\n\n sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True)\n sage: L = P.linear_extensions()\n sage: L.markov_chain_transition_matrix()\n [-x0 - x1 - x2 x2 x0 + x1 0 0]\n [ x1 + x2 -x0 - x1 - x2 0 x0 0]\n [ 0 x1 -x0 - x1 0 x0]\n [ 0 x0 0 -x0 - x1 - x2 x1 + x2]\n [ x0 0 0 x1 + x2 -x0 - x1 - x2]\n\n sage: L.markov_chain_transition_matrix(labeling = 'source')\n [-x0 - x1 - x2 x3 x0 + x3 0 0]\n [ x1 + x2 -x0 - x1 - x3 0 x1 0]\n [ 0 x1 -x0 - x3 0 x1]\n [ 0 x0 0 -x0 - x1 - x2 x0 + x3]\n [ x0 0 0 x0 + x2 -x0 - x1 - x3]\n\n sage: L.markov_chain_transition_matrix(action = 'tau')\n [ -x0 - x2 x2 0 x0 0]\n [ x2 -x0 - x1 - x2 x1 0 x0]\n [ 0 x1 -x1 0 0]\n [ x0 0 0 -x0 - x2 x2]\n [ 0 x0 0 x2 -x0 - x2]\n\n sage: L.markov_chain_transition_matrix(action = 'tau', labeling = 'source')\n [ -x0 - x2 x3 0 x1 0]\n [ x2 -x0 - x1 - x3 x3 0 x1]\n [ 0 x1 -x3 0 0]\n [ x0 0 0 -x1 - x2 x3]\n [ 0 x0 0 x2 -x1 - x3]\n\n .. 
seealso:: :meth:`markov_chain_digraph`, :meth:`promotion`, :meth:`tau`\n\n " from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing from sage.matrix.constructor import matrix L = self.list() n = self.poset().cardinality() R = PolynomialRing(QQ, 'x', n) x = [R.gen(i) for i in range(n)] l = self.cardinality() M = dict(([(i, j), 0] for i in range(l) for j in range(l))) if (labeling == 'source'): for i in range(l): perm = [self.poset().unwrap(k) for k in L[i]] for j in range((n - 1)): p = getattr(L[i], action)((j + 1)) M[(L.index(p), i)] += x[(perm[j] - 1)] else: for i in range(l): for j in range((n - 1)): p = getattr(L[i], action)((j + 1)) M[(L.index(p), i)] += x[j] for i in range(l): M[(i, i)] += (- sum((M[(j, i)] for j in range(l)))) return matrix(l, l, (lambda x, y: M[(x, y)]))<|docstring|>Returns the transition matrix of the Markov chain for the action of generalized promotion or tau on ``self`` INPUT: - ``action`` -- 'promotion' or 'tau' (default: 'promotion') - ``labeling`` -- 'identity' or 'source' (default: 'identity') This method yields the transition matrix of the Markov chain defined by the action of the generalized promotion operator `\partial_i` (resp. `\tau_i`) on the set of linear extensions of a finite poset. Here the transition from the linear extension `\pi` to `\pi'`, where `\pi' = \pi \partial_i` (resp. `\pi'= \pi \tau_i`) is counted with weight `x_i` (resp. `x_{\pi_i}` if ``labeling`` is set to ``source``). 
EXAMPLES:: sage: P = Poset(([1,2,3,4], [[1,3],[1,4],[2,3]]), linear_extension = True) sage: L = P.linear_extensions() sage: L.markov_chain_transition_matrix() [-x0 - x1 - x2 x2 x0 + x1 0 0] [ x1 + x2 -x0 - x1 - x2 0 x0 0] [ 0 x1 -x0 - x1 0 x0] [ 0 x0 0 -x0 - x1 - x2 x1 + x2] [ x0 0 0 x1 + x2 -x0 - x1 - x2] sage: L.markov_chain_transition_matrix(labeling = 'source') [-x0 - x1 - x2 x3 x0 + x3 0 0] [ x1 + x2 -x0 - x1 - x3 0 x1 0] [ 0 x1 -x0 - x3 0 x1] [ 0 x0 0 -x0 - x1 - x2 x0 + x3] [ x0 0 0 x0 + x2 -x0 - x1 - x3] sage: L.markov_chain_transition_matrix(action = 'tau') [ -x0 - x2 x2 0 x0 0] [ x2 -x0 - x1 - x2 x1 0 x0] [ 0 x1 -x1 0 0] [ x0 0 0 -x0 - x2 x2] [ 0 x0 0 x2 -x0 - x2] sage: L.markov_chain_transition_matrix(action = 'tau', labeling = 'source') [ -x0 - x2 x3 0 x1 0] [ x2 -x0 - x1 - x3 x3 0 x1] [ 0 x1 -x3 0 0] [ x0 0 0 -x1 - x2 x3] [ 0 x0 0 x2 -x1 - x3] .. seealso:: :meth:`markov_chain_digraph`, :meth:`promotion`, :meth:`tau`<|endoftext|>
6711cf90864051e2288e3333815755ed2cfadc457ddbbaba01698357cb45c96c
def _element_constructor_(self, lst, check=True): '\n Constructor for elements of this class.\n\n TESTS::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[1,4],[2,3]]))\n sage: L = P.linear_extensions()\n sage: x = L._element_constructor_([1,2,4,3]); x\n [1, 2, 4, 3]\n sage: x.parent() is L\n True\n\n sage: L._element_constructor_([4,3,2,1])\n Traceback (most recent call last):\n ...\n ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements\n sage: L._element_constructor_([4,3,2,1],check=False)\n [4, 3, 2, 1]\n ' if isinstance(lst, LinearExtensionOfPoset): lst = list(lst) if (not isinstance(lst, (list, tuple))): raise TypeError('input should be a list or tuple') lst = [self._poset(_) for _ in lst] if self._is_facade: return lst else: return self.element_class(self, lst, check)
Constructor for elements of this class. TESTS:: sage: P = Poset(([1,2,3,4], [[1,2],[1,4],[2,3]])) sage: L = P.linear_extensions() sage: x = L._element_constructor_([1,2,4,3]); x [1, 2, 4, 3] sage: x.parent() is L True sage: L._element_constructor_([4,3,2,1]) Traceback (most recent call last): ... ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements sage: L._element_constructor_([4,3,2,1],check=False) [4, 3, 2, 1]
src/sage/combinat/posets/linear_extensions.py
_element_constructor_
robertwb/sage
2
python
def _element_constructor_(self, lst, check=True): '\n Constructor for elements of this class.\n\n TESTS::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[1,4],[2,3]]))\n sage: L = P.linear_extensions()\n sage: x = L._element_constructor_([1,2,4,3]); x\n [1, 2, 4, 3]\n sage: x.parent() is L\n True\n\n sage: L._element_constructor_([4,3,2,1])\n Traceback (most recent call last):\n ...\n ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements\n sage: L._element_constructor_([4,3,2,1],check=False)\n [4, 3, 2, 1]\n ' if isinstance(lst, LinearExtensionOfPoset): lst = list(lst) if (not isinstance(lst, (list, tuple))): raise TypeError('input should be a list or tuple') lst = [self._poset(_) for _ in lst] if self._is_facade: return lst else: return self.element_class(self, lst, check)
def _element_constructor_(self, lst, check=True): '\n Constructor for elements of this class.\n\n TESTS::\n\n sage: P = Poset(([1,2,3,4], [[1,2],[1,4],[2,3]]))\n sage: L = P.linear_extensions()\n sage: x = L._element_constructor_([1,2,4,3]); x\n [1, 2, 4, 3]\n sage: x.parent() is L\n True\n\n sage: L._element_constructor_([4,3,2,1])\n Traceback (most recent call last):\n ...\n ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements\n sage: L._element_constructor_([4,3,2,1],check=False)\n [4, 3, 2, 1]\n ' if isinstance(lst, LinearExtensionOfPoset): lst = list(lst) if (not isinstance(lst, (list, tuple))): raise TypeError('input should be a list or tuple') lst = [self._poset(_) for _ in lst] if self._is_facade: return lst else: return self.element_class(self, lst, check)<|docstring|>Constructor for elements of this class. TESTS:: sage: P = Poset(([1,2,3,4], [[1,2],[1,4],[2,3]])) sage: L = P.linear_extensions() sage: x = L._element_constructor_([1,2,4,3]); x [1, 2, 4, 3] sage: x.parent() is L True sage: L._element_constructor_([4,3,2,1]) Traceback (most recent call last): ... ValueError: [4, 3, 2, 1] is not a linear extension of Finite poset containing 4 elements sage: L._element_constructor_([4,3,2,1],check=False) [4, 3, 2, 1]<|endoftext|>
b5163542d1679be4eff282c08a27fe5a75029d6d64485cecadfe4f1decf9b931
def send_message(self, message): '\n Inserts data from a list of dictionaries into a BigQuery table.\n\n A sample message input that might be passed might be\n [{"num_rows":1, "letter":"a"}, {"num_rows":5, "letter":"g"}]\n for a BigQuery table with "num_rows" and "letter" as\n the respective columns.\n\n Note that the order of the json attributes doesnt matter--\n only that the dictionary contains the correct keys.\n Therefore be sure to verify that the keys included in\n message also exist as columns in the BigQuery table.\n\n :param message: Rows to be inserted into Destination table\n :type message: List(Dict)\n ' hook = BigQueryHook(bigquery_conn_id=self.connection_id) client = Client(project=hook._get_field('project'), credentials=hook._get_credentials()) table = client.get_table(self.table_id) errors = client.insert_rows_json(table, message) if (not errors): print('New rows have been added.') else: print(errors)
Inserts data from a list of dictionaries into a BigQuery table. A sample message input that might be passed might be [{"num_rows":1, "letter":"a"}, {"num_rows":5, "letter":"g"}] for a BigQuery table with "num_rows" and "letter" as the respective columns. Note that the order of the json attributes doesnt matter-- only that the dictionary contains the correct keys. Therefore be sure to verify that the keys included in message also exist as columns in the BigQuery table. :param message: Rows to be inserted into Destination table :type message: List(Dict)
core/message_writers/bigquery_writer.py
send_message
Raybeam/airflow-dq
2
python
def send_message(self, message): '\n Inserts data from a list of dictionaries into a BigQuery table.\n\n A sample message input that might be passed might be\n [{"num_rows":1, "letter":"a"}, {"num_rows":5, "letter":"g"}]\n for a BigQuery table with "num_rows" and "letter" as\n the respective columns.\n\n Note that the order of the json attributes doesnt matter--\n only that the dictionary contains the correct keys.\n Therefore be sure to verify that the keys included in\n message also exist as columns in the BigQuery table.\n\n :param message: Rows to be inserted into Destination table\n :type message: List(Dict)\n ' hook = BigQueryHook(bigquery_conn_id=self.connection_id) client = Client(project=hook._get_field('project'), credentials=hook._get_credentials()) table = client.get_table(self.table_id) errors = client.insert_rows_json(table, message) if (not errors): print('New rows have been added.') else: print(errors)
def send_message(self, message): '\n Inserts data from a list of dictionaries into a BigQuery table.\n\n A sample message input that might be passed might be\n [{"num_rows":1, "letter":"a"}, {"num_rows":5, "letter":"g"}]\n for a BigQuery table with "num_rows" and "letter" as\n the respective columns.\n\n Note that the order of the json attributes doesnt matter--\n only that the dictionary contains the correct keys.\n Therefore be sure to verify that the keys included in\n message also exist as columns in the BigQuery table.\n\n :param message: Rows to be inserted into Destination table\n :type message: List(Dict)\n ' hook = BigQueryHook(bigquery_conn_id=self.connection_id) client = Client(project=hook._get_field('project'), credentials=hook._get_credentials()) table = client.get_table(self.table_id) errors = client.insert_rows_json(table, message) if (not errors): print('New rows have been added.') else: print(errors)<|docstring|>Inserts data from a list of dictionaries into a BigQuery table. A sample message input that might be passed might be [{"num_rows":1, "letter":"a"}, {"num_rows":5, "letter":"g"}] for a BigQuery table with "num_rows" and "letter" as the respective columns. Note that the order of the json attributes doesnt matter-- only that the dictionary contains the correct keys. Therefore be sure to verify that the keys included in message also exist as columns in the BigQuery table. :param message: Rows to be inserted into Destination table :type message: List(Dict)<|endoftext|>
f334e094709c5392d8fd0fba805a7669b621583513fde7ad2c7730cc2b6c4e2e
def process_data(data): 'Process string data to convenient list of tuples.\n\n Args:\n data (str): length x width x height \\n ... (without spaces)\n\n Returns:\n list: list of tuples [(length, width, height), (...), ...]\n\n ' box = namedtuple('Box', ['length', 'height', 'width']) dimensions = [box(*[int(x) for x in size.split('x')]) for size in data.strip().split('\n')] return dimensions
Process string data to convenient list of tuples. Args: data (str): length x width x height \n ... (without spaces) Returns: list: list of tuples [(length, width, height), (...), ...]
src/year2015/day02a.py
process_data
lancelote/advent_of_code
10
python
def process_data(data): 'Process string data to convenient list of tuples.\n\n Args:\n data (str): length x width x height \\n ... (without spaces)\n\n Returns:\n list: list of tuples [(length, width, height), (...), ...]\n\n ' box = namedtuple('Box', ['length', 'height', 'width']) dimensions = [box(*[int(x) for x in size.split('x')]) for size in data.strip().split('\n')] return dimensions
def process_data(data): 'Process string data to convenient list of tuples.\n\n Args:\n data (str): length x width x height \\n ... (without spaces)\n\n Returns:\n list: list of tuples [(length, width, height), (...), ...]\n\n ' box = namedtuple('Box', ['length', 'height', 'width']) dimensions = [box(*[int(x) for x in size.split('x')]) for size in data.strip().split('\n')] return dimensions<|docstring|>Process string data to convenient list of tuples. Args: data (str): length x width x height \n ... (without spaces) Returns: list: list of tuples [(length, width, height), (...), ...]<|endoftext|>
856cbfb93e1d8e04c4c58cd38a44926cd13bec1fea269f229930d85833cdb90b
def solve(task): 'Solve the puzzle.\n\n Args:\n task (str): length x width x height \\n ... (without spaces)\n\n Returns:\n int: Total square feet of wrapping paper\n\n ' result = 0 data = process_data(task) for size in data: sides = ((size.length * size.height), (size.length * size.width), (size.height * size.width)) result += ((2 * sum(sides)) + min(sides)) return result
Solve the puzzle. Args: task (str): length x width x height \n ... (without spaces) Returns: int: Total square feet of wrapping paper
src/year2015/day02a.py
solve
lancelote/advent_of_code
10
python
def solve(task): 'Solve the puzzle.\n\n Args:\n task (str): length x width x height \\n ... (without spaces)\n\n Returns:\n int: Total square feet of wrapping paper\n\n ' result = 0 data = process_data(task) for size in data: sides = ((size.length * size.height), (size.length * size.width), (size.height * size.width)) result += ((2 * sum(sides)) + min(sides)) return result
def solve(task): 'Solve the puzzle.\n\n Args:\n task (str): length x width x height \\n ... (without spaces)\n\n Returns:\n int: Total square feet of wrapping paper\n\n ' result = 0 data = process_data(task) for size in data: sides = ((size.length * size.height), (size.length * size.width), (size.height * size.width)) result += ((2 * sum(sides)) + min(sides)) return result<|docstring|>Solve the puzzle. Args: task (str): length x width x height \n ... (without spaces) Returns: int: Total square feet of wrapping paper<|endoftext|>
9a9f6e92fa8f917d3d33254eadc20412b273a80d85d264f2b790c1a2c54251a6
def db_connect(): '\n Performs database connection using database settings from settings.py.\n Returns sqlalchemy engine instance\n ' return create_engine(postgres_str)
Performs database connection using database settings from settings.py. Returns sqlalchemy engine instance
src/models.py
db_connect
jurra/sql-python-env-demo
1
python
def db_connect(): '\n Performs database connection using database settings from settings.py.\n Returns sqlalchemy engine instance\n ' return create_engine(postgres_str)
def db_connect(): '\n Performs database connection using database settings from settings.py.\n Returns sqlalchemy engine instance\n ' return create_engine(postgres_str)<|docstring|>Performs database connection using database settings from settings.py. Returns sqlalchemy engine instance<|endoftext|>
d209a1d15c4bd4c2ac937e254a19b2b88830c93292eb81ab7aaf367a329ab1fe
def empty_que(self): 'take out the data from the que and return them in a list. コネクションにたまっているdictionary型を全て取り出してlist形式にして返す' mylist = [] while True: if (not self.que.empty()): mylist.append(self.que.get()) else: break return mylist
take out the data from the que and return them in a list. コネクションにたまっているdictionary型を全て取り出してlist形式にして返す
raipy/MyPyqtGraph.py
empty_que
threemeninaboat3247/raipy
0
python
def empty_que(self): mylist = [] while True: if (not self.que.empty()): mylist.append(self.que.get()) else: break return mylist
def empty_que(self): mylist = [] while True: if (not self.que.empty()): mylist.append(self.que.get()) else: break return mylist<|docstring|>take out the data from the que and return them in a list. コネクションにたまっているdictionary型を全て取り出してlist形式にして返す<|endoftext|>
37f80d6d420f6eb6aa8f1c7613bcafb573a34bcefa56db81d0280c769f9e51a7
def distance_between_point_and_line_segment_2d(p, p1, p2): 'Calculate the distance between a point p and a line segment p1, p2\n ' x0 = p[0] y0 = p[1] x1 = p1[0] y1 = p1[1] x2 = p2[0] y2 = p2[1] numerator = np.linalg.norm((((x2 - x1) * (y1 - y0)) - ((x1 - x0) * (y2 - y1)))) denominator = np.sqrt((((x2 - x1) ** 2) + ((y2 - y1) ** 2))) return (numerator / denominator)
Calculate the distance between a point p and a line segment p1, p2
src/napari_threedee/utils/selection_utils.py
distance_between_point_and_line_segment_2d
alisterburt/napari-threedee
5
python
def distance_between_point_and_line_segment_2d(p, p1, p2): '\n ' x0 = p[0] y0 = p[1] x1 = p1[0] y1 = p1[1] x2 = p2[0] y2 = p2[1] numerator = np.linalg.norm((((x2 - x1) * (y1 - y0)) - ((x1 - x0) * (y2 - y1)))) denominator = np.sqrt((((x2 - x1) ** 2) + ((y2 - y1) ** 2))) return (numerator / denominator)
def distance_between_point_and_line_segment_2d(p, p1, p2): '\n ' x0 = p[0] y0 = p[1] x1 = p1[0] y1 = p1[1] x2 = p2[0] y2 = p2[1] numerator = np.linalg.norm((((x2 - x1) * (y1 - y0)) - ((x1 - x0) * (y2 - y1)))) denominator = np.sqrt((((x2 - x1) ** 2) + ((y2 - y1) ** 2))) return (numerator / denominator)<|docstring|>Calculate the distance between a point p and a line segment p1, p2<|endoftext|>
813ec20aaa3129a95779044476c9e1720b9c869f26eadd82856de231fb4955e2
def select_triangle_from_click(click_point: np.ndarray, view_direction: np.ndarray, triangles: np.ndarray): 'Determine if a line goes through any of a set of triangles.\n\n For example, this could be used to determine if a click was\n in a triangle of a mesh.\n\n Parameters\n ----------\n click_point : np.ndarray\n (3,) array containing the location that was clicked. This\n should be in the same coordinate system as the vertices.\n view_direction : np.ndarray\n (3,) array describing the direction camera is pointing in\n the scene. This should be in the same coordinate system as\n the vertices.\n triangles : np.ndarray\n (n, 3, 3) array containing the coordinates for the 3 corners\n of n triangles.\n\n Returns\n -------\n in_triangles : np.ndarray\n (n,) boolean array that is True of the ray intersects the triangle\n ' vertices = triangles.reshape(((- 1), triangles.shape[2])) (vertices_plane, signed_distance_to_plane) = project_points_onto_plane(points=vertices, plane_point=click_point, plane_normal=view_direction) rotation_matrix = rotation_matrix_from_vectors_3d(view_direction, [0, 0, 1]) rotated_vertices = (vertices_plane @ rotation_matrix.T) rotated_vertices_2d = rotated_vertices[(:, :2)] rotated_triangles_2d = rotated_vertices_2d.reshape((- 1), 3, 2) line_pos_2D = rotation_matrix.dot(click_point)[:2] candidate_matches = inside_triangles((rotated_triangles_2d - line_pos_2D)) candidate_match_indices = np.argwhere(candidate_matches) n_matches = len(candidate_match_indices) if (n_matches == 0): triangle_index = None elif (n_matches == 1): triangle_index = candidate_match_indices[0] else: potential_match_distances = signed_distance_to_plane[candidate_match_indices] triangle_index = candidate_match_indices[np.argmin(potential_match_distances)] return triangle_index
Determine if a line goes through any of a set of triangles. For example, this could be used to determine if a click was in a triangle of a mesh. Parameters ---------- click_point : np.ndarray (3,) array containing the location that was clicked. This should be in the same coordinate system as the vertices. view_direction : np.ndarray (3,) array describing the direction camera is pointing in the scene. This should be in the same coordinate system as the vertices. triangles : np.ndarray (n, 3, 3) array containing the coordinates for the 3 corners of n triangles. Returns ------- in_triangles : np.ndarray (n,) boolean array that is True of the ray intersects the triangle
src/napari_threedee/utils/selection_utils.py
select_triangle_from_click
alisterburt/napari-threedee
5
python
def select_triangle_from_click(click_point: np.ndarray, view_direction: np.ndarray, triangles: np.ndarray): 'Determine if a line goes through any of a set of triangles.\n\n For example, this could be used to determine if a click was\n in a triangle of a mesh.\n\n Parameters\n ----------\n click_point : np.ndarray\n (3,) array containing the location that was clicked. This\n should be in the same coordinate system as the vertices.\n view_direction : np.ndarray\n (3,) array describing the direction camera is pointing in\n the scene. This should be in the same coordinate system as\n the vertices.\n triangles : np.ndarray\n (n, 3, 3) array containing the coordinates for the 3 corners\n of n triangles.\n\n Returns\n -------\n in_triangles : np.ndarray\n (n,) boolean array that is True of the ray intersects the triangle\n ' vertices = triangles.reshape(((- 1), triangles.shape[2])) (vertices_plane, signed_distance_to_plane) = project_points_onto_plane(points=vertices, plane_point=click_point, plane_normal=view_direction) rotation_matrix = rotation_matrix_from_vectors_3d(view_direction, [0, 0, 1]) rotated_vertices = (vertices_plane @ rotation_matrix.T) rotated_vertices_2d = rotated_vertices[(:, :2)] rotated_triangles_2d = rotated_vertices_2d.reshape((- 1), 3, 2) line_pos_2D = rotation_matrix.dot(click_point)[:2] candidate_matches = inside_triangles((rotated_triangles_2d - line_pos_2D)) candidate_match_indices = np.argwhere(candidate_matches) n_matches = len(candidate_match_indices) if (n_matches == 0): triangle_index = None elif (n_matches == 1): triangle_index = candidate_match_indices[0] else: potential_match_distances = signed_distance_to_plane[candidate_match_indices] triangle_index = candidate_match_indices[np.argmin(potential_match_distances)] return triangle_index
def select_triangle_from_click(click_point: np.ndarray, view_direction: np.ndarray, triangles: np.ndarray): 'Determine if a line goes through any of a set of triangles.\n\n For example, this could be used to determine if a click was\n in a triangle of a mesh.\n\n Parameters\n ----------\n click_point : np.ndarray\n (3,) array containing the location that was clicked. This\n should be in the same coordinate system as the vertices.\n view_direction : np.ndarray\n (3,) array describing the direction camera is pointing in\n the scene. This should be in the same coordinate system as\n the vertices.\n triangles : np.ndarray\n (n, 3, 3) array containing the coordinates for the 3 corners\n of n triangles.\n\n Returns\n -------\n in_triangles : np.ndarray\n (n,) boolean array that is True of the ray intersects the triangle\n ' vertices = triangles.reshape(((- 1), triangles.shape[2])) (vertices_plane, signed_distance_to_plane) = project_points_onto_plane(points=vertices, plane_point=click_point, plane_normal=view_direction) rotation_matrix = rotation_matrix_from_vectors_3d(view_direction, [0, 0, 1]) rotated_vertices = (vertices_plane @ rotation_matrix.T) rotated_vertices_2d = rotated_vertices[(:, :2)] rotated_triangles_2d = rotated_vertices_2d.reshape((- 1), 3, 2) line_pos_2D = rotation_matrix.dot(click_point)[:2] candidate_matches = inside_triangles((rotated_triangles_2d - line_pos_2D)) candidate_match_indices = np.argwhere(candidate_matches) n_matches = len(candidate_match_indices) if (n_matches == 0): triangle_index = None elif (n_matches == 1): triangle_index = candidate_match_indices[0] else: potential_match_distances = signed_distance_to_plane[candidate_match_indices] triangle_index = candidate_match_indices[np.argmin(potential_match_distances)] return triangle_index<|docstring|>Determine if a line goes through any of a set of triangles. For example, this could be used to determine if a click was in a triangle of a mesh. 
Parameters ---------- click_point : np.ndarray (3,) array containing the location that was clicked. This should be in the same coordinate system as the vertices. view_direction : np.ndarray (3,) array describing the direction camera is pointing in the scene. This should be in the same coordinate system as the vertices. triangles : np.ndarray (n, 3, 3) array containing the coordinates for the 3 corners of n triangles. Returns ------- in_triangles : np.ndarray (n,) boolean array that is True of the ray intersects the triangle<|endoftext|>
64c71c653a929ce94acdd21343efedb81740c69772353d64bc11b7422e2b2f5a
def __init__(self, player_id, player_name=None): '\n\n :param player_id:\n :param player_name:\n ' super().__init__(player_id, player_name)
:param player_id: :param player_name:
src/ia/playeria.py
__init__
yoyonel/2018_papayoo
0
python
def __init__(self, player_id, player_name=None): '\n\n :param player_id:\n :param player_name:\n ' super().__init__(player_id, player_name)
def __init__(self, player_id, player_name=None): '\n\n :param player_id:\n :param player_name:\n ' super().__init__(player_id, player_name)<|docstring|>:param player_id: :param player_name:<|endoftext|>
e490e2e577f11c7aa98a7d0ace6b56eb530e2d6df6308c7df3b47b721c77ecfb
def do_discards(self, discard): '\n\n :param discard:\n :type discard: DiscardCards\n :return:\n :rtype: list[Cards]\n ' raise NotImplemented('Not implemented!')
:param discard: :type discard: DiscardCards :return: :rtype: list[Cards]
src/ia/playeria.py
do_discards
yoyonel/2018_papayoo
0
python
def do_discards(self, discard): '\n\n :param discard:\n :type discard: DiscardCards\n :return:\n :rtype: list[Cards]\n ' raise NotImplemented('Not implemented!')
def do_discards(self, discard): '\n\n :param discard:\n :type discard: DiscardCards\n :return:\n :rtype: list[Cards]\n ' raise NotImplemented('Not implemented!')<|docstring|>:param discard: :type discard: DiscardCards :return: :rtype: list[Cards]<|endoftext|>
2bc9626e23226d684474adc49611df117211493a48113a4e1afdcea37b4819ae
def get_from_discards(self, cards): '\n\n :param cards:\n :type cards: list[Cards]\n :return:\n ' raise NotImplemented('Not implemented!')
:param cards: :type cards: list[Cards] :return:
src/ia/playeria.py
get_from_discards
yoyonel/2018_papayoo
0
python
def get_from_discards(self, cards): '\n\n :param cards:\n :type cards: list[Cards]\n :return:\n ' raise NotImplemented('Not implemented!')
def get_from_discards(self, cards): '\n\n :param cards:\n :type cards: list[Cards]\n :return:\n ' raise NotImplemented('Not implemented!')<|docstring|>:param cards: :type cards: list[Cards] :return:<|endoftext|>
58275ad68c576489a7dda0699e4cdd0cf12892a9fa1fb3c704b0b3cc4c87e33e
def play(self, cards_already_played): '\n\n :param cards_already_played:\n :type cards_already_played: list[Cards]\n :return:\n ' raise NotImplemented('Not implemented!')
:param cards_already_played: :type cards_already_played: list[Cards] :return:
src/ia/playeria.py
play
yoyonel/2018_papayoo
0
python
def play(self, cards_already_played): '\n\n :param cards_already_played:\n :type cards_already_played: list[Cards]\n :return:\n ' raise NotImplemented('Not implemented!')
def play(self, cards_already_played): '\n\n :param cards_already_played:\n :type cards_already_played: list[Cards]\n :return:\n ' raise NotImplemented('Not implemented!')<|docstring|>:param cards_already_played: :type cards_already_played: list[Cards] :return:<|endoftext|>
bb89be4d4a3c45f9e27f6b9519b581eec26f24e2fa3ab4a4245b4cc7e639c1bc
def _active_fget(conf): 'getter for davos.config.active' return config._active
getter for davos.config.active
davos/implementations/__init__.py
_active_fget
jeremymanning/davos
18
python
def _active_fget(conf): return config._active
def _active_fget(conf): return config._active<|docstring|>getter for davos.config.active<|endoftext|>
b1ef46df6ef24394eb32cc4d00bebe6e609723cc266e43ffdae1794b6253fd07
def _active_fset(conf, value): 'setter for davos.config.active' if (value is True): _activate_helper(smuggle, full_parser) elif (value is False): _deactivate_helper(smuggle, full_parser) else: raise DavosConfigError('active', "field may be 'True' or 'False'") conf._active = value
setter for davos.config.active
davos/implementations/__init__.py
_active_fset
jeremymanning/davos
18
python
def _active_fset(conf, value): if (value is True): _activate_helper(smuggle, full_parser) elif (value is False): _deactivate_helper(smuggle, full_parser) else: raise DavosConfigError('active', "field may be 'True' or 'False'") conf._active = value
def _active_fset(conf, value): if (value is True): _activate_helper(smuggle, full_parser) elif (value is False): _deactivate_helper(smuggle, full_parser) else: raise DavosConfigError('active', "field may be 'True' or 'False'") conf._active = value<|docstring|>setter for davos.config.active<|endoftext|>
8721c605489b45418f909e2efea33dbb16c9f77fe9a9d0d250922ced78970334
def _conda_avail_fget(conf): 'getter for davos.config.conda_avail' if (conf._conda_avail is None): check_conda() return conf._conda_avail
getter for davos.config.conda_avail
davos/implementations/__init__.py
_conda_avail_fget
jeremymanning/davos
18
python
def _conda_avail_fget(conf): if (conf._conda_avail is None): check_conda() return conf._conda_avail
def _conda_avail_fget(conf): if (conf._conda_avail is None): check_conda() return conf._conda_avail<|docstring|>getter for davos.config.conda_avail<|endoftext|>
83ab63befba68d7da6e0889e48ddb06c403e9c63172c54d0b984b9f78c5cc2ec
def _conda_avail_fset(conf, _): 'setter for davos.config.conda_avail' raise DavosConfigError('conda_avail', 'field is read-only')
setter for davos.config.conda_avail
davos/implementations/__init__.py
_conda_avail_fset
jeremymanning/davos
18
python
def _conda_avail_fset(conf, _): raise DavosConfigError('conda_avail', 'field is read-only')
def _conda_avail_fset(conf, _): raise DavosConfigError('conda_avail', 'field is read-only')<|docstring|>setter for davos.config.conda_avail<|endoftext|>
1cee094394c70e25d3e4d2e74789fd6bd75d25d57e1e2f1110cd983105794034
def _conda_env_fget(conf): 'getter for davos.config.conda_env' if (conf._conda_avail is None): check_conda() return conf._conda_env
getter for davos.config.conda_env
davos/implementations/__init__.py
_conda_env_fget
jeremymanning/davos
18
python
def _conda_env_fget(conf): if (conf._conda_avail is None): check_conda() return conf._conda_env
def _conda_env_fget(conf): if (conf._conda_avail is None): check_conda() return conf._conda_env<|docstring|>getter for davos.config.conda_env<|endoftext|>
5f2c2a35b076689feea566545663c66e47ee623ae5a2623d0c74105be68dcd89
def _conda_env_fset(conf, new_env): 'setter for davos.config.conda_env' if (conf._conda_avail is None): check_conda() if (conf._conda_avail is False): raise DavosConfigError('conda_env', 'cannot set conda environment. No local conda installation found') if (new_env != conf._conda_env): if ((conf._conda_envs_dirs is not None) and (new_env not in conf._conda_envs_dirs.keys())): local_envs = {"', '".join(conf._conda_envs_dirs.keys())} raise DavosConfigError('conda_env', f'''unrecognized environment name: '{new_env}'. Local environments are: '{local_envs}'''') conf._conda_env = new_env
setter for davos.config.conda_env
davos/implementations/__init__.py
_conda_env_fset
jeremymanning/davos
18
python
def _conda_env_fset(conf, new_env): if (conf._conda_avail is None): check_conda() if (conf._conda_avail is False): raise DavosConfigError('conda_env', 'cannot set conda environment. No local conda installation found') if (new_env != conf._conda_env): if ((conf._conda_envs_dirs is not None) and (new_env not in conf._conda_envs_dirs.keys())): local_envs = {"', '".join(conf._conda_envs_dirs.keys())} raise DavosConfigError('conda_env', f'unrecognized environment name: '{new_env}'. Local environments are: '{local_envs}) conf._conda_env = new_env
def _conda_env_fset(conf, new_env): if (conf._conda_avail is None): check_conda() if (conf._conda_avail is False): raise DavosConfigError('conda_env', 'cannot set conda environment. No local conda installation found') if (new_env != conf._conda_env): if ((conf._conda_envs_dirs is not None) and (new_env not in conf._conda_envs_dirs.keys())): local_envs = {"', '".join(conf._conda_envs_dirs.keys())} raise DavosConfigError('conda_env', f'unrecognized environment name: '{new_env}'. Local environments are: '{local_envs}) conf._conda_env = new_env<|docstring|>setter for davos.config.conda_env<|endoftext|>
5f4130d14df489bd1076fa34f5bb942d4aab5e4eaf7ddfe1c8ed783071ec99a3
def _conda_envs_dirs_fget(conf): 'getter for davos.config.conda_envs_dirs' if (conf._conda_avail is None): check_conda() return conf._conda_envs_dirs
getter for davos.config.conda_envs_dirs
davos/implementations/__init__.py
_conda_envs_dirs_fget
jeremymanning/davos
18
python
def _conda_envs_dirs_fget(conf): if (conf._conda_avail is None): check_conda() return conf._conda_envs_dirs
def _conda_envs_dirs_fget(conf): if (conf._conda_avail is None): check_conda() return conf._conda_envs_dirs<|docstring|>getter for davos.config.conda_envs_dirs<|endoftext|>
3544674642d4c43e7c41585e95c4c0b825760b356998ea13921c3d788c74d476
def _conda_envs_dirs_fset(conf, _): 'setter for davos.config.conda_envs_dirs' raise DavosConfigError('conda_envs_dirs', 'field is read-only')
setter for davos.config.conda_envs_dirs
davos/implementations/__init__.py
_conda_envs_dirs_fset
jeremymanning/davos
18
python
def _conda_envs_dirs_fset(conf, _): raise DavosConfigError('conda_envs_dirs', 'field is read-only')
def _conda_envs_dirs_fset(conf, _): raise DavosConfigError('conda_envs_dirs', 'field is read-only')<|docstring|>setter for davos.config.conda_envs_dirs<|endoftext|>
9948eee9483978fcf99d44518a420e81fd3c98a0056839b286339d6cb5359163
def get_file_path(category, file_name): '\n ie: examples/generic/adder.qasm\n - category: "generic"\n - file_name: "adder"\n ' return os.path.join(os.path.dirname(__file__), '../examples', category, (file_name + '.qasm'))
ie: examples/generic/adder.qasm - category: "generic" - file_name: "adder"
program/qiskit-demo/helper.py
get_file_path
ctuning/qiskit
7
python
def get_file_path(category, file_name): '\n ie: examples/generic/adder.qasm\n - category: "generic"\n - file_name: "adder"\n ' return os.path.join(os.path.dirname(__file__), '../examples', category, (file_name + '.qasm'))
def get_file_path(category, file_name): '\n ie: examples/generic/adder.qasm\n - category: "generic"\n - file_name: "adder"\n ' return os.path.join(os.path.dirname(__file__), '../examples', category, (file_name + '.qasm'))<|docstring|>ie: examples/generic/adder.qasm - category: "generic" - file_name: "adder"<|endoftext|>
5497d2f676663dcea41be4af8e416ec3d38629ac8e4fa80cc742c77db0ebb3c6
def parse(file_path, verbose=False, prec=15): '\n - file_path: Path to the OpenQASM file\n - prec: Precision for the returned string\n ' qiskit_qasm = qasm.Qasm(file_path) try: qiskit_qasm.parse().qasm(prec) return True except qasm.QasmError as err: if verbose: print('Error:') print(err) return False
- file_path: Path to the OpenQASM file - prec: Precision for the returned string
program/qiskit-demo/helper.py
parse
ctuning/qiskit
7
python
def parse(file_path, verbose=False, prec=15): '\n - file_path: Path to the OpenQASM file\n - prec: Precision for the returned string\n ' qiskit_qasm = qasm.Qasm(file_path) try: qiskit_qasm.parse().qasm(prec) return True except qasm.QasmError as err: if verbose: print('Error:') print(err) return False
def parse(file_path, verbose=False, prec=15): '\n - file_path: Path to the OpenQASM file\n - prec: Precision for the returned string\n ' qiskit_qasm = qasm.Qasm(file_path) try: qiskit_qasm.parse().qasm(prec) return True except qasm.QasmError as err: if verbose: print('Error:') print(err) return False<|docstring|>- file_path: Path to the OpenQASM file - prec: Precision for the returned string<|endoftext|>
732f470b758ed7d3b62d7bdad3f1f516dd07b930aeee7914bcbf3d8cfe37fcad
def get_value(line): '\n - line: Line with QASM code to inspect\n ' return line.split(':')[1].strip()
- line: Line with QASM code to inspect
program/qiskit-demo/helper.py
get_value
ctuning/qiskit
7
python
def get_value(line): '\n \n ' return line.split(':')[1].strip()
def get_value(line): '\n \n ' return line.split(':')[1].strip()<|docstring|>- line: Line with QASM code to inspect<|endoftext|>
7ed1c79d05c5884c245f9aacdecc6ddd4086f03daf19a73359d37a666e3bc246
@staticmethod def assertFile(file_path, verbose=False, invalid=False): '\n Custom asserts for QASM files.\n - file_path: Path to the OpenQASM file\n - invalid: If we´re checking an invalid file\n ' src = open(file_path, 'r') lines = src.readlines() src.close() name = None section = None for line in lines: if ('//' in line): if ('name:' in line): name = get_value(line) if ('section:' in line): section = get_value(line) break category = os.path.basename(os.path.dirname(file_path)) msg = ' - ' if (not name): msg = (msg + os.path.splitext(os.path.basename(file_path))[0]) else: msg = (msg + name) if section: msg = ((msg + ', section: ') + section) msg = (((msg + ' (') + category) + ')') print(msg) res = parse(file_path, verbose) if (((not res) and (not invalid)) or (res and invalid)): raise AssertionError(msg)
Custom asserts for QASM files. - file_path: Path to the OpenQASM file - invalid: If we´re checking an invalid file
program/qiskit-demo/helper.py
assertFile
ctuning/qiskit
7
python
@staticmethod def assertFile(file_path, verbose=False, invalid=False): '\n Custom asserts for QASM files.\n - file_path: Path to the OpenQASM file\n - invalid: If we´re checking an invalid file\n ' src = open(file_path, 'r') lines = src.readlines() src.close() name = None section = None for line in lines: if ('//' in line): if ('name:' in line): name = get_value(line) if ('section:' in line): section = get_value(line) break category = os.path.basename(os.path.dirname(file_path)) msg = ' - ' if (not name): msg = (msg + os.path.splitext(os.path.basename(file_path))[0]) else: msg = (msg + name) if section: msg = ((msg + ', section: ') + section) msg = (((msg + ' (') + category) + ')') print(msg) res = parse(file_path, verbose) if (((not res) and (not invalid)) or (res and invalid)): raise AssertionError(msg)
@staticmethod def assertFile(file_path, verbose=False, invalid=False): '\n Custom asserts for QASM files.\n - file_path: Path to the OpenQASM file\n - invalid: If we´re checking an invalid file\n ' src = open(file_path, 'r') lines = src.readlines() src.close() name = None section = None for line in lines: if ('//' in line): if ('name:' in line): name = get_value(line) if ('section:' in line): section = get_value(line) break category = os.path.basename(os.path.dirname(file_path)) msg = ' - ' if (not name): msg = (msg + os.path.splitext(os.path.basename(file_path))[0]) else: msg = (msg + name) if section: msg = ((msg + ', section: ') + section) msg = (((msg + ' (') + category) + ')') print(msg) res = parse(file_path, verbose) if (((not res) and (not invalid)) or (res and invalid)): raise AssertionError(msg)<|docstring|>Custom asserts for QASM files. - file_path: Path to the OpenQASM file - invalid: If we´re checking an invalid file<|endoftext|>
3414f60a42ffe8ba5c3c3d454c6a86b43ea858af8b58d66a79f3816ceaf1b6b0
def ecb_chosen_plaintext_attack(encrypt_oracle: typing.Callable[([bytes], bool)], plaintext_space: bytes=b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789{}_', known_plaintext: bytes=b'', block_size: int=16, verbose: bool=False): 'AES ECB mode chosen plaintext attack\n\n This function helps solving chosen plaintext attack.\n\n Args:\n encrypt_oracle (typing.Callable[[bytes], bool]): the encryption oracle.\n plaintext_space (bytes, optional): Defaults to uppercase + lowercase + numbers + "{}_".\n known_plaintext (bytes, optional): Defaults to b"".\n block_size (int, optional): Defaults to 16.\n verbose (bool, optional): Defaults to False.\n Returns:\n bytes: The plaintext.\n ' from random import sample block_end = (block_size * ((len(known_plaintext) // (block_size - 2)) + 1)) for _ in range(1, 100): plaintext_space = bytes(sample(bytearray(plaintext_space), len(plaintext_space))) if verbose: log.progress('Getting the encrypted block which includes the beginning of FLAG') if ((len(known_plaintext) % block_size) == (block_size - 1)): block_end += block_size chosen_plaintext = (b'\x00' * ((block_end - len(known_plaintext)) - 1)) encrypted_block = encrypt_oracle(chosen_plaintext) encrypted_block = encrypted_block[(block_end - block_size):block_end] if verbose: log.progress('Bruteforcing all of the characters in plaintext_space') for c in plaintext_space: if verbose: sys.stderr.write(f''' {log.colored(Style.Color.GREY, known_plaintext[:(- 1)].decode())}{log.colored(Style.Color.RED, known_plaintext[(- 1):].decode())}{log.colored(Style.MAGENTA, chr(c))}''') payload = (((b'\x00' * ((block_end - len(known_plaintext)) - 1)) + known_plaintext) + bytearray([c])) enc_block = encrypt_oracle(payload)[(block_end - block_size):block_end] if (encrypted_block == enc_block): known_plaintext += bytearray([c]) if verbose: sys.stderr.write('\n') break
AES ECB mode chosen plaintext attack This function helps solving chosen plaintext attack. Args: encrypt_oracle (typing.Callable[[bytes], bool]): the encryption oracle. plaintext_space (bytes, optional): Defaults to uppercase + lowercase + numbers + "{}_". known_plaintext (bytes, optional): Defaults to b"". block_size (int, optional): Defaults to 16. verbose (bool, optional): Defaults to False. Returns: bytes: The plaintext.
toyotama/crypto/aes.py
ecb_chosen_plaintext_attack
Laika/Toyotama
0
python
def ecb_chosen_plaintext_attack(encrypt_oracle: typing.Callable[([bytes], bool)], plaintext_space: bytes=b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789{}_', known_plaintext: bytes=b, block_size: int=16, verbose: bool=False): 'AES ECB mode chosen plaintext attack\n\n This function helps solving chosen plaintext attack.\n\n Args:\n encrypt_oracle (typing.Callable[[bytes], bool]): the encryption oracle.\n plaintext_space (bytes, optional): Defaults to uppercase + lowercase + numbers + "{}_".\n known_plaintext (bytes, optional): Defaults to b.\n block_size (int, optional): Defaults to 16.\n verbose (bool, optional): Defaults to False.\n Returns:\n bytes: The plaintext.\n ' from random import sample block_end = (block_size * ((len(known_plaintext) // (block_size - 2)) + 1)) for _ in range(1, 100): plaintext_space = bytes(sample(bytearray(plaintext_space), len(plaintext_space))) if verbose: log.progress('Getting the encrypted block which includes the beginning of FLAG') if ((len(known_plaintext) % block_size) == (block_size - 1)): block_end += block_size chosen_plaintext = (b'\x00' * ((block_end - len(known_plaintext)) - 1)) encrypted_block = encrypt_oracle(chosen_plaintext) encrypted_block = encrypted_block[(block_end - block_size):block_end] if verbose: log.progress('Bruteforcing all of the characters in plaintext_space') for c in plaintext_space: if verbose: sys.stderr.write(f' {log.colored(Style.Color.GREY, known_plaintext[:(- 1)].decode())}{log.colored(Style.Color.RED, known_plaintext[(- 1):].decode())}{log.colored(Style.MAGENTA, chr(c))}') payload = (((b'\x00' * ((block_end - len(known_plaintext)) - 1)) + known_plaintext) + bytearray([c])) enc_block = encrypt_oracle(payload)[(block_end - block_size):block_end] if (encrypted_block == enc_block): known_plaintext += bytearray([c]) if verbose: sys.stderr.write('\n') break
def ecb_chosen_plaintext_attack(encrypt_oracle: typing.Callable[([bytes], bool)], plaintext_space: bytes=b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789{}_', known_plaintext: bytes=b, block_size: int=16, verbose: bool=False): 'AES ECB mode chosen plaintext attack\n\n This function helps solving chosen plaintext attack.\n\n Args:\n encrypt_oracle (typing.Callable[[bytes], bool]): the encryption oracle.\n plaintext_space (bytes, optional): Defaults to uppercase + lowercase + numbers + "{}_".\n known_plaintext (bytes, optional): Defaults to b.\n block_size (int, optional): Defaults to 16.\n verbose (bool, optional): Defaults to False.\n Returns:\n bytes: The plaintext.\n ' from random import sample block_end = (block_size * ((len(known_plaintext) // (block_size - 2)) + 1)) for _ in range(1, 100): plaintext_space = bytes(sample(bytearray(plaintext_space), len(plaintext_space))) if verbose: log.progress('Getting the encrypted block which includes the beginning of FLAG') if ((len(known_plaintext) % block_size) == (block_size - 1)): block_end += block_size chosen_plaintext = (b'\x00' * ((block_end - len(known_plaintext)) - 1)) encrypted_block = encrypt_oracle(chosen_plaintext) encrypted_block = encrypted_block[(block_end - block_size):block_end] if verbose: log.progress('Bruteforcing all of the characters in plaintext_space') for c in plaintext_space: if verbose: sys.stderr.write(f' {log.colored(Style.Color.GREY, known_plaintext[:(- 1)].decode())}{log.colored(Style.Color.RED, known_plaintext[(- 1):].decode())}{log.colored(Style.MAGENTA, chr(c))}') payload = (((b'\x00' * ((block_end - len(known_plaintext)) - 1)) + known_plaintext) + bytearray([c])) enc_block = encrypt_oracle(payload)[(block_end - block_size):block_end] if (encrypted_block == enc_block): known_plaintext += bytearray([c]) if verbose: sys.stderr.write('\n') break<|docstring|>AES ECB mode chosen plaintext attack This function helps solving chosen plaintext attack. 
Args: encrypt_oracle (typing.Callable[[bytes], bool]): the encryption oracle. plaintext_space (bytes, optional): Defaults to uppercase + lowercase + numbers + "{}_". known_plaintext (bytes, optional): Defaults to b"". block_size (int, optional): Defaults to 16. verbose (bool, optional): Defaults to False. Returns: bytes: The plaintext.<|endoftext|>
edffe07ee9b06f4cc963ff453031601da0de198009a171d387e387b1ca995ba6
def padding_oracle_attack(ciphertext: bytes, padding_oracle: typing.Callable, iv: bytes=b'', block_size: int=16, verbose: bool=False) -> bytes: 'Padding Oracle Attack solver.\n\n This function helps solving "Padding Oracle Attack"\n\n Args:\n ciphertext (bytes): The ciphertext.\n padding_oracle (Callable): The padding oracle function. This function receives ciphertext and returns the ciphertext is valid or not.\n iv (bytes, optional): An initialization vector. Defaults to b"".\n block_size (int, optional): The block size of AES. Defaults to 16.\n verbose (bool, optional): Show more information if True, otherwise no output.\n Returns:\n bytes: The plaintext\n ' cipher_block = [ciphertext[i:(i + block_size)] for i in range(0, len(ciphertext), block_size)] cipher_block.reverse() plaintext = b'' def is_valid(c_target, d_prev, nth_byte, i): attempt_byte = bytes.fromhex(f'{i:02x}') adjusted_bytes = bytes(((c ^ nth_byte) for c in d_prev)) payload = ((((b'\x00' * (block_size - nth_byte)) + attempt_byte) + adjusted_bytes) + c_target) if verbose: sys.stdout.write((((('\x1b[2k\x1b[g' + log.colored(Style.GREY, repr((b'\x00' * (block_size - nth_byte)))[2:(- 1)])) + log.colored(Style.RED, repr(attempt_byte)[2:(- 1)])) + log.colored(Style.MAGENTA, repr(adjusted_bytes)[2:(- 1)])) + log.colored(Style.DARK_GREY, repr(c_target)[2:(- 1)]))) sys.stdout.flush() return padding_oracle(payload) for _ in range((len(cipher_block) - 1)): (c_target, c_prev) = cipher_block[:2] print(cipher_block) cipher_block.pop(0) nth_byte = 1 i = 0 m = d_prev = b'' while True: if is_valid(c_target, d_prev, nth_byte, i): m += bytes.fromhex(f'{((i ^ nth_byte) ^ c_prev[(- nth_byte)]):02x}') d_prev = (bytes.fromhex(f'{(i ^ nth_byte):02x}') + d_prev) nth_byte += 1 i = 0 if (nth_byte <= block_size): continue break i += 1 if (i > 255): log.error('[padding_oracle_attack] Not Found') return None plaintext = (m[::(- 1)] + plaintext) if verbose: print() log.information(f'Decrypt(c{len(cipher_block)}): 
{repr(d_prev)[2:(- 1)]}') log.information(f'm{len(cipher_block)}: {repr(m[::(- 1)])[2:(- 1)]}') log.information(f'plaintext: {repr(plaintext)[2:(- 1)]}') return plaintext
Padding Oracle Attack solver. This function helps solving "Padding Oracle Attack" Args: ciphertext (bytes): The ciphertext. padding_oracle (Callable): The padding oracle function. This function receives ciphertext and returns the ciphertext is valid or not. iv (bytes, optional): An initialization vector. Defaults to b"". block_size (int, optional): The block size of AES. Defaults to 16. verbose (bool, optional): Show more information if True, otherwise no output. Returns: bytes: The plaintext
toyotama/crypto/aes.py
padding_oracle_attack
Laika/Toyotama
0
python
def padding_oracle_attack(ciphertext: bytes, padding_oracle: typing.Callable, iv: bytes=b, block_size: int=16, verbose: bool=False) -> bytes: 'Padding Oracle Attack solver.\n\n This function helps solving "Padding Oracle Attack"\n\n Args:\n ciphertext (bytes): The ciphertext.\n padding_oracle (Callable): The padding oracle function. This function receives ciphertext and returns the ciphertext is valid or not.\n iv (bytes, optional): An initialization vector. Defaults to b.\n block_size (int, optional): The block size of AES. Defaults to 16.\n verbose (bool, optional): Show more information if True, otherwise no output.\n Returns:\n bytes: The plaintext\n ' cipher_block = [ciphertext[i:(i + block_size)] for i in range(0, len(ciphertext), block_size)] cipher_block.reverse() plaintext = b def is_valid(c_target, d_prev, nth_byte, i): attempt_byte = bytes.fromhex(f'{i:02x}') adjusted_bytes = bytes(((c ^ nth_byte) for c in d_prev)) payload = ((((b'\x00' * (block_size - nth_byte)) + attempt_byte) + adjusted_bytes) + c_target) if verbose: sys.stdout.write((((('\x1b[2k\x1b[g' + log.colored(Style.GREY, repr((b'\x00' * (block_size - nth_byte)))[2:(- 1)])) + log.colored(Style.RED, repr(attempt_byte)[2:(- 1)])) + log.colored(Style.MAGENTA, repr(adjusted_bytes)[2:(- 1)])) + log.colored(Style.DARK_GREY, repr(c_target)[2:(- 1)]))) sys.stdout.flush() return padding_oracle(payload) for _ in range((len(cipher_block) - 1)): (c_target, c_prev) = cipher_block[:2] print(cipher_block) cipher_block.pop(0) nth_byte = 1 i = 0 m = d_prev = b while True: if is_valid(c_target, d_prev, nth_byte, i): m += bytes.fromhex(f'{((i ^ nth_byte) ^ c_prev[(- nth_byte)]):02x}') d_prev = (bytes.fromhex(f'{(i ^ nth_byte):02x}') + d_prev) nth_byte += 1 i = 0 if (nth_byte <= block_size): continue break i += 1 if (i > 255): log.error('[padding_oracle_attack] Not Found') return None plaintext = (m[::(- 1)] + plaintext) if verbose: print() log.information(f'Decrypt(c{len(cipher_block)}): {repr(d_prev)[2:(- 1)]}') 
log.information(f'm{len(cipher_block)}: {repr(m[::(- 1)])[2:(- 1)]}') log.information(f'plaintext: {repr(plaintext)[2:(- 1)]}') return plaintext
def padding_oracle_attack(ciphertext: bytes, padding_oracle: typing.Callable, iv: bytes=b, block_size: int=16, verbose: bool=False) -> bytes: 'Padding Oracle Attack solver.\n\n This function helps solving "Padding Oracle Attack"\n\n Args:\n ciphertext (bytes): The ciphertext.\n padding_oracle (Callable): The padding oracle function. This function receives ciphertext and returns the ciphertext is valid or not.\n iv (bytes, optional): An initialization vector. Defaults to b.\n block_size (int, optional): The block size of AES. Defaults to 16.\n verbose (bool, optional): Show more information if True, otherwise no output.\n Returns:\n bytes: The plaintext\n ' cipher_block = [ciphertext[i:(i + block_size)] for i in range(0, len(ciphertext), block_size)] cipher_block.reverse() plaintext = b def is_valid(c_target, d_prev, nth_byte, i): attempt_byte = bytes.fromhex(f'{i:02x}') adjusted_bytes = bytes(((c ^ nth_byte) for c in d_prev)) payload = ((((b'\x00' * (block_size - nth_byte)) + attempt_byte) + adjusted_bytes) + c_target) if verbose: sys.stdout.write((((('\x1b[2k\x1b[g' + log.colored(Style.GREY, repr((b'\x00' * (block_size - nth_byte)))[2:(- 1)])) + log.colored(Style.RED, repr(attempt_byte)[2:(- 1)])) + log.colored(Style.MAGENTA, repr(adjusted_bytes)[2:(- 1)])) + log.colored(Style.DARK_GREY, repr(c_target)[2:(- 1)]))) sys.stdout.flush() return padding_oracle(payload) for _ in range((len(cipher_block) - 1)): (c_target, c_prev) = cipher_block[:2] print(cipher_block) cipher_block.pop(0) nth_byte = 1 i = 0 m = d_prev = b while True: if is_valid(c_target, d_prev, nth_byte, i): m += bytes.fromhex(f'{((i ^ nth_byte) ^ c_prev[(- nth_byte)]):02x}') d_prev = (bytes.fromhex(f'{(i ^ nth_byte):02x}') + d_prev) nth_byte += 1 i = 0 if (nth_byte <= block_size): continue break i += 1 if (i > 255): log.error('[padding_oracle_attack] Not Found') return None plaintext = (m[::(- 1)] + plaintext) if verbose: print() log.information(f'Decrypt(c{len(cipher_block)}): {repr(d_prev)[2:(- 1)]}') 
log.information(f'm{len(cipher_block)}: {repr(m[::(- 1)])[2:(- 1)]}') log.information(f'plaintext: {repr(plaintext)[2:(- 1)]}') return plaintext<|docstring|>Padding Oracle Attack solver. This function helps solving "Padding Oracle Attack" Args: ciphertext (bytes): The ciphertext. padding_oracle (Callable): The padding oracle function. This function receives ciphertext and returns the ciphertext is valid or not. iv (bytes, optional): An initialization vector. Defaults to b"". block_size (int, optional): The block size of AES. Defaults to 16. verbose (bool, optional): Show more information if True, otherwise no output. Returns: bytes: The plaintext<|endoftext|>
3d9053d1ac1dd71b952b711bf8c5de30da5909913b6121fc5a97ff66f1d6aeb3
def extract_windows(eg, line, regex, outfile, win_size=62): '\n given a line of labels, and the saved block of feature vectors,\n this function will extract windows of a given size and assign them\n to their label in a label -- flattened_data file\n win_size comes from the left and right context you provided to kaldi \n to splice the frames\n ' for (i, label) in enumerate(regex.findall(line)): catFeats = '' for row in eg[i:(i + win_size)]: row = row[0].replace(']', '') catFeats += (row + ' ') print(label, catFeats, file=outfile)
given a line of labels, and the saved block of feature vectors, this function will extract windows of a given size and assign them to their label in a label -- flattened_data file win_size comes from the left and right context you provided to kaldi to splice the frames
egs-to-csv.py
extract_windows
JRMeyer/kaldi-tf
15
python
def extract_windows(eg, line, regex, outfile, win_size=62): '\n given a line of labels, and the saved block of feature vectors,\n this function will extract windows of a given size and assign them\n to their label in a label -- flattened_data file\n win_size comes from the left and right context you provided to kaldi \n to splice the frames\n ' for (i, label) in enumerate(regex.findall(line)): catFeats = for row in eg[i:(i + win_size)]: row = row[0].replace(']', ) catFeats += (row + ' ') print(label, catFeats, file=outfile)
def extract_windows(eg, line, regex, outfile, win_size=62): '\n given a line of labels, and the saved block of feature vectors,\n this function will extract windows of a given size and assign them\n to their label in a label -- flattened_data file\n win_size comes from the left and right context you provided to kaldi \n to splice the frames\n ' for (i, label) in enumerate(regex.findall(line)): catFeats = for row in eg[i:(i + win_size)]: row = row[0].replace(']', ) catFeats += (row + ' ') print(label, catFeats, file=outfile)<|docstring|>given a line of labels, and the saved block of feature vectors, this function will extract windows of a given size and assign them to their label in a label -- flattened_data file win_size comes from the left and right context you provided to kaldi to splice the frames<|endoftext|>
bf8c16243a53bf7ab9cec92409671ebe5bdfe046ed1cbaf31a008f337d3bffcd
def get_eg_dim(arkfile): '\n given kaldi ark file in txt format, find the dimension of the target labels\n ' with open(arkfile, 'r') as arkf: for line in arkf: if ('dim=' in line): egDim = re.search('dim=([0-9]*)', line).group(1) break else: pass return egDim
given kaldi ark file in txt format, find the dimension of the target labels
egs-to-csv.py
get_eg_dim
JRMeyer/kaldi-tf
15
python
def get_eg_dim(arkfile): '\n \n ' with open(arkfile, 'r') as arkf: for line in arkf: if ('dim=' in line): egDim = re.search('dim=([0-9]*)', line).group(1) break else: pass return egDim
def get_eg_dim(arkfile): '\n \n ' with open(arkfile, 'r') as arkf: for line in arkf: if ('dim=' in line): egDim = re.search('dim=([0-9]*)', line).group(1) break else: pass return egDim<|docstring|>given kaldi ark file in txt format, find the dimension of the target labels<|endoftext|>
c275f67ebeadad6489f82ea56d120d4880d30f14818ac9da65d8e8ac96c1da1a
def main(arkfile, outfile): '\n arkfile: is the input ark file from kaldi (egs.ark)\n regex: matches the labels of each eg based on number of dims in output layer\n outfile: where to save the output\n ' regex = re.compile('dim=[0-9]+ \\[ ([0-9]+) ') eg = [] with open(arkfile, 'r') as arkf: with open(outfile, 'a') as outf: for line in arkf: if ('input' in line): eg = [] pass elif ('output' in line): extract_windows(eg, line, regex, outf) else: eg.append([line.strip()])
arkfile: is the input ark file from kaldi (egs.ark) regex: matches the labels of each eg based on number of dims in output layer outfile: where to save the output
egs-to-csv.py
main
JRMeyer/kaldi-tf
15
python
def main(arkfile, outfile): '\n arkfile: is the input ark file from kaldi (egs.ark)\n regex: matches the labels of each eg based on number of dims in output layer\n outfile: where to save the output\n ' regex = re.compile('dim=[0-9]+ \\[ ([0-9]+) ') eg = [] with open(arkfile, 'r') as arkf: with open(outfile, 'a') as outf: for line in arkf: if ('input' in line): eg = [] pass elif ('output' in line): extract_windows(eg, line, regex, outf) else: eg.append([line.strip()])
def main(arkfile, outfile): '\n arkfile: is the input ark file from kaldi (egs.ark)\n regex: matches the labels of each eg based on number of dims in output layer\n outfile: where to save the output\n ' regex = re.compile('dim=[0-9]+ \\[ ([0-9]+) ') eg = [] with open(arkfile, 'r') as arkf: with open(outfile, 'a') as outf: for line in arkf: if ('input' in line): eg = [] pass elif ('output' in line): extract_windows(eg, line, regex, outf) else: eg.append([line.strip()])<|docstring|>arkfile: is the input ark file from kaldi (egs.ark) regex: matches the labels of each eg based on number of dims in output layer outfile: where to save the output<|endoftext|>
29e16bbb0527cadcd1e425b8b4128a3c08a5c53c5770a1be0631d435edf376dc
@pytest.mark.aggregator def test_get_supported_aggregations(aggregate_agent, query_agent): '\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n :return:\n ' query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) print('Aggregation agent: {}'.format(aggregate_agent.get('connection').get('type'))) result = query_agent.vip.rpc.call(AGG_AGENT_VIP, 'get_supported_aggregations').get(timeout=10) assert result print(result) conn = aggregate_agent.get('connection') if conn: if (conn.get('type') == 'mysql'): assert (result == ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM', 'BIT_AND', 'BIT_OR', 'BIT_XOR', 'GROUP_CONCAT', 'STD', 'STDDEV', 'STDDEV_POP', 'STDDEV_SAMP', 'VAR_POP', 'VAR_SAMP', 'VARIANCE']) elif (conn.get('type') == 'sqlite'): assert (result == ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM', 'TOTAL', 'GROUP_CONCAT']) elif (conn.get('type') == 'mongodb'): assert (result == ['SUM', 'COUNT', 'AVG', 'MIN', 'MAX', 'STDDEVPOP', 'STDDEVSAMP'])
:param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian :return:
volttrontesting/services/aggregate_historian/test_aggregate_historian.py
test_get_supported_aggregations
laroque/volttron
2
python
@pytest.mark.aggregator def test_get_supported_aggregations(aggregate_agent, query_agent): '\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n :return:\n ' query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) print('Aggregation agent: {}'.format(aggregate_agent.get('connection').get('type'))) result = query_agent.vip.rpc.call(AGG_AGENT_VIP, 'get_supported_aggregations').get(timeout=10) assert result print(result) conn = aggregate_agent.get('connection') if conn: if (conn.get('type') == 'mysql'): assert (result == ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM', 'BIT_AND', 'BIT_OR', 'BIT_XOR', 'GROUP_CONCAT', 'STD', 'STDDEV', 'STDDEV_POP', 'STDDEV_SAMP', 'VAR_POP', 'VAR_SAMP', 'VARIANCE']) elif (conn.get('type') == 'sqlite'): assert (result == ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM', 'TOTAL', 'GROUP_CONCAT']) elif (conn.get('type') == 'mongodb'): assert (result == ['SUM', 'COUNT', 'AVG', 'MIN', 'MAX', 'STDDEVPOP', 'STDDEVSAMP'])
@pytest.mark.aggregator def test_get_supported_aggregations(aggregate_agent, query_agent): '\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n :return:\n ' query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) print('Aggregation agent: {}'.format(aggregate_agent.get('connection').get('type'))) result = query_agent.vip.rpc.call(AGG_AGENT_VIP, 'get_supported_aggregations').get(timeout=10) assert result print(result) conn = aggregate_agent.get('connection') if conn: if (conn.get('type') == 'mysql'): assert (result == ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM', 'BIT_AND', 'BIT_OR', 'BIT_XOR', 'GROUP_CONCAT', 'STD', 'STDDEV', 'STDDEV_POP', 'STDDEV_SAMP', 'VAR_POP', 'VAR_SAMP', 'VARIANCE']) elif (conn.get('type') == 'sqlite'): assert (result == ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM', 'TOTAL', 'GROUP_CONCAT']) elif (conn.get('type') == 'mongodb'): assert (result == ['SUM', 'COUNT', 'AVG', 'MIN', 'MAX', 'STDDEVPOP', 'STDDEVSAMP'])<|docstring|>:param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian :return:<|endoftext|>
2dbb94f3ad66c0d3c4691089d568152c13b041933c4d5a7417bd1b1031684920
@pytest.mark.aggregator def test_single_topic_pattern(aggregate_agent, query_agent): '\n Test the basic functionality of aggregate historian when aggregating a\n single topic that is identified by topic_name_pattern instead of\n explicit topic\n 1. Publish fake data\n 2. Start aggregator agent with configurtion to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n Expected result:\n 1. Aggregate data should be computed correctly after restart as well.\n 2. Aggregate topics should be get updated instead of inserting a new record\n 3. aggregate topic should be based on user provided\n aggregation_topic_name even though the second time around we are\n aggregating for a single topic\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 10) gevent.sleep(1) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_name_pattern': 'device1/out_.*', 'aggregation_topic_name': 'device1/outsidetemp_aggregate', 'aggregation_type': 'sum', 'min_count': 2}, {'topic_name_pattern': 'device1/in_*', 'aggregation_topic_name': 'device1/intemp_aggregate', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/outsidetemp_aggregate', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) print(result1) assert (result1['metadata'] == {}) result2 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/intemp_aggregate', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) assert (result2['values'][0][0] == 
result1['values'][0][0]) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result2['values'][0][1] == expected_sum) assert (result2['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) expected_list = [['device1/outsidetemp_aggregate', 'sum', '1m', 'device1/out_.*'], ['device1/intemp_aggregate', 'sum', '1m', 'device1/in_*']] assert (len(result) == 2) for row in result: expected_list.remove(row) assert (len(expected_list) == 0) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m'])
Test the basic functionality of aggregate historian when aggregating a single topic that is identified by topic_name_pattern instead of explicit topic 1. Publish fake data 2. Start aggregator agent with configurtion to collect sum of data in two different intervals. 3. Sleep for 1 minute 4. Do an rpc call to historian to verify data Expected result: 1. Aggregate data should be computed correctly after restart as well. 2. Aggregate topics should be get updated instead of inserting a new record 3. aggregate topic should be based on user provided aggregation_topic_name even though the second time around we are aggregating for a single topic :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian
volttrontesting/services/aggregate_historian/test_aggregate_historian.py
test_single_topic_pattern
laroque/volttron
2
python
@pytest.mark.aggregator def test_single_topic_pattern(aggregate_agent, query_agent): '\n Test the basic functionality of aggregate historian when aggregating a\n single topic that is identified by topic_name_pattern instead of\n explicit topic\n 1. Publish fake data\n 2. Start aggregator agent with configurtion to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n Expected result:\n 1. Aggregate data should be computed correctly after restart as well.\n 2. Aggregate topics should be get updated instead of inserting a new record\n 3. aggregate topic should be based on user provided\n aggregation_topic_name even though the second time around we are\n aggregating for a single topic\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 10) gevent.sleep(1) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_name_pattern': 'device1/out_.*', 'aggregation_topic_name': 'device1/outsidetemp_aggregate', 'aggregation_type': 'sum', 'min_count': 2}, {'topic_name_pattern': 'device1/in_*', 'aggregation_topic_name': 'device1/intemp_aggregate', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/outsidetemp_aggregate', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) print(result1) assert (result1['metadata'] == {}) result2 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/intemp_aggregate', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) assert (result2['values'][0][0] == 
result1['values'][0][0]) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result2['values'][0][1] == expected_sum) assert (result2['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) expected_list = [['device1/outsidetemp_aggregate', 'sum', '1m', 'device1/out_.*'], ['device1/intemp_aggregate', 'sum', '1m', 'device1/in_*']] assert (len(result) == 2) for row in result: expected_list.remove(row) assert (len(expected_list) == 0) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m'])
@pytest.mark.aggregator def test_single_topic_pattern(aggregate_agent, query_agent): '\n Test the basic functionality of aggregate historian when aggregating a\n single topic that is identified by topic_name_pattern instead of\n explicit topic\n 1. Publish fake data\n 2. Start aggregator agent with configurtion to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n Expected result:\n 1. Aggregate data should be computed correctly after restart as well.\n 2. Aggregate topics should be get updated instead of inserting a new record\n 3. aggregate topic should be based on user provided\n aggregation_topic_name even though the second time around we are\n aggregating for a single topic\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 10) gevent.sleep(1) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_name_pattern': 'device1/out_.*', 'aggregation_topic_name': 'device1/outsidetemp_aggregate', 'aggregation_type': 'sum', 'min_count': 2}, {'topic_name_pattern': 'device1/in_*', 'aggregation_topic_name': 'device1/intemp_aggregate', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/outsidetemp_aggregate', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) print(result1) assert (result1['metadata'] == {}) result2 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/intemp_aggregate', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) assert (result2['values'][0][0] == 
result1['values'][0][0]) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result2['values'][0][1] == expected_sum) assert (result2['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) expected_list = [['device1/outsidetemp_aggregate', 'sum', '1m', 'device1/out_.*'], ['device1/intemp_aggregate', 'sum', '1m', 'device1/in_*']] assert (len(result) == 2) for row in result: expected_list.remove(row) assert (len(expected_list) == 0) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m'])<|docstring|>Test the basic functionality of aggregate historian when aggregating a single topic that is identified by topic_name_pattern instead of explicit topic 1. Publish fake data 2. Start aggregator agent with configurtion to collect sum of data in two different intervals. 3. Sleep for 1 minute 4. Do an rpc call to historian to verify data Expected result: 1. Aggregate data should be computed correctly after restart as well. 2. Aggregate topics should be get updated instead of inserting a new record 3. aggregate topic should be based on user provided aggregation_topic_name even though the second time around we are aggregating for a single topic :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian<|endoftext|>
a59540d903a5e590e669d613a0415d271b6d04600e7ed11bc84ecb5d34669f69
@pytest.mark.timeout(180) @pytest.mark.aggregator def test_single_topic(aggregate_agent, query_agent): '\n Test the basic functionality of aggregate historian when aggregating a\n single topic\n 1. Publish fake data\n 2. Start aggregator agent with configurtion to collect sum of data in\n two different intervals.\n 3. Sleep for 4 minutes\n 4. Do an rpc call to historian to verify data\n\n Expected result:\n 1. Aggregate data should be computed for both 2m and 3m intervals and\n for both the configured points.\n 2. timestamp for both points within a single aggregation group should be\n time synchronized.\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to publish to and query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 30) gevent.sleep(0.5) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_names': ['device1/out_temp'], 'aggregation_type': 'sum', 'min_count': 2}, {'topic_names': ['device1/in_temp'], 'aggregation_type': 'sum', 'min_count': 2}]}, {'aggregation_period': '2m', 'use_calendar_time_periods': False, 'points': [{'topic_names': ['device1/out_temp'], 'aggregation_type': 'sum', 'min_count': 2}, {'topic_names': ['device1/in_temp'], 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep((2.5 * 60)) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/out_temp', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print('result1: {}'.format(result1)) result2 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/in_temp', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print('result2: {}'.format(result2)) assert (result2['values'][0][0] == 
result1['values'][0][0]) assert (result2['values'][1][0] == result1['values'][1][0]) diff = compute_timediff_seconds(result2['values'][1][0], result2['values'][0][0]) assert (diff == 60) assert (result1['metadata'] == result2['metadata'] == {'units': 'F', 'tz': 'UTC', 'type': 'float'}) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result2['values'][0][1] == expected_sum) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][1][0], 1) assert (result1['values'][1][1] == expected_sum) assert (result2['values'][1][1] == expected_sum) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/in_temp', agg_type='sum', agg_period='2m', count=20, order='FIRST_TO_LAST').get(timeout=100) result2 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/out_temp', agg_type='sum', agg_period='2m', count=20, order='FIRST_TO_LAST').get(timeout=100) assert (result2['values'][0][0] == result1['values'][0][0]) assert (result2['values'][1][0] == result1['values'][1][0]) diff = compute_timediff_seconds(result2['values'][1][0], result2['values'][0][0]) assert (diff == 120) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][0][0], 2) assert (result1['values'][0][1] == expected_sum) assert (result2['values'][0][1] == expected_sum) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][1][0], 2) assert (result1['values'][1][1] == expected_sum) assert (result2['values'][1][1] == expected_sum) assert (result1['metadata'] == result2['metadata'] == {'units': 'F', 'tz': 'UTC', 'type': 'float'}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 4) expected_list = [['device1/in_temp', 'sum', '1m', 'device1/in_temp'], ['device1/out_temp', 'sum', '1m', 
['device1/out_temp']], ['device1/in_temp', 'sum', '2m', ['device1/in_temp']], ['device1/out_temp', 'sum', '2m', ['device1/out_temp']]] for row in result: assert ([row[0]] == row[3]) assert (row[1] == 'sum') assert ((row[2] == '1m') or (row[2] == '2m')) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m', 'sum_2m'])
Test the basic functionality of aggregate historian when aggregating a single topic 1. Publish fake data 2. Start aggregator agent with configurtion to collect sum of data in two different intervals. 3. Sleep for 4 minutes 4. Do an rpc call to historian to verify data Expected result: 1. Aggregate data should be computed for both 2m and 3m intervals and for both the configured points. 2. timestamp for both points within a single aggregation group should be time synchronized. :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to publish to and query historian
volttrontesting/services/aggregate_historian/test_aggregate_historian.py
test_single_topic
laroque/volttron
2
python
@pytest.mark.timeout(180) @pytest.mark.aggregator def test_single_topic(aggregate_agent, query_agent): '\n Test the basic functionality of aggregate historian when aggregating a\n single topic\n 1. Publish fake data\n 2. Start aggregator agent with configurtion to collect sum of data in\n two different intervals.\n 3. Sleep for 4 minutes\n 4. Do an rpc call to historian to verify data\n\n Expected result:\n 1. Aggregate data should be computed for both 2m and 3m intervals and\n for both the configured points.\n 2. timestamp for both points within a single aggregation group should be\n time synchronized.\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to publish to and query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 30) gevent.sleep(0.5) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_names': ['device1/out_temp'], 'aggregation_type': 'sum', 'min_count': 2}, {'topic_names': ['device1/in_temp'], 'aggregation_type': 'sum', 'min_count': 2}]}, {'aggregation_period': '2m', 'use_calendar_time_periods': False, 'points': [{'topic_names': ['device1/out_temp'], 'aggregation_type': 'sum', 'min_count': 2}, {'topic_names': ['device1/in_temp'], 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep((2.5 * 60)) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/out_temp', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print('result1: {}'.format(result1)) result2 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/in_temp', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print('result2: {}'.format(result2)) assert (result2['values'][0][0] == 
result1['values'][0][0]) assert (result2['values'][1][0] == result1['values'][1][0]) diff = compute_timediff_seconds(result2['values'][1][0], result2['values'][0][0]) assert (diff == 60) assert (result1['metadata'] == result2['metadata'] == {'units': 'F', 'tz': 'UTC', 'type': 'float'}) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result2['values'][0][1] == expected_sum) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][1][0], 1) assert (result1['values'][1][1] == expected_sum) assert (result2['values'][1][1] == expected_sum) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/in_temp', agg_type='sum', agg_period='2m', count=20, order='FIRST_TO_LAST').get(timeout=100) result2 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/out_temp', agg_type='sum', agg_period='2m', count=20, order='FIRST_TO_LAST').get(timeout=100) assert (result2['values'][0][0] == result1['values'][0][0]) assert (result2['values'][1][0] == result1['values'][1][0]) diff = compute_timediff_seconds(result2['values'][1][0], result2['values'][0][0]) assert (diff == 120) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][0][0], 2) assert (result1['values'][0][1] == expected_sum) assert (result2['values'][0][1] == expected_sum) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][1][0], 2) assert (result1['values'][1][1] == expected_sum) assert (result2['values'][1][1] == expected_sum) assert (result1['metadata'] == result2['metadata'] == {'units': 'F', 'tz': 'UTC', 'type': 'float'}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 4) expected_list = [['device1/in_temp', 'sum', '1m', 'device1/in_temp'], ['device1/out_temp', 'sum', '1m', 
['device1/out_temp']], ['device1/in_temp', 'sum', '2m', ['device1/in_temp']], ['device1/out_temp', 'sum', '2m', ['device1/out_temp']]] for row in result: assert ([row[0]] == row[3]) assert (row[1] == 'sum') assert ((row[2] == '1m') or (row[2] == '2m')) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m', 'sum_2m'])
@pytest.mark.timeout(180) @pytest.mark.aggregator def test_single_topic(aggregate_agent, query_agent): '\n Test the basic functionality of aggregate historian when aggregating a\n single topic\n 1. Publish fake data\n 2. Start aggregator agent with configurtion to collect sum of data in\n two different intervals.\n 3. Sleep for 4 minutes\n 4. Do an rpc call to historian to verify data\n\n Expected result:\n 1. Aggregate data should be computed for both 2m and 3m intervals and\n for both the configured points.\n 2. timestamp for both points within a single aggregation group should be\n time synchronized.\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to publish to and query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 30) gevent.sleep(0.5) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_names': ['device1/out_temp'], 'aggregation_type': 'sum', 'min_count': 2}, {'topic_names': ['device1/in_temp'], 'aggregation_type': 'sum', 'min_count': 2}]}, {'aggregation_period': '2m', 'use_calendar_time_periods': False, 'points': [{'topic_names': ['device1/out_temp'], 'aggregation_type': 'sum', 'min_count': 2}, {'topic_names': ['device1/in_temp'], 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep((2.5 * 60)) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/out_temp', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print('result1: {}'.format(result1)) result2 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/in_temp', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print('result2: {}'.format(result2)) assert (result2['values'][0][0] == 
result1['values'][0][0]) assert (result2['values'][1][0] == result1['values'][1][0]) diff = compute_timediff_seconds(result2['values'][1][0], result2['values'][0][0]) assert (diff == 60) assert (result1['metadata'] == result2['metadata'] == {'units': 'F', 'tz': 'UTC', 'type': 'float'}) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result2['values'][0][1] == expected_sum) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][1][0], 1) assert (result1['values'][1][1] == expected_sum) assert (result2['values'][1][1] == expected_sum) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/in_temp', agg_type='sum', agg_period='2m', count=20, order='FIRST_TO_LAST').get(timeout=100) result2 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/out_temp', agg_type='sum', agg_period='2m', count=20, order='FIRST_TO_LAST').get(timeout=100) assert (result2['values'][0][0] == result1['values'][0][0]) assert (result2['values'][1][0] == result1['values'][1][0]) diff = compute_timediff_seconds(result2['values'][1][0], result2['values'][0][0]) assert (diff == 120) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][0][0], 2) assert (result1['values'][0][1] == expected_sum) assert (result2['values'][0][1] == expected_sum) expected_sum = get_expected_sum(query_agent, 'device1/in_temp', result2['values'][1][0], 2) assert (result1['values'][1][1] == expected_sum) assert (result2['values'][1][1] == expected_sum) assert (result1['metadata'] == result2['metadata'] == {'units': 'F', 'tz': 'UTC', 'type': 'float'}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 4) expected_list = [['device1/in_temp', 'sum', '1m', 'device1/in_temp'], ['device1/out_temp', 'sum', '1m', 
['device1/out_temp']], ['device1/in_temp', 'sum', '2m', ['device1/in_temp']], ['device1/out_temp', 'sum', '2m', ['device1/out_temp']]] for row in result: assert ([row[0]] == row[3]) assert (row[1] == 'sum') assert ((row[2] == '1m') or (row[2] == '2m')) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m', 'sum_2m'])<|docstring|>Test the basic functionality of aggregate historian when aggregating a single topic 1. Publish fake data 2. Start aggregator agent with configurtion to collect sum of data in two different intervals. 3. Sleep for 4 minutes 4. Do an rpc call to historian to verify data Expected result: 1. Aggregate data should be computed for both 2m and 3m intervals and for both the configured points. 2. timestamp for both points within a single aggregation group should be time synchronized. :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to publish to and query historian<|endoftext|>
9d28ca6e925201fd7b2bbe48372e334050b90e1d2faee5622ffd8020951996cf
@pytest.mark.aggregator def test_multiple_topic_pattern(aggregate_agent, query_agent): '\n Test aggregate historian when aggregating across multiple topics\n that are identified by topic_name_pattern instead of explicit topic name\n 1. Publish fake data\n 2. Start aggregator agent with configurtion to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n Expected result:\n 1. Aggregate data should be computed for both 2m for the two configured\n points.\n 2. timestamp for both points within a single aggregation group should be\n time synchronized\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 10) gevent.sleep(0.5) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_name_pattern': 'device1/*', 'aggregation_topic_name': 'device1/all', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/all', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print('result1 {}'.format(result1)) expected_sum = get_expected_sum(query_agent, ['device1/in_temp', 'device1/out_temp'], result1['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/all') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (result[0][3] == 'device1/*') finally: cleanup(aggregate_agent['connection']['type'], 
['sum_1m'])
Test aggregate historian when aggregating across multiple topics that are identified by topic_name_pattern instead of explicit topic name 1. Publish fake data 2. Start aggregator agent with configurtion to collect sum of data in two different intervals. 3. Sleep for 1 minute 4. Do an rpc call to historian to verify data Expected result: 1. Aggregate data should be computed for both 2m for the two configured points. 2. timestamp for both points within a single aggregation group should be time synchronized :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian
volttrontesting/services/aggregate_historian/test_aggregate_historian.py
test_multiple_topic_pattern
laroque/volttron
2
python
@pytest.mark.aggregator def test_multiple_topic_pattern(aggregate_agent, query_agent): '\n Test aggregate historian when aggregating across multiple topics\n that are identified by topic_name_pattern instead of explicit topic name\n 1. Publish fake data\n 2. Start aggregator agent with configurtion to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n Expected result:\n 1. Aggregate data should be computed for both 2m for the two configured\n points.\n 2. timestamp for both points within a single aggregation group should be\n time synchronized\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 10) gevent.sleep(0.5) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_name_pattern': 'device1/*', 'aggregation_topic_name': 'device1/all', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/all', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print('result1 {}'.format(result1)) expected_sum = get_expected_sum(query_agent, ['device1/in_temp', 'device1/out_temp'], result1['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/all') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (result[0][3] == 'device1/*') finally: cleanup(aggregate_agent['connection']['type'], 
['sum_1m'])
@pytest.mark.aggregator def test_multiple_topic_pattern(aggregate_agent, query_agent): '\n Test aggregate historian when aggregating across multiple topics\n that are identified by topic_name_pattern instead of explicit topic name\n 1. Publish fake data\n 2. Start aggregator agent with configurtion to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n Expected result:\n 1. Aggregate data should be computed for both 2m for the two configured\n points.\n 2. timestamp for both points within a single aggregation group should be\n time synchronized\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 10) gevent.sleep(0.5) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_name_pattern': 'device1/*', 'aggregation_topic_name': 'device1/all', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/all', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print('result1 {}'.format(result1)) expected_sum = get_expected_sum(query_agent, ['device1/in_temp', 'device1/out_temp'], result1['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/all') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (result[0][3] == 'device1/*') finally: cleanup(aggregate_agent['connection']['type'], 
['sum_1m'])<|docstring|>Test aggregate historian when aggregating across multiple topics that are identified by topic_name_pattern instead of explicit topic name 1. Publish fake data 2. Start aggregator agent with configurtion to collect sum of data in two different intervals. 3. Sleep for 1 minute 4. Do an rpc call to historian to verify data Expected result: 1. Aggregate data should be computed for both 2m for the two configured points. 2. timestamp for both points within a single aggregation group should be time synchronized :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian<|endoftext|>
ee03f65ec4586b04ceea8b5956ca2dc48de03cafac936a9e176180470dc15071
@pytest.mark.aggregator def test_multiple_topic_list(aggregate_agent, query_agent): '\n Test aggregate historian when aggregating across multiple topics\n that are identified by explicit list of topic names\n 1. Publish fake data\n 2. Start aggregator agent with configuration to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 5) gevent.sleep(0.5) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_names': ['device1/out_temp', 'device1/in_temp'], 'aggregation_topic_name': 'device1/all2', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/all2', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print(result1) expected_sum = get_expected_sum(query_agent, ['device1/in_temp', 'device1/out_temp'], result1['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/all2') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (set(result[0][3]) == {'device1/in_temp', 'device1/out_temp'}) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m'])
Test aggregate historian when aggregating across multiple topics that are identified by explicit list of topic names 1. Publish fake data 2. Start aggregator agent with configuration to collect sum of data in two different intervals. 3. Sleep for 1 minute 4. Do an rpc call to historian to verify data :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian
volttrontesting/services/aggregate_historian/test_aggregate_historian.py
test_multiple_topic_list
laroque/volttron
2
python
@pytest.mark.aggregator def test_multiple_topic_list(aggregate_agent, query_agent): '\n Test aggregate historian when aggregating across multiple topics\n that are identified by explicit list of topic names\n 1. Publish fake data\n 2. Start aggregator agent with configuration to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 5) gevent.sleep(0.5) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_names': ['device1/out_temp', 'device1/in_temp'], 'aggregation_topic_name': 'device1/all2', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/all2', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print(result1) expected_sum = get_expected_sum(query_agent, ['device1/in_temp', 'device1/out_temp'], result1['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/all2') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (set(result[0][3]) == {'device1/in_temp', 'device1/out_temp'}) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m'])
@pytest.mark.aggregator def test_multiple_topic_list(aggregate_agent, query_agent): '\n Test aggregate historian when aggregating across multiple topics\n that are identified by explicit list of topic names\n 1. Publish fake data\n 2. Start aggregator agent with configuration to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n ' start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 5) gevent.sleep(0.5) try: aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_names': ['device1/out_temp', 'device1/in_temp'], 'aggregation_topic_name': 'device1/all2', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/all2', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=100) print(result1) expected_sum = get_expected_sum(query_agent, ['device1/in_temp', 'device1/out_temp'], result1['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/all2') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (set(result[0][3]) == {'device1/in_temp', 'device1/out_temp'}) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m'])<|docstring|>Test aggregate historian when aggregating across multiple topics that are identified by explicit list of topic names 1. Publish fake data 2. 
Start aggregator agent with configuration to collect sum of data in two different intervals. 3. Sleep for 1 minute 4. Do an rpc call to historian to verify data :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian<|endoftext|>
8ad02be545834c647bb3ab2f15e7ad1d8228bb15e291c6625138424af7669ea3
@pytest.mark.timeout(180) @pytest.mark.aggregator def test_topic_reconfiguration(aggregate_agent, query_agent): "\n Test aggregate historian when topic names/topic pattern is updated and\n restarted. Check if aggregate topic list gets updated correctly and doesn't\n cause any issue with historian\n 1. Publish fake data\n 2. Start aggregator agent with configuration to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n " try: start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 3) gevent.sleep(0.5) aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_names': ['device1/out_temp', 'device1/in_temp'], 'aggregation_topic_name': 'device1/aggregation_name', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/aggregation_name', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) print(result1) expected_sum = get_expected_sum(query_agent, ['device1/in_temp', 'device1/out_temp'], result1['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/aggregation_name') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (set(result[0][3]) == {'device1/in_temp', 'device1/out_temp'}) start_time = datetime.utcnow() publish_test_data(query_agent, start_time, 0, 5) 
aggregate_agent['aggregations'][0]['points'][0]['topic_names'] = ['device1/out_temp'] aggregate_agent['aggregations'][0]['points'][0]['min_count'] = 1 print('Before reinstall current time is {}'.format(datetime.utcnow())) query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() print('After configure\n\n') gevent.sleep(3) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/aggregation_name', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) print('Result:{}'.format(result1)) lindex = (len(result1['values']) - 1) print('lindex = {}'.format(lindex)) expected_sum = get_expected_sum(query_agent, ['device1/out_temp'], result1['values'][lindex][0], 1) assert (result1['values'][lindex][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/aggregation_name') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (result[0][3] == ['device1/out_temp']) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m'])
Test aggregate historian when topic names/topic pattern is updated and restarted. Check if aggregate topic list gets updated correctly and doesn't cause any issue with historian 1. Publish fake data 2. Start aggregator agent with configuration to collect sum of data in two different intervals. 3. Sleep for 1 minute 4. Do an rpc call to historian to verify data :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian
volttrontesting/services/aggregate_historian/test_aggregate_historian.py
test_topic_reconfiguration
laroque/volttron
2
python
@pytest.mark.timeout(180) @pytest.mark.aggregator def test_topic_reconfiguration(aggregate_agent, query_agent): "\n Test aggregate historian when topic names/topic pattern is updated and\n restarted. Check if aggregate topic list gets updated correctly and doesn't\n cause any issue with historian\n 1. Publish fake data\n 2. Start aggregator agent with configuration to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n " try: start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 3) gevent.sleep(0.5) aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_names': ['device1/out_temp', 'device1/in_temp'], 'aggregation_topic_name': 'device1/aggregation_name', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/aggregation_name', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) print(result1) expected_sum = get_expected_sum(query_agent, ['device1/in_temp', 'device1/out_temp'], result1['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/aggregation_name') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (set(result[0][3]) == {'device1/in_temp', 'device1/out_temp'}) start_time = datetime.utcnow() publish_test_data(query_agent, start_time, 0, 5) 
aggregate_agent['aggregations'][0]['points'][0]['topic_names'] = ['device1/out_temp'] aggregate_agent['aggregations'][0]['points'][0]['min_count'] = 1 print('Before reinstall current time is {}'.format(datetime.utcnow())) query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() print('After configure\n\n') gevent.sleep(3) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/aggregation_name', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) print('Result:{}'.format(result1)) lindex = (len(result1['values']) - 1) print('lindex = {}'.format(lindex)) expected_sum = get_expected_sum(query_agent, ['device1/out_temp'], result1['values'][lindex][0], 1) assert (result1['values'][lindex][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/aggregation_name') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (result[0][3] == ['device1/out_temp']) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m'])
@pytest.mark.timeout(180) @pytest.mark.aggregator def test_topic_reconfiguration(aggregate_agent, query_agent): "\n Test aggregate historian when topic names/topic pattern is updated and\n restarted. Check if aggregate topic list gets updated correctly and doesn't\n cause any issue with historian\n 1. Publish fake data\n 2. Start aggregator agent with configuration to collect sum of data in\n two different intervals.\n 3. Sleep for 1 minute\n 4. Do an rpc call to historian to verify data\n\n\n :param aggregate_agent: the aggregate historian configuration\n :param query_agent: fake agent used to query historian\n " try: start_time = (datetime.utcnow() - timedelta(minutes=2)) publish_test_data(query_agent, start_time, 0, 3) gevent.sleep(0.5) aggregate_agent['aggregations'] = [{'aggregation_period': '1m', 'use_calendar_time_periods': True, 'points': [{'topic_names': ['device1/out_temp', 'device1/in_temp'], 'aggregation_topic_name': 'device1/aggregation_name', 'aggregation_type': 'sum', 'min_count': 2}]}] query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() gevent.sleep(1) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/aggregation_name', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) print(result1) expected_sum = get_expected_sum(query_agent, ['device1/in_temp', 'device1/out_temp'], result1['values'][0][0], 1) assert (result1['values'][0][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/aggregation_name') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (set(result[0][3]) == {'device1/in_temp', 'device1/out_temp'}) start_time = datetime.utcnow() publish_test_data(query_agent, start_time, 0, 5) 
aggregate_agent['aggregations'][0]['points'][0]['topic_names'] = ['device1/out_temp'] aggregate_agent['aggregations'][0]['points'][0]['min_count'] = 1 print('Before reinstall current time is {}'.format(datetime.utcnow())) query_agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', AGG_AGENT_VIP, 'config', aggregate_agent).get() print('After configure\n\n') gevent.sleep(3) result1 = query_agent.vip.rpc.call('platform.historian', 'query', topic='device1/aggregation_name', agg_type='sum', agg_period='1m', count=20, order='FIRST_TO_LAST').get(timeout=10) print('Result:{}'.format(result1)) lindex = (len(result1['values']) - 1) print('lindex = {}'.format(lindex)) expected_sum = get_expected_sum(query_agent, ['device1/out_temp'], result1['values'][lindex][0], 1) assert (result1['values'][lindex][1] == expected_sum) assert (result1['metadata'] == {}) result = query_agent.vip.rpc.call('platform.historian', 'get_aggregate_topics').get(10) print('agg topic list {}'.format(result)) assert (len(result) == 1) assert (result[0][0] == 'device1/aggregation_name') assert (result[0][1] == 'sum') assert (result[0][2] == '1m') assert (result[0][3] == ['device1/out_temp']) finally: cleanup(aggregate_agent['connection']['type'], ['sum_1m'])<|docstring|>Test aggregate historian when topic names/topic pattern is updated and restarted. Check if aggregate topic list gets updated correctly and doesn't cause any issue with historian 1. Publish fake data 2. Start aggregator agent with configuration to collect sum of data in two different intervals. 3. Sleep for 1 minute 4. Do an rpc call to historian to verify data :param aggregate_agent: the aggregate historian configuration :param query_agent: fake agent used to query historian<|endoftext|>
35e8bffd02f07127cb07b596daaf9439c371abc78ec136d8b07d9f6523017c78
def fromLista1(lista): 'Create tree from a list [value, childlist]\n\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0 Where child list contains trees or is the list empty. ' r = Tree(lista[0]) r.f = [fromLista1(x) for x in lista[1]] return r
Create tree from a list [value, childlist] Where child list contains trees or is the list empty.
draw_tree.py
fromLista1
IvanC-IT/Mutliway-Tree
0
python
def fromLista1(lista): 'Create tree from a list [value, childlist]\n\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0 Where child list contains trees or is the list empty. ' r = Tree(lista[0]) r.f = [fromLista1(x) for x in lista[1]] return r
def fromLista1(lista): 'Create tree from a list [value, childlist]\n\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0 Where child list contains trees or is the list empty. ' r = Tree(lista[0]) r.f = [fromLista1(x) for x in lista[1]] return r<|docstring|>Create tree from a list [value, childlist] Where child list contains trees or is the list empty.<|endoftext|>
9a3e95eb05fb5a2427d2b19070c256c8b7efc95193e443696f4915eb98486d52
def toLista(node): ' Convert the tree to a list of lists [value, childlist]' return [node.id, [toLista(x) for x in node.f]]
Convert the tree to a list of lists [value, childlist]
draw_tree.py
toLista
IvanC-IT/Mutliway-Tree
0
python
def toLista(node): ' ' return [node.id, [toLista(x) for x in node.f]]
def toLista(node): ' ' return [node.id, [toLista(x) for x in node.f]]<|docstring|>Convert the tree to a list of lists [value, childlist]<|endoftext|>
f43f553356f4b00aa738aa591c38fd54774c139c4915f8d5ba3c894b886fe5f0
def create_conditions(self, solution): 'Create the conditions to be verified with an SMT solver.' con1 = (solution.V_sym <= 0) con2 = (solution.V_sym > 0) con3 = sp.Or((solution.V_sym > 0), (solution.dtV_sym <= (- self.gamma))) return (con1, con2, con3)
Create the conditions to be verified with an SMT solver.
f4cs/specifications/reach_while_stay.py
create_conditions
CFVerdier/F4CS
2
python
def create_conditions(self, solution): con1 = (solution.V_sym <= 0) con2 = (solution.V_sym > 0) con3 = sp.Or((solution.V_sym > 0), (solution.dtV_sym <= (- self.gamma))) return (con1, con2, con3)
def create_conditions(self, solution): con1 = (solution.V_sym <= 0) con2 = (solution.V_sym > 0) con3 = sp.Or((solution.V_sym > 0), (solution.dtV_sym <= (- self.gamma))) return (con1, con2, con3)<|docstring|>Create the conditions to be verified with an SMT solver.<|endoftext|>
8064a69be470d91e929e7cfbf225928db26b06de2043c2e09040fae7f877beca
def __init__(self, edges=None, join=False, vertices=None): ' Assumes edges are hyperbolic lines or hypercycles of intersecting\n lines ' if (vertices is None): vertices = ([None] * len(edges)) for i in range(len(edges)): (e1, e2) = (edges[(i - 1)], edges[i]) if isinstance(e1, Point): vertices[i] = e1 continue if isinstance(e2, Point): vertices[i] = e2 continue pts = e1.intersectionsWithHcycle(e2) if (len(pts) <= 0): raise ValueError('Polygon edges do not intersect') elif (len(pts) > 1): raise ValueError('Polygon edge join is ambiguous') vertices[i] = pts[0] if (edges is None): edges = ([None] * len(vertices)) for i in range(len(vertices)): (startP, endP) = (vertices[i], vertices[((i + 1) % len(vertices))]) edges[i] = Line.fromPoints(*startP, *endP, segment=True) if join: edges = list(edges) for (i, edge) in enumerate(edges): (startP, endP) = (vertices[i], vertices[((i + 1) % len(edges))]) if isinstance(edge, Point): edges[i] = edge continue edges[i] = edge.trimmed(*startP, *endP, chooseShorter=True) self.edges = tuple(edges) self.vertices = tuple(vertices)
Assumes edges are hyperbolic lines or hypercycles of intersecting lines
hyperbolic/poincare/Polygon.py
__init__
imagirom/hyperbolic
77
python
def __init__(self, edges=None, join=False, vertices=None): ' Assumes edges are hyperbolic lines or hypercycles of intersecting\n lines ' if (vertices is None): vertices = ([None] * len(edges)) for i in range(len(edges)): (e1, e2) = (edges[(i - 1)], edges[i]) if isinstance(e1, Point): vertices[i] = e1 continue if isinstance(e2, Point): vertices[i] = e2 continue pts = e1.intersectionsWithHcycle(e2) if (len(pts) <= 0): raise ValueError('Polygon edges do not intersect') elif (len(pts) > 1): raise ValueError('Polygon edge join is ambiguous') vertices[i] = pts[0] if (edges is None): edges = ([None] * len(vertices)) for i in range(len(vertices)): (startP, endP) = (vertices[i], vertices[((i + 1) % len(vertices))]) edges[i] = Line.fromPoints(*startP, *endP, segment=True) if join: edges = list(edges) for (i, edge) in enumerate(edges): (startP, endP) = (vertices[i], vertices[((i + 1) % len(edges))]) if isinstance(edge, Point): edges[i] = edge continue edges[i] = edge.trimmed(*startP, *endP, chooseShorter=True) self.edges = tuple(edges) self.vertices = tuple(vertices)
def __init__(self, edges=None, join=False, vertices=None): ' Assumes edges are hyperbolic lines or hypercycles of intersecting\n lines ' if (vertices is None): vertices = ([None] * len(edges)) for i in range(len(edges)): (e1, e2) = (edges[(i - 1)], edges[i]) if isinstance(e1, Point): vertices[i] = e1 continue if isinstance(e2, Point): vertices[i] = e2 continue pts = e1.intersectionsWithHcycle(e2) if (len(pts) <= 0): raise ValueError('Polygon edges do not intersect') elif (len(pts) > 1): raise ValueError('Polygon edge join is ambiguous') vertices[i] = pts[0] if (edges is None): edges = ([None] * len(vertices)) for i in range(len(vertices)): (startP, endP) = (vertices[i], vertices[((i + 1) % len(vertices))]) edges[i] = Line.fromPoints(*startP, *endP, segment=True) if join: edges = list(edges) for (i, edge) in enumerate(edges): (startP, endP) = (vertices[i], vertices[((i + 1) % len(edges))]) if isinstance(edge, Point): edges[i] = edge continue edges[i] = edge.trimmed(*startP, *endP, chooseShorter=True) self.edges = tuple(edges) self.vertices = tuple(vertices)<|docstring|>Assumes edges are hyperbolic lines or hypercycles of intersecting lines<|endoftext|>
d630b20bba1dfafb84feed8d2c1042755835b5fc4f74013d232b1caf9a840c8a
def offsetPolygon(self, offset, reverseOrder=False): ' If self is a CCW polygon, returns a CCW polygon that is smaller by\n offset. ' edges = self.edges if reverseOrder: edges = reversed(edges) offEdges = [edge.makeOffset(offset) for edge in edges if ((not isinstance(edge, Point)) or edge.isIdeal())] return Polygon.fromEdges(offEdges, join=True)
If self is a CCW polygon, returns a CCW polygon that is smaller by offset.
hyperbolic/poincare/Polygon.py
offsetPolygon
imagirom/hyperbolic
77
python
def offsetPolygon(self, offset, reverseOrder=False): ' If self is a CCW polygon, returns a CCW polygon that is smaller by\n offset. ' edges = self.edges if reverseOrder: edges = reversed(edges) offEdges = [edge.makeOffset(offset) for edge in edges if ((not isinstance(edge, Point)) or edge.isIdeal())] return Polygon.fromEdges(offEdges, join=True)
def offsetPolygon(self, offset, reverseOrder=False): ' If self is a CCW polygon, returns a CCW polygon that is smaller by\n offset. ' edges = self.edges if reverseOrder: edges = reversed(edges) offEdges = [edge.makeOffset(offset) for edge in edges if ((not isinstance(edge, Point)) or edge.isIdeal())] return Polygon.fromEdges(offEdges, join=True)<|docstring|>If self is a CCW polygon, returns a CCW polygon that is smaller by offset.<|endoftext|>
7381d61165fd28b49ed271870db46cf4a7b6b0de2a125f2d4301bf0a01097eda
def makeRestorePoints(self): ' Returns a list of points that can be used to recreate the polygon.\n This can be used to transform the polygon. Recreate it with\n Polygon.fromRestorePoints(points). ' points = [] for (vert, edge) in zip(self.vertices, self.edges): if isinstance(edge, Point): continue points.append(vert) points.append(edge.midpointEuclid()) return points
Returns a list of points that can be used to recreate the polygon. This can be used to transform the polygon. Recreate it with Polygon.fromRestorePoints(points).
hyperbolic/poincare/Polygon.py
makeRestorePoints
imagirom/hyperbolic
77
python
def makeRestorePoints(self): ' Returns a list of points that can be used to recreate the polygon.\n This can be used to transform the polygon. Recreate it with\n Polygon.fromRestorePoints(points). ' points = [] for (vert, edge) in zip(self.vertices, self.edges): if isinstance(edge, Point): continue points.append(vert) points.append(edge.midpointEuclid()) return points
def makeRestorePoints(self): ' Returns a list of points that can be used to recreate the polygon.\n This can be used to transform the polygon. Recreate it with\n Polygon.fromRestorePoints(points). ' points = [] for (vert, edge) in zip(self.vertices, self.edges): if isinstance(edge, Point): continue points.append(vert) points.append(edge.midpointEuclid()) return points<|docstring|>Returns a list of points that can be used to recreate the polygon. This can be used to transform the polygon. Recreate it with Polygon.fromRestorePoints(points).<|endoftext|>
26c2b395ad282d74546ad9604530adcd23887f2aa0cb3830d14f643ab0705427
def __new__(cls, dict_=None, **kwargs): 'Create a new instance.\n\n Args:\n dict_ (Dict[str, Any]): Source :obj:`dict` data.\n\n Keyword Args:\n **kwargs: Arbitrary keyword arguments.\n\n Notes:\n Keys with the same names as the builtin methods will be renamed\n with ``2`` suffix implicitly and internally.\n\n ' def __read__(dict_): __dict__ = dict() for (key, value) in dict_.items(): if (key in self.__data__): key = f'{key}2' if isinstance(value, dict): __dict__[key] = Info(value) else: __dict__[key] = value return __dict__ temp = list() for obj in cls.mro(): temp.extend(dir(obj)) cls.__data__ = set(temp) self = super().__new__(cls) if (dict_ is not None): if isinstance(dict_, Info): self = copy.deepcopy(dict_) else: dict_check(dict_) self.__dict__.update(__read__(dict_)) self.__dict__.update(__read__(kwargs)) return self
Create a new instance. Args: dict_ (Dict[str, Any]): Source :obj:`dict` data. Keyword Args: **kwargs: Arbitrary keyword arguments. Notes: Keys with the same names as the builtin methods will be renamed with ``2`` suffix implicitly and internally.
pcapkit/corekit/infoclass.py
__new__
JarryShaw/PyPCAPK
131
python
def __new__(cls, dict_=None, **kwargs): 'Create a new instance.\n\n Args:\n dict_ (Dict[str, Any]): Source :obj:`dict` data.\n\n Keyword Args:\n **kwargs: Arbitrary keyword arguments.\n\n Notes:\n Keys with the same names as the builtin methods will be renamed\n with ``2`` suffix implicitly and internally.\n\n ' def __read__(dict_): __dict__ = dict() for (key, value) in dict_.items(): if (key in self.__data__): key = f'{key}2' if isinstance(value, dict): __dict__[key] = Info(value) else: __dict__[key] = value return __dict__ temp = list() for obj in cls.mro(): temp.extend(dir(obj)) cls.__data__ = set(temp) self = super().__new__(cls) if (dict_ is not None): if isinstance(dict_, Info): self = copy.deepcopy(dict_) else: dict_check(dict_) self.__dict__.update(__read__(dict_)) self.__dict__.update(__read__(kwargs)) return self
def __new__(cls, dict_=None, **kwargs): 'Create a new instance.\n\n Args:\n dict_ (Dict[str, Any]): Source :obj:`dict` data.\n\n Keyword Args:\n **kwargs: Arbitrary keyword arguments.\n\n Notes:\n Keys with the same names as the builtin methods will be renamed\n with ``2`` suffix implicitly and internally.\n\n ' def __read__(dict_): __dict__ = dict() for (key, value) in dict_.items(): if (key in self.__data__): key = f'{key}2' if isinstance(value, dict): __dict__[key] = Info(value) else: __dict__[key] = value return __dict__ temp = list() for obj in cls.mro(): temp.extend(dir(obj)) cls.__data__ = set(temp) self = super().__new__(cls) if (dict_ is not None): if isinstance(dict_, Info): self = copy.deepcopy(dict_) else: dict_check(dict_) self.__dict__.update(__read__(dict_)) self.__dict__.update(__read__(kwargs)) return self<|docstring|>Create a new instance. Args: dict_ (Dict[str, Any]): Source :obj:`dict` data. Keyword Args: **kwargs: Arbitrary keyword arguments. Notes: Keys with the same names as the builtin methods will be renamed with ``2`` suffix implicitly and internally.<|endoftext|>
f696dca92ce96d00d8a2fa0f2f9a4522eecca19bc9e2f61663e32aee4302d4d0
def info2dict(self): 'Convert :class:`Info` into :obj:`dict`.\n\n Returns:\n Dict[str, Any]: Converted :obj:`dict`.\n\n ' dict_ = dict() for (key, value) in self.__dict__.items(): if isinstance(value, Info): dict_[key] = value.info2dict() elif isinstance(value, (tuple, list, set, frozenset)): temp = list() for item in value: if isinstance(item, Info): temp.append(item.info2dict()) else: temp.append(item) dict_[key] = value.__class__(temp) else: dict_[key] = value return dict_
Convert :class:`Info` into :obj:`dict`. Returns: Dict[str, Any]: Converted :obj:`dict`.
pcapkit/corekit/infoclass.py
info2dict
JarryShaw/PyPCAPK
131
python
def info2dict(self): 'Convert :class:`Info` into :obj:`dict`.\n\n Returns:\n Dict[str, Any]: Converted :obj:`dict`.\n\n ' dict_ = dict() for (key, value) in self.__dict__.items(): if isinstance(value, Info): dict_[key] = value.info2dict() elif isinstance(value, (tuple, list, set, frozenset)): temp = list() for item in value: if isinstance(item, Info): temp.append(item.info2dict()) else: temp.append(item) dict_[key] = value.__class__(temp) else: dict_[key] = value return dict_
def info2dict(self): 'Convert :class:`Info` into :obj:`dict`.\n\n Returns:\n Dict[str, Any]: Converted :obj:`dict`.\n\n ' dict_ = dict() for (key, value) in self.__dict__.items(): if isinstance(value, Info): dict_[key] = value.info2dict() elif isinstance(value, (tuple, list, set, frozenset)): temp = list() for item in value: if isinstance(item, Info): temp.append(item.info2dict()) else: temp.append(item) dict_[key] = value.__class__(temp) else: dict_[key] = value return dict_<|docstring|>Convert :class:`Info` into :obj:`dict`. Returns: Dict[str, Any]: Converted :obj:`dict`.<|endoftext|>
95675b89704e5bfe35ba33c1bdfa69d636ee6558397b6473bec408fc3e7a7cbb
def get_env_variable(var_name): 'Get the env var value or throw an exception' try: return os.environ[var_name] except KeyError: error_msg = ('Set the %s environment variable' % var_name) raise ImproperlyConfigured(error_msg)
Get the env var value or throw an exception
backend/backend/settings/base.py
get_env_variable
shearichard/django-react-todo-demo
0
python
def get_env_variable(var_name): try: return os.environ[var_name] except KeyError: error_msg = ('Set the %s environment variable' % var_name) raise ImproperlyConfigured(error_msg)
def get_env_variable(var_name): try: return os.environ[var_name] except KeyError: error_msg = ('Set the %s environment variable' % var_name) raise ImproperlyConfigured(error_msg)<|docstring|>Get the env var value or throw an exception<|endoftext|>
95c6220c40241006b32842bb93e0e8c7d280359aef545ff159f4d8ca78597e70
def run(self): ' Run it! ' import subprocess cmd = (((('pointless HKLIN ' + self.mtzin) + ' HKLOUT ') + self.mtzout) + ' ') cmd += f'''<< eof >>{self.log} ''' if self.spacegroup: cmd += f'''spacegroup {self.spacegroup} ''' if self.custom: cmd += (str(self.custom) + '\n') cmd += '\neof' if (self.showcommand == True): print(cmd) import sys log = open(self.log, 'w') log.write('Pointless run through python wrapper using command:\n\n') log.write((('pointlesswrap.py ' + ' '.join(sys.argv[1:])) + '\n\n')) log.close() if self.logview: subprocess.Popen(['logview', self.log], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) bold = '\x1b[1m' italic = '\x1b[3m' underline = '\x1b[4m' blink = '\x1b[5m' clear = '\x1b[0m' green = '\x1b[32m' yellow = '\x1b[33m' red = '\x1b[31m' purple = '\x1b[35m' print(f''' {underline}pointless.py{clear} > {purple}{self.log}{clear} ''') print(f'Running... ', end='', flush=True) try: s = subprocess.check_output(cmd, shell=True, stderr=subprocess.DEVNULL) self.result = s.decode('ascii') except: print(f'''{red}Error!{clear} ''') sys.exit() print(f'''{green}Complete!{clear} ''') import re shakes = open(self.log, 'r') for line in shakes: if re.match('.*Space group =.+', line): print((' ' + line.strip()))
Run it!
bin/pointless.py
run
jonathanrd/CCP4Py
1
python
def run(self): ' ' import subprocess cmd = (((('pointless HKLIN ' + self.mtzin) + ' HKLOUT ') + self.mtzout) + ' ') cmd += f'<< eof >>{self.log} ' if self.spacegroup: cmd += f'spacegroup {self.spacegroup} ' if self.custom: cmd += (str(self.custom) + '\n') cmd += '\neof' if (self.showcommand == True): print(cmd) import sys log = open(self.log, 'w') log.write('Pointless run through python wrapper using command:\n\n') log.write((('pointlesswrap.py ' + ' '.join(sys.argv[1:])) + '\n\n')) log.close() if self.logview: subprocess.Popen(['logview', self.log], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) bold = '\x1b[1m' italic = '\x1b[3m' underline = '\x1b[4m' blink = '\x1b[5m' clear = '\x1b[0m' green = '\x1b[32m' yellow = '\x1b[33m' red = '\x1b[31m' purple = '\x1b[35m' print(f' {underline}pointless.py{clear} > {purple}{self.log}{clear} ') print(f'Running... ', end=, flush=True) try: s = subprocess.check_output(cmd, shell=True, stderr=subprocess.DEVNULL) self.result = s.decode('ascii') except: print(f'{red}Error!{clear} ') sys.exit() print(f'{green}Complete!{clear} ') import re shakes = open(self.log, 'r') for line in shakes: if re.match('.*Space group =.+', line): print((' ' + line.strip()))
def run(self): ' ' import subprocess cmd = (((('pointless HKLIN ' + self.mtzin) + ' HKLOUT ') + self.mtzout) + ' ') cmd += f'<< eof >>{self.log} ' if self.spacegroup: cmd += f'spacegroup {self.spacegroup} ' if self.custom: cmd += (str(self.custom) + '\n') cmd += '\neof' if (self.showcommand == True): print(cmd) import sys log = open(self.log, 'w') log.write('Pointless run through python wrapper using command:\n\n') log.write((('pointlesswrap.py ' + ' '.join(sys.argv[1:])) + '\n\n')) log.close() if self.logview: subprocess.Popen(['logview', self.log], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) bold = '\x1b[1m' italic = '\x1b[3m' underline = '\x1b[4m' blink = '\x1b[5m' clear = '\x1b[0m' green = '\x1b[32m' yellow = '\x1b[33m' red = '\x1b[31m' purple = '\x1b[35m' print(f' {underline}pointless.py{clear} > {purple}{self.log}{clear} ') print(f'Running... ', end=, flush=True) try: s = subprocess.check_output(cmd, shell=True, stderr=subprocess.DEVNULL) self.result = s.decode('ascii') except: print(f'{red}Error!{clear} ') sys.exit() print(f'{green}Complete!{clear} ') import re shakes = open(self.log, 'r') for line in shakes: if re.match('.*Space group =.+', line): print((' ' + line.strip()))<|docstring|>Run it!<|endoftext|>
a4bf95b13feade9b18ab8d250756e0eb5789109a5f90bf03fe95eb03c0ddeb45
def getopts(argv): 'Collect command-line options in a dictionary' opts = {} while argv: if (argv[0][0] == '-'): opts[argv[0]] = argv[1] argv = argv[1:] return opts
Collect command-line options in a dictionary
translate.py
getopts
ivan-magda/neural-machine-translation
0
python
def getopts(argv): opts = {} while argv: if (argv[0][0] == '-'): opts[argv[0]] = argv[1] argv = argv[1:] return opts
def getopts(argv): opts = {} while argv: if (argv[0][0] == '-'): opts[argv[0]] = argv[1] argv = argv[1:] return opts<|docstring|>Collect command-line options in a dictionary<|endoftext|>
8286c77ff8e5d2297bf9fec77585610315270fbcb1638dac148c6ce83b14ff48
def sentence_to_seq(sentence, vocab_to_int): '\n Convert a sentence to a sequence of ids\n :param sentence: String\n :param vocab_to_int: Dictionary to go from the words to an id\n :return: List of word ids\n ' lowercase = sentence.lower() default = vocab_to_int['<UNK>'] sentence_id = [vocab_to_int.get(word, default) for word in lowercase.split()] return sentence_id
Convert a sentence to a sequence of ids :param sentence: String :param vocab_to_int: Dictionary to go from the words to an id :return: List of word ids
translate.py
sentence_to_seq
ivan-magda/neural-machine-translation
0
python
def sentence_to_seq(sentence, vocab_to_int): '\n Convert a sentence to a sequence of ids\n :param sentence: String\n :param vocab_to_int: Dictionary to go from the words to an id\n :return: List of word ids\n ' lowercase = sentence.lower() default = vocab_to_int['<UNK>'] sentence_id = [vocab_to_int.get(word, default) for word in lowercase.split()] return sentence_id
def sentence_to_seq(sentence, vocab_to_int): '\n Convert a sentence to a sequence of ids\n :param sentence: String\n :param vocab_to_int: Dictionary to go from the words to an id\n :return: List of word ids\n ' lowercase = sentence.lower() default = vocab_to_int['<UNK>'] sentence_id = [vocab_to_int.get(word, default) for word in lowercase.split()] return sentence_id<|docstring|>Convert a sentence to a sequence of ids :param sentence: String :param vocab_to_int: Dictionary to go from the words to an id :return: List of word ids<|endoftext|>
5285987c269c908ff403ca0db053855030059547e9a097c6fd53dffba949c138
def get_qualifying_clusters(rImage, strat_dbz, conv_dbz, int_dbz, min_length, conv_buffer, min_size=10, strat_buffer=0): 'Combines the logic of get_intense_cells,\n connect_intense_cells, and connect_stratiform_to_lines\n to return pixels associated with qualifying slices.\n\n Stratiform >= 20 (dBZ)\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract qualifying lines.\n\n strat_dbz: int\n Threshold used to identify stratiform pixels\n\n conv_dbz: int\n Threshold used to identify convective pixels\n\n int_dbz: int\n Threshold used to identify intense pixels\n\n min_length: int\n Minimum length for a qualifying merged lines\n\n conv_buffer: int\n Distance within which intense cells are merged\n (Multiply by minimum search disk radius (3) to get\n buffer size)\n\n min_size: int\n Minimum size for an intense cell to be considered in\n line-building process.\n\n strat_buffer: int\n Distance within which stratiform pixels are merged\n with qualifying merged lines.\n (Multiply by minimum search disk radius of 3\n to get buffer size in km)\n\n conv_buffer: integer\n Distance to search for nearby intense cells.\n\n Returns\n -------\n regions: list\n A list of regionprops for each qualifying slice.\n See scikit-image.measure.regionprops for more information.\n ' convection = (1 * (rImage >= conv_dbz)) stratiform = (1 * (rImage >= strat_dbz)) (labeled_image, _) = label(convection, np.ones((3, 3), dtype=int)) remove_small_objects(labeled_image, min_size=min_size, connectivity=2, in_place=True) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < int_dbz): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 thresholded_image = (1 * binary_closing((labeled_image > 0), structure=disk(3), 
iterations=int(conv_buffer))) (labeled_image, _) = label(thresholded_image, np.ones((3, 3))) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (region.major_axis_length < min_length): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 strat_mask = ((1 * stratiform) * binary_dilation((1 * (labeled_image > 0)), structure=disk(3), iterations=strat_buffer)) thresholded_image = ((1 * (labeled_image > 0)) + strat_mask) (labeled_image, _) = label((1 * (thresholded_image > 0)), np.ones((3, 3))) labeled_image *= stratiform regions = regionprops(labeled_image, intensity_image=thresholded_image, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < 2): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 return regionprops(labeled_image, intensity_image=rImage, coordinates='rc')
Combines the logic of get_intense_cells, connect_intense_cells, and connect_stratiform_to_lines to return pixels associated with qualifying slices. Stratiform >= 20 (dBZ) Convection >= 40 (dBZ) Intense >= 50 (dBZ) Parameters ---------- rImage: (N, M) ndarray Radar Image from which to extract qualifying lines. strat_dbz: int Threshold used to identify stratiform pixels conv_dbz: int Threshold used to identify convective pixels int_dbz: int Threshold used to identify intense pixels min_length: int Minimum length for a qualifying merged lines conv_buffer: int Distance within which intense cells are merged (Multiply by minimum search disk radius (3) to get buffer size) min_size: int Minimum size for an intense cell to be considered in line-building process. strat_buffer: int Distance within which stratiform pixels are merged with qualifying merged lines. (Multiply by minimum search disk radius of 3 to get buffer size in km) conv_buffer: integer Distance to search for nearby intense cells. Returns ------- regions: list A list of regionprops for each qualifying slice. See scikit-image.measure.regionprops for more information.
utils/segmentation.py
get_qualifying_clusters
jthielen/svrimg47
0
python
def get_qualifying_clusters(rImage, strat_dbz, conv_dbz, int_dbz, min_length, conv_buffer, min_size=10, strat_buffer=0): 'Combines the logic of get_intense_cells,\n connect_intense_cells, and connect_stratiform_to_lines\n to return pixels associated with qualifying slices.\n\n Stratiform >= 20 (dBZ)\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract qualifying lines.\n\n strat_dbz: int\n Threshold used to identify stratiform pixels\n\n conv_dbz: int\n Threshold used to identify convective pixels\n\n int_dbz: int\n Threshold used to identify intense pixels\n\n min_length: int\n Minimum length for a qualifying merged lines\n\n conv_buffer: int\n Distance within which intense cells are merged\n (Multiply by minimum search disk radius (3) to get\n buffer size)\n\n min_size: int\n Minimum size for an intense cell to be considered in\n line-building process.\n\n strat_buffer: int\n Distance within which stratiform pixels are merged\n with qualifying merged lines.\n (Multiply by minimum search disk radius of 3\n to get buffer size in km)\n\n conv_buffer: integer\n Distance to search for nearby intense cells.\n\n Returns\n -------\n regions: list\n A list of regionprops for each qualifying slice.\n See scikit-image.measure.regionprops for more information.\n ' convection = (1 * (rImage >= conv_dbz)) stratiform = (1 * (rImage >= strat_dbz)) (labeled_image, _) = label(convection, np.ones((3, 3), dtype=int)) remove_small_objects(labeled_image, min_size=min_size, connectivity=2, in_place=True) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < int_dbz): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 thresholded_image = (1 * binary_closing((labeled_image > 0), structure=disk(3), 
iterations=int(conv_buffer))) (labeled_image, _) = label(thresholded_image, np.ones((3, 3))) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (region.major_axis_length < min_length): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 strat_mask = ((1 * stratiform) * binary_dilation((1 * (labeled_image > 0)), structure=disk(3), iterations=strat_buffer)) thresholded_image = ((1 * (labeled_image > 0)) + strat_mask) (labeled_image, _) = label((1 * (thresholded_image > 0)), np.ones((3, 3))) labeled_image *= stratiform regions = regionprops(labeled_image, intensity_image=thresholded_image, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < 2): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 return regionprops(labeled_image, intensity_image=rImage, coordinates='rc')
def get_qualifying_clusters(rImage, strat_dbz, conv_dbz, int_dbz, min_length, conv_buffer, min_size=10, strat_buffer=0): 'Combines the logic of get_intense_cells,\n connect_intense_cells, and connect_stratiform_to_lines\n to return pixels associated with qualifying slices.\n\n Stratiform >= 20 (dBZ)\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract qualifying lines.\n\n strat_dbz: int\n Threshold used to identify stratiform pixels\n\n conv_dbz: int\n Threshold used to identify convective pixels\n\n int_dbz: int\n Threshold used to identify intense pixels\n\n min_length: int\n Minimum length for a qualifying merged lines\n\n conv_buffer: int\n Distance within which intense cells are merged\n (Multiply by minimum search disk radius (3) to get\n buffer size)\n\n min_size: int\n Minimum size for an intense cell to be considered in\n line-building process.\n\n strat_buffer: int\n Distance within which stratiform pixels are merged\n with qualifying merged lines.\n (Multiply by minimum search disk radius of 3\n to get buffer size in km)\n\n conv_buffer: integer\n Distance to search for nearby intense cells.\n\n Returns\n -------\n regions: list\n A list of regionprops for each qualifying slice.\n See scikit-image.measure.regionprops for more information.\n ' convection = (1 * (rImage >= conv_dbz)) stratiform = (1 * (rImage >= strat_dbz)) (labeled_image, _) = label(convection, np.ones((3, 3), dtype=int)) remove_small_objects(labeled_image, min_size=min_size, connectivity=2, in_place=True) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < int_dbz): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 thresholded_image = (1 * binary_closing((labeled_image > 0), structure=disk(3), 
iterations=int(conv_buffer))) (labeled_image, _) = label(thresholded_image, np.ones((3, 3))) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (region.major_axis_length < min_length): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 strat_mask = ((1 * stratiform) * binary_dilation((1 * (labeled_image > 0)), structure=disk(3), iterations=strat_buffer)) thresholded_image = ((1 * (labeled_image > 0)) + strat_mask) (labeled_image, _) = label((1 * (thresholded_image > 0)), np.ones((3, 3))) labeled_image *= stratiform regions = regionprops(labeled_image, intensity_image=thresholded_image, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < 2): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 return regionprops(labeled_image, intensity_image=rImage, coordinates='rc')<|docstring|>Combines the logic of get_intense_cells, connect_intense_cells, and connect_stratiform_to_lines to return pixels associated with qualifying slices. Stratiform >= 20 (dBZ) Convection >= 40 (dBZ) Intense >= 50 (dBZ) Parameters ---------- rImage: (N, M) ndarray Radar Image from which to extract qualifying lines. strat_dbz: int Threshold used to identify stratiform pixels conv_dbz: int Threshold used to identify convective pixels int_dbz: int Threshold used to identify intense pixels min_length: int Minimum length for a qualifying merged lines conv_buffer: int Distance within which intense cells are merged (Multiply by minimum search disk radius (3) to get buffer size) min_size: int Minimum size for an intense cell to be considered in line-building process. strat_buffer: int Distance within which stratiform pixels are merged with qualifying merged lines. 
(Multiply by minimum search disk radius of 3 to get buffer size in km) conv_buffer: integer Distance to search for nearby intense cells. Returns ------- regions: list A list of regionprops for each qualifying slice. See scikit-image.measure.regionprops for more information.<|endoftext|>
d75df377cdb61dae65411819d4ffb1ecd988f3f1b5b6decc9f170033b0ee3aa2
def find_lines(rImage, conv_buffer, min_length=75): 'Combines the logic of get_intense_cells and\n connect_intense_cells to return pixels associated\n with qualifying merged lines.\n\n Stratiform >= 20 (dBZ)\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract qualifying lines.\n\n conv_buffer: integer\n Distance to search for nearby intense cells.\n\n min_length: integer\n Minimum size requirment to be considered an MCS.\n Default is 75 (km)\n\n Returns\n -------\n labeled_image: (N, M) ndarray\n Binary image of pixels in qualifying merged lines.\n Same dimensions as rImage.\n ' convection = (1 * (rImage >= 40)) (labeled_image, _) = label(convection, np.ones((3, 3), dtype=int)) remove_small_objects(labeled_image, min_size=10, connectivity=2, in_place=True) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < 10): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 thresholded_image = (1 * binary_closing((labeled_image > 0), structure=disk(3), iterations=int(conv_buffer))) (labeled_image, _) = label(thresholded_image, np.ones((3, 3))) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (region.major_axis_length < min_length): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 return labeled_image
Combines the logic of get_intense_cells and connect_intense_cells to return pixels associated with qualifying merged lines. Stratiform >= 20 (dBZ) Convection >= 40 (dBZ) Intense >= 50 (dBZ) Parameters ---------- rImage: (N, M) ndarray Radar Image from which to extract qualifying lines. conv_buffer: integer Distance to search for nearby intense cells. min_length: integer Minimum size requirment to be considered an MCS. Default is 75 (km) Returns ------- labeled_image: (N, M) ndarray Binary image of pixels in qualifying merged lines. Same dimensions as rImage.
utils/segmentation.py
find_lines
jthielen/svrimg47
0
python
def find_lines(rImage, conv_buffer, min_length=75): 'Combines the logic of get_intense_cells and\n connect_intense_cells to return pixels associated\n with qualifying merged lines.\n\n Stratiform >= 20 (dBZ)\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract qualifying lines.\n\n conv_buffer: integer\n Distance to search for nearby intense cells.\n\n min_length: integer\n Minimum size requirment to be considered an MCS.\n Default is 75 (km)\n\n Returns\n -------\n labeled_image: (N, M) ndarray\n Binary image of pixels in qualifying merged lines.\n Same dimensions as rImage.\n ' convection = (1 * (rImage >= 40)) (labeled_image, _) = label(convection, np.ones((3, 3), dtype=int)) remove_small_objects(labeled_image, min_size=10, connectivity=2, in_place=True) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < 10): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 thresholded_image = (1 * binary_closing((labeled_image > 0), structure=disk(3), iterations=int(conv_buffer))) (labeled_image, _) = label(thresholded_image, np.ones((3, 3))) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (region.major_axis_length < min_length): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 return labeled_image
def find_lines(rImage, conv_buffer, min_length=75): 'Combines the logic of get_intense_cells and\n connect_intense_cells to return pixels associated\n with qualifying merged lines.\n\n Stratiform >= 20 (dBZ)\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract qualifying lines.\n\n conv_buffer: integer\n Distance to search for nearby intense cells.\n\n min_length: integer\n Minimum size requirment to be considered an MCS.\n Default is 75 (km)\n\n Returns\n -------\n labeled_image: (N, M) ndarray\n Binary image of pixels in qualifying merged lines.\n Same dimensions as rImage.\n ' convection = (1 * (rImage >= 40)) (labeled_image, _) = label(convection, np.ones((3, 3), dtype=int)) remove_small_objects(labeled_image, min_size=10, connectivity=2, in_place=True) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < 10): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 thresholded_image = (1 * binary_closing((labeled_image > 0), structure=disk(3), iterations=int(conv_buffer))) (labeled_image, _) = label(thresholded_image, np.ones((3, 3))) regions = regionprops(labeled_image, intensity_image=rImage, coordinates='rc') for region in regions: if (region.major_axis_length < min_length): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 return labeled_image<|docstring|>Combines the logic of get_intense_cells and connect_intense_cells to return pixels associated with qualifying merged lines. Stratiform >= 20 (dBZ) Convection >= 40 (dBZ) Intense >= 50 (dBZ) Parameters ---------- rImage: (N, M) ndarray Radar Image from which to extract qualifying lines. 
conv_buffer: integer Distance to search for nearby intense cells. min_length: integer Minimum size requirment to be considered an MCS. Default is 75 (km) Returns ------- labeled_image: (N, M) ndarray Binary image of pixels in qualifying merged lines. Same dimensions as rImage.<|endoftext|>
57b7bd48e2462398b79bf485edeba93df84bc44c6a8eca150a76b4029f172e19
def get_discrete_intense_cells(rImage, min_size=10): 'Return regionprops and labeled image associated with intense thunderstorm cells.\n\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract intense cells.\n\n Returns\n -------\n labeled_image1: (N, M) ndarray\n Labeled image of intense cells. Same dimensions as rImage.\n ' convection = np.uint8((rImage >= 40)) (labeled_image, _) = label(convection, np.ones((3, 3))) remove_small_objects(labeled_image, min_size=min_size, connectivity=2, in_place=True) return (regionprops(labeled_image, intensity_image=rImage, coordinates='rc'), labeled_image)
Return regionprops and labeled image associated with intense thunderstorm cells. Convection >= 40 (dBZ) Intense >= 50 (dBZ) Parameters ---------- rImage: (N, M) ndarray Radar Image from which to extract intense cells. Returns ------- labeled_image1: (N, M) ndarray Labeled image of intense cells. Same dimensions as rImage.
utils/segmentation.py
get_discrete_intense_cells
jthielen/svrimg47
0
python
def get_discrete_intense_cells(rImage, min_size=10): 'Return regionprops and labeled image associated with intense thunderstorm cells.\n\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract intense cells.\n\n Returns\n -------\n labeled_image1: (N, M) ndarray\n Labeled image of intense cells. Same dimensions as rImage.\n ' convection = np.uint8((rImage >= 40)) (labeled_image, _) = label(convection, np.ones((3, 3))) remove_small_objects(labeled_image, min_size=min_size, connectivity=2, in_place=True) return (regionprops(labeled_image, intensity_image=rImage, coordinates='rc'), labeled_image)
def get_discrete_intense_cells(rImage, min_size=10): 'Return regionprops and labeled image associated with intense thunderstorm cells.\n\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract intense cells.\n\n Returns\n -------\n labeled_image1: (N, M) ndarray\n Labeled image of intense cells. Same dimensions as rImage.\n ' convection = np.uint8((rImage >= 40)) (labeled_image, _) = label(convection, np.ones((3, 3))) remove_small_objects(labeled_image, min_size=min_size, connectivity=2, in_place=True) return (regionprops(labeled_image, intensity_image=rImage, coordinates='rc'), labeled_image)<|docstring|>Return regionprops and labeled image associated with intense thunderstorm cells. Convection >= 40 (dBZ) Intense >= 50 (dBZ) Parameters ---------- rImage: (N, M) ndarray Radar Image from which to extract intense cells. Returns ------- labeled_image1: (N, M) ndarray Labeled image of intense cells. Same dimensions as rImage.<|endoftext|>
8f811db9b6230697415a4208b6326784742239299f0a3090225b8714d5cc55dc
def get_intense_cells(rImage, min_size=10): 'Return pixel coordinates and unique labels associated\n with intense thunderstorm cells.\n\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract intense cells.\n\n Returns\n -------\n labeled_image1: (N, M) ndarray\n Labeled image of intense cells. Same dimensions as rImage.\n ' (regions, labeled_image) = get_discrete_intense_cells(rImage, min_size) labeled_image1 = np.zeros(labeled_image.shape, dtype=int) for region in regions: if (np.max(region.intensity_image) >= 10): labeled_image1 += ((labeled_image == region.label) * rImage) return labeled_image1
Return pixel coordinates and unique labels associated with intense thunderstorm cells. Convection >= 40 (dBZ) Intense >= 50 (dBZ) Parameters ---------- rImage: (N, M) ndarray Radar Image from which to extract intense cells. Returns ------- labeled_image1: (N, M) ndarray Labeled image of intense cells. Same dimensions as rImage.
utils/segmentation.py
get_intense_cells
jthielen/svrimg47
0
python
def get_intense_cells(rImage, min_size=10): 'Return pixel coordinates and unique labels associated\n with intense thunderstorm cells.\n\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract intense cells.\n\n Returns\n -------\n labeled_image1: (N, M) ndarray\n Labeled image of intense cells. Same dimensions as rImage.\n ' (regions, labeled_image) = get_discrete_intense_cells(rImage, min_size) labeled_image1 = np.zeros(labeled_image.shape, dtype=int) for region in regions: if (np.max(region.intensity_image) >= 10): labeled_image1 += ((labeled_image == region.label) * rImage) return labeled_image1
def get_intense_cells(rImage, min_size=10): 'Return pixel coordinates and unique labels associated\n with intense thunderstorm cells.\n\n Convection >= 40 (dBZ)\n Intense >= 50 (dBZ)\n\n Parameters\n ----------\n rImage: (N, M) ndarray\n Radar Image from which to extract intense cells.\n\n Returns\n -------\n labeled_image1: (N, M) ndarray\n Labeled image of intense cells. Same dimensions as rImage.\n ' (regions, labeled_image) = get_discrete_intense_cells(rImage, min_size) labeled_image1 = np.zeros(labeled_image.shape, dtype=int) for region in regions: if (np.max(region.intensity_image) >= 10): labeled_image1 += ((labeled_image == region.label) * rImage) return labeled_image1<|docstring|>Return pixel coordinates and unique labels associated with intense thunderstorm cells. Convection >= 40 (dBZ) Intense >= 50 (dBZ) Parameters ---------- rImage: (N, M) ndarray Radar Image from which to extract intense cells. Returns ------- labeled_image1: (N, M) ndarray Labeled image of intense cells. Same dimensions as rImage.<|endoftext|>
074766806ca40132c0406023cc3de1b6414b3e62ecc2c1d41064f2d911af83b9
def connect_intense_cells(int_cells, conv_buffer): 'Merge nearby intense cells if they are within a given\n convective region search radius.\n\n Parameters\n ----------\n int_cells: (N, M) ndarray\n Pixels associated with intense cells.\n\n conv_buffer: integer\n Distance to search for nearby intense cells.\n\n Returns\n -------\n labeled_image1: (N, M) ndarray\n Binary image of merged intense cells. Same dimensions as int_cells.\n ' return binary_closing((int_cells > 0), structure=disk(3), iterations=conv_buffer)
Merge nearby intense cells if they are within a given convective region search radius. Parameters ---------- int_cells: (N, M) ndarray Pixels associated with intense cells. conv_buffer: integer Distance to search for nearby intense cells. Returns ------- labeled_image1: (N, M) ndarray Binary image of merged intense cells. Same dimensions as int_cells.
utils/segmentation.py
connect_intense_cells
jthielen/svrimg47
0
python
def connect_intense_cells(int_cells, conv_buffer): 'Merge nearby intense cells if they are within a given\n convective region search radius.\n\n Parameters\n ----------\n int_cells: (N, M) ndarray\n Pixels associated with intense cells.\n\n conv_buffer: integer\n Distance to search for nearby intense cells.\n\n Returns\n -------\n labeled_image1: (N, M) ndarray\n Binary image of merged intense cells. Same dimensions as int_cells.\n ' return binary_closing((int_cells > 0), structure=disk(3), iterations=conv_buffer)
def connect_intense_cells(int_cells, conv_buffer): 'Merge nearby intense cells if they are within a given\n convective region search radius.\n\n Parameters\n ----------\n int_cells: (N, M) ndarray\n Pixels associated with intense cells.\n\n conv_buffer: integer\n Distance to search for nearby intense cells.\n\n Returns\n -------\n labeled_image1: (N, M) ndarray\n Binary image of merged intense cells. Same dimensions as int_cells.\n ' return binary_closing((int_cells > 0), structure=disk(3), iterations=conv_buffer)<|docstring|>Merge nearby intense cells if they are within a given convective region search radius. Parameters ---------- int_cells: (N, M) ndarray Pixels associated with intense cells. conv_buffer: integer Distance to search for nearby intense cells. Returns ------- labeled_image1: (N, M) ndarray Binary image of merged intense cells. Same dimensions as int_cells.<|endoftext|>
214edf0f8522d216c049895dfd36a8d350c962bb06906f33d4032ed55782b5b6
def connect_stratiform_to_lines(lines, stratiform, strat_buffer):
    """Attach stratiform (>= 20 dBZ) pixels to merged lines within a radius.

    Stratiform pixels within ``strat_buffer`` dilation iterations of a merged
    line are joined to it; connected components that contain no line pixel
    (max intensity < 2 in the combined image) are discarded.

    Parameters
    ----------
    lines : (N, M) ndarray
        Pixels associated with merged lines.
    stratiform : (N, M) ndarray
        Binary image using a threshold of 20 dBZ.
    strat_buffer : int
        Distance (dilation iterations) to search for stratiform pixels to
        connect to merged lines.

    Returns
    -------
    (N, M) ndarray
        Labeled image where each retained slice has a unique value; same
        dimensions as ``lines`` and ``stratiform``.
    """
    line_mask = 1 * (lines > 0)
    # Stratiform pixels close enough to a line (within the dilated halo).
    near_lines = binary_dilation(line_mask, structure=disk(3),
                                 iterations=strat_buffer)
    strat_mask = (1 * stratiform) * near_lines
    # Line pixels score 2 (line + stratiform overlap), pure stratiform 1.
    thresholded_image = line_mask + strat_mask
    labeled_image, _ = label(1 * (thresholded_image > 0), np.ones((3, 3)))
    labeled_image *= stratiform
    regions = regionprops(labeled_image, intensity_image=thresholded_image,
                          coordinates='rc')
    for region in regions:
        # A region whose peak never reaches 2 contains no line pixel:
        # zero it out of the labeled image.
        if np.max(region.intensity_image) < 2:
            ymin = np.min(region.coords[:, 0])
            xmin = np.min(region.coords[:, 1])
            y, x = np.where(region.intensity_image > 0)
            labeled_image[ymin + y, xmin + x] = 0
    return labeled_image
Connect pixels with values of 20 dBZ or greater surrounding merged lines within a given stratiform search radius. Parameters ---------- lines: (N, M) ndarray Pixels associated with merged lines. stratiform: (N, M) ndarray Binary image using a threshold of 20 dBZ. strat_buffer: integer Distance to search for stratiform pixels to connect to merged lines. Returns ------- labeled_image: (N, M) ndarray Labeled image where each slice has a unique value. Has same dimensions as lines and stratiform.
utils/segmentation.py
connect_stratiform_to_lines
jthielen/svrimg47
0
python
def connect_stratiform_to_lines(lines, stratiform, strat_buffer): 'Connect pixels with values of 20 dBZ or greater surrounding\n merged lines within a given stratiform search radius.\n\n Parameters\n ----------\n lines: (N, M) ndarray\n Pixels associated with merged lines.\n\n stratiform: (N, M) ndarray\n Binary image using a threshold of 20 dBZ.\n\n strat_buffer: integer\n Distance to search for stratiform pixels to\n connect to merged lines.\n\n Returns\n -------\n labeled_image: (N, M) ndarray\n Labeled image where each slice has a unique value.\n Has same dimensions as lines and stratiform.\n ' strat_mask = ((1 * stratiform) * binary_dilation((1 * (lines > 0)), structure=disk(3), iterations=strat_buffer)) thresholded_image = ((1 * (lines > 0)) + strat_mask) (labeled_image, _) = label((1 * (thresholded_image > 0)), np.ones((3, 3))) labeled_image *= stratiform regions = regionprops(labeled_image, intensity_image=thresholded_image, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < 2): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 return labeled_image
def connect_stratiform_to_lines(lines, stratiform, strat_buffer): 'Connect pixels with values of 20 dBZ or greater surrounding\n merged lines within a given stratiform search radius.\n\n Parameters\n ----------\n lines: (N, M) ndarray\n Pixels associated with merged lines.\n\n stratiform: (N, M) ndarray\n Binary image using a threshold of 20 dBZ.\n\n strat_buffer: integer\n Distance to search for stratiform pixels to\n connect to merged lines.\n\n Returns\n -------\n labeled_image: (N, M) ndarray\n Labeled image where each slice has a unique value.\n Has same dimensions as lines and stratiform.\n ' strat_mask = ((1 * stratiform) * binary_dilation((1 * (lines > 0)), structure=disk(3), iterations=strat_buffer)) thresholded_image = ((1 * (lines > 0)) + strat_mask) (labeled_image, _) = label((1 * (thresholded_image > 0)), np.ones((3, 3))) labeled_image *= stratiform regions = regionprops(labeled_image, intensity_image=thresholded_image, coordinates='rc') for region in regions: if (np.max(region.intensity_image) < 2): (ymin, xmin) = (np.min(region.coords[(:, 0)]), np.min(region.coords[(:, 1)])) (y, x) = np.where((region.intensity_image > 0)) labeled_image[((ymin + y), (xmin + x))] = 0 return labeled_image<|docstring|>Connect pixels with values of 20 dBZ or greater surrounding merged lines within a given stratiform search radius. Parameters ---------- lines: (N, M) ndarray Pixels associated with merged lines. stratiform: (N, M) ndarray Binary image using a threshold of 20 dBZ. strat_buffer: integer Distance to search for stratiform pixels to connect to merged lines. Returns ------- labeled_image: (N, M) ndarray Labeled image where each slice has a unique value. Has same dimensions as lines and stratiform.<|endoftext|>
1b1a00678fe05edcfaecca1415767f62fb8ffd78ddaa0907e0cf32ab70599cba
def get_max_region(regions, field='area'):
    """Return the region from the list that maximizes the given field.

    Ties are broken in favor of the earliest region in the list, and an
    empty list raises ``IndexError`` (unchanged behavior).

    Parameters
    ----------
    regions : sequence
        Regions supporting ``region[field]`` item access.
    field : str, optional
        Property name to maximize (default ``'area'``).

    Returns
    -------
    The region with the largest ``field`` value.
    """
    best = regions[0]
    for candidate in regions[1:]:
        if candidate[field] > best[field]:
            best = candidate
    return best
Return the region from the list that maximizes the field.
utils/segmentation.py
get_max_region
jthielen/svrimg47
0
python
def get_max_region(regions, field='area'): max_region = regions[0] for i in range(1, len(regions)): if (regions[i][field] > max_region[field]): max_region = regions[i] return max_region
def get_max_region(regions, field='area'): max_region = regions[0] for i in range(1, len(regions)): if (regions[i][field] > max_region[field]): max_region = regions[i] return max_region<|docstring|>Return the region from the list that maximizes the field.<|endoftext|>
0ab12ffb3ebf48423b6f3cf57b056b1c0a6c4594922e07f30435e608c75c573b
def test_unzipped_examples():
    """Open and parse every unzipped example file."""
    example_dir = os.path.abspath('examples/unzipped')
    for file_name in os.listdir(example_dir):
        meter_data = nr.read_nem_file(os.path.join(example_dir, file_name))
        assert meter_data.header.version_header in ['NEM12', 'NEM13']
Open and parse unzipped example files
tests/test_open_examples.py
test_unzipped_examples
spon-ww/nem-reader
1
python
def test_unzipped_examples(): ' ' test_path = os.path.abspath('examples/unzipped') for file_name in os.listdir(test_path): test_file = os.path.join(test_path, file_name) meter_data = nr.read_nem_file(test_file) assert (meter_data.header.version_header in ['NEM12', 'NEM13'])
def test_unzipped_examples(): ' ' test_path = os.path.abspath('examples/unzipped') for file_name in os.listdir(test_path): test_file = os.path.join(test_path, file_name) meter_data = nr.read_nem_file(test_file) assert (meter_data.header.version_header in ['NEM12', 'NEM13'])<|docstring|>Open and parse unzipped example files<|endoftext|>
c536bcee9e85f8fd3d2bd8dc0374e2b725a05d8d6235cebcd6fee350aa6af730
def test_nem12_examples():
    """Open and parse every zipped NEM12 example file, minus known skips."""
    skips = ['NEM12#Scenario10#ETSAMDP#NEMMCO.zip']
    example_dir = os.path.abspath('examples/nem12')
    for file_name in os.listdir(example_dir):
        if file_name in skips:
            # Known-problematic scenario file; excluded from parsing.
            continue
        meter_data = nr.read_nem_file(os.path.join(example_dir, file_name))
        assert meter_data.header.version_header in ['NEM12', 'NEM13']
Open and parse zipped NEM12 example files
tests/test_open_examples.py
test_nem12_examples
spon-ww/nem-reader
1
python
def test_nem12_examples(): ' ' skips = ['NEM12#Scenario10#ETSAMDP#NEMMCO.zip'] test_path = os.path.abspath('examples/nem12') for file_name in os.listdir(test_path): if (file_name in skips): continue test_file = os.path.join(test_path, file_name) meter_data = nr.read_nem_file(test_file) assert (meter_data.header.version_header in ['NEM12', 'NEM13'])
def test_nem12_examples(): ' ' skips = ['NEM12#Scenario10#ETSAMDP#NEMMCO.zip'] test_path = os.path.abspath('examples/nem12') for file_name in os.listdir(test_path): if (file_name in skips): continue test_file = os.path.join(test_path, file_name) meter_data = nr.read_nem_file(test_file) assert (meter_data.header.version_header in ['NEM12', 'NEM13'])<|docstring|>Open and parse zipped NEM12 example files<|endoftext|>
3527c4525b9802e78c1b2435d5b63a2cb99d38f81d6f819e131e6c8586d68ad1
def test_nem13_examples():
    """Open and parse every zipped NEM13 example file."""
    example_dir = os.path.abspath('examples/nem13')
    for file_name in os.listdir(example_dir):
        meter_data = nr.read_nem_file(os.path.join(example_dir, file_name))
        assert meter_data.header.version_header in ['NEM12', 'NEM13']
Open and parse zipped NEM13 example files
tests/test_open_examples.py
test_nem13_examples
spon-ww/nem-reader
1
python
def test_nem13_examples(): ' ' test_path = os.path.abspath('examples/nem13') for file_name in os.listdir(test_path): test_file = os.path.join(test_path, file_name) meter_data = nr.read_nem_file(test_file) assert (meter_data.header.version_header in ['NEM12', 'NEM13'])
def test_nem13_examples(): ' ' test_path = os.path.abspath('examples/nem13') for file_name in os.listdir(test_path): test_file = os.path.join(test_path, file_name) meter_data = nr.read_nem_file(test_file) assert (meter_data.header.version_header in ['NEM12', 'NEM13'])<|docstring|>Open and parse zipped NEM13 example files<|endoftext|>
bc98857931e56b6a5c6c03617b6a2b577aac57b5f3b0981e1b2c022db8f68d29
def do_glob_math(self, cont):
    """Performs #{}-interpolation. The result is always treated as a fixed
    syntactic unit and will not be re-evaluated.
    """
    text = str(cont)
    # Fast path: nothing to interpolate, return the stringified input as-is.
    if '#{' in text:
        text = _expr_glob_re.sub(self._pound_substitute, text)
    return text
Performs #{}-interpolation. The result is always treated as a fixed syntactic unit and will not be re-evaluated.
scss/expression.py
do_glob_math
ojengwa/pyScss
1
python
def do_glob_math(self, cont): 'Performs #{}-interpolation. The result is always treated as a fixed\n syntactic unit and will not be re-evaluated.\n ' cont = str(cont) if ('#{' not in cont): return cont cont = _expr_glob_re.sub(self._pound_substitute, cont) return cont
def do_glob_math(self, cont): 'Performs #{}-interpolation. The result is always treated as a fixed\n syntactic unit and will not be re-evaluated.\n ' cont = str(cont) if ('#{' not in cont): return cont cont = _expr_glob_re.sub(self._pound_substitute, cont) return cont<|docstring|>Performs #{}-interpolation. The result is always treated as a fixed syntactic unit and will not be re-evaluated.<|endoftext|>