hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79005721c54601bee3dc334dbf4d7d9489194f1a | 398 | py | Python | labelmeutils/utils/io.py | narumiruna/labelme-utils | fd0c4e6344c01ed3bc5d040b580a4840334d2459 | [
"MIT"
] | null | null | null | labelmeutils/utils/io.py | narumiruna/labelme-utils | fd0c4e6344c01ed3bc5d040b580a4840334d2459 | [
"MIT"
] | null | null | null | labelmeutils/utils/io.py | narumiruna/labelme-utils | fd0c4e6344c01ed3bc5d040b580a4840334d2459 | [
"MIT"
] | null | null | null | import json
import numpy as np
from PIL import Image
def load_json(f):
    """Read the JSON file at path *f* and return the decoded object."""
    with open(f, 'r') as fp:
        raw = fp.read()
    return json.loads(raw)
def save_json(obj, f, ensure_ascii=True, indent=None):
    """Serialize *obj* as JSON and write it to the file at path *f*.

    `ensure_ascii` and `indent` are forwarded to the JSON encoder.
    """
    serialized = json.dumps(obj, ensure_ascii=ensure_ascii, indent=indent)
    with open(f, 'w') as fp:
        fp.write(serialized)
def load_image(f, mode='RGB'):
    """Open the image file *f* and return it as a numpy array converted to *mode*."""
    with Image.open(f) as img:
        converted = img.convert(mode)
        return np.array(converted)
| 19.9 | 68 | 0.658291 | import json
import numpy as np
from PIL import Image
def load_json(f):
with open(f, 'r') as fp:
return json.load(fp)
def save_json(obj, f, ensure_ascii=True, indent=None):
with open(f, 'w') as fp:
json.dump(obj, fp, ensure_ascii=ensure_ascii, indent=indent)
def load_image(f, mode='RGB'):
with Image.open(f) as image:
return np.array(image.convert(mode))
| true | true |
790057e25494057a1840f7b97d2f89c06efa1c56 | 997 | py | Python | src/melange/src/soc/models/expando_base.py | MatthewWilkes/mw4068-packaging | 5c5d50eea89372e967994dac3bd8b06d25b4f0fa | [
"Apache-2.0"
] | null | null | null | src/melange/src/soc/models/expando_base.py | MatthewWilkes/mw4068-packaging | 5c5d50eea89372e967994dac3bd8b06d25b4f0fa | [
"Apache-2.0"
] | null | null | null | src/melange/src/soc/models/expando_base.py | MatthewWilkes/mw4068-packaging | 5c5d50eea89372e967994dac3bd8b06d25b4f0fa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that contains base class for Melange Expando models.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.ext import db
from soc.logic import dicts
class ExpandoBase(db.Expando):
  """Base model for Melange Expando models.

  Intended as the shared ancestor for all Expando-backed models; may later
  grow common functionality such as that in the ModelWithFieldAttributes
  model.
  """

  # Expose the shared dict-serialization helper as a method-style attribute.
  toDict = dicts.toDict
| 26.236842 | 74 | 0.748245 |
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.ext import db
from soc.logic import dicts
class ExpandoBase(db.Expando):
toDict = dicts.toDict
| true | true |
7900586390cc1d2f877f86ec4d5303f3eb4f91eb | 4,327 | py | Python | tests/layer_tests/common/utils/tf_utils.py | monroid/openvino | 8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6 | [
"Apache-2.0"
] | 2,406 | 2020-04-22T15:47:54.000Z | 2022-03-31T10:27:37.000Z | tests/layer_tests/common/utils/tf_utils.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 4,948 | 2020-04-22T15:12:39.000Z | 2022-03-31T18:45:42.000Z | tests/layer_tests/common/utils/tf_utils.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 991 | 2020-04-23T18:21:09.000Z | 2022-03-31T18:40:57.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import re
import tensorflow as tf
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def load_graph(model_file, output_nodes_for_freeze=None):
    """Load a TensorFlow graph from a frozen GraphDef (.pb) or a .meta checkpoint.

    :param model_file: path to a serialized GraphDef or MetaGraphDef file.
    :param output_nodes_for_freeze: output node names used to freeze variables
        into constants when loading a ``.meta`` checkpoint.
    :return: a ``tf.Graph`` with the (frozen) graph definition imported.
    """
    is_meta = os.path.splitext(model_file)[-1] == ".meta"

    tf.compat.v1.reset_default_graph()
    graph = tf.Graph()
    graph_def = tf.compat.v1.GraphDef() if not is_meta else tf.compat.v1.MetaGraphDef()

    with open(model_file, "rb") as f:
        graph_def.ParseFromString(f.read())

    # Clear device placements so the graph can run on whatever device is available.
    nodes_to_clear_device = graph_def.node if isinstance(graph_def, tf.compat.v1.GraphDef) else graph_def.graph_def.node
    for node in nodes_to_clear_device:
        node.device = ""

    if is_meta:
        # Restore variables from the checkpoint and freeze them into constants.
        # Raw string for the regex: '\.meta$' without r-prefix is an invalid
        # escape sequence and warns on modern Python.
        with tf.compat.v1.Session() as sess:
            restorer = tf.compat.v1.train.import_meta_graph(graph_def)
            restorer.restore(sess, re.sub(r'\.meta$', '', model_file))
            graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(sess, graph_def.graph_def, output_nodes_for_freeze)

    with graph.as_default():
        tf.import_graph_def(graph_def, name='')

    return graph
def collect_tf_references(model_path, feed_dict, out_layer, output_nodes_for_freeze=None):
    """Score the graph at *model_path* and return ``{output name: ndarray}``.

    *feed_dict* maps node names to input values; non-placeholder entries are
    resolved to their feedable (non-constant) parent tensors.
    """
    graph = load_graph(model_path, output_nodes_for_freeze)
    tensor_feed = dict()

    for input_name in feed_dict:
        node_def = [node for node in graph.as_graph_def().node if node.name == input_name][0]
        if node_def.op == "Placeholder":
            tensor = graph.get_tensor_by_name(input_name + ":0")
            tensor_feed[tensor] = feed_dict[input_name]
            continue
        # Not a placeholder: feed the node's non-constant parents instead.
        for parent_name in node_def.input:
            parent_def = [node for node in graph.as_graph_def().node if node.name == parent_name][0]
            if parent_def.op in ['Const', 'Assign', 'NoOp', 'Assert']:
                continue
            tensor = graph.get_tensor_by_name(parent_name + ":0")
            tensor_feed[tensor] = feed_dict[input_name]

    output_tensors_list = [graph.get_tensor_by_name(name + ":0") for name in out_layer]
    outputs_list = list(out_layer)

    with graph.as_default():
        with tf.compat.v1.Session(graph=graph) as sess:
            outputs = sess.run(output_tensors_list, feed_dict=tensor_feed)
            out_dict = dict(zip(outputs_list, outputs))

    return out_dict
def children(op, graph):
    """Return the set of operations consuming any output of the op named *op*."""
    operation = graph.get_operation_by_name(op)
    consumers = set()
    for output in operation.outputs:
        consumers.update(output.consumers())
    return consumers
def summarize_graph(model_path, output_nodes_for_freeze=None, reshape_net=None):
    """Inspect the graph at *model_path* and report its inputs and outputs.

    :param model_path: path to the serialized model.
    :param output_nodes_for_freeze: output node names used when freezing a
        ``.meta`` model.
    :param reshape_net: optional ``{input name: shape}`` mapping; when given,
        the graph is scored with ones of those shapes and the reported input
        shapes are refreshed from the actual results.
    :return: dict with 'inputs' (``{name: {'type', 'shape'}}``) and
        'outputs' (list of node names).
    """
    placeholders = dict()
    variables = list()
    outputs = list()
    graph = load_graph(model_path, output_nodes_for_freeze)
    unlikely_output_types = ['Const', 'Assign', 'NoOp', 'Placeholder', 'Assert', 'switch_t', 'switch_f']
    for node in graph.as_graph_def().node:
        if node.op == 'Placeholder':
            node_dict = dict()
            node_dict['type'] = tf.DType(node.attr['dtype'].type).name
            # Normalize the shape proto's string form ("size: N" entries)
            # down to a plain tuple of ints.
            node_dict['shape'] = str(node.attr['shape'].shape.dim).replace('\n', '').replace(' ', '').replace(
                'size:', '').replace('[', '').replace(']', '')
            node_dict['shape'] = tuple(int(dim) for dim in node_dict['shape'].split(','))
            placeholders[node.name] = node_dict
        if node.op == "Variable" or node.op == "VariableV2":
            variables.append(node.name)
        # Nodes with no consumers are output candidates, unless their type
        # never carries a useful result.
        if len(children(node.name, graph)) == 0:
            if node.op not in unlikely_output_types and node.name.split('/')[-1] not in unlikely_output_types:
                outputs.append(node.name)
    result = dict()
    result['inputs'] = placeholders
    result['outputs'] = outputs
    if reshape_net:
        out_layer = list(result['inputs'].keys()) + result['outputs']
        feed_dict = {}
        for input_name in reshape_net:
            feed_dict.update({input_name: np.ones(shape=reshape_net[input_name])})
        scoring_res = collect_tf_references(model_path=model_path, feed_dict=feed_dict, out_layer=out_layer)
        for layer in scoring_res:
            if layer in result['inputs']:
                result['inputs'][layer]['shape'] = scoring_res[layer].shape
    return result
| 39.336364 | 130 | 0.645251 |
import os
import re
import tensorflow as tf
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def load_graph(model_file, output_nodes_for_freeze=None):
is_meta = os.path.splitext(model_file)[-1] == ".meta"
tf.compat.v1.reset_default_graph()
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef() if not is_meta else tf.compat.v1.MetaGraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
nodes_to_clear_device = graph_def.node if isinstance(graph_def, tf.compat.v1.GraphDef) else graph_def.graph_def.node
for node in nodes_to_clear_device:
node.device = ""
if is_meta:
with tf.compat.v1.Session() as sess:
restorer = tf.compat.v1.train.import_meta_graph(graph_def)
restorer.restore(sess, re.sub('\.meta$', '', model_file))
graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(sess, graph_def.graph_def, output_nodes_for_freeze)
with graph.as_default():
tf.import_graph_def(graph_def, name='')
return graph
def collect_tf_references(model_path, feed_dict, out_layer, output_nodes_for_freeze=None):
_feed_dict = dict()
graph = load_graph(model_path, output_nodes_for_freeze)
output_tensors_list = list()
outputs_list = list()
for input in feed_dict:
input_node = [node for node in graph.as_graph_def().node if node.name == input][0]
if input_node.op == "Placeholder":
tensor = graph.get_tensor_by_name(input + ":0")
_feed_dict[tensor] = feed_dict[input]
else:
for parrent_input in input_node.input:
in_node = [node for node in graph.as_graph_def().node if node.name == parrent_input][0]
if in_node.op in ['Const', 'Assign', 'NoOp', 'Assert']:
continue
else:
tensor = graph.get_tensor_by_name(parrent_input + ":0")
_feed_dict[tensor] = feed_dict[input]
for output in out_layer:
tensor = graph.get_tensor_by_name(output + ":0")
output_tensors_list.append(tensor)
outputs_list.append(output)
with graph.as_default():
with tf.compat.v1.Session(graph=graph) as sess:
outputs = sess.run(output_tensors_list, feed_dict=_feed_dict)
out_dict = dict(zip(outputs_list, outputs))
return out_dict
def children(op, graph):
op = graph.get_operation_by_name(op)
return set(op for out in op.outputs for op in out.consumers())
def summarize_graph(model_path, output_nodes_for_freeze=None, reshape_net=None):
placeholders = dict()
variables = list()
outputs = list()
graph = load_graph(model_path, output_nodes_for_freeze)
unlikely_output_types = ['Const', 'Assign', 'NoOp', 'Placeholder', 'Assert', 'switch_t', 'switch_f']
for node in graph.as_graph_def().node:
if node.op == 'Placeholder':
node_dict = dict()
node_dict['type'] = tf.DType(node.attr['dtype'].type).name
node_dict['shape'] = str(node.attr['shape'].shape.dim).replace('\n', '').replace(' ', '').replace(
'size:', '').replace('[', '').replace(']', '')
node_dict['shape'] = tuple(map(lambda x: int(x), node_dict['shape'].split(',')))
placeholders[node.name] = node_dict
if node.op == "Variable" or node.op == "VariableV2":
variables.append(node.name)
if len(children(node.name, graph)) == 0:
if node.op not in unlikely_output_types and node.name.split('/')[-1] not in unlikely_output_types:
outputs.append(node.name)
result = dict()
result['inputs'] = placeholders
result['outputs'] = outputs
if reshape_net:
out_layer = list(result['inputs'].keys()) + result['outputs']
feed_dict = {}
for inputl in reshape_net:
feed_dict.update({inputl: np.ones(shape=reshape_net[inputl])})
scoring_res = collect_tf_references(model_path=model_path, feed_dict=feed_dict, out_layer=out_layer)
for layer in scoring_res:
if layer in result['inputs']:
result['inputs'][layer]['shape'] = scoring_res[layer].shape
return result
| true | true |
79005872d06294cf1396ae57aa3929ec0a9c194f | 1,047 | py | Python | test/test_ezsignformfield_response_compound.py | eZmaxinc/eZmax-SDK-python | 5b4d54b69db68aab8ee814a1e26460a0af03784e | [
"MIT"
] | null | null | null | test/test_ezsignformfield_response_compound.py | eZmaxinc/eZmax-SDK-python | 5b4d54b69db68aab8ee814a1e26460a0af03784e | [
"MIT"
] | null | null | null | test/test_ezsignformfield_response_compound.py | eZmaxinc/eZmax-SDK-python | 5b4d54b69db68aab8ee814a1e26460a0af03784e | [
"MIT"
] | null | null | null | """
eZmax API Definition
This API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.3
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.ezsignformfield_response import EzsignformfieldResponse
globals()['EzsignformfieldResponse'] = EzsignformfieldResponse
from eZmaxApi.model.ezsignformfield_response_compound import EzsignformfieldResponseCompound
class TestEzsignformfieldResponseCompound(unittest.TestCase):
    """Unit test stubs for EzsignformfieldResponseCompound."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testEzsignformfieldResponseCompound(self):
        """Test EzsignformfieldResponseCompound."""
        # FIXME: construct object with mandatory attributes with example values
        # model = EzsignformfieldResponseCompound()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 26.846154 | 97 | 0.747851 |
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.ezsignformfield_response import EzsignformfieldResponse
globals()['EzsignformfieldResponse'] = EzsignformfieldResponse
from eZmaxApi.model.ezsignformfield_response_compound import EzsignformfieldResponseCompound
class TestEzsignformfieldResponseCompound(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testEzsignformfieldResponseCompound(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
7900588c3a101672c588d3ccad8f07e8763252df | 6,202 | py | Python | tests/test_test_metadata.py | kbh2o/slash | 532b7e3acdf46103ece5b86f21c29f9b58587289 | [
"BSD-3-Clause"
] | 70 | 2015-12-05T12:33:10.000Z | 2022-03-03T04:56:58.000Z | tests/test_test_metadata.py | kbh2o/slash | 532b7e3acdf46103ece5b86f21c29f9b58587289 | [
"BSD-3-Clause"
] | 711 | 2015-10-06T11:01:48.000Z | 2022-02-09T12:40:47.000Z | tests/test_test_metadata.py | kbh2o/slash | 532b7e3acdf46103ece5b86f21c29f9b58587289 | [
"BSD-3-Clause"
] | 37 | 2015-10-13T11:00:51.000Z | 2022-02-08T07:28:11.000Z | # pylint: disable=redefined-outer-name
from .utils import TestCase
from .utils import run_tests_assert_success
import itertools
import os
import slash
import pytest
from .utils.suite_writer import Suite
@pytest.mark.parametrize('parametrize', [True, False])
def test_class_name(suite, suite_test, test_type, parametrize):
    """Class name is a Test* class for methods and None for plain functions."""
    if parametrize:
        suite_test.add_parameter(num_values=3)
    summary = suite.run()
    for result in summary.get_all_results_for_test(suite_test):
        class_name = result.test_metadata.class_name
        if test_type == 'method':
            assert class_name.startswith('Test')
            assert '(' not in class_name
        elif test_type == 'function':
            assert class_name is None
        else:
            raise NotImplementedError()  # pragma: no cover
@pytest.mark.parametrize('parametrize', [True, False])
def test_function_name(suite, suite_test, parametrize):
    """Function names keep the ``test_`` prefix and contain no dots or parens."""
    if parametrize:
        suite_test.add_parameter(num_values=3)
    summary = suite.run()
    for result in summary.get_all_results_for_test(suite_test):
        name = result.test_metadata.function_name
        assert name.startswith('test_')
        assert '.' not in name
        assert '(' not in name
def test_variation(suite, suite_test):
    """Variation values are keyed by 'fixture.param', not by the fixture name."""
    fixture = suite.slashconf.add_fixture()
    param = fixture.add_parameter()
    suite_test.depend_on_fixture(fixture)
    suite_test.append_line('slash.context.result.data["variation"] = slash.context.test.__slash__.variation.values.copy()')
    summary = suite.run()
    expected_key = '{}.{}'.format(fixture.name, param.name)
    for result in summary.get_all_results_for_test(suite_test):
        variation = result.data['variation']
        assert len(variation) == 1
        assert fixture.name not in variation
        assert expected_key in variation
def test_function_name_with_special_parameters(test_type):
    """Parameter values containing '.' or '(' never leak into function names."""
    suite = Suite()
    assert len(suite) == 0  # pylint: disable=len-as-condition
    suite_test = suite.add_test(type=test_type)
    values = ['a.b', 'a(b']
    suite_test.add_parameter(values=values)

    # we can't verify result because we would not be able to parse the function properly
    summary = suite.run(verify=False, sort=False)

    for result, value in itertools.zip_longest(summary.session.results, values):
        name = result.test_metadata.function_name
        assert value not in name
        assert '.' not in name
        assert '(' not in name
        assert name.startswith('test_')
def test_module_name_not_none_or_empty_string(suite):
    """Every result carries a truthy module name."""
    results = suite.run().session.results
    for result in results:
        assert result.test_metadata.module_name
def test_test_index(suite):
    """Results carry sequential 0-based and 1-based test indices."""
    session = suite.run().session
    index = None
    for index, result in enumerate(session.results):
        assert result.test_metadata.test_index0 == index
        assert result.test_metadata.test_index1 == index + 1
    assert index > 0
def test_set_test_name(test_metadata):
    """set_test_full_name overrides the default file-path-based string form."""
    assert test_metadata.file_path in str(test_metadata)
    new_name = 'some_custom_name'
    test_metadata.set_test_full_name(new_name)
    assert str(test_metadata) == '<{}>'.format(new_name)
def test_class_name_with_dot_parameters():
    """A dotted parameter value does not give a plain function a class name."""
    # pylint: disable=unused-argument
    @slash.parametrize('path', ['x.y'])
    def test_something(path):
        pass

    with slash.Session():
        loader = slash.loader.Loader()
        [test] = loader.get_runnables(test_something)  # pylint: disable=unbalanced-tuple-unpacking
        assert test.__slash__.class_name is None
def test_set_file_path(test_metadata):
    """set_file_path updates both the file_path attribute and the address."""
    new_path = '/tmp/file_path.py'
    assert new_path not in test_metadata.address
    test_metadata.set_file_path(new_path)
    assert test_metadata.file_path == new_path
    assert new_path in test_metadata.address
def test_mark_interactive(test_metadata):
    """mark_interactive flips the metadata's interactive flag on."""
    test_metadata.mark_interactive()
    assert test_metadata.is_interactive()
@pytest.fixture
def test_metadata(suite, suite_test):
    """Metadata of *suite_test* after running *suite* once."""
    result = suite.run()[suite_test]
    return result.test_metadata
class TestMetadataTest(TestCase):
    """Metadata checks against a generated test file run through slash."""

    loaded_tests = []

    def setUp(self):
        @slash.hooks.register
        def tests_loaded(tests):  # pylint: disable=unused-variable
            TestMetadataTest.loaded_tests = tests

        super(TestMetadataTest, self).setUp()
        self.root = self.get_new_path()
        self.filename = os.path.join(self.root, "testfile.py")
        with open(self.filename, "w") as test_file:
            test_file.write(_TEST_FILE_TEMPLATE)
        with slash.Session() as session:
            self.session = run_tests_assert_success(self.filename, session=session)
        self.tests = self.loaded_tests
        # Sort results deterministically by their metadata string form.
        unsorted_results = self.session.results.iter_test_results()
        self.results = sorted(unsorted_results, key=lambda result: str(result.test_metadata))

    def test_tests_have_correct_metadata(self):
        results = self.session.results.iter_test_results()
        for test, result in zip(self.tests, results):
            self.assertIs(test.__slash__, result.test_metadata)

    def test_simple_test_address(self):
        expected = "{}:T001.test_method".format(self.filename)
        self.assertEqual(self.results[0].test_metadata.address, expected)

    def test_parameterized_test_address(self):
        parameterized = {x.test_metadata.address for x in self.results[1:]}
        expected = set(
            "{0}:T002.test_parameters(after:c={2},b={3},before:a={1})".format(self.filename, a, c, b)
            for a, b, c in itertools.product([1, 2], [3, 4], [5, 6]))
        self.assertEqual(parameterized, expected)
_TEST_FILE_TEMPLATE = """
import slash
class T001(slash.Test):
def test_method(self):
pass
class T002(slash.Test):
@slash.parameters.iterate(a=[1, 2])
def before(self, a):
pass
@slash.parameters.iterate(b=[3, 4])
def test_parameters(self, b):
pass
@slash.parameters.iterate(c=[5, 6])
def after(self, c):
pass
"""
| 34.842697 | 123 | 0.704611 |
from .utils import TestCase
from .utils import run_tests_assert_success
import itertools
import os
import slash
import pytest
from .utils.suite_writer import Suite
@pytest.mark.parametrize('parametrize', [True, False])
def test_class_name(suite, suite_test, test_type, parametrize):
if parametrize:
suite_test.add_parameter(num_values=3)
summary = suite.run()
for result in summary.get_all_results_for_test(suite_test):
if test_type == 'method':
assert result.test_metadata.class_name.startswith('Test')
assert '(' not in result.test_metadata.class_name
elif test_type == 'function':
assert result.test_metadata.class_name is None
else:
raise NotImplementedError()
@pytest.mark.parametrize('parametrize', [True, False])
def test_function_name(suite, suite_test, parametrize):
if parametrize:
suite_test.add_parameter(num_values=3)
summary = suite.run()
for result in summary.get_all_results_for_test(suite_test):
function_name = result.test_metadata.function_name
assert function_name.startswith('test_')
assert '.' not in result.test_metadata.function_name
assert '(' not in result.test_metadata.function_name
def test_variation(suite, suite_test):
fixture = suite.slashconf.add_fixture()
param = fixture.add_parameter()
suite_test.depend_on_fixture(fixture)
suite_test.append_line('slash.context.result.data["variation"] = slash.context.test.__slash__.variation.values.copy()')
summary = suite.run()
for result in summary.get_all_results_for_test(suite_test):
assert len(result.data['variation']) == 1
assert fixture.name not in result.data['variation']
assert '{}.{}'.format(fixture.name, param.name) in result.data['variation']
def test_function_name_with_special_parameters(test_type):
suite = Suite()
assert len(suite) == 0
suite_test = suite.add_test(type=test_type)
values = ['a.b', 'a(b']
suite_test.add_parameter(values=values)
# TODO: this will change once we properly support variations metadata # pylint: disable=fixme
summary = suite.run(verify=False, sort=False)
for result, value in itertools.zip_longest(summary.session.results, values):
function_name = result.test_metadata.function_name
assert value not in function_name
assert '.' not in result.test_metadata.function_name
assert '(' not in result.test_metadata.function_name
assert function_name.startswith('test_')
def test_module_name_not_none_or_empty_string(suite):
for result in suite.run().session.results:
assert result.test_metadata.module_name
def test_test_index(suite):
index = None
session = suite.run().session
for index, result in enumerate(session.results):
assert result.test_metadata.test_index0 == index
assert result.test_metadata.test_index1 == index + 1
assert index > 0
def test_set_test_name(test_metadata):
assert test_metadata.file_path in str(test_metadata)
custom_name = 'some_custom_name'
test_metadata.set_test_full_name(custom_name)
assert str(test_metadata) == '<{}>'.format(custom_name)
def test_class_name_with_dot_parameters():
# pylint: disable=unused-argument
@slash.parametrize('path', ['x.y'])
def test_something(path):
pass
with slash.Session() as s: # pylint: disable=unused-variable
loader = slash.loader.Loader()
[test] = loader.get_runnables(test_something) # pylint: disable=unbalanced-tuple-unpacking
assert test.__slash__.class_name is None
def test_set_file_path(test_metadata):
file_path = '/tmp/file_path.py'
assert file_path not in test_metadata.address
test_metadata.set_file_path(file_path)
assert test_metadata.file_path == file_path
assert file_path in test_metadata.address
def test_mark_interactive(test_metadata):
test_metadata.mark_interactive()
assert test_metadata.is_interactive()
@pytest.fixture
def test_metadata(suite, suite_test):
return suite.run()[suite_test].test_metadata
class TestMetadataTest(TestCase):
loaded_tests = []
def setUp(self):
@slash.hooks.register
def tests_loaded(tests): # pylint: disable=unused-variable
TestMetadataTest.loaded_tests = tests
super(TestMetadataTest, self).setUp()
self.root = self.get_new_path()
self.filename = os.path.join(self.root, "testfile.py")
with open(self.filename, "w") as f:
f.write(_TEST_FILE_TEMPLATE)
with slash.Session() as s:
self.session = run_tests_assert_success(self.filename, session=s)
self.tests = self.loaded_tests
self.results = list(self.session.results.iter_test_results())
self.results.sort(key=lambda result: str(result.test_metadata))
def test_tests_have_correct_metadata(self):
for test, result in zip(self.tests, self.session.results.iter_test_results()):
self.assertIs(test.__slash__, result.test_metadata)
def test_simple_test_address(self):
self.assertEqual(self.results[0].test_metadata.address, "{}:T001.test_method".format(self.filename))
def test_parameterized_test_address(self):
parameterized = set(x.test_metadata.address for x in self.results[1:])
self.assertEqual(parameterized, set(
"{0}:T002.test_parameters(after:c={2},b={3},before:a={1})".format(self.filename, a, c, b)
for a, b, c in itertools.product([1, 2], [3, 4], [5, 6])))
_TEST_FILE_TEMPLATE = """
import slash
class T001(slash.Test):
def test_method(self):
pass
class T002(slash.Test):
@slash.parameters.iterate(a=[1, 2])
def before(self, a):
pass
@slash.parameters.iterate(b=[3, 4])
def test_parameters(self, b):
pass
@slash.parameters.iterate(c=[5, 6])
def after(self, c):
pass
"""
| true | true |
790058a2de53125b428bf3d765bcb497f29db152 | 4,871 | py | Python | recipe_scrapers/_abstract.py | squat-house/recipe-scrapers | 72d2f69196f95210d2ea248f3b5cb446f94fd2b2 | [
"MIT"
] | null | null | null | recipe_scrapers/_abstract.py | squat-house/recipe-scrapers | 72d2f69196f95210d2ea248f3b5cb446f94fd2b2 | [
"MIT"
] | 1 | 2022-01-08T10:49:17.000Z | 2022-01-08T10:49:30.000Z | recipe_scrapers/_abstract.py | AlexRogalskiy/recipe-scrapers | ff378b3ba4ae7ff4cbc113ca13991f887c1c70e7 | [
"MIT"
] | 1 | 2022-01-08T10:49:09.000Z | 2022-01-08T10:49:09.000Z | import inspect
from collections import OrderedDict
from json.decoder import JSONDecodeError
from typing import Optional, Tuple, Union
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
from recipe_scrapers.settings import settings
from ._schemaorg import SchemaOrg
# some sites close their content for 'bots', so user-agent must be supplied
HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7"
}
class AbstractScraper:
    """Base class for recipe scrapers.

    Fetches the page (or reads it from a file-like object in test mode),
    parses it with BeautifulSoup, attempts Schema.org extraction, and wires
    in any plugins configured via ``settings.PLUGINS``.
    """

    def __init__(
        self,
        url,
        proxies: Optional[str] = None,  # allows us to specify optional proxy server
        timeout: Optional[
            Union[float, Tuple, None]
        ] = None,  # allows us to specify optional timeout for request
    ):
        if settings.TEST_MODE:  # when testing, we load a file
            page_data = url.read()
            url = "https://test.example.com/"
        else:
            page_data = requests.get(
                url, headers=HEADERS, proxies=proxies, timeout=timeout
            ).content

        self.soup = BeautifulSoup(page_data, "html.parser")
        self.url = url
        # Attempt to read Schema.org data. Gracefully fail if it raises an exception
        # parsing the JSON; the scraper subclass can fall back to BeautifulSoup.
        try:
            self.schema = SchemaOrg(page_data)
        except JSONDecodeError:
            pass

        # Attach the plugins as instructed in settings.PLUGINS by wrapping
        # each bound method the plugin declares interest in.
        for name, _func in inspect.getmembers(self, inspect.ismethod):
            current_method = getattr(self.__class__, name)
            for plugin in reversed(settings.PLUGINS):
                if plugin.should_run(self.host(), name):
                    current_method = plugin.run(current_method)
            setattr(self.__class__, name, current_method)

    @classmethod
    def host(cls) -> str:
        """ get the host of the url, so we can use the correct scraper """
        raise NotImplementedError("This should be implemented.")

    def canonical_url(self):
        """Return the page's canonical URL, falling back to the fetched URL."""
        canonical_link = self.soup.find("link", {"rel": "canonical", "href": True})
        if canonical_link:
            return urljoin(self.url, canonical_link["href"])
        return self.url

    def title(self):
        raise NotImplementedError("This should be implemented.")

    def total_time(self):
        """ total time it takes to preparate the recipe in minutes """
        raise NotImplementedError("This should be implemented.")

    def yields(self):
        """ The number of servings or items in the recipe """
        raise NotImplementedError("This should be implemented.")

    def image(self):
        raise NotImplementedError("This should be implemented.")

    def nutrients(self):
        raise NotImplementedError("This should be implemented.")

    def language(self):
        """
        Human language the recipe is written in.

        May be overridden by individual scrapers.
        """
        candidate_languages = OrderedDict()
        html = self.soup.find("html", {"lang": True})
        if html:  # guard: not every page declares <html lang="...">
            candidate_languages[html.get("lang")] = True

        # Deprecated: check for a meta http-equiv header
        # See: https://www.w3.org/International/questions/qa-http-and-lang
        meta_language = (
            self.soup.find(
                "meta",
                {
                    "http-equiv": lambda x: x and x.lower() == "content-language",
                    "content": True,
                },
            )
            if settings.META_HTTP_EQUIV
            else None
        )
        if meta_language:
            language = meta_language.get("content").split(",", 1)[0]
            if language:
                candidate_languages[language] = True

        # If other langs exist, remove 'en' commonly generated by HTML editors
        if len(candidate_languages) > 1:
            candidate_languages.pop("en", None)

        # Return the first candidate language
        return candidate_languages.popitem(last=False)[0]

    def ingredients(self):
        raise NotImplementedError("This should be implemented.")

    def instructions(self):
        raise NotImplementedError("This should be implemented.")

    def ratings(self):
        raise NotImplementedError("This should be implemented.")

    def author(self):
        raise NotImplementedError("This should be implemented.")

    def reviews(self):
        raise NotImplementedError("This should be implemented.")

    def links(self):
        """Return attrs dicts of all anchors with a meaningful href."""
        invalid_href = {"#", ""}
        links_html = self.soup.findAll("a", href=True)
        return [link.attrs for link in links_html if link["href"] not in invalid_href]

    def site_name(self):
        """Return the og:site_name meta content, or None when absent."""
        meta = self.soup.find("meta", property="og:site_name")
        return meta.get("content") if meta else None
| 34.302817 | 110 | 0.625539 | import inspect
from collections import OrderedDict
from json.decoder import JSONDecodeError
from typing import Optional, Tuple, Union
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
from recipe_scrapers.settings import settings
from ._schemaorg import SchemaOrg
HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7"
}
class AbstractScraper:
    """Base class for per-site recipe scrapers.

    Fetches the page with :mod:`requests` (or reads a file-like fixture in
    test mode), parses it with BeautifulSoup, and exposes accessor methods
    that concrete subclasses override.  At construction time, every method
    is run through the registered plugins, which may wrap it.
    """
    def __init__(
        self,
        url,
        proxies: Optional[str] = None,  # passed straight through to requests.get
        timeout: Optional[
            Union[float, Tuple, None]
        ] = None,  # passed straight through to requests.get
    ):
        if settings.TEST_MODE:
            # In test mode ``url`` is a file-like object with the page HTML.
            page_data = url.read()
            url = "https://test.example.com/"
        else:
            page_data = requests.get(
                url, headers=HEADERS, proxies=proxies, timeout=timeout
            ).content
        self.soup = BeautifulSoup(page_data, "html.parser")
        self.url = url
        try:
            # Parse schema.org structured data; pages whose embedded data
            # fails to parse as JSON are tolerated (self.schema stays unset).
            self.schema = SchemaOrg(page_data)
        except JSONDecodeError:
            pass
        # Let each plugin wrap the methods it declares interest in.  Note the
        # wrapped method is installed on the *class*, not the instance.
        for name, func in inspect.getmembers(self, inspect.ismethod):
            current_method = getattr(self.__class__, name)
            for plugin in reversed(settings.PLUGINS):
                if plugin.should_run(self.host(), name):
                    current_method = plugin.run(current_method)
            setattr(self.__class__, name, current_method)
    @classmethod
    def host(cls) -> str:
        # Subclasses return the domain name they handle.
        raise NotImplementedError("This should be implemented.")
    def canonical_url(self):
        """Return the <link rel="canonical"> target resolved against
        ``self.url``, or ``self.url`` itself when the page declares none."""
        canonical_link = self.soup.find("link", {"rel": "canonical", "href": True})
        if canonical_link:
            return urljoin(self.url, canonical_link["href"])
        return self.url
    # The following accessors define the scraper interface; each subclass
    # implements the ones its site supports.
    def title(self):
        raise NotImplementedError("This should be implemented.")
    def total_time(self):
        raise NotImplementedError("This should be implemented.")
    def yields(self):
        raise NotImplementedError("This should be implemented.")
    def image(self):
        raise NotImplementedError("This should be implemented.")
    def nutrients(self):
        raise NotImplementedError("This should be implemented.")
    def language(self):
        """Return the page's declared language code.

        The ``<html lang="...">`` attribute is the preferred candidate; a
        Content-Language meta tag (only consulted when
        ``settings.META_HTTP_EQUIV`` is enabled) is added as a fallback.
        When more than one candidate exists, ``'en'`` is dropped on the
        assumption that a more specific tag beats the common default.
        """
        candidate_languages = OrderedDict()
        html = self.soup.find("html", {"lang": True})
        candidate_languages[html.get("lang")] = True
        meta_language = (
            self.soup.find(
                "meta",
                {
                    "http-equiv": lambda x: x and x.lower() == "content-language",
                    "content": True,
                },
            )
            if settings.META_HTTP_EQUIV
            else None
        )
        if meta_language:
            # Content-Language may list several languages; keep the first.
            language = meta_language.get("content").split(",", 1)[0]
            if language:
                candidate_languages[language] = True
        if len(candidate_languages) > 1:
            candidate_languages.pop("en", None)
        # popitem(last=False) returns the earliest-inserted candidate.
        return candidate_languages.popitem(last=False)[0]
    def ingredients(self):
        raise NotImplementedError("This should be implemented.")
    def instructions(self):
        raise NotImplementedError("This should be implemented.")
    def ratings(self):
        raise NotImplementedError("This should be implemented.")
    def author(self):
        raise NotImplementedError("This should be implemented.")
    def reviews(self):
        raise NotImplementedError("This should be implemented.")
    def links(self):
        """Return the attribute dicts of all ``<a href=...>`` tags, skipping
        empty and fragment-only hrefs."""
        invalid_href = {"#", ""}
        links_html = self.soup.findAll("a", href=True)
        return [link.attrs for link in links_html if link["href"] not in invalid_href]
    def site_name(self):
        """Return the ``og:site_name`` meta content, or ``None`` if absent."""
        meta = self.soup.find("meta", property="og:site_name")
        return meta.get("content") if meta else None
| true | true |
790059ba05d19aa2cc658ad3cad7c078cf5126b7 | 1,905 | py | Python | bin/convertfavourites.py | redmond-penguin/musicplayer | 1b342b885e6e97d073b92e624d0ae5dc38f7c687 | [
"BSD-3-Clause"
] | null | null | null | bin/convertfavourites.py | redmond-penguin/musicplayer | 1b342b885e6e97d073b92e624d0ae5dc38f7c687 | [
"BSD-3-Clause"
] | null | null | null | bin/convertfavourites.py | redmond-penguin/musicplayer | 1b342b885e6e97d073b92e624d0ae5dc38f7c687 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
import os
import argparse

# Make the project's packages importable when running from the bin/ directory.
scriptpath = os.path.abspath(os.path.dirname(__file__))
includepath = os.path.dirname(scriptpath)
sys.path.insert(0, includepath)

from audio.audiofilefactory import AudioFileFactory
from audio.audioconversionservice import AudioConversionService
from filesystem.filelist import FileList

# Refuse to start converting another favourite when less than this much free
# space remains, since WAV output is large.
MIN_FREE_BYTES = 700 * 1024 * 1024

parser = argparse.ArgumentParser(description="Convert music files", epilog="File types are auto-derived from the filename extensions.")
parser.add_argument("source_path", help="The source path")
parser.add_argument("destination_path", help="The destination path")
parser.add_argument("list_of_favourites", help="The list of favourites")
args = parser.parse_args()
source_path = args.source_path
destination_path = args.destination_path
list_of_favourites = args.list_of_favourites

# The favourites file holds one directory name (relative to source_path) per line.
with open(list_of_favourites) as favourites_file:
    favourites = [line.strip() for line in favourites_file.readlines()]

factory = AudioFileFactory()
for favourite in favourites:
    # Re-check the free space before each favourite; conversions are large.
    statvfs = os.statvfs(destination_path)
    free_space = statvfs.f_bavail * statvfs.f_bsize
    print("Space left: " + str(free_space / 1024 / 1024 / 1024) + " Gb")
    if free_space < MIN_FREE_BYTES:
        print("Skipping " + favourite + ", less than 700 Mb left on device (" + str(free_space / 1024 / 1024) + " Mb)")
        continue
    target_dir = os.path.join(destination_path, favourite)
    if os.path.isdir(target_dir):
        print("Skipping " + favourite + ", path already exists")
        continue
    os.mkdir(target_dir)
    # "file_list" instead of "list": the original shadowed the builtin.
    file_list = FileList(None, factory)
    file_list.add_path_to_list(os.path.join(source_path, favourite))
    for audio_file in file_list:
        source_file_path = audio_file.get_path()
        # Same basename as the source, but with a .wav extension.
        destination_file_path = os.path.join(target_dir, os.path.splitext(os.path.basename(source_file_path))[0] + ".wav")
        destination_file = factory.create_file(destination_file_path)
        AudioConversionService().convert_audio_file(audio_file, destination_file)
| 43.295455 | 135 | 0.76378 |
import sys
import os
import argparse
scriptpath = os.path.abspath(os.path.dirname(__file__))
includepath = os.path.dirname(scriptpath)
sys.path.insert(0, includepath)
from audio.audiofilefactory import AudioFileFactory
from audio.audioconversionservice import AudioConversionService
from filesystem.filelist import FileList
parser = argparse.ArgumentParser(description="Convert music files", epilog="File types are auto-derived from the filename extensions.")
parser.add_argument("source_path", help="The source path")
parser.add_argument("destination_path", help="The destination path")
parser.add_argument("list_of_favourites", help="The list of favourites")
args = parser.parse_args()
source_path = args.source_path
destination_path = args.destination_path
list_of_favourites = args.list_of_favourites
with open(list_of_favourites) as f:
content = f.readlines()
content = [x.strip() for x in content]
factory = AudioFileFactory()
for favourite in content:
statvfs = os.statvfs(destination_path)
free_space = statvfs.f_bavail * statvfs.f_bsize
print("Space left: " + str(free_space / 1024 / 1024 / 1024) + " Gb")
if free_space < 700 * 1024 * 1024:
print("Skipping " + favourite + ", less than 700 Mb left on device (" + str(free_space / 1024 / 1024) + " Mb)")
continue
target_dir = os.path.join(destination_path, favourite)
if os.path.isdir(target_dir):
print("Skipping " + favourite + ", path already exists")
continue
os.mkdir(target_dir)
list = FileList(None, factory)
list.add_path_to_list(os.path.join(source_path, favourite))
for f in list:
source_file_path = f.get_path()
destination_file_path = os.path.join(target_dir, os.path.splitext(os.path.basename(source_file_path))[0] + ".wav")
destination_file = factory.create_file(destination_file_path)
AudioConversionService().convert_audio_file(f, destination_file)
| true | true |
79005b6f2e93b9c66058376da6758f50e42b6ef9 | 801 | py | Python | example/example/urls.py | lambdalisue/django-codemirror-widget | 23c81b41e59bfe81ea5bfce5dd78e6d93b97d2aa | [
"MIT"
] | 39 | 2015-03-22T21:57:28.000Z | 2021-11-04T08:17:15.000Z | example/example/urls.py | lambdalisue/django-codemirror-widget | 23c81b41e59bfe81ea5bfce5dd78e6d93b97d2aa | [
"MIT"
] | 13 | 2015-09-09T05:14:11.000Z | 2020-03-10T17:00:25.000Z | example/example/urls.py | lambdalisue/django-codemirror-widget | 23c81b41e59bfe81ea5bfce5dd78e6d93b97d2aa | [
"MIT"
] | 17 | 2015-09-08T15:52:15.000Z | 2020-02-28T03:20:02.000Z | """example URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Delegate everything under /polls/ to the polls application's URLconf.
    path("polls/", include("polls.urls")),
    # Built-in Django admin site.
    path("admin/", admin.site.urls),
]
| 34.826087 | 77 | 0.702871 | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("polls/", include("polls.urls")),
path("admin/", admin.site.urls),
]
| true | true |
79005b903abc8821d56ee3f0a97a1aa520676826 | 68,903 | py | Python | src/sage_docbuild/__init__.py | sensen1/sage | d6c5cd9be78cc448ee4c54bac93385b1244a234c | [
"BSL-1.0"
] | 1 | 2021-03-15T21:45:56.000Z | 2021-03-15T21:45:56.000Z | src/sage_docbuild/__init__.py | sensen1/sage | d6c5cd9be78cc448ee4c54bac93385b1244a234c | [
"BSL-1.0"
] | null | null | null | src/sage_docbuild/__init__.py | sensen1/sage | d6c5cd9be78cc448ee4c54bac93385b1244a234c | [
"BSL-1.0"
] | null | null | null | """
The documentation builder
It is the starting point for building documentation, and is
responsible to figure out what to build and with which options. The
actual documentation build for each individual document is then done
in a subprocess call to sphinx, see :func:`builder_helper`.
* The builder can be configured in build_options.py
* The sphinx subprocesses are configured in conf.py
"""
# ****************************************************************************
# Copyright (C) 2008-2009 Mike Hansen <mhansen@gmail.com>
# 2009-2010 Mitesh Patel <qed777@gmail.com>
# 2009-2015 J. H. Palmieri <palmieri@math.washington.edu>
# 2009 Carl Witty <cwitty@newtonlabs.com>
# 2010-2017 Jeroen Demeyer <jdemeyer@cage.ugent.be>
# 2012 William Stein <wstein@gmail.com>
# 2012-2014 Nicolas M. Thiery <nthiery@users.sf.net>
# 2012-2015 André Apitzsch <andre.apitzsch@etit.tu-chemnitz.de>
# 2012 Florent Hivert <Florent.Hivert@univ-rouen.fr>
# 2013-2014 Volker Braun <vbraun.name@gmail.com>
# 2013 R. Andrew Ohana <andrew.ohana@gmail.com>
# 2015 Thierry Monteil <sage@lma.metelu.net>
# 2015 Marc Mezzarobba <marc@mezzarobba.net>
# 2015 Travis Scrimshaw <tscrim at ucdavis.edu>
# 2016-2017 Frédéric Chapoton <chapoton@math.univ-lyon1.fr>
# 2016 Erik M. Bray <erik.bray@lri.fr>
# 2017 Kwankyu Lee <ekwankyu@gmail.com>
# 2017 François Bissey <frp.bissey@gmail.com>
# 2018 Julian Rüth <julian.rueth@fsfe.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
import logging
import optparse
import os
import pickle
import re
import shutil
import subprocess
import sys
import time
import types
import warnings
logger = logging.getLogger(__name__)
import sphinx.util.console
import sphinx.ext.intersphinx
import sage.all
from sage.misc.cachefunc import cached_method
from sage.misc.misc import sage_makedirs
from sage.env import SAGE_DOC_SRC, SAGE_DOC, SAGE_SRC, DOT_SAGE
from .build_options import (LANGUAGES, SPHINXOPTS, PAPER, OMIT,
PAPEROPTS, ALLSPHINXOPTS, NUM_THREADS, WEBSITESPHINXOPTS,
INCREMENTAL_BUILD, ABORT_ON_ERROR)
##########################################
# Parallel Building Ref Manual #
##########################################
def build_ref_doc(args):
    """
    Build one component of the reference manual in a worker process.

    ``args`` is the packed tuple ``(doc, lang, format, kwds, *build_args)``
    handed to the multiprocessing pool.
    """
    doc, lang, format, kwds = args[:4]
    build_args = args[4:]
    # you must not use the inventory to build the inventory
    if format == 'inventory':
        kwds['use_multidoc_inventory'] = False
    getattr(ReferenceSubBuilder(doc, lang), format)(*build_args, **kwds)
##########################################
# Builders #
##########################################
def builder_helper(type):
    """
    Returns a function which builds the documentation for
    output type ``type``.

    TESTS:

    Check that :trac:`25161` has been resolved::

        sage: from sage_docbuild import DocBuilder, setup_parser
        sage: DocBuilder._options = setup_parser().parse_args([])[0] # builder_helper needs _options to be set

        sage: import sage_docbuild.sphinxbuild
        sage: def raiseBaseException():
        ....:     raise BaseException("abort pool operation")
        sage: original_runsphinx, sage_docbuild.sphinxbuild.runsphinx = sage_docbuild.sphinxbuild.runsphinx, raiseBaseException

        sage: from sage_docbuild import builder_helper, build_ref_doc
        sage: from sage_docbuild import _build_many as build_many
        sage: helper = builder_helper("html")
        sage: try:
        ....:     build_many(build_ref_doc, [("docname", "en", "html", {})])
        ....: except Exception as E:
        ....:     "Non-exception during docbuild: abort pool operation" in str(E)
        True
    """
    def f(self, *args, **kwds):
        # Assemble the sphinx-build command line for this builder/document.
        output_dir = self._output_dir(type)
        options = ALLSPHINXOPTS
        if self.name == 'website':
            # WEBSITESPHINXOPTS is either empty or " -A hide_pdf_links=1 "
            options += WEBSITESPHINXOPTS
        if kwds.get('use_multidoc_inventory', True):
            options += ' -D multidoc_first_pass=0'
        else:
            options += ' -D multidoc_first_pass=1'
        build_command = '-b %s -d %s %s %s %s'%(type, self._doctrees_dir(),
                                                options, self.dir,
                                                output_dir)
        logger.debug(build_command)
        # Run Sphinx with Sage's special logger.  runsphinx() reads its
        # arguments from sys.argv, hence the assignment below.
        sys.argv = ["sphinx-build"] + build_command.split()
        from .sphinxbuild import runsphinx
        try:
            runsphinx()
        except Exception:
            # Build errors are swallowed unless ABORT_ON_ERROR is set.
            if ABORT_ON_ERROR:
                raise
        except BaseException as e:
            # We need to wrap a BaseException that is not an Exception in a
            # regular Exception. Otherwise multiprocessing.Pool.get hangs, see
            # #25161
            if ABORT_ON_ERROR:
                raise Exception("Non-exception during docbuild: %s"%(e,), e)
        if "/latex" in output_dir:
            logger.warning("LaTeX file written to {}".format(output_dir))
        else:
            logger.warning(
                "Build finished. The built documents can be found in {}".
                format(output_dir))
    # Marker used by DocBuilder._output_formats() to discover these methods.
    f.is_output_format = True
    return f
class DocBuilder(object):
    """
    Build a single document living in ``SAGE_DOC_SRC/<lang>/<name>``.

    The output-format methods (``html``, ``latex``, ...) are generated by
    :func:`builder_helper` and attached as class attributes below.
    """
    def __init__(self, name, lang='en'):
        """
        INPUT:

        - ``name`` - the name of a subdirectory in SAGE_DOC_SRC, such as
          'tutorial' or 'bordeaux_2008'

        - ``lang`` - (default "en") the language of the document.
        """
        doc = name.split(os.path.sep)
        # "name" may carry an explicit language prefix, e.g. "fr/tutorial".
        if doc[0] in LANGUAGES:
            lang = doc[0]
            doc.pop(0)
        self.name = os.path.join(*doc)
        self.lang = lang
        self.dir = os.path.join(SAGE_DOC_SRC, self.lang, self.name)
    def _output_dir(self, type):
        """
        Returns the directory where the output of type ``type`` is stored.

        If the directory does not exist, then it will automatically be
        created.

        EXAMPLES::

            sage: from sage_docbuild import DocBuilder
            sage: b = DocBuilder('tutorial')
            sage: b._output_dir('html')
            '.../html/en/tutorial'
        """
        d = os.path.join(SAGE_DOC, type, self.lang, self.name)
        sage_makedirs(d)
        return d
    def _doctrees_dir(self):
        """
        Returns the directory where the doctrees are stored. If the
        directory does not exist, then it will automatically be
        created.

        EXAMPLES::

            sage: from sage_docbuild import DocBuilder
            sage: b = DocBuilder('tutorial')
            sage: b._doctrees_dir()
            '.../doctrees/en/tutorial'
        """
        d = os.path.join(SAGE_DOC, 'doctrees', self.lang, self.name)
        sage_makedirs(d)
        return d
    def _output_formats(self):
        """
        Returns a list of the possible output formats.

        EXAMPLES::

            sage: from sage_docbuild import DocBuilder
            sage: b = DocBuilder('tutorial')
            sage: b._output_formats()
            ['changes', 'html', 'htmlhelp', 'inventory', 'json', 'latex', 'linkcheck', 'pickle', 'web']
        """
        #Go through all the attributes of self and check to
        #see which ones have an 'is_output_format' attribute. These
        #are the ones created with builder_helper.
        output_formats = []
        for attr in dir(self):
            if hasattr(getattr(self, attr), 'is_output_format'):
                output_formats.append(attr)
        output_formats.sort()
        return output_formats
    def pdf(self):
        """
        Builds the PDF files for this document. This is done by first
        (re)-building the LaTeX output, going into that LaTeX
        directory, and running 'make all-pdf' there.

        EXAMPLES::

            sage: from sage_docbuild import DocBuilder
            sage: b = DocBuilder('tutorial')
            sage: b.pdf() #not tested
        """
        self.latex()
        tex_dir = self._output_dir('latex')
        pdf_dir = self._output_dir('pdf')
        if self.name == 'reference':
            # recover maths in tex, undoing what Sphinx did (trac #29993)
            tex_file = os.path.join(tex_dir, 'reference.tex')
            with open(tex_file) as f:
                ref = f.read()
            # NOTE(review): the substitution below is deliberately applied
            # twice -- presumably so back-to-back occurrences missed by the
            # non-overlapping first pass are also rewritten; confirm against
            # trac #29993 before simplifying.
            ref = re.sub(r'\\textbackslash{}', r'\\', ref)
            ref = re.sub(r'\\textbackslash{}', r'\\', ref)
            ref = re.sub(r'\\{', r'{', ref)
            ref = re.sub(r'\\}', r'}', ref)
            ref = re.sub(r'\\_', r'_', ref)
            ref = re.sub(r'\\textasciicircum{}', r'^', ref)
            with open(tex_file, 'w') as f:
                f.write(ref)
        # Run the Makefile Sphinx generated in the latex output directory,
        # then move the resulting PDFs into the pdf output directory.
        make_target = "cd '%s' && $MAKE %s && mv -f *.pdf '%s'"
        error_message = "failed to run $MAKE %s in %s"
        command = 'all-pdf'
        if subprocess.call(make_target%(tex_dir, command, pdf_dir), shell=True):
            raise RuntimeError(error_message%(command, tex_dir))
        logger.warning("Build finished. The built documents can be found in %s", pdf_dir)
    def clean(self, *args):
        """
        Remove the doctrees and the output directories for the given
        formats (default: all known output formats).
        """
        shutil.rmtree(self._doctrees_dir())
        output_formats = list(args) if args else self._output_formats()
        for format in output_formats:
            shutil.rmtree(self._output_dir(format), ignore_errors=True)
    # The output-format methods; each carries the ``is_output_format``
    # marker attribute used by _output_formats() above.
    html = builder_helper('html')
    pickle = builder_helper('pickle')
    web = pickle
    json = builder_helper('json')
    htmlhelp = builder_helper('htmlhelp')
    latex = builder_helper('latex')
    changes = builder_helper('changes')
    linkcheck = builder_helper('linkcheck')
    # import the customized builder for object.inv files
    inventory = builder_helper('inventory')
from .utils import build_many as _build_many
def build_many(target, args, processes=None):
    """
    Thin wrapper around `sage_docbuild.utils.build_many` which uses the
    docbuild settings ``NUM_THREADS`` and ``ABORT_ON_ERROR``.

    INPUT:

    - ``target`` -- callable to run in each worker process
    - ``args`` -- iterable of argument tuples, one per build task
    - ``processes`` -- number of worker processes (default: ``NUM_THREADS``)
    """
    if processes is None:
        processes = NUM_THREADS
    try:
        _build_many(target, args, processes=processes)
    except BaseException:
        # When ABORT_ON_ERROR is disabled, failures (even BaseExceptions
        # propagated from workers) are deliberately swallowed so the
        # remaining documents still get built.
        if ABORT_ON_ERROR:
            raise
##########################################
# Parallel Building Ref Manual #
##########################################
def build_other_doc(args):
    """
    Worker entry point: build one non-reference document from the packed
    tuple ``(document, name, kwds, *build_args)``.
    """
    document, name, kwds, *build_args = args
    logger.warning("\nBuilding %s.\n" % document)
    builder_method = getattr(get_builder(document), name)
    builder_method(*build_args, **kwds)
class AllBuilder(object):
    """
    A class used to build all of the documentation.
    """
    def __getattr__(self, attr):
        """
        For any attributes not explicitly defined, we just go through
        all of the documents and call their attr. For example,
        'AllBuilder().json()' will go through all of the documents
        and call the json() method on their builders.
        """
        from functools import partial
        return partial(self._wrapper, attr)
    def _wrapper(self, name, *args, **kwds):
        """
        This is the function which goes through all of the documents
        and does the actual building.
        """
        start = time.time()
        docs = self.get_all_documents()
        refs = [x for x in docs if x.endswith('reference')]
        others = [x for x in docs if not x.endswith('reference')]
        # Build the reference manual twice to resolve references. That is,
        # build once with the inventory builder to construct the intersphinx
        # inventory files, and then build the second time for real. So the
        # first build should be as fast as possible;
        logger.warning("\nBuilding reference manual, first pass.\n")
        for document in refs:
            getattr(get_builder(document), 'inventory')(*args, **kwds)
        logger.warning("Building reference manual, second pass.\n")
        sage_makedirs(os.path.join(SAGE_DOC, "html", "en", "reference", "_static"))
        for document in refs:
            getattr(get_builder(document), name)(*args, **kwds)
        # build the other documents in parallel
        L = [(doc, name, kwds) + args for doc in others]
        # Trac #31344: Work around crashes from multiprocessing
        if sys.platform == 'darwin':
            # On macOS, build sequentially in this process instead.
            for target in L:
                build_other_doc(target)
        else:
            build_many(build_other_doc, L)
        logger.warning("Elapsed time: %.1f seconds."%(time.time()-start))
        logger.warning("Done building the documentation!")
    def get_all_documents(self):
        """
        Returns a list of all of the documents. A document is a directory within one of
        the language subdirectories of SAGE_DOC_SRC specified by the global LANGUAGES
        variable.

        EXAMPLES::

            sage: from sage_docbuild import AllBuilder
            sage: documents = AllBuilder().get_all_documents()
            sage: 'en/tutorial' in documents
            True
            sage: documents[0] == 'en/reference'
            True
        """
        documents = []
        for lang in LANGUAGES:
            for document in os.listdir(os.path.join(SAGE_DOC_SRC, lang)):
                if (document not in OMIT
                    and os.path.isdir(os.path.join(SAGE_DOC_SRC, lang, document))):
                    documents.append(os.path.join(lang, document))
        # Ensure that the reference guide is compiled first so that links from
        # the other documents to it are correctly resolved.
        if 'en/reference' in documents:
            documents.remove('en/reference')
            documents.insert(0, 'en/reference')
        return documents
class WebsiteBuilder(DocBuilder):
    """
    Builds the website index page and mirrors its output one directory up,
    so it serves as the top-level landing page of the built documentation.
    """
    def html(self):
        """
        After we've finished building the website index page, copy
        everything one directory up.

        (This method used to also call ``create_html_redirects``; that
        call has been removed and the method deprecated.)
        """
        DocBuilder.html(self)
        html_output_dir = self._output_dir('html')
        for f in os.listdir(html_output_dir):
            src = os.path.join(html_output_dir, f)
            dst = os.path.join(html_output_dir, '..', f)
            if os.path.isdir(src):
                # copytree requires the destination not to exist yet.
                shutil.rmtree(dst, ignore_errors=True)
                shutil.copytree(src, dst)
            else:
                shutil.copy2(src, dst)
    def create_html_redirects(self):
        """
        Writes a number of small HTML files; these are files which used to
        contain the main content of the reference manual before splitting the
        manual into multiple documents. After the split, those files have
        moved, so in each old location, write a file which redirects to the new
        version. (This is so old URLs to pieces of the reference manual still
        open the correct files.)

        .. deprecated:: (trac #29993) no longer called by :meth:`html`.
        """
        from sage.misc.superseded import deprecation
        deprecation(29993, "This method was created in trac #6495 for backward compatibility. Not necessary anymore.")
        # The simple html template which will cause a redirect to the correct file.
        html_template = """<html><head>
<meta HTTP-EQUIV="REFRESH" content="0; url=%s">
</head><body></body></html>"""
        reference_dir = os.path.abspath(os.path.join(self._output_dir('html'),
                                                     '..', 'reference'))
        reference_builder = ReferenceBuilder('reference')
        refdir = os.path.join(SAGE_DOC_SRC, 'en', 'reference')
        for document in reference_builder.get_all_documents(refdir):
            # path is the directory above reference dir
            path = os.path.abspath(os.path.join(reference_dir, '..'))
            # the name of the subdocument
            document_name = document.split('/')[1]
            # the sage directory within a subdocument, for example
            # local/share/doc/sage/html/en/reference/algebras/sage
            sage_directory = os.path.join(path, document, 'sage')
            # Walk through all of the files in the sage_directory
            for dirpath, dirnames, filenames in os.walk(sage_directory):
                # a string like reference/algebras/sage/algebras
                short_path = dirpath[len(path)+1:]
                # a string like sage/algebras
                shorter_path = os.path.join(*short_path.split(os.sep)[2:])
                # make the shorter path directory
                try:
                    os.makedirs(os.path.join(reference_dir, shorter_path))
                except OSError:
                    pass
                for filename in filenames:
                    if not filename.endswith('html'):
                        continue
                    # the name of the html file we are going to create
                    redirect_filename = os.path.join(reference_dir, shorter_path, filename)
                    # the number of levels up we need to use in the relative url
                    levels_up = len(shorter_path.split(os.sep))
                    # the relative url that we will redirect to
                    redirect_url = "/".join(['..']*levels_up + [document_name, shorter_path, filename])
                    # write the html file which performs the redirect
                    with open(redirect_filename, 'w') as f:
                        print(redirect_filename)
                        f.write(html_template % redirect_url)
    def clean(self):
        """
        When we clean the output for the website index, we need to
        remove all of the HTML that were placed in the parent
        directory.
        """
        html_output_dir = self._output_dir('html')
        parent_dir = os.path.realpath(os.path.join(html_output_dir, '..'))
        for filename in os.listdir(html_output_dir):
            parent_filename = os.path.join(parent_dir, filename)
            if not os.path.exists(parent_filename):
                continue
            if os.path.isdir(parent_filename):
                shutil.rmtree(parent_filename, ignore_errors=True)
            else:
                os.unlink(parent_filename)
        DocBuilder.clean(self)
class ReferenceBuilder(AllBuilder):
    """
    This class builds the reference manual. It uses DocBuilder to
    build the top-level page and ReferenceSubBuilder for each
    sub-component.
    """
    def __init__(self, name, lang='en'):
        """
        Records the reference manual's name, in case it's not
        identical to 'reference'.
        """
        AllBuilder.__init__(self)
        doc = name.split(os.path.sep)
        # "name" may carry an explicit language prefix, e.g. "fr/reference".
        if doc[0] in LANGUAGES:
            lang = doc[0]
            doc.pop(0)
        self.name = doc[0]
        self.lang = lang
    def _output_dir(self, type, lang='en'):
        """
        Return the directory where the output of type ``type`` is stored.

        If the directory does not exist, then it will automatically be
        created.

        EXAMPLES::

            sage: from sage_docbuild import ReferenceBuilder
            sage: b = ReferenceBuilder('reference')
            sage: b._output_dir('html')
            '.../html/en/reference'
        """
        d = os.path.join(SAGE_DOC, type, lang, self.name)
        sage_makedirs(d)
        return d
    def _refdir(self, lang):
        """Return the source directory of the reference manual for ``lang``."""
        return os.path.join(SAGE_DOC_SRC, lang, self.name)
    def _build_bibliography(self, lang, format, *args, **kwds):
        """
        Build the bibliography only

        The bibliography references.aux is referenced by the other
        manuals and needs to be built first.
        """
        refdir = self._refdir(lang)
        references = [
            (doc, lang, format, kwds) + args for doc in self.get_all_documents(refdir)
            if doc == 'reference/references'
        ]
        build_many(build_ref_doc, references)
    def _build_everything_except_bibliography(self, lang, format, *args, **kwds):
        """
        Build the entire reference manual except the bibliography
        """
        refdir = self._refdir(lang)
        non_references = [
            (doc, lang, format, kwds) + args for doc in self.get_all_documents(refdir)
            if doc != 'reference/references'
        ]
        build_many(build_ref_doc, non_references)
    def _wrapper(self, format, *args, **kwds):
        """
        Builds reference manuals. For each language, it builds the
        top-level document and its components.

        For the 'pdf' format it additionally writes a master index.html
        page linking all the component PDFs.
        """
        for lang in LANGUAGES:
            refdir = self._refdir(lang)
            if not os.path.exists(refdir):
                continue
            logger.info('Building bibliography')
            self._build_bibliography(lang, format, *args, **kwds)
            logger.info('Bibliography finished, building dependent manuals')
            self._build_everything_except_bibliography(lang, format, *args, **kwds)
            # The html refman must be build at the end to ensure correct
            # merging of indexes and inventories.
            # Sphinx is run here in the current process (not in a
            # subprocess) and the IntersphinxCache gets populated to be
            # used for the second pass of the reference manual and for
            # the other documents.
            getattr(DocBuilder(self.name, lang), format)(*args, **kwds)
            # PDF: we need to build master index file which lists all
            # of the PDF file. So we create an html file, based on
            # the file index.html from the "website" target.
            if format == 'pdf':
                # First build the website page. This only takes a few seconds.
                getattr(get_builder('website'), 'html')()
                website_dir = os.path.join(SAGE_DOC, 'html', 'en', 'website')
                output_dir = self._output_dir(format, lang)
                # Install in output_dir a symlink to the directory containing static files.
                try:
                    os.symlink(os.path.join(website_dir, '_static'), os.path.join(output_dir, '_static'))
                except FileExistsError:
                    pass
                # Now modify website's index.html page and write it to
                # output_dir.
                with open(os.path.join(website_dir, 'index.html')) as f:
                    html = f.read().replace('Documentation', 'Reference')
                html_output_dir = os.path.dirname(website_dir)
                html = html.replace('http://www.sagemath.org',
                                    os.path.join(html_output_dir, 'index.html'))
                # From index.html, we want the preamble and the tail.
                html_end_preamble = html.find('<h1>Sage Reference')
                html_bottom = html.rfind('</table>') + len('</table>')
                # For the content, we modify doc/en/reference/index.rst, which
                # has two parts: the body and the table of contents.
                with open(os.path.join(SAGE_DOC_SRC, lang, 'reference', 'index.rst')) as f:
                    rst = f.read()
                # Get rid of todolist and miscellaneous rst markup.
                rst = rst.replace('.. _reference-manual:\n\n', '')
                rst = re.sub(r'\\\\', r'\\', rst)
                # Replace rst links with html links. There are three forms:
                #
                #   `blah`__   followed by   __ LINK
                #
                #   `blah <LINK>`_
                #
                #   :doc:`blah <module/index>`
                #
                # Change the first and the second forms to
                #
                #   <a href="LINK">blah</a>
                #
                # Change the third form to
                #
                #   <a href="module/module.pdf">blah <img src="_static/pdf.png" /></a>
                #
                rst = re.sub(r'`([^`\n]*)`__.*\n\n__ (.*)',
                             r'<a href="\2">\1</a>.', rst)
                rst = re.sub(r'`([^<\n]*)\s+<(.*)>`_',
                             r'<a href="\2">\1</a>', rst)
                rst = re.sub(r':doc:`([^<]*?)\s+<(.*)/index>`',
                             r'<a href="\2/\2.pdf">\1 <img src="_static/pdf.png"/></a>', rst)
                # Body: add paragraph <p> markup.
                start = rst.rfind('*\n') + 1
                end = rst.find('\nUser Interfaces')
                rst_body = rst[start:end]
                rst_body = rst_body.replace('\n\n', '</p>\n<p>')
                # TOC: don't include the indices
                start = rst.find('\nUser Interfaces')
                end = rst.find('Indices and Tables')
                rst_toc = rst[start:end]
                # change * to <li>; change rst headers to html headers
                rst_toc = re.sub(r'\*(.*)\n',
                                 r'<li>\1</li>\n', rst_toc)
                rst_toc = re.sub(r'\n([A-Z][a-zA-Z, ]*)\n[=]*\n',
                                 r'</ul>\n\n\n<h2>\1</h2>\n\n<ul>\n', rst_toc)
                rst_toc = re.sub(r'\n([A-Z][a-zA-Z, ]*)\n[-]*\n',
                                 r'</ul>\n\n\n<h3>\1</h3>\n\n<ul>\n', rst_toc)
                # now write the file.
                with open(os.path.join(output_dir, 'index.html'), 'w') as new_index:
                    new_index.write(html[:html_end_preamble])
                    new_index.write('<h1> Sage Reference Manual (PDF version)'+ '</h1>')
                    new_index.write(rst_body)
                    new_index.write('<ul>')
                    new_index.write(rst_toc)
                    new_index.write('</ul>\n\n')
                    new_index.write(html[html_bottom:])
                logger.warning('''
PDF documents have been created in subdirectories of
  %s
Alternatively, you can open
  %s
for a webpage listing all of the documents.''' % (output_dir,
                                                  os.path.join(output_dir,
                                                               'index.html')))
    def get_all_documents(self, refdir):
        """
        Returns a list of all reference manual components to build.

        We add a component name if it's a subdirectory of the manual's
        directory and contains a file named 'index.rst'.

        We return the largest component (most subdirectory entries)
        first since they will take the longest to build.

        EXAMPLES::

            sage: from sage_docbuild import ReferenceBuilder
            sage: b = ReferenceBuilder('reference')
            sage: refdir = os.path.join(os.environ['SAGE_DOC_SRC'], 'en', b.name)
            sage: sorted(b.get_all_documents(refdir))
            ['reference/algebras',
             'reference/arithgroup',
             ...,
             'reference/valuations']
        """
        documents = []
        for doc in os.listdir(refdir):
            directory = os.path.join(refdir, doc)
            if os.path.exists(os.path.join(directory, 'index.rst')):
                n = len(os.listdir(directory))
                # Negated entry count so sorting puts the largest first.
                documents.append((-n, os.path.join(self.name, doc)))
        return [ doc[1] for doc in sorted(documents) ]
class ReferenceSubBuilder(DocBuilder):
"""
This class builds sub-components of the reference manual. It is
responsible for making sure that the auto generated reST files for the
Sage library are up to date.
When building any output, we must first go through and check
to see if we need to update any of the autogenerated reST
files. There are two cases where this would happen:
1. A new module gets added to one of the toctrees.
2. The actual module gets updated and possibly contains a new
title.
"""
    def __init__(self, *args, **kwds):
        # Same signature as DocBuilder; additionally wraps every
        # output-format method so reST generation runs before each build.
        DocBuilder.__init__(self, *args, **kwds)
        self._wrap_builder_helpers()
    def _wrap_builder_helpers(self):
        """
        Replace each output-format method (those marked with
        ``is_output_format``) on this *instance* by a partial of
        :meth:`_wrapper`, preserving the original's metadata.
        """
        from functools import partial, update_wrapper
        for attr in dir(self):
            if hasattr(getattr(self, attr), 'is_output_format'):
                f = partial(self._wrapper, attr)
                f.is_output_format = True
                update_wrapper(f, getattr(self, attr))
                setattr(self, attr, f)
    def _wrapper(self, build_type, *args, **kwds):
        """
        This is the wrapper around the builder_helper methods that
        goes through and makes sure things are up to date.

        It regenerates the auto-generated reST files (all of them if the
        relevant options changed, otherwise only for new/updated modules)
        and then delegates to ``DocBuilder.<build_type>``.
        """
        # Force regeneration of all modules if the inherited
        # and/or underscored members options have changed.
        cache = self.get_cache()
        force = False
        try:
            if (cache['option_inherited'] != self._options.inherited or
                cache['option_underscore'] != self._options.underscore):
                logger.info("Detected change(s) in inherited and/or underscored members option(s).")
                force = True
        except KeyError:
            # Options not recorded yet: first build with this cache.
            force = True
        cache['option_inherited'] = self._options.inherited
        cache['option_underscore'] = self._options.underscore
        self.save_cache()
        # After "sage -clone", refresh the reST file mtimes in
        # environment.pickle.
        if self._options.update_mtimes:
            logger.info("Checking for reST file mtimes to update...")
            self.update_mtimes()
        if force:
            # Write reST files for all modules from scratch.
            self.clean_auto()
            for module_name in self.get_all_included_modules():
                self.write_auto_rest_file(module_name)
        else:
            # Write reST files for new and updated modules.
            for module_name in self.get_new_and_updated_modules():
                self.write_auto_rest_file(module_name)
        # Copy over the custom reST files from _sage
        _sage = os.path.join(self.dir, '_sage')
        if os.path.exists(_sage):
            logger.info("Copying over custom reST files from %s ...", _sage)
            shutil.copytree(_sage, os.path.join(self.dir, 'sage'))
        getattr(DocBuilder, build_type)(self, *args, **kwds)
def cache_filename(self):
"""
Return the filename where the pickle of the reference cache
is stored.
"""
return os.path.join(self._doctrees_dir(), 'reference.pickle')
    @cached_method
    def get_cache(self):
        """
        Retrieve the reference cache which contains the options previously used
        by the reference builder.

        If it doesn't exist, then we just return an empty dictionary. If it
        is corrupted, return an empty dictionary.

        Because of ``@cached_method``, later callers mutate and share this
        same dictionary; :meth:`save_cache` writes it back to disk.
        """
        filename = self.cache_filename()
        if not os.path.exists(filename):
            return {}
        with open(self.cache_filename(), 'rb') as file:
            try:
                cache = pickle.load(file)
            except Exception:
                # Any unpickling failure means a stale/corrupt cache file.
                logger.debug("Cache file '%s' is corrupted; ignoring it..." % filename)
                cache = {}
            else:
                logger.debug("Loaded the reference cache: %s", filename)
        return cache
def save_cache(self):
"""
Pickle the current reference cache for later retrieval.
"""
cache = self.get_cache()
with open(self.cache_filename(), 'wb') as file:
pickle.dump(cache, file)
logger.debug("Saved the reference cache: %s", self.cache_filename())
def get_sphinx_environment(self):
"""
Returns the Sphinx environment for this project.
"""
from sphinx.environment import BuildEnvironment
class FakeConfig(object):
values = tuple()
class FakeApp(object):
def __init__(self, dir):
self.srcdir = dir
self.config = FakeConfig()
env_pickle = os.path.join(self._doctrees_dir(), 'environment.pickle')
try:
with open(env_pickle, 'rb') as f:
env = pickle.load(f)
env.app = FakeApp(self.dir)
env.config.values = env.app.config.values
logger.debug("Opened Sphinx environment: %s", env_pickle)
return env
except IOError as err:
logger.debug("Failed to open Sphinx environment: %s", err)
def update_mtimes(self):
"""
Updates the modification times for reST files in the Sphinx
environment for this project.
"""
env = self.get_sphinx_environment()
if env is not None:
for doc in env.all_docs:
env.all_docs[doc] = time.time()
logger.info("Updated %d reST file mtimes", len(env.all_docs))
# This is the only place we need to save (as opposed to
# load) Sphinx's pickle, so we do it right here.
env_pickle = os.path.join(self._doctrees_dir(),
'environment.pickle')
# When cloning a new branch (see
# SAGE_LOCAL/bin/sage-clone), we hard link the doc output.
# To avoid making unlinked, potentially inconsistent
# copies of the environment, we *don't* use
# env.topickle(env_pickle), which first writes a temporary
# file. We adapt sphinx.environment's
# BuildEnvironment.topickle:
# remove unpicklable attributes
env.set_warnfunc(None)
del env.config.values
with open(env_pickle, 'wb') as picklefile:
# remove potentially pickling-problematic values from config
for key, val in vars(env.config).items():
if key.startswith('_') or isinstance(val, (types.ModuleType,
types.FunctionType,
type)):
del env.config[key]
pickle.dump(env, picklefile, pickle.HIGHEST_PROTOCOL)
logger.debug("Saved Sphinx environment: %s", env_pickle)
def get_modified_modules(self):
"""
Returns an iterator for all the modules that have been modified
since the documentation was last built.
"""
env = self.get_sphinx_environment()
if env is None:
logger.debug("Stopped check for modified modules.")
return
try:
added, changed, removed = env.get_outdated_files(False)
logger.info("Sphinx found %d modified modules", len(changed))
except OSError as err:
logger.debug("Sphinx failed to determine modified modules: %s", err)
return
for name in changed:
# Only pay attention to files in a directory sage/... In
# particular, don't treat a file like 'sagetex.rst' in
# doc/en/reference/misc as an autogenerated file: see
# #14199.
if name.startswith('sage' + os.sep):
yield name
def print_modified_modules(self):
"""
Prints a list of all the modules that have been modified since
the documentation was last built.
"""
for module_name in self.get_modified_modules():
print(module_name)
def get_all_rst_files(self, exclude_sage=True):
"""
Returns an iterator for all rst files which are not
autogenerated.
"""
for directory, subdirs, files in os.walk(self.dir):
if exclude_sage and directory.startswith(os.path.join(self.dir, 'sage')):
continue
for filename in files:
if not filename.endswith('.rst'):
continue
yield os.path.join(directory, filename)
def get_all_included_modules(self):
"""
Returns an iterator for all modules which are included in the
reference manual.
"""
for filename in self.get_all_rst_files():
for module in self.get_modules(filename):
yield module
def get_new_and_updated_modules(self):
"""
Return an iterator for all new and updated modules that appear in
the toctrees, and remove obsolete old modules.
"""
env = self.get_sphinx_environment()
if env is None:
all_docs = {}
else:
all_docs = env.all_docs
new_modules = []
updated_modules = []
old_modules = []
for module_name in self.get_all_included_modules():
docname = module_name.replace('.', os.path.sep)
if docname not in all_docs:
new_modules.append(module_name)
yield module_name
continue
# get the modification timestamp of the reST doc for the module
mtime = all_docs[docname]
try:
with warnings.catch_warnings():
# primarily intended to ignore deprecation warnings
warnings.simplefilter("ignore")
__import__(module_name)
except ImportError as err:
logger.error("Warning: Could not import %s %s", module_name, err)
raise
module_filename = sys.modules[module_name].__file__
if (module_filename.endswith('.pyc') or module_filename.endswith('.pyo')):
source_filename = module_filename[:-1]
if (os.path.exists(source_filename)): module_filename = source_filename
newtime = os.path.getmtime(module_filename)
if newtime > mtime:
updated_modules.append(module_name)
yield module_name
else: # keep good old module
old_modules.append(module_name)
removed_modules = []
for docname in all_docs.keys():
if docname.startswith('sage' + os.path.sep):
module_name = docname.replace(os.path.sep, '.')
if not (module_name in old_modules or module_name in updated_modules):
try:
os.remove(os.path.join(self.dir, docname) + '.rst')
except OSError: # already removed
pass
logger.debug("Deleted auto-generated reST file %s".format(docname))
removed_modules.append(module_name)
logger.info("Found %d new modules", len(new_modules))
logger.info("Found %d updated modules", len(updated_modules))
logger.info("Removed %d obsolete modules", len(removed_modules))
def print_new_and_updated_modules(self):
"""
Print all the modules that appear in the toctrees that
are newly included or updated.
"""
for module_name in self.get_new_and_updated_modules():
print(module_name)
def get_modules(self, filename):
"""
Given a filename for a reST file, return an iterator for
all of the autogenerated reST files that it includes.
"""
# Create the regular expression used to detect an autogenerated file
auto_re = re.compile(r'^\s*(..\/)*(sage(nb)?\/[\w\/]*)\s*$')
# Read the lines
with open(filename) as f:
lines = f.readlines()
for line in lines:
match = auto_re.match(line)
if match:
yield match.group(2).replace(os.path.sep, '.')
def get_module_docstring_title(self, module_name):
"""
Returns the title of the module from its docstring.
"""
#Try to import the module
try:
__import__(module_name)
except ImportError as err:
logger.error("Warning: Could not import %s %s", module_name, err)
return "UNABLE TO IMPORT MODULE"
module = sys.modules[module_name]
#Get the docstring
doc = module.__doc__
if doc is None:
doc = module.doc if hasattr(module, 'doc') else ""
#Extract the title
i = doc.find('\n')
if i != -1:
return doc[i+1:].lstrip().splitlines()[0]
else:
return doc
def auto_rest_filename(self, module_name):
"""
Returns the name of the file associated to a given module
EXAMPLES::
sage: from sage_docbuild import ReferenceSubBuilder
sage: ReferenceSubBuilder("reference").auto_rest_filename("sage.combinat.partition")
'.../doc/en/reference/sage/combinat/partition.rst'
"""
return self.dir + os.path.sep + module_name.replace('.',os.path.sep) + '.rst'
    def write_auto_rest_file(self, module_name):
        """
        Write the autogenerated reST file for ``module_name``.

        Only modules under the ``sage`` package are processed; anything
        else is silently skipped.
        """
        if not module_name.startswith('sage'):
            return
        filename = self.auto_rest_filename(module_name)
        sage_makedirs(os.path.dirname(filename))
        # Derive the page title from the module docstring; fall back to a
        # visible placeholder so a missing title is easy to spot.
        title = self.get_module_docstring_title(module_name)
        if title == '':
            logger.error("Warning: Missing title for %s", module_name)
            title = "MISSING TITLE"
        with open(filename, 'w') as outfile:
            # Don't doctest the autogenerated file.
            outfile.write(".. nodoctest\n\n")
            # Now write the actual content.
            # The ".__init__" suffix is stripped so package pages get the
            # package's own cross-reference label.
            outfile.write(".. _%s:\n\n"%(module_name.replace(".__init__","")))
            outfile.write(title + '\n')
            outfile.write('='*len(title) + "\n\n")
            outfile.write('.. This file has been autogenerated.\n\n')
            # Second template slot is the optional ':inherited-members:' flag.
            inherited = ':inherited-members:' if self._options.inherited else ''
            automodule = '''
.. automodule:: %s
   :members:
   :undoc-members:
   :show-inheritance:
   %s
'''
            outfile.write(automodule % (module_name, inherited))
def clean_auto(self):
"""
Remove all autogenerated reST files.
"""
try:
shutil.rmtree(os.path.join(self.dir, 'sage'))
logger.debug("Deleted auto-generated reST files in: %s",
os.path.join(self.dir, 'sage'))
except OSError:
pass
def get_unincluded_modules(self):
"""
Returns an iterator for all the modules in the Sage library
which are not included in the reference manual.
"""
#Make a dictionary of the included modules
included_modules = {}
for module_name in self.get_all_included_modules():
included_modules[module_name] = True
base_path = os.path.join(SAGE_SRC, 'sage')
for directory, subdirs, files in os.walk(base_path):
for filename in files:
if not (filename.endswith('.py') or
filename.endswith('.pyx')):
continue
path = os.path.join(directory, filename)
#Create the module name
module_name = path[len(base_path):].replace(os.path.sep, '.')
module_name = 'sage' + module_name
module_name = module_name[:-4] if module_name.endswith('pyx') else module_name[:-3]
#Exclude some ones -- we don't want init the manual
if module_name.endswith('__init__') or module_name.endswith('all'):
continue
if module_name not in included_modules:
yield module_name
def print_unincluded_modules(self):
"""
Prints all of the modules which are not included in the Sage
reference manual.
"""
for module_name in self.get_unincluded_modules():
print(module_name)
def print_included_modules(self):
"""
Prints all of the modules that are included in the Sage reference
manual.
"""
for module_name in self.get_all_included_modules():
print(module_name)
class SingleFileBuilder(DocBuilder):
    """
    This is the class used to build the documentation for a single
    user-specified file. If the file is called 'foo.py', then the
    documentation is built in ``DIR/foo/`` if the user passes the
    command line option "-o DIR", or in ``DOT_SAGE/docbuild/foo/``
    otherwise.
    """
    def __init__(self, path):
        """
        Set up a throwaway Sphinx source tree for a single file.

        INPUT:

        - ``path`` - the path to the file for which documentation
          should be built
        """
        self.lang = 'en'
        self.name = 'single_file'
        path = os.path.abspath(path)
        # Create docbuild and relevant subdirectories, e.g.,
        # the static and templates directories in the output directory.
        # By default, this is DOT_SAGE/docbuild/MODULE_NAME, but can
        # also be specified at the command line.
        module_name = os.path.splitext(os.path.basename(path))[0]
        latex_name = module_name.replace('_', r'\\_')
        # NOTE: self._options is a DocBuilder class attribute assigned in main().
        if self._options.output_dir:
            base_dir = os.path.join(self._options.output_dir, module_name)
            if os.path.exists(base_dir):
                logger.warning('Warning: Directory %s exists. It is safer to build in a new directory.' % base_dir)
        else:
            base_dir = os.path.join(DOT_SAGE, 'docbuild', module_name)
            # Remove any stale tree from a previous build; ignore if absent.
            try:
                shutil.rmtree(base_dir)
            except OSError:
                pass
        self.dir = os.path.join(base_dir, 'source')
        sage_makedirs(os.path.join(self.dir, "static"))
        sage_makedirs(os.path.join(self.dir, "templates"))
        # Write self.dir/conf.py
        conf = r"""# This file is automatically generated by {}, do not edit!
import sys, os
sys.path.append({!r})
from sage.docs.conf import *
html_static_path = [] + html_common_static_path
project = 'Documentation for {}'
release = 'unknown'
name = {!r}
html_title = project
html_short_title = project
htmlhelp_basename = name
extensions.remove('multidocs') # see #29651
extensions.remove('inventory_builder')
latex_domain_indices = False
latex_documents = [
    ('index', name + '.tex', 'Documentation for {}',
     'unknown', 'manual'),
]
""".format(__file__, self.dir, module_name, module_name, latex_name)
        if 'SAGE_DOC_UNDERSCORE' in os.environ:
            conf += r"""
def setup(app):
    app.connect('autodoc-skip-member', skip_member)
"""
        with open(os.path.join(self.dir, 'conf.py'), 'w') as conffile:
            conffile.write(conf)
        # Write self.dir/index.rst
        title = 'Docs for file %s' % path
        heading = title + "\n" + ("=" * len(title))
        index = r"""{}
.. This file is automatically generated by {}, do not edit!
.. automodule:: {}
   :members:
   :undoc-members:
   :show-inheritance:
""".format(heading, __file__, module_name)
        with open(os.path.join(self.dir, 'index.rst'), 'w') as indexfile:
            indexfile.write(index)
        # Create link from original file to self.dir. Note that we
        # append self.dir to sys.path in conf.py. This is reasonably
        # safe (but not perfect), since we just created self.dir.
        try:
            os.symlink(path, os.path.join(self.dir, os.path.basename(path)))
        except OSError:
            pass
    def _output_dir(self, type):
        """
        Return the directory where the output of type ``type`` is stored.

        If the directory does not exist, then it will automatically be
        created.
        """
        base_dir = os.path.split(self.dir)[0]
        d = os.path.join(base_dir, "output", type)
        sage_makedirs(d)
        return d
    def _doctrees_dir(self):
        """
        Return the directory where the doctrees are stored. If the
        directory does not exist, then it will automatically be
        created.
        """
        return self._output_dir('doctrees')
def get_builder(name):
    """
    Return an appropriate \\*Builder object for the document ``name``.

    DocBuilder and its subclasses do all the real work in building the
    documentation.  Exits with status 1 when ``name`` is unrecognized.
    """
    if name == 'all':
        return AllBuilder()
    if name.endswith('reference'):
        return ReferenceBuilder(name)
    if 'reference' in name and os.path.exists(os.path.join(SAGE_DOC_SRC, 'en', name)):
        return ReferenceSubBuilder(name)
    if name.endswith('website'):
        return WebsiteBuilder(name)
    if name.startswith('file='):
        path = name[5:]
        if path.endswith('.sage') or path.endswith('.pyx'):
            raise NotImplementedError('Building documentation for a single file only works for Python files.')
        return SingleFileBuilder(path)
    if name in get_documents() or name in AllBuilder().get_all_documents():
        return DocBuilder(name)
    print("'%s' is not a recognized document. Type 'sage --docbuild -D' for a list"%name)
    print("of documents, or 'sage --docbuild --help' for more help.")
    sys.exit(1)
def format_columns(lst, align='<', cols=None, indent=4, pad=3, width=80):
    """
    Format ``lst`` as a simple table and return it as a Unicode string.

    The number of columns is computed from ``width``, ``indent``, ``pad``
    and the longest entry, unless ``cols`` is given explicitly.  For help
    on Python's string formatter, see
    http://docs.python.org/library/string.html#format-string-syntax
    """
    size = max(map(len, lst)) + pad
    if cols is None:
        import math
        cols = math.trunc((width - indent) / size)
    margin = " " * indent
    pieces = [margin]
    for i, entry in enumerate(lst):
        if i and i % cols == 0:
            pieces.append("\n" + margin)
        pieces.append("{0:{1}{2}}".format(entry, align, size))
    pieces.append("\n")
    return "".join(pieces)
def help_usage(s="", compact=False):
    """
    Append and return a brief usage message for the Sage documentation
    builder.  If ``compact`` is False, a final newline is added.
    """
    s += "sage --docbuild [OPTIONS] DOCUMENT (FORMAT | COMMAND)"
    return s if compact else s + "\n"
def help_description(s="", compact=False):
    """
    Append and return a brief description of the Sage documentation
    builder.  If ``compact`` is False, a final newline is added.
    """
    text = (
        "Build or return information about Sage documentation.\n\n"
        "    DOCUMENT    name of the document to build\n"
        "    FORMAT      document output format\n"
        "    COMMAND     document-specific command\n\n"
        "Note that DOCUMENT may have the form 'file=/path/to/FILE',\n"
        "which builds the documentation for the specified file.\n\n"
        "A DOCUMENT and either a FORMAT or a COMMAND are required,\n"
        "unless a list of one or more of these is requested."
    )
    s += text
    return s if compact else s + "\n"
def help_examples(s=""):
    """
    Append and return some usage examples for the Sage documentation
    builder.
    """
    examples = (
        "Examples:\n"
        "    sage --docbuild -FDC all\n"
        "    sage --docbuild constructions pdf\n"
        "    sage --docbuild reference html -jv3\n"
        "    sage --docbuild --mathjax tutorial html\n"
        "    sage --docbuild reference print_unincluded_modules\n"
        "    sage --docbuild developer -j html --sphinx-opts -q,-aE --verbose 2"
    )
    return s + examples
def get_documents():
    """
    Return a list of document names the Sage documentation builder will
    accept as command-line arguments (with the leading 'en/' stripped).
    """
    all_builder = AllBuilder()
    names = all_builder.get_all_documents()
    return [name[3:] if name.startswith('en/') else name for name in names]
def help_documents(s=""):
    """
    Append and return a tabular list of documents, including a shortcut
    'all' for all documents, available to the Sage documentation builder.
    """
    docs = get_documents()
    parts = [
        "DOCUMENTs:\n",
        format_columns(docs + ['all (!)']),
        "(!) Builds everything.\n\n",
    ]
    if 'reference' in docs:
        parts.append("Other valid document names take the form 'reference/DIR', where\n")
        parts.append("DIR is a subdirectory of SAGE_DOC_SRC/en/reference/.\n")
        parts.append("This builds just the specified part of the reference manual.\n")
    parts.append("DOCUMENT may also have the form 'file=/path/to/FILE', which builds\n")
    parts.append("the documentation for the specified file.\n")
    return s + "".join(parts)
def get_formats():
    """
    Return a list of output formats the Sage documentation builder will
    accept on the command-line, with 'html' and 'pdf' first.
    """
    builder = DocBuilder('en/tutorial')
    other_formats = builder._output_formats()
    other_formats.remove('html')
    return ['html', 'pdf'] + other_formats
def help_formats(s=""):
    """
    Append and return a tabular list of output formats available to the
    Sage documentation builder.
    """
    return s + "FORMATs:\n" + format_columns(get_formats())
def help_commands(name='all', s=""):
    """
    Append and return a tabular list of commands, if any, the Sage
    documentation builder can run on the indicated document.  The
    default is to list all commands for all documents.
    """
    # To do: Generate the lists dynamically, using class attributes,
    # as with the Builders above.
    command_dict = { 'reference' : [
        'print_included_modules', 'print_modified_modules (*)',
        'print_unincluded_modules', 'print_new_and_updated_modules (*)',
    ] }
    for doc, commands in command_dict.items():
        if name in ('all', doc):
            s += "COMMANDs for the DOCUMENT '" + doc + "':\n"
            s += format_columns(commands)
            s += "(*) Since the last build.\n"
    return s
def help_message_long(option, opt_str, value, parser):
    """
    Print an extended help message for the Sage documentation builder
    and exit.
    """
    for help_func in (help_usage, help_description, help_documents,
                      help_formats, help_commands,
                      parser.format_option_help, help_examples):
        print(help_func())
    sys.exit(0)
def help_message_short(option=None, opt_str=None, value=None, parser=None,
                       error=False):
    """
    Print a help message for the Sage documentation builder.  The
    message includes command-line usage and a list of options, and is
    printed only on the first call.  When ``error`` is True the message
    is suppressed if the user already requested a list (e.g., documents,
    formats, commands).
    """
    if hasattr(parser.values, 'printed_help'):
        return
    if error is True:
        if not hasattr(parser.values, 'printed_list'):
            parser.print_help()
    else:
        parser.print_help()
        setattr(parser.values, 'printed_help', 1)
def help_wrapper(option, opt_str, value, parser):
    """
    A callback helper for command-line options to the Sage documentation
    builder that print lists, such as document names, formats, and
    document-specific commands.
    """
    dest = option.dest
    if dest == 'commands':
        print(help_commands(value), end="")
    if dest == 'documents':
        print(help_documents(), end="")
    if dest == 'formats':
        print(help_formats(), end="")
    if dest == 'all_documents':
        if value in ('en/reference', 'reference'):
            b = ReferenceBuilder('reference')
            refdir = os.path.join(os.environ['SAGE_DOC_SRC'], 'en', b.name)
            s = b.get_all_documents(refdir)
            # Put the bibliography first, because it needs to be built first:
            s.remove('reference/references')
            s.insert(0, 'reference/references')
        elif value == 'all':
            s = get_documents()
            # Put the reference manual first, because it needs to be built first:
            s.remove('reference')
            s.insert(0, 'reference')
        else:
            raise ValueError("argument for --all-documents must be either 'all'"
                             " or 'reference'")
        for d in s:
            print(d)
    # Remember that a list was printed so the short help can be suppressed.
    setattr(parser.values, 'printed_list', 1)
class IndentedHelpFormatter2(optparse.IndentedHelpFormatter, object):
    """
    Custom help formatter class for optparse's OptionParser.
    """
    def format_description(self, description):
        """
        Return a formatted description, preserving any original
        explicit new line characters.
        """
        if not description:
            return ""
        formatted = [self._format_text(chunk)
                     for chunk in description.split('\n')]
        return "\n".join(formatted) + "\n"
    def format_heading(self, heading):
        """
        Return a formatted heading using the superclass' formatter.

        If the heading is 'options', up to case, it is converted to ALL
        CAPS, so that it matches the 'OPTIONS' token in the builder's
        usage message.
        """
        if heading.lower() == 'options':
            heading = "OPTIONS"
        return super(IndentedHelpFormatter2, self).format_heading(heading)
def setup_parser():
    """
    Set up and return a command-line OptionParser instance for the
    Sage documentation builder.

    Options are grouped into "Standard" and "Advanced"; the list-printing
    options use callbacks (help_wrapper etc.) rather than stored values.
    """
    # Documentation: http://docs.python.org/library/optparse.html
    parser = optparse.OptionParser(add_help_option=False,
                                   usage=help_usage(compact=True),
                                   formatter=IndentedHelpFormatter2(),
                                   description=help_description(compact=True))
    # Standard options. Note: We use explicit option.dest names
    # to avoid ambiguity.
    standard = optparse.OptionGroup(parser, "Standard")
    standard.add_option("-h", "--help",
                        action="callback", callback=help_message_short,
                        help="show a help message and exit")
    standard.add_option("-H", "--help-all",
                        action="callback", callback=help_message_long,
                        help="show an extended help message and exit")
    standard.add_option("-D", "--documents", dest="documents",
                        action="callback", callback=help_wrapper,
                        help="list all available DOCUMENTs")
    standard.add_option("-F", "--formats", dest="formats",
                        action="callback", callback=help_wrapper,
                        help="list all output FORMATs")
    standard.add_option("-C", "--commands", dest="commands",
                        type="string", metavar="DOC",
                        action="callback", callback=help_wrapper,
                        help="list all COMMANDs for DOCUMENT DOC; use 'all' to list all")
    standard.add_option("-i", "--inherited", dest="inherited",
                        default=False, action="store_true",
                        help="include inherited members in reference manual; may be slow, may fail for PDF output")
    standard.add_option("-u", "--underscore", dest="underscore",
                        default=False, action="store_true",
                        help="include variables prefixed with '_' in reference manual; may be slow, may fail for PDF output")
    standard.add_option("-j", "--mathjax", "--jsmath", dest="mathjax",
                        action="store_true",
                        help="render math using MathJax; FORMATs: html, json, pickle, web")
    standard.add_option("--no-plot", dest="no_plot",
                        action="store_true",
                        help="do not include graphics auto-generated using the '.. plot' markup")
    # Note the inverted dest: passing the flag *clears* skip_tests.
    standard.add_option("--include-tests-blocks", dest="skip_tests", default=True,
                        action="store_false",
                        help="include TESTS blocks in the reference manual")
    standard.add_option("--no-pdf-links", dest="no_pdf_links",
                        action="store_true",
                        help="do not include PDF links in DOCUMENT 'website'; FORMATs: html, json, pickle, web")
    standard.add_option("--warn-links", dest="warn_links",
                        default=False, action="store_true",
                        help="issue a warning whenever a link is not properly resolved; equivalent to '--sphinx-opts -n' (sphinx option: nitpicky)")
    standard.add_option("--check-nested", dest="check_nested",
                        action="store_true",
                        help="check picklability of nested classes in DOCUMENT 'reference'")
    standard.add_option("-N", "--no-colors", dest="color", default=True,
                        action="store_false",
                        help="do not color output; does not affect children")
    standard.add_option("-q", "--quiet", dest="verbose",
                        action="store_const", const=0,
                        help="work quietly; same as --verbose=0")
    standard.add_option("-v", "--verbose", dest="verbose",
                        type="int", default=1, metavar="LEVEL",
                        action="store",
                        help="report progress at LEVEL=0 (quiet), 1 (normal), 2 (info), or 3 (debug); does not affect children")
    standard.add_option("-o", "--output", dest="output_dir", default=None,
                        metavar="DIR", action="store",
                        help="if DOCUMENT is a single file ('file=...'), write output to this directory")
    parser.add_option_group(standard)
    # Advanced options.
    advanced = optparse.OptionGroup(parser, "Advanced",
                                    "Use these options with care.")
    advanced.add_option("-S", "--sphinx-opts", dest="sphinx_opts",
                        type="string", metavar="OPTS",
                        action="store",
                        help="pass comma-separated OPTS to sphinx-build")
    advanced.add_option("-U", "--update-mtimes", dest="update_mtimes",
                        default=False, action="store_true",
                        help="before building reference manual, update modification times for auto-generated reST files")
    advanced.add_option("-k", "--keep-going", dest="keep_going",
                        default=False, action="store_true",
                        help="Do not abort on errors but continue as much as possible after an error")
    advanced.add_option("--all-documents", dest="all_documents",
                        type="str", metavar="ARG",
                        action="callback", callback=help_wrapper,
                        help="if ARG is 'reference', list all subdocuments"
                             " of en/reference. If ARG is 'all', list all main"
                             " documents")
    parser.add_option_group(advanced)
    return parser
def setup_logger(verbose=1, color=True):
    r"""
    Set up a Python Logger instance for the Sage documentation builder.

    The optional arguments set the logger's level and message format.

    EXAMPLES::

        sage: from sage_docbuild import setup_logger, logger
        sage: setup_logger()
        sage: type(logger)
        <class 'logging.Logger'>
    """
    # Set up colors. Adapted from sphinx.cmdline.
    import sphinx.util.console as c
    if not color or not sys.stdout.isatty() or not c.color_terminal():
        c.nocolor()
    # Available colors: black, darkgray, (dark)red, dark(green),
    # brown, yellow, (dark)blue, purple, fuchsia, turquoise, teal,
    # lightgray, white. Available styles: reset, bold, faint,
    # standout, underline, blink.
    # Set up log record formats.
    format_std = "%(message)s"
    formatter = logging.Formatter(format_std)
    # format_debug = "%(module)s #%(lineno)s %(funcName)s() %(message)s"
    fields = ['%(module)s', '#%(lineno)s', '%(funcName)s()', '%(message)s']
    colors = ['darkblue', 'darkred', 'brown', 'reset']
    styles = ['reset', 'reset', 'reset', 'reset']
    format_debug = ""
    for i in range(len(fields)):
        format_debug += c.colorize(styles[i], c.colorize(colors[i], fields[i]))
        # Bug fix: the original condition `i != len(fields)` was always
        # true (i never reaches len(fields)), so a stray trailing space
        # was appended after the last field.
        if i != len(fields) - 1:
            format_debug += " "
    # Note: There's also Handler.setLevel(). The argument is the
    # lowest severity message that the respective logger or handler
    # will pass on. The default levels are DEBUG, INFO, WARNING,
    # ERROR, and CRITICAL. We use "WARNING" for normal verbosity and
    # "ERROR" for quiet operation. It's possible to define custom
    # levels. See the documentation for details.
    if verbose == 0:
        logger.setLevel(logging.ERROR)
    if verbose == 1:
        logger.setLevel(logging.WARNING)
    if verbose == 2:
        logger.setLevel(logging.INFO)
    if verbose == 3:
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter(format_debug)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
class IntersphinxCache:
    """
    Replace sphinx.ext.intersphinx.fetch_inventory by an in-memory
    cached version.
    """
    def __init__(self):
        # Maps (uri, inv) pairs to previously fetched inventories.
        self.inventories = {}
        self.real_fetch_inventory = sphinx.ext.intersphinx.fetch_inventory
        # Monkey-patch sphinx so all fetches go through the cache.
        sphinx.ext.intersphinx.fetch_inventory = self.fetch_inventory
    def fetch_inventory(self, app, uri, inv):
        """
        Return the result of ``sphinx.ext.intersphinx.fetch_inventory()``
        from the cache when possible; otherwise fetch it and cache the
        result.
        """
        key = (uri, inv)
        if key not in self.inventories:
            self.inventories[key] = self.real_fetch_inventory(app, uri, inv)
        return self.inventories[key]
def main():
    """
    Command-line entry point: parse arguments, configure logging and
    environment variables, then build the requested document.
    """
    # Parse the command-line.
    parser = setup_parser()
    options, args = parser.parse_args()
    # Builders read options from this DocBuilder class attribute.
    DocBuilder._options = options
    # Get the name and type (target format) of the document we are
    # trying to build.
    try:
        name, type = args
    except ValueError:
        help_message_short(parser=parser, error=True)
        sys.exit(1)
    # Set up module-wide logging.
    setup_logger(options.verbose, options.color)
    def excepthook(*exc_info):
        # Log uncaught exceptions instead of dumping a raw traceback.
        logger.error('Error building the documentation.', exc_info=exc_info)
        if INCREMENTAL_BUILD:
            logger.error('''
    Note: incremental documentation builds sometimes cause spurious
    error messages. To be certain that these are real errors, run
    "make doc-clean" first and try again.''')
    sys.excepthook = excepthook
    # Process selected options.
    #
    # MathJax: this check usually has no practical effect, since
    # SAGE_DOC_MATHJAX is set to "True" by the script sage-env.
    # To disable MathJax, set SAGE_DOC_MATHJAX to "no" or "False".
    if options.mathjax or (os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'no'
                           and os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'False'):
        os.environ['SAGE_DOC_MATHJAX'] = 'True'
    if options.check_nested:
        os.environ['SAGE_CHECK_NESTED'] = 'True'
    if options.underscore:
        os.environ['SAGE_DOC_UNDERSCORE'] = "True"
    global ALLSPHINXOPTS, WEBSITESPHINXOPTS, ABORT_ON_ERROR
    if options.sphinx_opts:
        ALLSPHINXOPTS += options.sphinx_opts.replace(',', ' ') + " "
    if options.no_pdf_links:
        WEBSITESPHINXOPTS = " -A hide_pdf_links=1 "
    if options.warn_links:
        ALLSPHINXOPTS += "-n "
    if options.no_plot:
        os.environ['SAGE_SKIP_PLOT_DIRECTIVE'] = 'yes'
    if options.skip_tests:
        os.environ['SAGE_SKIP_TESTS_BLOCKS'] = 'True'
    ABORT_ON_ERROR = not options.keep_going
    # Delete empty directories. This is needed in particular for empty
    # directories due to "git checkout" which never deletes empty
    # directories it leaves behind. See Trac #20010.
    for dirpath, dirnames, filenames in os.walk(SAGE_DOC_SRC, topdown=False):
        if not dirnames + filenames:
            logger.warning('Deleting empty directory {0}'.format(dirpath))
            os.rmdir(dirpath)
    # Set up Intersphinx cache (instantiating it installs the monkey-patch).
    C = IntersphinxCache()
    builder = getattr(get_builder(name), type)
    builder()
| 39.082813 | 148 | 0.583719 |
import logging
import optparse
import os
import pickle
import re
import shutil
import subprocess
import sys
import time
import types
import warnings
logger = logging.getLogger(__name__)
import sphinx.util.console
import sphinx.ext.intersphinx
import sage.all
from sage.misc.cachefunc import cached_method
from sage.misc.misc import sage_makedirs
from sage.env import SAGE_DOC_SRC, SAGE_DOC, SAGE_SRC, DOT_SAGE
from .build_options import (LANGUAGES, SPHINXOPTS, PAPER, OMIT,
PAPEROPTS, ALLSPHINXOPTS, NUM_THREADS, WEBSITESPHINXOPTS,
INCREMENTAL_BUILD, ABORT_ON_ERROR)
ref)
ref = re.sub(r'\\textasciicircum{}', r'^', ref)
with open(tex_file, 'w') as f:
f.write(ref)
make_target = "cd '%s' && $MAKE %s && mv -f *.pdf '%s'"
error_message = "failed to run $MAKE %s in %s"
command = 'all-pdf'
if subprocess.call(make_target%(tex_dir, command, pdf_dir), shell=True):
raise RuntimeError(error_message%(command, tex_dir))
logger.warning("Build finished. The built documents can be found in %s", pdf_dir)
def clean(self, *args):
shutil.rmtree(self._doctrees_dir())
output_formats = list(args) if args else self._output_formats()
for format in output_formats:
shutil.rmtree(self._output_dir(format), ignore_errors=True)
html = builder_helper('html')
pickle = builder_helper('pickle')
web = pickle
json = builder_helper('json')
htmlhelp = builder_helper('htmlhelp')
latex = builder_helper('latex')
changes = builder_helper('changes')
linkcheck = builder_helper('linkcheck')
# import the customized builder for object.inv files
inventory = builder_helper('inventory')
from .utils import build_many as _build_many
def build_many(target, args, processes=None):
    """
    Map ``target`` over ``args`` using ``processes`` worker processes,
    defaulting to ``NUM_THREADS``.

    When ``ABORT_ON_ERROR`` is False (i.e. --keep-going was given),
    failures are deliberately swallowed so the remaining documents can
    still build; otherwise the exception propagates.
    """
    if processes is None:
        processes = NUM_THREADS
    try:
        _build_many(target, args, processes=processes)
    except BaseException:
        # Cleanup: the original bound the exception to an unused name.
        if ABORT_ON_ERROR:
            raise
##########################################
# Parallel Building Ref Manual #
##########################################
def build_other_doc(args):
    """
    Build a single (non-reference) document.

    ``args`` packs, in order: the document name, the builder method name,
    its keyword arguments, and any positional arguments.
    """
    document, name, kwds = args[0], args[1], args[2]
    rest = args[3:]
    logger.warning("\nBuilding %s.\n" % document)
    getattr(get_builder(document), name)(*rest, **kwds)
class AllBuilder(object):
    """
    Builder that dispatches a build command to every document.
    """
    def __getattr__(self, attr):
        """
        For any attribute lookup, return a callable that runs _wrapper
        with the attribute name as the builder method to invoke on each
        document.
        """
        from functools import partial
        return partial(self._wrapper, attr)
    def _wrapper(self, name, *args, **kwds):
        """
        Build the reference manual (in two passes), then all other
        documents, invoking builder method ``name`` on each.
        """
        start = time.time()
        docs = self.get_all_documents()
        refs = [x for x in docs if x.endswith('reference')]
        others = [x for x in docs if not x.endswith('reference')]
        # Build the reference manual twice to resolve references. That is,
        # build once with the inventory builder to construct the intersphinx
        # inventory files, and then build the second time for real. So the
        # first build should be as fast as possible;
        logger.warning("\nBuilding reference manual, first pass.\n")
        for document in refs:
            getattr(get_builder(document), 'inventory')(*args, **kwds)
        logger.warning("Building reference manual, second pass.\n")
        sage_makedirs(os.path.join(SAGE_DOC, "html", "en", "reference", "_static"))
        for document in refs:
            getattr(get_builder(document), name)(*args, **kwds)
        # build the other documents in parallel
        L = [(doc, name, kwds) + args for doc in others]
        # Trac #31344: Work around crashes from multiprocessing
        if sys.platform == 'darwin':
            for target in L:
                build_other_doc(target)
        else:
            build_many(build_other_doc, L)
        logger.warning("Elapsed time: %.1f seconds."%(time.time()-start))
        logger.warning("Done building the documentation!")
    def get_all_documents(self):
        """
        Return a list of all documents as ``lang/name`` paths, with the
        reference manual first.
        """
        documents = []
        for lang in LANGUAGES:
            for document in os.listdir(os.path.join(SAGE_DOC_SRC, lang)):
                if (document not in OMIT
                    and os.path.isdir(os.path.join(SAGE_DOC_SRC, lang, document))):
                    documents.append(os.path.join(lang, document))
        # Ensure that the reference guide is compiled first so that links from
        # the other documents to it are correctly resolved.
        if 'en/reference' in documents:
            documents.remove('en/reference')
            documents.insert(0, 'en/reference')
        return documents
class WebsiteBuilder(DocBuilder):
    """
    Builds the Sage website, then mirrors the built pages into the
    directory one level above the regular html output directory.
    """

    def html(self):
        """
        Build the html documentation, then copy every entry of the html
        output directory (files and subdirectories) one level up.
        """
        DocBuilder.html(self)
        html_output_dir = self._output_dir('html')
        for f in os.listdir(html_output_dir):
            src = os.path.join(html_output_dir, f)
            dst = os.path.join(html_output_dir, '..', f)
            if os.path.isdir(src):
                shutil.rmtree(dst, ignore_errors=True)
                shutil.copytree(src, dst)
            else:
                shutil.copy2(src, dst)

    def create_html_redirects(self):
        """
        Deprecated (trac #29993): write html files into the reference
        directory that redirect to the actual location of each document.
        """
        from sage.misc.superseded import deprecation
        deprecation(29993, "This method was created in trac #6495 for backward compatibility. Not necessary anymore.")
        # The simple html template which will cause a redirect to the correct file.
        html_template = """<html><head>
<meta HTTP-EQUIV="REFRESH" content="0; url=%s">
</head><body></body></html>"""
        reference_dir = os.path.abspath(os.path.join(self._output_dir('html'),
                                                     '..', 'reference'))
        reference_builder = ReferenceBuilder('reference')
        refdir = os.path.join(SAGE_DOC_SRC, 'en', 'reference')
        for document in reference_builder.get_all_documents(refdir):
            # path is the directory above reference dir
            path = os.path.abspath(os.path.join(reference_dir, '..'))
            # the name of the subdocument
            document_name = document.split('/')[1]
            # the sage directory within a subdocument, for example
            # local/share/doc/sage/html/en/reference/algebras/sage
            sage_directory = os.path.join(path, document, 'sage')
            # Walk through all of the files in the sage_directory
            for dirpath, dirnames, filenames in os.walk(sage_directory):
                # a string like reference/algebras/sage/algebras
                short_path = dirpath[len(path) + 1:]
                # a string like sage/algebras
                shorter_path = os.path.join(*short_path.split(os.sep)[2:])
                # make the shorter path directory
                try:
                    os.makedirs(os.path.join(reference_dir, shorter_path))
                except OSError:
                    pass
                for filename in filenames:
                    if not filename.endswith('html'):
                        continue
                    # the name of the html file we are going to create
                    redirect_filename = os.path.join(reference_dir, shorter_path, filename)
                    # the number of levels up we need to use in the relative url
                    levels_up = len(shorter_path.split(os.sep))
                    # the relative url that we will redirect to
                    redirect_url = "/".join(['..'] * levels_up
                                            + [document_name, shorter_path, filename])
                    # write the html file which performs the redirect
                    # (a stray debugging print of redirect_filename was removed here)
                    with open(redirect_filename, 'w') as f:
                        f.write(html_template % redirect_url)

    def clean(self):
        """
        When cleaning, also remove the copies :meth:`html` made in the
        parent directory, leaving unrelated files there untouched.
        """
        html_output_dir = self._output_dir('html')
        parent_dir = os.path.realpath(os.path.join(html_output_dir, '..'))
        for filename in os.listdir(html_output_dir):
            parent_filename = os.path.join(parent_dir, filename)
            if not os.path.exists(parent_filename):
                continue
            if os.path.isdir(parent_filename):
                shutil.rmtree(parent_filename, ignore_errors=True)
            else:
                os.unlink(parent_filename)
        DocBuilder.clean(self)
class ReferenceBuilder(AllBuilder):
    """
    Builds the reference manual: the bibliography first, then every other
    sub-document in parallel, then the top-level document (merging the
    indexes and inventories).
    """
    def __init__(self, name, lang='en'):
        """
        Record the manual's name and language; ``name`` may carry a
        language prefix such as 'en/reference'.
        """
        AllBuilder.__init__(self)
        doc = name.split(os.path.sep)
        if doc[0] in LANGUAGES:
            lang = doc[0]
            doc.pop(0)
        self.name = doc[0]
        self.lang = lang

    def _output_dir(self, type, lang='en'):
        """
        Return (creating it if necessary) the output directory for the
        given output ``type`` and language.
        """
        d = os.path.join(SAGE_DOC, type, lang, self.name)
        sage_makedirs(d)
        return d

    def _refdir(self, lang):
        # Source directory of the reference manual for this language.
        return os.path.join(SAGE_DOC_SRC, lang, self.name)

    def _build_bibliography(self, lang, format, *args, **kwds):
        """
        Build only the 'reference/references' document (the bibliography).
        """
        refdir = self._refdir(lang)
        references = [
            (doc, lang, format, kwds) + args for doc in self.get_all_documents(refdir)
            if doc == 'reference/references'
        ]
        build_many(build_ref_doc, references)

    def _build_everything_except_bibliography(self, lang, format, *args, **kwds):
        """
        Build every reference sub-document except the bibliography.
        """
        refdir = self._refdir(lang)
        non_references = [
            (doc, lang, format, kwds) + args for doc in self.get_all_documents(refdir)
            if doc != 'reference/references'
        ]
        build_many(build_ref_doc, non_references)

    def _wrapper(self, format, *args, **kwds):
        """
        Build the reference manual in the given ``format`` for every
        available language; for 'pdf' also generate a master index.html
        page that links to all of the produced PDF files.
        """
        for lang in LANGUAGES:
            refdir = self._refdir(lang)
            if not os.path.exists(refdir):
                continue
            logger.info('Building bibliography')
            self._build_bibliography(lang, format, *args, **kwds)
            logger.info('Bibliography finished, building dependent manuals')
            self._build_everything_except_bibliography(lang, format, *args, **kwds)
            # The html refman must be build at the end to ensure correct
            # merging of indexes and inventories.
            # Sphinx is run here in the current process (not in a
            # subprocess) and the IntersphinxCache gets populated to be
            # used for the second pass of the reference manual and for
            # the other documents.
            getattr(DocBuilder(self.name, lang), format)(*args, **kwds)
            # PDF: we need to build master index file which lists all
            # of the PDF file. So we create an html file, based on
            # the file index.html from the "website" target.
            if format == 'pdf':
                # First build the website page. This only takes a few seconds.
                getattr(get_builder('website'), 'html')()
                website_dir = os.path.join(SAGE_DOC, 'html', 'en', 'website')
                output_dir = self._output_dir(format, lang)
                # Install in output_dir a symlink to the directory containing static files.
                try:
                    os.symlink(os.path.join(website_dir, '_static'), os.path.join(output_dir, '_static'))
                except FileExistsError:
                    pass
                # Now modify website's index.html page and write it to
                # output_dir/index.html: swap 'Documentation' for
                # 'Reference' and repoint the sagemath.org link.
                with open(os.path.join(website_dir, 'index.html')) as f:
                    html = f.read().replace('Documentation', 'Reference')
                html_output_dir = os.path.dirname(website_dir)
                html = html.replace('http://www.sagemath.org',
                                    os.path.join(html_output_dir, 'index.html'))
                # Offsets of the preamble and the tail of the website page,
                # kept and wrapped around the generated content below.
                html_end_preamble = html.find('<h1>Sage Reference')
                html_bottom = html.rfind('</table>') + len('</table>')
                # Convert the reference manual's index.rst into html:
                # strip rst anchors, rewrite rst links as <a> tags, and
                # point :doc: links at the generated PDF files.
                with open(os.path.join(SAGE_DOC_SRC, lang, 'reference', 'index.rst')) as f:
                    rst = f.read()
                rst = rst.replace('.. _reference-manual:\n\n', '')
                rst = re.sub(r'\\\\', r'\\', rst)
                rst = re.sub(r'`([^`\n]*)`__.*\n\n__ (.*)',
                             r'<a href="\2">\1</a>.', rst)
                rst = re.sub(r'`([^<\n]*)\s+<(.*)>`_',
                             r'<a href="\2">\1</a>', rst)
                rst = re.sub(r':doc:`([^<]*?)\s+<(.*)/index>`',
                             r'<a href="\2/\2.pdf">\1 <img src="_static/pdf.png"/></a>', rst)
                # Body: everything between the last bullet marker and the
                # 'User Interfaces' section, reflowed into <p> paragraphs.
                start = rst.rfind('*\n') + 1
                end = rst.find('\nUser Interfaces')
                rst_body = rst[start:end]
                rst_body = rst_body.replace('\n\n', '</p>\n<p>')
                # Table of contents: from 'User Interfaces' up to (but not
                # including) 'Indices and Tables'.
                start = rst.find('\nUser Interfaces')
                end = rst.find('Indices and Tables')
                rst_toc = rst[start:end]
                # change * to <li>; change rst headers to html headers
                rst_toc = re.sub(r'\*(.*)\n',
                                 r'<li>\1</li>\n', rst_toc)
                rst_toc = re.sub(r'\n([A-Z][a-zA-Z, ]*)\n[=]*\n',
                                 r'</ul>\n\n\n<h2>\1</h2>\n\n<ul>\n', rst_toc)
                rst_toc = re.sub(r'\n([A-Z][a-zA-Z, ]*)\n[-]*\n',
                                 r'</ul>\n\n\n<h3>\1</h3>\n\n<ul>\n', rst_toc)
                # now write the file.
                with open(os.path.join(output_dir, 'index.html'), 'w') as new_index:
                    new_index.write(html[:html_end_preamble])
                    new_index.write('<h1> Sage Reference Manual (PDF version)'+ '</h1>')
                    new_index.write(rst_body)
                    new_index.write('<ul>')
                    new_index.write(rst_toc)
                    new_index.write('</ul>\n\n')
                    new_index.write(html[html_bottom:])
                logger.warning('''
PDF documents have been created in subdirectories of
%s
Alternatively, you can open
%s
for a webpage listing all of the documents.''' % (output_dir,
                                                  os.path.join(output_dir,
                                                               'index.html')))

    def get_all_documents(self, refdir):
        """
        Return the list of sub-documents of the reference manual found in
        ``refdir`` (directories containing an index.rst).  Directories with
        more files sort first — presumably so the largest builds start
        earliest in the parallel build (TODO: confirm intent).
        """
        documents = []
        for doc in os.listdir(refdir):
            directory = os.path.join(refdir, doc)
            if os.path.exists(os.path.join(directory, 'index.rst')):
                n = len(os.listdir(directory))
                documents.append((-n, os.path.join(self.name, doc)))
        return [ doc[1] for doc in sorted(documents) ]
class ReferenceSubBuilder(DocBuilder):
    """
    Builds one sub-document of the reference manual, (re)generating the
    auto-generated reST files for the modules it includes.

    All reST files are regenerated from scratch when the inherited- or
    underscored-members options change; otherwise only new and updated
    modules are rewritten.
    """
    def __init__(self, *args, **kwds):
        DocBuilder.__init__(self, *args, **kwds)
        self._wrap_builder_helpers()

    def _wrap_builder_helpers(self):
        """Route every output-format method through :meth:`_wrapper`."""
        from functools import partial, update_wrapper
        for attr in dir(self):
            if hasattr(getattr(self, attr), 'is_output_format'):
                f = partial(self._wrapper, attr)
                f.is_output_format = True
                update_wrapper(f, getattr(self, attr))
                setattr(self, attr, f)

    def _wrapper(self, build_type, *args, **kwds):
        """
        Refresh the auto-generated reST sources as needed, then dispatch
        to the real ``build_type`` method on :class:`DocBuilder`.
        """
        # Force regeneration of all modules if the inherited
        # and/or underscored members options have changed.
        cache = self.get_cache()
        force = False
        try:
            if (cache['option_inherited'] != self._options.inherited or
                    cache['option_underscore'] != self._options.underscore):
                logger.info("Detected change(s) in inherited and/or underscored members option(s).")
                force = True
        except KeyError:
            force = True
        cache['option_inherited'] = self._options.inherited
        cache['option_underscore'] = self._options.underscore
        self.save_cache()
        # After "sage -clone", refresh the reST file mtimes in
        # environment.pickle.
        if self._options.update_mtimes:
            logger.info("Checking for reST file mtimes to update...")
            self.update_mtimes()
        if force:
            # Write reST files for all modules from scratch.
            self.clean_auto()
            for module_name in self.get_all_included_modules():
                self.write_auto_rest_file(module_name)
        else:
            # Write reST files for new and updated modules.
            for module_name in self.get_new_and_updated_modules():
                self.write_auto_rest_file(module_name)
        # Copy over the custom reST files from _sage
        _sage = os.path.join(self.dir, '_sage')
        if os.path.exists(_sage):
            logger.info("Copying over custom reST files from %s ...", _sage)
            shutil.copytree(_sage, os.path.join(self.dir, 'sage'))
        getattr(DocBuilder, build_type)(self, *args, **kwds)

    def cache_filename(self):
        """Return the file in which the reference cache is pickled."""
        return os.path.join(self._doctrees_dir(), 'reference.pickle')

    @cached_method
    def get_cache(self):
        """
        Retrieve the reference cache, returning an empty dict when the
        file is missing or unreadable.  Cached, so all callers mutate the
        same dict.
        """
        filename = self.cache_filename()
        if not os.path.exists(filename):
            return {}
        with open(self.cache_filename(), 'rb') as file:
            try:
                cache = pickle.load(file)
            except Exception:
                logger.debug("Cache file '%s' is corrupted; ignoring it..." % filename)
                cache = {}
            else:
                logger.debug("Loaded the reference cache: %s", filename)
        return cache

    def save_cache(self):
        """Pickle the shared reference cache back to disk."""
        cache = self.get_cache()
        with open(self.cache_filename(), 'wb') as file:
            pickle.dump(cache, file)
        logger.debug("Saved the reference cache: %s", self.cache_filename())

    def get_sphinx_environment(self):
        """
        Return the Sphinx environment unpickled from this document's
        doctrees directory, or None if it cannot be opened.
        """
        from sphinx.environment import BuildEnvironment

        class FakeConfig(object):
            values = tuple()

        class FakeApp(object):
            def __init__(self, dir):
                self.srcdir = dir
                self.config = FakeConfig()

        env_pickle = os.path.join(self._doctrees_dir(), 'environment.pickle')
        try:
            with open(env_pickle, 'rb') as f:
                env = pickle.load(f)
                env.app = FakeApp(self.dir)
                env.config.values = env.app.config.values
                logger.debug("Opened Sphinx environment: %s", env_pickle)
                return env
        except IOError as err:
            logger.debug("Failed to open Sphinx environment: %s", err)

    def update_mtimes(self):
        """
        Set the recorded mtime of every reST file in the Sphinx
        environment to "now", then save the environment.
        """
        env = self.get_sphinx_environment()
        if env is not None:
            for doc in env.all_docs:
                env.all_docs[doc] = time.time()
            logger.info("Updated %d reST file mtimes", len(env.all_docs))
            # This is the only place we need to save (as opposed to
            # load) Sphinx's pickle, so we do it right here.
            env_pickle = os.path.join(self._doctrees_dir(),
                                      'environment.pickle')
            # env.topickle(env_pickle), which first writes a temporary
            # file. We adapt sphinx.environment's
            env.set_warnfunc(None)
            del env.config.values
            with open(env_pickle, 'wb') as picklefile:
                # Bug fix: take a snapshot with list(); deleting keys while
                # iterating vars(...).items() raises RuntimeError in Python 3.
                for key, val in list(vars(env.config).items()):
                    if key.startswith('_') or isinstance(val, (types.ModuleType,
                                                               types.FunctionType,
                                                               type)):
                        del env.config[key]
                pickle.dump(env, picklefile, pickle.HIGHEST_PROTOCOL)
            logger.debug("Saved Sphinx environment: %s", env_pickle)

    def get_modified_modules(self):
        """
        Yield the names of the sage modules Sphinx reports as modified
        since the last build.
        """
        env = self.get_sphinx_environment()
        if env is None:
            logger.debug("Stopped check for modified modules.")
            return
        try:
            added, changed, removed = env.get_outdated_files(False)
            logger.info("Sphinx found %d modified modules", len(changed))
        except OSError as err:
            logger.debug("Sphinx failed to determine modified modules: %s", err)
            return
        for name in changed:
            # Only consider files under sage/, skipping autogenerated files
            # like doc/en/reference/misc: see #14199.
            if name.startswith('sage' + os.sep):
                yield name

    def print_modified_modules(self):
        """Print the modules modified since the last build."""
        for module_name in self.get_modified_modules():
            print(module_name)

    def get_all_rst_files(self, exclude_sage=True):
        """
        Yield all reST files under ``self.dir``, skipping the
        auto-generated 'sage' subtree by default.
        """
        for directory, subdirs, files in os.walk(self.dir):
            if exclude_sage and directory.startswith(os.path.join(self.dir, 'sage')):
                continue
            for filename in files:
                if not filename.endswith('.rst'):
                    continue
                yield os.path.join(directory, filename)

    def get_all_included_modules(self):
        """Yield every module referenced from the hand-written reST files."""
        for filename in self.get_all_rst_files():
            for module in self.get_modules(filename):
                yield module

    def get_new_and_updated_modules(self):
        """
        Yield modules that are new or whose source file is newer than the
        recorded reST mtime; also prune reST files of removed modules.
        """
        env = self.get_sphinx_environment()
        if env is None:
            all_docs = {}
        else:
            all_docs = env.all_docs
        new_modules = []
        updated_modules = []
        old_modules = []
        for module_name in self.get_all_included_modules():
            docname = module_name.replace('.', os.path.sep)
            if docname not in all_docs:
                new_modules.append(module_name)
                yield module_name
                continue
            # get the modification timestamp of the reST doc for the module
            mtime = all_docs[docname]
            try:
                with warnings.catch_warnings():
                    # primarily intended to ignore deprecation warnings
                    warnings.simplefilter("ignore")
                    __import__(module_name)
            except ImportError as err:
                logger.error("Warning: Could not import %s %s", module_name, err)
                raise
            module_filename = sys.modules[module_name].__file__
            if (module_filename.endswith('.pyc') or module_filename.endswith('.pyo')):
                source_filename = module_filename[:-1]
                if (os.path.exists(source_filename)):
                    module_filename = source_filename
            newtime = os.path.getmtime(module_filename)
            if newtime > mtime:
                updated_modules.append(module_name)
                yield module_name
            else:  # keep good old module
                old_modules.append(module_name)
        removed_modules = []
        for docname in all_docs.keys():
            if docname.startswith('sage' + os.path.sep):
                module_name = docname.replace(os.path.sep, '.')
                if not (module_name in old_modules or module_name in updated_modules):
                    try:
                        os.remove(os.path.join(self.dir, docname) + '.rst')
                    except OSError:  # already removed
                        pass
                    # Bug fix: this used to call .format() on a %-style
                    # string, logging a literal "%s" instead of the name.
                    logger.debug("Deleted auto-generated reST file %s", docname)
                    removed_modules.append(module_name)
        logger.info("Found %d new modules", len(new_modules))
        logger.info("Found %d updated modules", len(updated_modules))
        logger.info("Removed %d obsolete modules", len(removed_modules))

    def print_new_and_updated_modules(self):
        """Print the modules that are new or updated since the last build."""
        for module_name in self.get_new_and_updated_modules():
            print(module_name)

    def get_modules(self, filename):
        """
        Yield the module names of the autogenerated reST files that the
        reST file ``filename`` includes.
        """
        # Create the regular expression used to detect an autogenerated file
        auto_re = re.compile(r'^\s*(..\/)*(sage(nb)?\/[\w\/]*)\s*$')
        # Read the lines
        with open(filename) as f:
            lines = f.readlines()
        for line in lines:
            match = auto_re.match(line)
            if match:
                yield match.group(2).replace(os.path.sep, '.')

    def get_module_docstring_title(self, module_name):
        """
        Return the first title line of the module's docstring, or a
        placeholder if the module cannot be imported.
        """
        # Try to import the module
        try:
            __import__(module_name)
        except ImportError as err:
            logger.error("Warning: Could not import %s %s", module_name, err)
            return "UNABLE TO IMPORT MODULE"
        module = sys.modules[module_name]
        # Get the docstring
        doc = module.__doc__
        if doc is None:
            doc = module.doc if hasattr(module, 'doc') else ""
        # Extract the title
        i = doc.find('\n')
        if i != -1:
            return doc[i + 1:].lstrip().splitlines()[0]
        else:
            return doc

    def auto_rest_filename(self, module_name):
        """Return the name of the reST file associated with ``module_name``."""
        return self.dir + os.path.sep + module_name.replace('.', os.path.sep) + '.rst'

    def write_auto_rest_file(self, module_name):
        """Write the auto-generated reST file for ``module_name``."""
        if not module_name.startswith('sage'):
            return
        filename = self.auto_rest_filename(module_name)
        sage_makedirs(os.path.dirname(filename))
        title = self.get_module_docstring_title(module_name)
        if title == '':
            logger.error("Warning: Missing title for %s", module_name)
            title = "MISSING TITLE"
        with open(filename, 'w') as outfile:
            # Don't doctest the autogenerated file.
            outfile.write(".. nodoctest\n\n")
            outfile.write(".. _%s:\n\n" % (module_name.replace(".__init__", "")))
            outfile.write(title + '\n')
            outfile.write('=' * len(title) + "\n\n")
            outfile.write('.. This file has been autogenerated.\n\n')
            inherited = ':inherited-members:' if self._options.inherited else ''
            automodule = '''
.. automodule:: %s
   :members:
   :undoc-members:
   :show-inheritance:
   %s

'''
            outfile.write(automodule % (module_name, inherited))

    def clean_auto(self):
        """Remove all autogenerated reST files."""
        try:
            shutil.rmtree(os.path.join(self.dir, 'sage'))
            logger.debug("Deleted auto-generated reST files in: %s",
                         os.path.join(self.dir, 'sage'))
        except OSError:
            pass

    def get_unincluded_modules(self):
        """Yield sage modules on disk that no reST file includes."""
        included_modules = {}
        for module_name in self.get_all_included_modules():
            included_modules[module_name] = True
        base_path = os.path.join(SAGE_SRC, 'sage')
        for directory, subdirs, files in os.walk(base_path):
            for filename in files:
                if not (filename.endswith('.py') or
                        filename.endswith('.pyx')):
                    continue
                path = os.path.join(directory, filename)
                # Derive the dotted module name from the file path.
                module_name = path[len(base_path):].replace(os.path.sep, '.')
                module_name = 'sage' + module_name
                module_name = module_name[:-4] if module_name.endswith('pyx') else module_name[:-3]
                # Packages and the all.py files are never documented directly.
                if module_name.endswith('__init__') or module_name.endswith('all'):
                    continue
                if module_name not in included_modules:
                    yield module_name

    def print_unincluded_modules(self):
        """Print the modules not included in any reST file."""
        for module_name in self.get_unincluded_modules():
            print(module_name)

    def print_included_modules(self):
        """Print the modules included in the reST sources."""
        for module_name in self.get_all_included_modules():
            print(module_name)
class SingleFileBuilder(DocBuilder):
    """
    Builds the documentation for a single user-specified Python file.
    """
    def __init__(self, path):
        """
        Create a fresh Sphinx source directory for the module at ``path``
        (writing its conf.py and index.rst), and symlink the file into it.
        """
        self.lang = 'en'
        self.name = 'single_file'
        path = os.path.abspath(path)
        # Create docbuild and relevant subdirectories, e.g.,
        # the static and templates directories in the output directory.
        # By default, this is DOT_SAGE/docbuild/MODULE_NAME, but can
        # also be specified at the command line.
        module_name = os.path.splitext(os.path.basename(path))[0]
        latex_name = module_name.replace('_', r'\\_')
        if self._options.output_dir:
            base_dir = os.path.join(self._options.output_dir, module_name)
            if os.path.exists(base_dir):
                logger.warning('Warning: Directory %s exists. It is safer to build in a new directory.' % base_dir)
        else:
            base_dir = os.path.join(DOT_SAGE, 'docbuild', module_name)
            try:
                shutil.rmtree(base_dir)
            except OSError:
                pass
        self.dir = os.path.join(base_dir, 'source')
        sage_makedirs(os.path.join(self.dir, "static"))
        sage_makedirs(os.path.join(self.dir, "templates"))
        # Write self.dir/conf.py
        conf = r"""# This file is automatically generated by {}, do not edit!

import sys, os
sys.path.append({!r})

from sage.docs.conf import *
html_static_path = [] + html_common_static_path

project = 'Documentation for {}'
release = 'unknown'
name = {!r}
html_title = project
html_short_title = project
htmlhelp_basename = name

extensions.remove('multidocs') # see #29651
extensions.remove('inventory_builder')

latex_domain_indices = False
latex_documents = [
  ('index', name + '.tex', 'Documentation for {}',
   'unknown', 'manual'),
]
""".format(__file__, self.dir, module_name, module_name, latex_name)
        if 'SAGE_DOC_UNDERSCORE' in os.environ:
            conf += r"""
def setup(app):
    app.connect('autodoc-skip-member', skip_member)
"""
        with open(os.path.join(self.dir, 'conf.py'), 'w') as conffile:
            conffile.write(conf)
        # Write self.dir/index.rst
        title = 'Docs for file %s' % path
        heading = title + "\n" + ("=" * len(title))
        index = r"""{}

.. This file is automatically generated by {}, do not edit!

.. automodule:: {}
   :members:
   :undoc-members:
   :show-inheritance:

""".format(heading, __file__, module_name)
        with open(os.path.join(self.dir, 'index.rst'), 'w') as indexfile:
            indexfile.write(index)
        # Create link from original file to self.dir. Note that we
        # append self.dir to sys.path in conf.py. This is reasonably
        # safe (but not perfect), since we just created self.dir.
        try:
            os.symlink(path, os.path.join(self.dir, os.path.basename(path)))
        except OSError:
            pass

    def _output_dir(self, type):
        """
        Return (creating it if necessary) the output directory for the
        given output ``type``.
        """
        base_dir = os.path.split(self.dir)[0]
        d = os.path.join(base_dir, "output", type)
        sage_makedirs(d)
        return d

    def _doctrees_dir(self):
        """Return the directory in which the doctrees are saved."""
        return self._output_dir('doctrees')
def get_builder(name):
    """
    Return the appropriate builder object for the document ``name``.

    Exits the process with an error message when ``name`` is not a
    recognized document.
    """
    if name == 'all':
        return AllBuilder()
    if name.endswith('reference'):
        return ReferenceBuilder(name)
    if 'reference' in name and os.path.exists(os.path.join(SAGE_DOC_SRC, 'en', name)):
        return ReferenceSubBuilder(name)
    if name.endswith('website'):
        return WebsiteBuilder(name)
    if name.startswith('file='):
        path = name[len('file='):]
        if path.endswith('.sage') or path.endswith('.pyx'):
            raise NotImplementedError('Building documentation for a single file only works for Python files.')
        return SingleFileBuilder(path)
    if name in get_documents() or name in AllBuilder().get_all_documents():
        return DocBuilder(name)
    print("'%s' is not a recognized document. Type 'sage --docbuild -D' for a list"%name)
    print("of documents, or 'sage --docbuild --help' for more help.")
    sys.exit(1)
def format_columns(lst, align='<', cols=None, indent=4, pad=3, width=80):
    """
    Return the strings in ``lst`` laid out in aligned columns.

    Each cell is as wide as the longest string plus ``pad``; ``cols``
    defaults to as many cells as fit in ``width`` after ``indent``.
    """
    # Can we generalize this (efficiently) to other / multiple inputs
    # and generators?
    size = max(len(item) for item in lst) + pad
    if cols is None:
        cols = (width - indent) // size
    margin = " " * indent
    pieces = [margin]
    for i, item in enumerate(lst):
        if i and i % cols == 0:
            pieces.append("\n" + margin)
        pieces.append("{0:{1}{2}}".format(item, align, size))
    pieces.append("\n")
    return "".join(pieces)
def help_usage(s="", compact=False):
    """
    Append the docbuild usage line to ``s``; ``compact`` suppresses the
    trailing newline.
    """
    s += "sage --docbuild [OPTIONS] DOCUMENT (FORMAT | COMMAND)"
    return s if compact else s + "\n"
def help_description(s="", compact=False):
    """
    Append a short description of the docbuild command to ``s``;
    ``compact`` suppresses the trailing newline.
    """
    parts = (
        "Build or return information about Sage documentation.\n\n",
        " DOCUMENT name of the document to build\n",
        " FORMAT document output format\n",
        " COMMAND document-specific command\n\n",
        "Note that DOCUMENT may have the form 'file=/path/to/FILE',\n",
        "which builds the documentation for the specified file.\n\n",
        "A DOCUMENT and either a FORMAT or a COMMAND are required,\n",
        "unless a list of one or more of these is requested.",
    )
    s += "".join(parts)
    if not compact:
        s += "\n"
    return s
def help_examples(s=""):
    """Append some example docbuild invocations to ``s``."""
    examples = (
        "Examples:\n",
        " sage --docbuild -FDC all\n",
        " sage --docbuild constructions pdf\n",
        " sage --docbuild reference html -jv3\n",
        " sage --docbuild --mathjax tutorial html\n",
        " sage --docbuild reference print_unincluded_modules\n",
        " sage --docbuild developer -j html --sphinx-opts -q,-aE --verbose 2",
    )
    return s + "".join(examples)
def get_documents():
    """
    Return a list of all buildable documents, with the 'en/' prefix
    stripped from English documents.
    """
    all_documents = AllBuilder().get_all_documents()
    return [doc[3:] if doc.startswith('en/') else doc for doc in all_documents]
def help_documents(s=""):
    """Append a list of all available DOCUMENTs to ``s``."""
    docs = get_documents()
    pieces = [
        "DOCUMENTs:\n",
        format_columns(docs + ['all (!)']),
        "(!) Builds everything.\n\n",
    ]
    if 'reference' in docs:
        pieces.append("Other valid document names take the form 'reference/DIR', where\n")
        pieces.append("DIR is a subdirectory of SAGE_DOC_SRC/en/reference/.\n")
        pieces.append("This builds just the specified part of the reference manual.\n")
    pieces.append("DOCUMENT may also have the form 'file=/path/to/FILE', which builds\n")
    pieces.append("the documentation for the specified file.\n")
    return s + "".join(pieces)
def get_formats():
    """
    Return a list of output formats, with 'html' and 'pdf' moved to the
    front of the list.
    """
    formats = DocBuilder('en/tutorial')._output_formats()
    formats.remove('html')
    return ['html', 'pdf'] + formats
def help_formats(s=""):
    """Append a list of all available output FORMATs to ``s``."""
    return s + "FORMATs:\n" + format_columns(get_formats())
def help_commands(name='all', s=""):
    """
    Append the COMMANDs available for the document ``name`` (or all
    documents, for 'all') to ``s``.
    """
    # To do: Generate the lists dynamically, using class attributes,
    # as with the Builders above.
    command_dict = { 'reference' : [
        'print_included_modules', 'print_modified_modules (*)',
        'print_unincluded_modules', 'print_new_and_updated_modules (*)',
        ] }
    for doc, commands in command_dict.items():
        if name in ('all', doc):
            s += "COMMANDs for the DOCUMENT '" + doc + "':\n"
            s += format_columns(commands)
            s += "(*) Since the last build.\n"
    return s
def help_message_long(option, opt_str, value, parser):
    """Print an extended help message and exit (callback for --help-all)."""
    sections = (help_usage, help_description, help_documents,
                help_formats, help_commands, parser.format_option_help,
                help_examples)
    for section in sections:
        print(section())
    sys.exit(0)
def help_message_short(option=None, opt_str=None, value=None, parser=None,
                       error=False):
    """
    Print the short help message at most once; when called for an error,
    the message is suppressed if a list was already printed.
    """
    if hasattr(parser.values, 'printed_help'):
        return
    suppressed = error is True and hasattr(parser.values, 'printed_list')
    if not suppressed:
        parser.print_help()
    setattr(parser.values, 'printed_help', 1)
def help_wrapper(option, opt_str, value, parser):
    """
    Optparse callback that prints lists of documents, formats or
    commands, then records that a list was printed.
    """
    dest = option.dest
    if dest == 'commands':
        print(help_commands(value), end="")
    if dest == 'documents':
        print(help_documents(), end="")
    if dest == 'formats':
        print(help_formats(), end="")
    if dest == 'all_documents':
        if value in ('en/reference', 'reference'):
            builder = ReferenceBuilder('reference')
            refdir = os.path.join(os.environ['SAGE_DOC_SRC'], 'en', builder.name)
            docs = builder.get_all_documents(refdir)
            # Put the bibliography first, because it needs to be built first:
            docs.remove('reference/references')
            docs.insert(0, 'reference/references')
        elif value == 'all':
            docs = get_documents()
            # Put the reference manual first, because it needs to be built first:
            docs.remove('reference')
            docs.insert(0, 'reference')
        else:
            raise ValueError("argument for --all-documents must be either 'all'"
                             " or 'reference'")
        for doc in docs:
            print(doc)
    setattr(parser.values, 'printed_list', 1)
class IndentedHelpFormatter2(optparse.IndentedHelpFormatter, object):
    """
    An optparse help formatter that preserves line breaks in the
    description and renders the 'options' heading in upper case.
    """

    def format_description(self, description):
        """Wrap each line of the description separately, keeping newlines."""
        if not description:
            return ""
        wrapped = [self._format_text(chunk) for chunk in description.split('\n')]
        return "\n".join(wrapped) + "\n"

    def format_heading(self, heading):
        """Upcase the standard 'Options' heading; pass others through."""
        if heading.lower() == 'options':
            heading = "OPTIONS"
        return super(IndentedHelpFormatter2, self).format_heading(heading)
def setup_parser():
    """
    Build and return the optparse parser for the docbuild command line,
    with a "Standard" and an "Advanced" option group.
    """
    # Documentation: http://docs.python.org/library/optparse.html
    parser = optparse.OptionParser(add_help_option=False,
                                   usage=help_usage(compact=True),
                                   formatter=IndentedHelpFormatter2(),
                                   description=help_description(compact=True))
    # Standard options. Note: We use explicit option.dest names
    # to avoid ambiguity.
    standard = optparse.OptionGroup(parser, "Standard")
    standard.add_option("-h", "--help",
                        action="callback", callback=help_message_short,
                        help="show a help message and exit")
    standard.add_option("-H", "--help-all",
                        action="callback", callback=help_message_long,
                        help="show an extended help message and exit")
    standard.add_option("-D", "--documents", dest="documents",
                        action="callback", callback=help_wrapper,
                        help="list all available DOCUMENTs")
    standard.add_option("-F", "--formats", dest="formats",
                        action="callback", callback=help_wrapper,
                        help="list all output FORMATs")
    standard.add_option("-C", "--commands", dest="commands",
                        type="string", metavar="DOC",
                        action="callback", callback=help_wrapper,
                        help="list all COMMANDs for DOCUMENT DOC; use 'all' to list all")
    standard.add_option("-i", "--inherited", dest="inherited",
                        default=False, action="store_true",
                        help="include inherited members in reference manual; may be slow, may fail for PDF output")
    standard.add_option("-u", "--underscore", dest="underscore",
                        default=False, action="store_true",
                        help="include variables prefixed with '_' in reference manual; may be slow, may fail for PDF output")
    standard.add_option("-j", "--mathjax", "--jsmath", dest="mathjax",
                        action="store_true",
                        help="render math using MathJax; FORMATs: html, json, pickle, web")
    standard.add_option("--no-plot", dest="no_plot",
                        action="store_true",
                        help="do not include graphics auto-generated using the '.. plot' markup")
    # Note the inversion: passing the flag stores False into skip_tests.
    standard.add_option("--include-tests-blocks", dest="skip_tests", default=True,
                        action="store_false",
                        help="include TESTS blocks in the reference manual")
    standard.add_option("--no-pdf-links", dest="no_pdf_links",
                        action="store_true",
                        help="do not include PDF links in DOCUMENT 'website'; FORMATs: html, json, pickle, web")
    standard.add_option("--warn-links", dest="warn_links",
                        default=False, action="store_true",
                        help="issue a warning whenever a link is not properly resolved; equivalent to '--sphinx-opts -n' (sphinx option: nitpicky)")
    standard.add_option("--check-nested", dest="check_nested",
                        action="store_true",
                        help="check picklability of nested classes in DOCUMENT 'reference'")
    standard.add_option("-N", "--no-colors", dest="color", default=True,
                        action="store_false",
                        help="do not color output; does not affect children")
    standard.add_option("-q", "--quiet", dest="verbose",
                        action="store_const", const=0,
                        help="work quietly; same as --verbose=0")
    standard.add_option("-v", "--verbose", dest="verbose",
                        type="int", default=1, metavar="LEVEL",
                        action="store",
                        help="report progress at LEVEL=0 (quiet), 1 (normal), 2 (info), or 3 (debug); does not affect children")
    standard.add_option("-o", "--output", dest="output_dir", default=None,
                        metavar="DIR", action="store",
                        help="if DOCUMENT is a single file ('file=...'), write output to this directory")
    parser.add_option_group(standard)
    # Advanced options.
    advanced = optparse.OptionGroup(parser, "Advanced",
                                    "Use these options with care.")
    advanced.add_option("-S", "--sphinx-opts", dest="sphinx_opts",
                        type="string", metavar="OPTS",
                        action="store",
                        help="pass comma-separated OPTS to sphinx-build")
    advanced.add_option("-U", "--update-mtimes", dest="update_mtimes",
                        default=False, action="store_true",
                        help="before building reference manual, update modification times for auto-generated reST files")
    advanced.add_option("-k", "--keep-going", dest="keep_going",
                        default=False, action="store_true",
                        help="Do not abort on errors but continue as much as possible after an error")
    advanced.add_option("--all-documents", dest="all_documents",
                        type="str", metavar="ARG",
                        action="callback", callback=help_wrapper,
                        help="if ARG is 'reference', list all subdocuments"
                             " of en/reference. If ARG is 'all', list all main"
                             " documents")
    parser.add_option_group(advanced)
    return parser
def setup_logger(verbose=1, color=True):
    """
    Set up the module-wide logger.

    ``verbose`` -- 0 (errors only), 1 (warnings), 2 (info) or 3 (debug);
    ``color`` -- colorize output (disabled automatically when stdout is
    not a terminal).
    """
    # Set up colors. Adapted from sphinx.cmdline.
    import sphinx.util.console as c
    if not color or not sys.stdout.isatty() or not c.color_terminal():
        c.nocolor()
    # Available colors: black, darkgray, (dark)red, dark(green),
    # brown, yellow, (dark)blue, purple, fuchsia, turquoise, teal,
    # lightgray, white. Available styles: reset, bold, faint,
    # standout, underline, blink.
    # Set up log record formats.
    format_std = "%(message)s"
    formatter = logging.Formatter(format_std)
    # Debug format: "%(module)s #%(lineno)s %(funcName)s() %(message)s",
    # with each field colorized individually.
    # NOTE(review): the fields list was truncated in the source (an
    # unterminated string literal — a syntax error); restored here.
    fields = ['%(module)s', '#%(lineno)s', '%(funcName)s()', '%(message)s']
    colors = ['darkblue', 'darkred', 'brown', 'reset']
    styles = ['reset', 'reset', 'reset', 'reset']
    # Join with single spaces; the old loop's `i != len(fields)` test was
    # always true, which also appended a stray trailing space.
    format_debug = " ".join(
        c.colorize(style, c.colorize(color_name, field))
        for field, color_name, style in zip(fields, colors, styles))
    # Note: There's also Handler.setLevel(). The argument is the
    # levels. See the documentation for details.
    if verbose == 0:
        logger.setLevel(logging.ERROR)
    if verbose == 1:
        logger.setLevel(logging.WARNING)
    if verbose == 2:
        logger.setLevel(logging.INFO)
    if verbose == 3:
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter(format_debug)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
class IntersphinxCache:
    """
    Monkey-patch ``sphinx.ext.intersphinx.fetch_inventory`` with a
    memoizing wrapper so each inventory is fetched at most once.
    """

    def __init__(self):
        # Maps (uri, inv) -> fetched inventory.
        self.inventories = {}
        self.real_fetch_inventory = sphinx.ext.intersphinx.fetch_inventory
        sphinx.ext.intersphinx.fetch_inventory = self.fetch_inventory

    def fetch_inventory(self, app, uri, inv):
        """Return the inventory for ``(uri, inv)``, fetching it only once."""
        key = (uri, inv)
        if key not in self.inventories:
            self.inventories[key] = self.real_fetch_inventory(app, uri, inv)
        return self.inventories[key]
def main():
    """Entry point: parse the command line, configure logging, then build
    the requested document in the requested output format."""
    # Parse the command-line.
    parser = setup_parser()
    options, args = parser.parse_args()
    DocBuilder._options = options
    # Get the name and type (target format) of the document we are
    # trying to build.
    try:
        name, type = args
    except ValueError:
        help_message_short(parser=parser, error=True)
        sys.exit(1)
    # Set up module-wide logging.
    setup_logger(options.verbose, options.color)
    def excepthook(*exc_info):
        # Log uncaught exceptions through the module logger instead of
        # printing a bare traceback.
        logger.error('Error building the documentation.', exc_info=exc_info)
        if INCREMENTAL_BUILD:
            logger.error('''
    Note: incremental documentation builds sometimes cause spurious
    error messages. To be certain that these are real errors, run
    "make doc-clean" first and try again.''')
    sys.excepthook = excepthook
    # Process selected options.
    #
    # MathJax: this check usually has no practical effect, since
    # SAGE_DOC_MATHJAX is set to "True" by the script sage-env.
    # To disable MathJax, set SAGE_DOC_MATHJAX to "no" or "False".
    if options.mathjax or (os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'no'
                           and os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'False'):
        os.environ['SAGE_DOC_MATHJAX'] = 'True'
    if options.check_nested:
        os.environ['SAGE_CHECK_NESTED'] = 'True'
    if options.underscore:
        os.environ['SAGE_DOC_UNDERSCORE'] = "True"
    # These module-level options feed the sphinx invocations made later.
    global ALLSPHINXOPTS, WEBSITESPHINXOPTS, ABORT_ON_ERROR
    if options.sphinx_opts:
        ALLSPHINXOPTS += options.sphinx_opts.replace(',', ' ') + " "
    if options.no_pdf_links:
        WEBSITESPHINXOPTS = " -A hide_pdf_links=1 "
    if options.warn_links:
        ALLSPHINXOPTS += "-n "
    if options.no_plot:
        os.environ['SAGE_SKIP_PLOT_DIRECTIVE'] = 'yes'
    if options.skip_tests:
        os.environ['SAGE_SKIP_TESTS_BLOCKS'] = 'True'
    ABORT_ON_ERROR = not options.keep_going
    # Delete empty directories. This is needed in particular for empty
    # directories due to "git checkout" which never deletes empty
    # directories it leaves behind. See Trac #20010.
    for dirpath, dirnames, filenames in os.walk(SAGE_DOC_SRC, topdown=False):
        if not dirnames + filenames:
            logger.warning('Deleting empty directory {0}'.format(dirpath))
            os.rmdir(dirpath)
    # Set up Intersphinx cache
    C = IntersphinxCache()
    # Resolve the builder (e.g. get_builder('reference').html) and run it.
    builder = getattr(get_builder(name), type)
    builder()
| true | true |
79005bff925180ba5ac9843c76f6cf706d5dd96b | 1,798 | py | Python | python-3.4.4.amd64/Lib/site-packages/numpy/distutils/__config__.py | CSnap/photogate | 208272ef39f4e86f40d431da2ca523e21701f789 | [
"CC0-1.0"
] | null | null | null | python-3.4.4.amd64/Lib/site-packages/numpy/distutils/__config__.py | CSnap/photogate | 208272ef39f4e86f40d431da2ca523e21701f789 | [
"CC0-1.0"
] | null | null | null | python-3.4.4.amd64/Lib/site-packages/numpy/distutils/__config__.py | CSnap/photogate | 208272ef39f4e86f40d431da2ca523e21701f789 | [
"CC0-1.0"
] | null | null | null | # This file is generated by C:\projects\numpy-wheels\numpy\setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
# system_info results recorded when this numpy wheel was built. An empty
# dict means the corresponding library was not found on the build machine.
blas_opt_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('ATLAS_INFO', '"\\"None\\""')], 'libraries': ['numpy-atlas']}
lapack_mkl_info={}
lapack_opt_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'f77', 'libraries': ['numpy-atlas', 'numpy-atlas'], 'define_macros': [('ATLAS_INFO', '"\\"None\\""')]}
atlas_3_10_blas_threads_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('ATLAS_INFO', '"\\"None\\""')], 'libraries': ['numpy-atlas']}
atlas_3_10_threads_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'f77', 'libraries': ['numpy-atlas', 'numpy-atlas'], 'define_macros': [('ATLAS_INFO', '"\\"None\\""')]}
openblas_info={}
blas_mkl_info={}
openblas_lapack_info={}
def get_info(name):
    """Return the recorded build-time info dict for *name*.

    Falls back to the ``name + "_info"`` global and finally to an empty
    dict when neither name is recorded.
    """
    table = globals()
    if name in table:
        return table[name]
    return table.get(name + "_info", {})
def show():
    """Pretty-print every recorded build-time info dict in this module."""
    for name, info in globals().items():
        # Only public, plain-dict globals are build-info tables.
        if name.startswith("_") or type(info) is not dict:
            continue
        print(name + ":")
        if not info:
            print(" NOT AVAILABLE")
        for key, value in info.items():
            text = str(value)
            # Elide very long source lists to keep the output readable.
            if key == "sources" and len(text) > 200:
                text = text[:60] + " ...\n... " + text[-60:]
            print(" %s = %s" % (key, text))
| 62 | 263 | 0.609566 |
__all__ = ["get_info","show"]
blas_opt_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('ATLAS_INFO', '"\\"None\\""')], 'libraries': ['numpy-atlas']}
lapack_mkl_info={}
lapack_opt_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'f77', 'libraries': ['numpy-atlas', 'numpy-atlas'], 'define_macros': [('ATLAS_INFO', '"\\"None\\""')]}
atlas_3_10_blas_threads_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('ATLAS_INFO', '"\\"None\\""')], 'libraries': ['numpy-atlas']}
atlas_3_10_threads_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'f77', 'libraries': ['numpy-atlas', 'numpy-atlas'], 'define_macros': [('ATLAS_INFO', '"\\"None\\""')]}
openblas_info={}
blas_mkl_info={}
openblas_lapack_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
| true | true |
79005cde81aed1e83afdf30198966a648bc86fc4 | 1,590 | py | Python | binho/accessory.py | binhollc/binho-python-package | 21ea73b03755c4205e93525ed24b4becba23c93e | [
"BSD-3-Clause"
] | 4 | 2021-03-11T12:40:27.000Z | 2022-02-01T10:08:20.000Z | binho/accessory.py | binhollc/binho-python-package | 21ea73b03755c4205e93525ed24b4becba23c93e | [
"BSD-3-Clause"
] | 1 | 2021-11-26T10:20:18.000Z | 2021-11-30T14:25:56.000Z | binho/accessory.py | binhollc/binho-python-package | 21ea73b03755c4205e93525ed24b4becba23c93e | [
"BSD-3-Clause"
] | 2 | 2021-02-28T00:39:39.000Z | 2021-04-05T12:45:56.000Z | #
from binho.errors import DriverCapabilityError
class binhoAccessory:
    """Base class for objects representing accessory boards."""

    # Subclasses may set this to override the reported accessory name.
    # When left as None, the subclass's own name is used instead.
    ACCESSORY_NAME = None

    @classmethod
    def get_name(cls):
        """Return the accessory's display name."""
        return cls.ACCESSORY_NAME if cls.ACCESSORY_NAME else cls.__name__

    @classmethod
    def available_accessories(cls):
        """Return the names of all registered accessory subclasses."""
        names = []
        for accessory in cls.__subclasses__():
            names.append(accessory.get_name())
        return names

    @classmethod
    def from_name(cls, name, board, *args, **kwargs):
        """Instantiate the accessory subclass matching *name* (case-insensitive).

        Raises DriverCapabilityError when no subclass matches.
        """
        wanted = name.lower()
        for candidate in cls.__subclasses__():
            if candidate.get_name().lower() == wanted:
                return candidate(board, *args, **kwargs)
        raise DriverCapabilityError("No known driver for accessory '{}'.".format(name))
| 34.565217 | 89 | 0.657233 |
from binho.errors import DriverCapabilityError
class binhoAccessory:
ACCESSORY_NAME = None
@classmethod
def get_name(cls):
# If we have an overridden accessory name, return it.
if cls.ACCESSORY_NAME:
return cls.ACCESSORY_NAME
# Otherwise, return the given class's name.
return cls.__name__
@classmethod
def available_accessories(cls):
return [accessory.get_name() for accessory in cls.__subclasses__()]
@classmethod
def from_name(cls, name, board, *args, **kwargs):
target_name = name.lower()
for subclass in cls.__subclasses__():
subclass_name = subclass.get_name()
# If this class matches our target name, this is the class we're looking for!
if target_name == subclass_name.lower():
return subclass(board, *args, **kwargs)
raise DriverCapabilityError("No known driver for accessory '{}'.".format(name))
| true | true |
79005dbc1f01642e60729d143c901421213469ca | 2,732 | py | Python | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifyLaunchTemplateDefaultVersionRequest.py | hetw/aliyun-openapi-python-sdk | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | [
"Apache-2.0"
] | 1 | 2020-12-05T03:03:46.000Z | 2020-12-05T03:03:46.000Z | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifyLaunchTemplateDefaultVersionRequest.py | hetw/aliyun-openapi-python-sdk | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifyLaunchTemplateDefaultVersionRequest.py | hetw/aliyun-openapi-python-sdk | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class ModifyLaunchTemplateDefaultVersionRequest(RpcRequest):
    """RPC request wrapper for the ECS ``ModifyLaunchTemplateDefaultVersion``
    API (version 2014-05-26). Accessors below mirror the API's query
    parameters one-to-one."""

    def __init__(self):
        RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'ModifyLaunchTemplateDefaultVersion')
        self.set_method('POST')
        # Populate endpoint data when the SDK core supports regional endpoints.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_LaunchTemplateName(self):
        params = self.get_query_params()
        return params.get('LaunchTemplateName')

    def set_LaunchTemplateName(self, LaunchTemplateName):
        self.add_query_param('LaunchTemplateName', LaunchTemplateName)

    def get_ResourceOwnerId(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_LaunchTemplateId(self):
        params = self.get_query_params()
        return params.get('LaunchTemplateId')

    def set_LaunchTemplateId(self, LaunchTemplateId):
        self.add_query_param('LaunchTemplateId', LaunchTemplateId)

    def get_ResourceOwnerAccount(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        params = self.get_query_params()
        return params.get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_DefaultVersionNumber(self):
        params = self.get_query_params()
        return params.get('DefaultVersionNumber')

    def set_DefaultVersionNumber(self, DefaultVersionNumber):
        self.add_query_param('DefaultVersionNumber', DefaultVersionNumber)
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class ModifyLaunchTemplateDefaultVersionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'ModifyLaunchTemplateDefaultVersion')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_LaunchTemplateName(self):
return self.get_query_params().get('LaunchTemplateName')
def set_LaunchTemplateName(self,LaunchTemplateName):
self.add_query_param('LaunchTemplateName',LaunchTemplateName)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_LaunchTemplateId(self):
return self.get_query_params().get('LaunchTemplateId')
def set_LaunchTemplateId(self,LaunchTemplateId):
self.add_query_param('LaunchTemplateId',LaunchTemplateId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_DefaultVersionNumber(self):
return self.get_query_params().get('DefaultVersionNumber')
def set_DefaultVersionNumber(self,DefaultVersionNumber):
self.add_query_param('DefaultVersionNumber',DefaultVersionNumber) | true | true |
79005df4d98ef9f7d776dc6ceae1987c1fcee100 | 8,010 | py | Python | tools/check_target_files_vintf.py | FabriSC/Alioth-SC | bbe9723401b351c2a34b09a30978373d456d20a2 | [
"MIT"
] | null | null | null | tools/check_target_files_vintf.py | FabriSC/Alioth-SC | bbe9723401b351c2a34b09a30978373d456d20a2 | [
"MIT"
] | null | null | null | tools/check_target_files_vintf.py | FabriSC/Alioth-SC | bbe9723401b351c2a34b09a30978373d456d20a2 | [
"MIT"
] | 1 | 2022-03-30T04:47:35.000Z | 2022-03-30T04:47:35.000Z | #!/usr/bin/env python
#
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Check VINTF compatibility from a target files package.
Usage: check_target_files_vintf target_files
target_files can be a ZIP file or an extracted target files directory.
"""
import logging
import subprocess
import sys
import os
import zipfile
import common
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
# Keys are paths that VINTF searches. Must keep in sync with libvintf's search
# paths (VintfObject.cpp).
# These paths are stored in different directories in target files package, so
# we have to search for the correct path and tell checkvintf to remap them.
# Look for TARGET_COPY_OUT_* variables in board_config.mk for possible paths for
# each partition.
DIR_SEARCH_PATHS = {
    '/system': ('SYSTEM',),
    '/vendor': ('VENDOR', 'SYSTEM/vendor'),
    '/product': ('PRODUCT', 'SYSTEM/product'),
    '/odm': ('ODM', 'VENDOR/odm', 'SYSTEM/vendor/odm'),
    '/system_ext': ('SYSTEM_EXT', 'SYSTEM/system_ext'),
    # vendor_dlkm does not have VINTF files.
}

UNZIP_PATTERN = ['META/*', '*/build.prop']


def GetDirmap(input_tmp):
  """Map each device path (e.g. /vendor) to the directory inside the
  extracted target files package that holds its contents.

  Candidates are tried in DIR_SEARCH_PATHS order; raises ValueError when
  none of them exists for some device path.
  """
  dirmap = {}
  for device_path, candidates in DIR_SEARCH_PATHS.items():
    for rel_path in candidates:
      candidate_dir = os.path.join(input_tmp, rel_path)
      if os.path.isdir(candidate_dir):
        dirmap[device_path] = candidate_dir
        break
    else:
      # The inner loop found no existing directory for this device path.
      raise ValueError("Can't determine path for device path " + device_path +
                       ". Searched the following:" +
                       ("\n".join(candidates)))
  return dirmap
def GetArgsForSkus(info_dict):
  """Build checkvintf --property argument lists, one per ODM/vendor SKU
  combination declared in the build info dict."""
  def _skus(manifest_key, empty_key):
    # One SKU per whitespace-separated token; an empty SKU is appended
    # when explicitly requested or when no SKUs are declared at all.
    skus = info_dict.get(manifest_key, '').strip().split()
    if info_dict.get(empty_key, '') == "true" or not skus:
      skus.append('')
    return skus

  odm_skus = _skus('vintf_odm_manifest_skus', 'vintf_include_empty_odm_sku')
  vendor_skus = _skus('vintf_vendor_manifest_skus',
                      'vintf_include_empty_vendor_sku')

  args = []
  for odm_sku in odm_skus:
    for vendor_sku in vendor_skus:
      args.append(['--property', 'ro.boot.product.hardware.sku=' + odm_sku,
                   '--property', 'ro.boot.product.vendor.sku=' + vendor_sku])
  return args
def GetArgsForShippingApiLevel(info_dict):
  """Return --property args pinning ro.product.first_api_level, or an
  empty list when the vendor build props do not define it."""
  api_level = info_dict['vendor.build.prop'].GetProp(
      'ro.product.first_api_level')
  if api_level:
    return ['--property', 'ro.product.first_api_level=' + api_level]
  logger.warning('Cannot determine ro.product.first_api_level')
  return []
def GetArgsForKernel(input_tmp):
  """Return checkvintf --kernel args built from the META kernel files,
  or an empty list when the build did not emit kernel metadata."""
  version_path = os.path.join(input_tmp, 'META/kernel_version.txt')
  config_path = os.path.join(input_tmp, 'META/kernel_configs.txt')
  if os.path.isfile(version_path) and os.path.isfile(config_path):
    with open(version_path) as f:
      version = f.read().strip()
    return ['--kernel', '{}:{}'.format(version, config_path)]
  logger.info('Skipping kernel config checks because '
              'PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS is not set')
  return []
def CheckVintfFromExtractedTargetFiles(input_tmp, info_dict=None):
  """
  Checks VINTF metadata of an extracted target files directory.
  Args:
    input_tmp: path to the directory that contains the extracted target files
        archive.
    info_dict: The build-time info dict. If None, it will be loaded from
        input_tmp.
  Returns:
    True if VINTF check is skipped or compatible, False if incompatible. Raise
    a RuntimeError if any error occurs.
  """
  if info_dict is None:
    info_dict = common.LoadInfoDict(input_tmp)
  # VINTF enforcement is opt-in per product; skip silently otherwise.
  if info_dict.get('vintf_enforce') != 'true':
    logger.warning('PRODUCT_ENFORCE_VINTF_MANIFEST is not set, skipping checks')
    return True
  dirmap = GetDirmap(input_tmp)
  args_for_skus = GetArgsForSkus(info_dict)
  shipping_api_level_args = GetArgsForShippingApiLevel(info_dict)
  kernel_args = GetArgsForKernel(input_tmp)
  # Base checkvintf invocation shared by every SKU combination.
  common_command = [
    'checkvintf',
    '--check-compat',
  ]
  # Remap device paths (e.g. /vendor) to their extracted locations.
  for device_path, real_path in dirmap.items():
    common_command += ['--dirmap', '{}:{}'.format(device_path, real_path)]
  common_command += kernel_args
  common_command += shipping_api_level_args
  success = True
  # Run checkvintf once per SKU combination; all runs must be compatible.
  for sku_args in args_for_skus:
    command = common_command + sku_args
    proc = common.Run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode == 0:
      logger.info("Command `%s` returns 'compatible'", ' '.join(command))
    elif out.strip() == "INCOMPATIBLE":
      # A nonzero exit with this marker is a clean incompatibility result.
      logger.info("Command `%s` returns 'incompatible'", ' '.join(command))
      success = False
    else:
      # Anything else is an execution failure, not an incompatibility.
      raise common.ExternalError(
          "Failed to run command '{}' (exit code {}):\nstdout:{}\nstderr:{}"
          .format(' '.join(command), proc.returncode, out, err))
    logger.info("stdout: %s", out)
    logger.info("stderr: %s", err)
  return success
def GetVintfFileList():
  """
  Returns the list of target-files glob patterns that cover every VINTF
  metadata file checkvintf may need to read.
  """
  def _expand(device_file):
    # A trailing slash denotes a directory; match everything inside it.
    if device_file[-1] == '/':
      device_file += '*'
    for device_path, rel_paths in DIR_SEARCH_PATHS.items():
      if device_file.startswith(device_path):
        suffix = device_file[len(device_path):]
        return [rel_path + suffix for rel_path in rel_paths]
    raise RuntimeError('Unrecognized path from checkvintf --dump-file-list: ' +
                       device_file)

  out = common.RunAndCheckOutput(['checkvintf', '--dump-file-list'])
  patterns = []
  for line in out.strip().split('\n'):
    if line:
      patterns.extend(_expand(line))
  return patterns
def CheckVintfFromTargetFiles(inp, info_dict=None):
  """
  Checks VINTF metadata of a target files zip by extracting only the
  files checkvintf needs and delegating to the directory-based check.
  Args:
    inp: path to the target files archive.
    info_dict: The build-time info dict. If None, it will be loaded from inp.
  Returns:
    True if VINTF check is skipped or compatible, False if incompatible. Raise
    a RuntimeError if any error occurs.
  """
  extracted = common.UnzipTemp(inp, GetVintfFileList() + UNZIP_PATTERN)
  return CheckVintfFromExtractedTargetFiles(extracted, info_dict)
def CheckVintf(inp, info_dict=None):
  """
  Checks VINTF metadata, dispatching to the zip or extracted-directory
  variant depending on what *inp* is.
  Args:
    inp: path to the (possibly extracted) target files archive.
    info_dict: The build-time info dict. If None, it will be loaded from inp.
  Returns:
    True if VINTF check is skipped or compatible, False if incompatible. Raise
    a RuntimeError if any error occurs.
  """
  if os.path.isdir(inp):
    logger.info('Checking VINTF compatibility extracted target files...')
    return CheckVintfFromExtractedTargetFiles(inp, info_dict)
  elif zipfile.is_zipfile(inp):
    logger.info('Checking VINTF compatibility target files...')
    return CheckVintfFromTargetFiles(inp, info_dict)
  else:
    raise ValueError('{} is not a valid directory or zip file'.format(inp))
def main(argv):
  """Command-line entry point: expects a single target-files argument and
  exits non-zero on usage errors or an incompatible VINTF check."""
  args = common.ParseOptions(argv, __doc__)
  if len(args) != 1:
    common.Usage(__doc__)
    sys.exit(1)
  common.InitLogging()
  compatible = CheckVintf(args[0])
  if not compatible:
    sys.exit(1)
sys.exit(1)
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
except common.ExternalError:
logger.exception('\n ERROR:\n')
sys.exit(1)
finally:
common.Cleanup()
| 32.962963 | 80 | 0.705243 |
import logging
import subprocess
import sys
import os
import zipfile
import common
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
# paths (VintfObject.cpp).
# These paths are stored in different directories in target files package, so
# we have to search for the correct path and tell checkvintf to remap them.
# Look for TARGET_COPY_OUT_* variables in board_config.mk for possible paths for
# each partition.
DIR_SEARCH_PATHS = {
'/system': ('SYSTEM',),
'/vendor': ('VENDOR', 'SYSTEM/vendor'),
'/product': ('PRODUCT', 'SYSTEM/product'),
'/odm': ('ODM', 'VENDOR/odm', 'SYSTEM/vendor/odm'),
'/system_ext': ('SYSTEM_EXT', 'SYSTEM/system_ext'),
# vendor_dlkm does not have VINTF files.
}
UNZIP_PATTERN = ['META/*', '*/build.prop']
def GetDirmap(input_tmp):
dirmap = {}
for device_path, target_files_rel_paths in DIR_SEARCH_PATHS.items():
for target_files_rel_path in target_files_rel_paths:
target_files_path = os.path.join(input_tmp, target_files_rel_path)
if os.path.isdir(target_files_path):
dirmap[device_path] = target_files_path
break
if device_path not in dirmap:
raise ValueError("Can't determine path for device path " + device_path +
". Searched the following:" +
("\n".join(target_files_rel_paths)))
return dirmap
def GetArgsForSkus(info_dict):
odm_skus = info_dict.get('vintf_odm_manifest_skus', '').strip().split()
if info_dict.get('vintf_include_empty_odm_sku', '') == "true" or not odm_skus:
odm_skus += ['']
vendor_skus = info_dict.get('vintf_vendor_manifest_skus', '').strip().split()
if info_dict.get('vintf_include_empty_vendor_sku', '') == "true" or \
not vendor_skus:
vendor_skus += ['']
return [['--property', 'ro.boot.product.hardware.sku=' + odm_sku,
'--property', 'ro.boot.product.vendor.sku=' + vendor_sku]
for odm_sku in odm_skus for vendor_sku in vendor_skus]
def GetArgsForShippingApiLevel(info_dict):
shipping_api_level = info_dict['vendor.build.prop'].GetProp(
'ro.product.first_api_level')
if not shipping_api_level:
logger.warning('Cannot determine ro.product.first_api_level')
return []
return ['--property', 'ro.product.first_api_level=' + shipping_api_level]
def GetArgsForKernel(input_tmp):
version_path = os.path.join(input_tmp, 'META/kernel_version.txt')
config_path = os.path.join(input_tmp, 'META/kernel_configs.txt')
if not os.path.isfile(version_path) or not os.path.isfile(config_path):
logger.info('Skipping kernel config checks because '
'PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS is not set')
return []
with open(version_path) as f:
version = f.read().strip()
return ['--kernel', '{}:{}'.format(version, config_path)]
def CheckVintfFromExtractedTargetFiles(input_tmp, info_dict=None):
if info_dict is None:
info_dict = common.LoadInfoDict(input_tmp)
if info_dict.get('vintf_enforce') != 'true':
logger.warning('PRODUCT_ENFORCE_VINTF_MANIFEST is not set, skipping checks')
return True
dirmap = GetDirmap(input_tmp)
args_for_skus = GetArgsForSkus(info_dict)
shipping_api_level_args = GetArgsForShippingApiLevel(info_dict)
kernel_args = GetArgsForKernel(input_tmp)
common_command = [
'checkvintf',
'--check-compat',
]
for device_path, real_path in dirmap.items():
common_command += ['--dirmap', '{}:{}'.format(device_path, real_path)]
common_command += kernel_args
common_command += shipping_api_level_args
success = True
for sku_args in args_for_skus:
command = common_command + sku_args
proc = common.Run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode == 0:
logger.info("Command `%s` returns 'compatible'", ' '.join(command))
elif out.strip() == "INCOMPATIBLE":
logger.info("Command `%s` returns 'incompatible'", ' '.join(command))
success = False
else:
raise common.ExternalError(
"Failed to run command '{}' (exit code {}):\nstdout:{}\nstderr:{}"
.format(' '.join(command), proc.returncode, out, err))
logger.info("stdout: %s", out)
logger.info("stderr: %s", err)
return success
def GetVintfFileList():
def PathToPatterns(path):
if path[-1] == '/':
path += '*'
for device_path, target_files_rel_paths in DIR_SEARCH_PATHS.items():
if path.startswith(device_path):
suffix = path[len(device_path):]
return [rel_path + suffix for rel_path in target_files_rel_paths]
raise RuntimeError('Unrecognized path from checkvintf --dump-file-list: ' +
path)
out = common.RunAndCheckOutput(['checkvintf', '--dump-file-list'])
paths = out.strip().split('\n')
paths = sum((PathToPatterns(path) for path in paths if path), [])
return paths
def CheckVintfFromTargetFiles(inp, info_dict=None):
input_tmp = common.UnzipTemp(inp, GetVintfFileList() + UNZIP_PATTERN)
return CheckVintfFromExtractedTargetFiles(input_tmp, info_dict)
def CheckVintf(inp, info_dict=None):
if os.path.isdir(inp):
logger.info('Checking VINTF compatibility extracted target files...')
return CheckVintfFromExtractedTargetFiles(inp, info_dict)
if zipfile.is_zipfile(inp):
logger.info('Checking VINTF compatibility target files...')
return CheckVintfFromTargetFiles(inp, info_dict)
raise ValueError('{} is not a valid directory or zip file'.format(inp))
def main(argv):
args = common.ParseOptions(argv, __doc__)
if len(args) != 1:
common.Usage(__doc__)
sys.exit(1)
common.InitLogging()
if not CheckVintf(args[0]):
sys.exit(1)
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
except common.ExternalError:
logger.exception('\n ERROR:\n')
sys.exit(1)
finally:
common.Cleanup()
| true | true |
7900604d6ef7fafe622d669f90edbb214fa1197d | 3,305 | py | Python | hphp/tools/gdb/lookup.py | donsbot/hhvm | ac98a590f75c569e1249b6c1145c7512c7bd240e | [
"PHP-3.01",
"Zend-2.0"
] | 9,491 | 2015-01-01T00:30:28.000Z | 2022-03-31T20:22:11.000Z | hphp/tools/gdb/lookup.py | donsbot/hhvm | ac98a590f75c569e1249b6c1145c7512c7bd240e | [
"PHP-3.01",
"Zend-2.0"
] | 4,796 | 2015-01-01T00:26:31.000Z | 2022-03-31T01:09:05.000Z | hphp/tools/gdb/lookup.py | donsbot/hhvm | ac98a590f75c569e1249b6c1145c7512c7bd240e | [
"PHP-3.01",
"Zend-2.0"
] | 2,126 | 2015-01-01T11:13:29.000Z | 2022-03-28T19:58:15.000Z | #!/usr/bin/env python3
"""
GDB commands for various HHVM ID lookups.
"""
from compatibility import *
import gdb
import idx
import unit
from gdbutils import *
#------------------------------------------------------------------------------
# `lookup' command.
class LookupCommand(gdb.Command):
    """Lookup HHVM runtime objects by ID."""
    def __init__(self):
        # Registered as a prefix command (final True argument): concrete
        # lookups are implemented by subcommands such as `lookup func'.
        super(LookupCommand, self).__init__('lookup', gdb.COMMAND_DATA,
                                            gdb.COMPLETE_NONE, True)
LookupCommand()
#------------------------------------------------------------------------------
# `lookup func' command.
def lookup_func(val):
    """Return the Func* for a FuncId value.

    Tries the non-LowPtr layout first (m_id indexes the global
    s_funcVec); builds without that symbol fall back to treating m_id as
    a LowPtr to the Func itself.
    """
    funcid = val.cast(T('HPHP::FuncId'))
    try:
        # Not LowPtr: m_id indexes the global vector of Funcs.
        result = idx.atomic_low_ptr_vector_at(V('HPHP::Func::s_funcVec'),
                                              funcid['m_id'])
        return result.cast(T('HPHP::Func').pointer())
    except gdb.MemoryError:
        # A bad memory read is a real error; don't mask it via the fallback.
        raise
    except Exception:
        # LowPtr build: m_id is itself a (low) pointer to the Func.
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed here.
        return rawptr(funcid['m_id']).cast(T('HPHP::Func').pointer())
def lookup_func_from_fp(fp):
    # An ActRec-like frame stores the callee's FuncId in m_funcId.
    return lookup_func(fp['m_funcId'])
class LookupFuncCommand(gdb.Command):
    """Lookup a Func* by its FuncId."""

    def __init__(self):
        super(LookupFuncCommand, self).__init__('lookup func',
                                                gdb.COMMAND_DATA)

    @errorwrap
    def invoke(self, args, from_tty):
        argv = parse_argv(args)
        # Exactly one argument (the FuncId) is expected.
        if len(argv) == 1:
            gdbprint(lookup_func(argv[0]))
        else:
            print('Usage: lookup func <FuncId>')
class LookupFuncFunction(gdb.Function):
    """Lookup a Func* by its FuncId (usable as $lookup_func(id) in gdb)."""
    def __init__(self):
        super(LookupFuncFunction, self).__init__('lookup_func')
    @errorwrap
    def invoke(self, val):
        return lookup_func(val)
# Register the command and convenience function with gdb on module load.
LookupFuncCommand()
LookupFuncFunction()
#------------------------------------------------------------------------------
# `lookup litstr' command.
def lookup_litstr(litstr_id, u):
    """Return the litstr entry for `litstr_id', reading the global
    LitstrTable for ids below kUnitLitstrOffset and the unit-local table
    (UnitExtended::m_namedInfo) otherwise."""
    uloff = V('HPHP::kUnitLitstrOffset')
    if litstr_id < uloff:
        # Global id: look in the process-wide litstr table instead of `u'.
        u = V('HPHP::LitstrTable::s_litstrTable')
    else:
        # Unit-local ids are offset by kUnitLitstrOffset.
        litstr_id -= uloff
        u = u.cast(T('HPHP::UnitExtended').pointer())
    val = u['m_namedInfo']
    # get the base type
    ty = val.type.fields()[0].type
    val = val.address.cast(ty.pointer()).dereference()
    elm = idx.compact_vector_at(val, litstr_id)
    # The element is stored as the vector's template parameter type.
    ty = elm.type.template_argument(0)
    return elm.address.cast(ty.pointer()).dereference()
class LookupLitstrCommand(gdb.Command):
    """Lookup a litstr StringData* by its Id and Unit*.
    If no Unit is given, the current unit (set by `unit') is used.
    """

    def __init__(self):
        super(LookupLitstrCommand, self).__init__('lookup litstr',
                                                  gdb.COMMAND_DATA)

    @errorwrap
    def invoke(self, args, from_tty):
        argv = parse_argv(args)
        if len(argv) == 0 or len(argv) > 2:
            print('Usage: lookup litstr <Id> [Unit*]')
            return
        if len(argv) == 1:
            # No Unit* supplied: fall back to the current unit, and bail
            # out (previously this fell through and indexed argv[1]).
            if unit.curunit is None:
                print('lookup litstr: No Unit set or provided.')
                return
            u = unit.curunit
        else:
            u = argv[0].cast(T('HPHP::Unit').pointer())
        # The Id is always the last argument.
        litstr_id = argv[-1].cast(T('HPHP::Id'))
        litstr = lookup_litstr(litstr_id, u)
        gdbprint(litstr)
LookupLitstrCommand()
| 25.037879 | 89 | 0.556732 |
from compatibility import *
import gdb
import idx
import unit
from gdbutils import *
class LookupCommand(gdb.Command):
def __init__(self):
super(LookupCommand, self).__init__('lookup', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE, True)
LookupCommand()
#------------------------------------------------------------------------------
# `lookup func' command.
def lookup_func(val):
funcid = val.cast(T('HPHP::FuncId'))
try:
result = idx.atomic_low_ptr_vector_at(V('HPHP::Func::s_funcVec'), funcid['m_id'])
return result.cast(T('HPHP::Func').pointer())
except gdb.MemoryError:
raise
except:
return rawptr(funcid['m_id']).cast(T('HPHP::Func').pointer())
def lookup_func_from_fp(fp):
return lookup_func(fp['m_funcId'])
class LookupFuncCommand(gdb.Command):
def __init__(self):
super(LookupFuncCommand, self).__init__('lookup func',
gdb.COMMAND_DATA)
@errorwrap
def invoke(self, args, from_tty):
argv = parse_argv(args)
if len(argv) != 1:
print('Usage: lookup func <FuncId>')
return
gdbprint(lookup_func(argv[0]))
class LookupFuncFunction(gdb.Function):
def __init__(self):
super(LookupFuncFunction, self).__init__('lookup_func')
@errorwrap
def invoke(self, val):
return lookup_func(val)
LookupFuncCommand()
LookupFuncFunction()
def lookup_litstr(litstr_id, u):
uloff = V('HPHP::kUnitLitstrOffset')
if litstr_id < uloff:
u = V('HPHP::LitstrTable::s_litstrTable')
else:
litstr_id -= uloff
u = u.cast(T('HPHP::UnitExtended').pointer())
val = u['m_namedInfo']
# get the base type
ty = val.type.fields()[0].type
val = val.address.cast(ty.pointer()).dereference()
elm = idx.compact_vector_at(val, litstr_id)
ty = elm.type.template_argument(0)
return elm.address.cast(ty.pointer()).dereference()
class LookupLitstrCommand(gdb.Command):
def __init__(self):
super(LookupLitstrCommand, self).__init__('lookup litstr',
gdb.COMMAND_DATA)
@errorwrap
def invoke(self, args, from_tty):
argv = parse_argv(args)
if len(argv) == 0 or len(argv) > 2:
print('Usage: lookup litstr <Id> [Unit*]')
return
if len(argv) == 1:
if unit.curunit is None:
print('lookup litstr: No Unit set or provided.')
u = curunit
u = argv[0].cast(T('HPHP::Unit').pointer())
litstr_id = argv[1].cast(T('HPHP::Id'))
litstr = lookup_litstr(litstr_id, u)
gdbprint(litstr)
LookupLitstrCommand()
| true | true |
7900622f7acf4abfec0043a52ff26e93acf0400d | 653 | py | Python | packages/openshift/__init__.py | mhcurlee/openshift-client-python | f8013715d51afcd51c5ab8dd95ee0e2f9f21bb15 | [
"Apache-2.0"
] | null | null | null | packages/openshift/__init__.py | mhcurlee/openshift-client-python | f8013715d51afcd51c5ab8dd95ee0e2f9f21bb15 | [
"Apache-2.0"
] | null | null | null | packages/openshift/__init__.py | mhcurlee/openshift-client-python | f8013715d51afcd51c5ab8dd95ee0e2f9f21bb15 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from .context import *
from .base_verbs import *
from .model import OpenShiftPythonException
from .model import Model, Missing
from .selector import *
from .apiobject import *
from . import naming
from . import status
from . import config
from .ansible import ansible
# Single source for module version
__VERSION__ = '1.0.12'
null = None # Allow scripts to specify null in object definitions
# Allows modules to trigger errors
def error(msg, **kwargs):
raise OpenShiftPythonException(msg, **kwargs)
# Convenience method for accessing the module version
def get_module_version():
return __VERSION__
| 23.321429 | 66 | 0.777948 | from __future__ import absolute_import
from .context import *
from .base_verbs import *
from .model import OpenShiftPythonException
from .model import Model, Missing
from .selector import *
from .apiobject import *
from . import naming
from . import status
from . import config
from .ansible import ansible
__VERSION__ = '1.0.12'
null = None
def error(msg, **kwargs):
raise OpenShiftPythonException(msg, **kwargs)
def get_module_version():
return __VERSION__
| true | true |
7900628bbccf6adf32515d2b3107f004af0d0083 | 3,046 | py | Python | pie_pie_chart.py | mmachenry/pie-pie-chart | d5706c85381b58a3990a20021f6c35c28ee51e0b | [
"MIT"
] | 10 | 2019-03-15T17:10:03.000Z | 2020-03-15T04:44:55.000Z | pie_pie_chart.py | mmachenry/pie-pie-chart | d5706c85381b58a3990a20021f6c35c28ee51e0b | [
"MIT"
] | null | null | null | pie_pie_chart.py | mmachenry/pie-pie-chart | d5706c85381b58a3990a20021f6c35c28ee51e0b | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import hx711
import matplotlib.pyplot as plt
# Read initial calibration and tare weight data then display the plot.
def main():
GPIO.setmode(GPIO.BCM)
hx = hx711.HX711(dout_pin=5, pd_sck_pin=6)
zero_the_scale(hx)
calibrate_scale(hx)
(tare_weight, total_weight) = get_tare_and_full_weight(hx)
plot_reading(hx, tare_weight, total_weight - tare_weight)
# Set scale position to zero. The scale should be empty when this is run.
def zero_the_scale(hx):
err = hx.zero()
if err:
raise ValueError('Tare is unsuccessful.')
zero_reading = hx.get_raw_data_mean()
if zero_reading:
print('Data subtracted by offset: ', zero_reading)
else:
raise ValueError('Invalide zero reading')
# Calibrate the scale with prompts to the user.
def calibrate_scale (hx):
input('Put known weight on the scale and then press Enter')
reading = hx.get_data_mean()
if reading:
print('Mean value from HX711 subtracted by offset:', reading)
user_input = input('Write how many grams it was and press Enter: ')
try:
weight = float(user_input)
print(weight, 'grams')
except ValueError:
print('Expected integer or float and I have got:', user_input)
ratio = reading / weight
hx.set_scale_ratio(ratio)
print('Ratio is set.')
else:
raise ValueError('Cannot calculate mean value.')
# Prompt user and get readings for the tare weight and full pie.
def get_tare_and_full_weight (hx):
input('Put the pie tin on the scale for tare weight and press enter.')
tare_weight = hx.get_weight_mean(20)
print ("Tare weight is ", tare_weight, "g")
input('Put the pie on the scale for a full weight and press enter.')
total_weight = hx.get_weight_mean(20)
print ("Full weight is ", total_weight, "g")
return (tare_weight, total_weight)
# Continually read data from the sensor, update the pie chart, and display.
def plot_reading (hx, tare_weight, full_weight):
while True:
current_weight = hx.get_weight_mean(20)
remaining_weight = max(0,current_weight - tare_weight)
#print ("Current weight is ", current_weight, "g")
labels = ['Remaining', 'Eaten']
sizes = [remaining_weight, max(0,full_weight - remaining_weight)]
colors = ['sandybrown', 'lightgrey']
explode = (0, 0.1)
title_font = { 'color': 'blue', 'weight': 'bold', 'size': 30 }
label_font = { 'color': 'black', 'weight': 'normal', 'size': 20 }
h = plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=180,
textprops=label_font)
plt.title("Pi Day Pie Pie Chart", title_font)
plt.plot()
plt.draw()
plt.pause(1)
plt.clf()
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
print('Happy Pi Day!')
finally:
GPIO.cleanup()
| 33.108696 | 75 | 0.645765 | import RPi.GPIO as GPIO
import hx711
import matplotlib.pyplot as plt
def main():
GPIO.setmode(GPIO.BCM)
hx = hx711.HX711(dout_pin=5, pd_sck_pin=6)
zero_the_scale(hx)
calibrate_scale(hx)
(tare_weight, total_weight) = get_tare_and_full_weight(hx)
plot_reading(hx, tare_weight, total_weight - tare_weight)
def zero_the_scale(hx):
err = hx.zero()
if err:
raise ValueError('Tare is unsuccessful.')
zero_reading = hx.get_raw_data_mean()
if zero_reading:
print('Data subtracted by offset: ', zero_reading)
else:
raise ValueError('Invalide zero reading')
def calibrate_scale (hx):
input('Put known weight on the scale and then press Enter')
reading = hx.get_data_mean()
if reading:
print('Mean value from HX711 subtracted by offset:', reading)
user_input = input('Write how many grams it was and press Enter: ')
try:
weight = float(user_input)
print(weight, 'grams')
except ValueError:
print('Expected integer or float and I have got:', user_input)
ratio = reading / weight
hx.set_scale_ratio(ratio)
print('Ratio is set.')
else:
raise ValueError('Cannot calculate mean value.')
def get_tare_and_full_weight (hx):
input('Put the pie tin on the scale for tare weight and press enter.')
tare_weight = hx.get_weight_mean(20)
print ("Tare weight is ", tare_weight, "g")
input('Put the pie on the scale for a full weight and press enter.')
total_weight = hx.get_weight_mean(20)
print ("Full weight is ", total_weight, "g")
return (tare_weight, total_weight)
def plot_reading (hx, tare_weight, full_weight):
while True:
current_weight = hx.get_weight_mean(20)
remaining_weight = max(0,current_weight - tare_weight)
labels = ['Remaining', 'Eaten']
sizes = [remaining_weight, max(0,full_weight - remaining_weight)]
colors = ['sandybrown', 'lightgrey']
explode = (0, 0.1)
title_font = { 'color': 'blue', 'weight': 'bold', 'size': 30 }
label_font = { 'color': 'black', 'weight': 'normal', 'size': 20 }
h = plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=180,
textprops=label_font)
plt.title("Pi Day Pie Pie Chart", title_font)
plt.plot()
plt.draw()
plt.pause(1)
plt.clf()
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
print('Happy Pi Day!')
finally:
GPIO.cleanup()
| true | true |
790062ffb0c8b5195bd47e619cf2e6c08a137f45 | 973 | py | Python | examples/local/main.py | francisso/rembrain_robotframework | bd54a8c51775da3517760c4060edda8c28f5e75d | [
"MIT"
] | null | null | null | examples/local/main.py | francisso/rembrain_robotframework | bd54a8c51775da3517760c4060edda8c28f5e75d | [
"MIT"
] | null | null | null | examples/local/main.py | francisso/rembrain_robotframework | bd54a8c51775da3517760c4060edda8c28f5e75d | [
"MIT"
] | null | null | null | import os
import sys
from envyaml import EnvYAML
# Adding the repository root to the sys path so exports work properly
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
from examples.common.processes import GUIProcess, ImageCapture, YoloImageProcessor # noqa: E402
from rembrain_robot_framework import RobotDispatcher # noqa: E402
def run_dispatcher():
process_map = {
"gui": GUIProcess,
"image_capture": ImageCapture,
"processor": YoloImageProcessor,
}
config = EnvYAML(os.path.join(os.path.dirname(__file__), "config", "processes_config.yaml"))
processes = {p: {"process_class": process_map[p]} for p in config["processes"]}
robot_dispatcher = RobotDispatcher(config, processes, in_cluster=False)
robot_dispatcher.start_processes()
robot_dispatcher.run(robot_dispatcher.shared_objects["exit_flag"])
robot_dispatcher.stop_logging()
if __name__ == "__main__":
run_dispatcher()
| 31.387097 | 96 | 0.727646 | import os
import sys
from envyaml import EnvYAML
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
from examples.common.processes import GUIProcess, ImageCapture, YoloImageProcessor
from rembrain_robot_framework import RobotDispatcher
def run_dispatcher():
process_map = {
"gui": GUIProcess,
"image_capture": ImageCapture,
"processor": YoloImageProcessor,
}
config = EnvYAML(os.path.join(os.path.dirname(__file__), "config", "processes_config.yaml"))
processes = {p: {"process_class": process_map[p]} for p in config["processes"]}
robot_dispatcher = RobotDispatcher(config, processes, in_cluster=False)
robot_dispatcher.start_processes()
robot_dispatcher.run(robot_dispatcher.shared_objects["exit_flag"])
robot_dispatcher.stop_logging()
if __name__ == "__main__":
run_dispatcher()
| true | true |
7900636809ad5b945d66bebe9c60870118d93c4b | 7,769 | py | Python | vumi/transports/xmpp/xmpp.py | rapidsms/vumi | f15c101b599cc1283c84592e8707b6a929f67cbd | [
"BSD-3-Clause"
] | null | null | null | vumi/transports/xmpp/xmpp.py | rapidsms/vumi | f15c101b599cc1283c84592e8707b6a929f67cbd | [
"BSD-3-Clause"
] | null | null | null | vumi/transports/xmpp/xmpp.py | rapidsms/vumi | f15c101b599cc1283c84592e8707b6a929f67cbd | [
"BSD-3-Clause"
] | 2 | 2018-03-05T18:01:45.000Z | 2019-11-02T19:34:18.000Z | # -*- test-case-name: vumi.transports.xmpp.tests.test_xmpp -*-
# -*- encoding: utf-8 -*-
from twisted.python import log
from twisted.words.protocols.jabber.jid import JID
from twisted.words.xish import domish
from twisted.words.xish.domish import Element as DomishElement
from twisted.internet.task import LoopingCall
from twisted.internet.defer import inlineCallbacks
from wokkel.client import XMPPClient
from wokkel.ping import PingClientProtocol
from wokkel.xmppim import (RosterClientProtocol, MessageProtocol,
PresenceClientProtocol)
from vumi.transports.base import Transport
class TransportRosterClientProtocol(RosterClientProtocol):
def connectionInitialized(self):
# get the roster as soon as the connection's been initialized, this
# allows us to see who's online but more importantly, allows us to see
# who's added us to their roster. This allows us to auto subscribe to
# anyone, automatically adding them to our roster, skips the "user ...
# wants to add you to their roster, allow? yes/no" hoopla.
self.getRoster()
class TransportPresenceClientProtocol(PresenceClientProtocol):
"""
A custom presence protocol to automatically accept any subscription
attempt.
"""
def __init__(self, initialized_callback, *args, **kwargs):
super(TransportPresenceClientProtocol, self).__init__(*args, **kwargs)
self.initialized_callback = initialized_callback
def connectionInitialized(self):
super(TransportPresenceClientProtocol, self).connectionInitialized()
self.initialized_callback()
def subscribeReceived(self, entity):
self.subscribe(entity)
self.subscribed(entity)
def unsubscribeReceived(self, entity):
self.unsubscribe(entity)
self.unsubscribed(entity)
class XMPPTransportProtocol(MessageProtocol, object):
def __init__(self, jid, message_callback, connection_callback,
connection_lost_callback=None,):
super(MessageProtocol, self).__init__()
self.jid = jid
self.message_callback = message_callback
self.connection_callback = connection_callback
self.connection_lost_callback = connection_lost_callback
def reply(self, jid, content):
message = domish.Element((None, "message"))
# intentionally leaving from blank, leaving for XMPP server
# to figure out
message['to'] = jid
message['type'] = 'chat'
message.addUniqueId()
message.addElement((None, 'body'), content=content)
self.xmlstream.send(message)
def onMessage(self, message):
"""Messages sent to the bot will arrive here. Command handling routing
is done in this function."""
if not isinstance(message.body, DomishElement):
return None
text = unicode(message.body).encode('utf-8').strip()
from_addr, _, _ = message['from'].partition('/')
self.message_callback(
to_addr=self.jid.userhost(),
from_addr=from_addr,
content=text,
transport_type='xmpp',
transport_metadata={
'xmpp_id': message.getAttribute('id'),
})
def connectionMade(self):
self.connection_callback()
return super(XMPPTransportProtocol, self).connectionMade()
def connectionLost(self, reason):
if self.connection_lost_callback is not None:
self.connection_lost_callback(reason)
log.msg("XMPP Connection lost.")
super(XMPPTransportProtocol, self).connectionLost(reason)
class XMPPTransport(Transport):
"""XMPP transport.
Configuration parameters:
:type host: str
:param host:
The host of the XMPP server to connect to.
:type port: int
:param port:
The port on the XMPP host to connect to.
:type debug: bool
:param debug:
Whether or not to show all the XMPP traffic. Defaults to False.
:type username: str
:param username:
The XMPP account username
:type password: str
:param password:
The XMPP account password
:type status: str
:param status:
The XMPP status 'away', 'xa', 'chat' or 'dnd'
:type status_message: str
:param status_message:
The natural language status message for this XMPP transport.
:type presence_interval: int
:param presence_interval:
How often (in seconds) to send a presence update to the roster.
:type ping_interval: int
:param ping_interval:
How often (in seconds) to send a keep-alive ping to the XMPP server
to keep the connection alive. Defaults to 60 seconds.
"""
start_message_consumer = False
_xmpp_protocol = XMPPTransportProtocol
_xmpp_client = XMPPClient
def __init__(self, options, config=None):
super(XMPPTransport, self).__init__(options, config=config)
self.ping_call = LoopingCall(self.send_ping)
self.presence_call = LoopingCall(self.send_presence)
def validate_config(self):
self.host = self.config['host']
self.port = int(self.config['port'])
self.debug = self.config.get('debug', False)
self.username = self.config['username']
self.password = self.config['password']
self.status = self.config['status']
self.status_message = self.config.get('status_message', '')
self.ping_interval = self.config.get('ping_interval', 60)
self.presence_interval = self.config.get('presence_interval', 60)
def setup_transport(self):
log.msg("Starting XMPPTransport: %s" % self.transport_name)
self.jid = JID(self.username)
self.xmpp_client = self._xmpp_client(self.jid, self.password,
self.host, self.port)
self.xmpp_client.logTraffic = self.debug
self.xmpp_client.setServiceParent(self)
self.presence = TransportPresenceClientProtocol(self.announce_presence)
self.presence.setHandlerParent(self.xmpp_client)
self.pinger = PingClientProtocol()
self.pinger.setHandlerParent(self.xmpp_client)
self.ping_call.start(self.ping_interval, now=False)
roster = TransportRosterClientProtocol()
roster.setHandlerParent(self.xmpp_client)
self.xmpp_protocol = self._xmpp_protocol(
self.jid, self.publish_message, self.message_consumer.unpause)
self.xmpp_protocol.setHandlerParent(self.xmpp_client)
log.msg("XMPPTransport %s started." % self.transport_name)
def announce_presence(self):
if not self.presence_call.running:
self.presence_call.start(self.presence_interval)
@inlineCallbacks
def send_ping(self):
if self.xmpp_client.xmlstream:
yield self.pinger.ping(self.jid)
def send_presence(self):
if self.xmpp_client.xmlstream:
self.presence.available(statuses={
None: self.status})
def teardown_transport(self):
log.msg("XMPPTransport %s stopped." % self.transport_name)
ping_call = getattr(self, 'ping_call', None)
if ping_call and ping_call.running:
ping_call.stop()
presence_call = getattr(self, 'presence_call', None)
if presence_call and presence_call.running:
presence_call.stop()
def handle_outbound_message(self, message):
recipient = message['to_addr']
text = message['content']
jid = JID(recipient).userhost()
if not self.xmpp_protocol.xmlstream:
log.err("Outbound undeliverable, XMPP not initialized yet.")
return False
else:
self.xmpp_protocol.reply(jid, text)
| 36.474178 | 79 | 0.671644 |
from twisted.python import log
from twisted.words.protocols.jabber.jid import JID
from twisted.words.xish import domish
from twisted.words.xish.domish import Element as DomishElement
from twisted.internet.task import LoopingCall
from twisted.internet.defer import inlineCallbacks
from wokkel.client import XMPPClient
from wokkel.ping import PingClientProtocol
from wokkel.xmppim import (RosterClientProtocol, MessageProtocol,
PresenceClientProtocol)
from vumi.transports.base import Transport
class TransportRosterClientProtocol(RosterClientProtocol):
def connectionInitialized(self):
# allows us to see who's online but more importantly, allows us to see
# anyone, automatically adding them to our roster, skips the "user ...
# wants to add you to their roster, allow? yes/no" hoopla.
self.getRoster()
class TransportPresenceClientProtocol(PresenceClientProtocol):
def __init__(self, initialized_callback, *args, **kwargs):
super(TransportPresenceClientProtocol, self).__init__(*args, **kwargs)
self.initialized_callback = initialized_callback
def connectionInitialized(self):
super(TransportPresenceClientProtocol, self).connectionInitialized()
self.initialized_callback()
def subscribeReceived(self, entity):
self.subscribe(entity)
self.subscribed(entity)
def unsubscribeReceived(self, entity):
self.unsubscribe(entity)
self.unsubscribed(entity)
class XMPPTransportProtocol(MessageProtocol, object):
def __init__(self, jid, message_callback, connection_callback,
connection_lost_callback=None,):
super(MessageProtocol, self).__init__()
self.jid = jid
self.message_callback = message_callback
self.connection_callback = connection_callback
self.connection_lost_callback = connection_lost_callback
def reply(self, jid, content):
message = domish.Element((None, "message"))
# intentionally leaving from blank, leaving for XMPP server
# to figure out
message['to'] = jid
message['type'] = 'chat'
message.addUniqueId()
message.addElement((None, 'body'), content=content)
self.xmlstream.send(message)
def onMessage(self, message):
if not isinstance(message.body, DomishElement):
return None
text = unicode(message.body).encode('utf-8').strip()
from_addr, _, _ = message['from'].partition('/')
self.message_callback(
to_addr=self.jid.userhost(),
from_addr=from_addr,
content=text,
transport_type='xmpp',
transport_metadata={
'xmpp_id': message.getAttribute('id'),
})
def connectionMade(self):
self.connection_callback()
return super(XMPPTransportProtocol, self).connectionMade()
def connectionLost(self, reason):
if self.connection_lost_callback is not None:
self.connection_lost_callback(reason)
log.msg("XMPP Connection lost.")
super(XMPPTransportProtocol, self).connectionLost(reason)
class XMPPTransport(Transport):
start_message_consumer = False
_xmpp_protocol = XMPPTransportProtocol
_xmpp_client = XMPPClient
def __init__(self, options, config=None):
super(XMPPTransport, self).__init__(options, config=config)
self.ping_call = LoopingCall(self.send_ping)
self.presence_call = LoopingCall(self.send_presence)
def validate_config(self):
self.host = self.config['host']
self.port = int(self.config['port'])
self.debug = self.config.get('debug', False)
self.username = self.config['username']
self.password = self.config['password']
self.status = self.config['status']
self.status_message = self.config.get('status_message', '')
self.ping_interval = self.config.get('ping_interval', 60)
self.presence_interval = self.config.get('presence_interval', 60)
def setup_transport(self):
log.msg("Starting XMPPTransport: %s" % self.transport_name)
self.jid = JID(self.username)
self.xmpp_client = self._xmpp_client(self.jid, self.password,
self.host, self.port)
self.xmpp_client.logTraffic = self.debug
self.xmpp_client.setServiceParent(self)
self.presence = TransportPresenceClientProtocol(self.announce_presence)
self.presence.setHandlerParent(self.xmpp_client)
self.pinger = PingClientProtocol()
self.pinger.setHandlerParent(self.xmpp_client)
self.ping_call.start(self.ping_interval, now=False)
roster = TransportRosterClientProtocol()
roster.setHandlerParent(self.xmpp_client)
self.xmpp_protocol = self._xmpp_protocol(
self.jid, self.publish_message, self.message_consumer.unpause)
self.xmpp_protocol.setHandlerParent(self.xmpp_client)
log.msg("XMPPTransport %s started." % self.transport_name)
def announce_presence(self):
if not self.presence_call.running:
self.presence_call.start(self.presence_interval)
@inlineCallbacks
def send_ping(self):
if self.xmpp_client.xmlstream:
yield self.pinger.ping(self.jid)
def send_presence(self):
if self.xmpp_client.xmlstream:
self.presence.available(statuses={
None: self.status})
def teardown_transport(self):
log.msg("XMPPTransport %s stopped." % self.transport_name)
ping_call = getattr(self, 'ping_call', None)
if ping_call and ping_call.running:
ping_call.stop()
presence_call = getattr(self, 'presence_call', None)
if presence_call and presence_call.running:
presence_call.stop()
def handle_outbound_message(self, message):
recipient = message['to_addr']
text = message['content']
jid = JID(recipient).userhost()
if not self.xmpp_protocol.xmlstream:
log.err("Outbound undeliverable, XMPP not initialized yet.")
return False
else:
self.xmpp_protocol.reply(jid, text)
| true | true |
7900639b26b8274a09dc475e60532db57a8ca3de | 1,072 | py | Python | model-optimizer/mo/front/kaldi/extractors/affine_component_ext.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 2 | 2021-04-19T06:08:35.000Z | 2021-08-25T02:43:43.000Z | model-optimizer/mo/front/kaldi/extractors/affine_component_ext.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 6 | 2022-01-11T18:56:22.000Z | 2022-02-21T13:20:20.000Z | model-optimizer/mo/front/kaldi/extractors/affine_component_ext.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 3 | 2021-02-05T17:11:17.000Z | 2021-04-19T08:33:31.000Z | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.extractor import FrontExtractorOp
from mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor
from mo.front.kaldi.utils import read_learning_info
from mo.graph.graph import Node
class AffineComponentFrontExtractor(FrontExtractorOp):
op = 'affinecomponent'
enabled = True
@staticmethod
def extract(node: Node):
read_learning_info(node.parameters)
return FixedAffineComponentFrontExtractor.extract(node)
| 35.733333 | 99 | 0.785448 | from mo.front.extractor import FrontExtractorOp
from mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor
from mo.front.kaldi.utils import read_learning_info
from mo.graph.graph import Node
class AffineComponentFrontExtractor(FrontExtractorOp):
op = 'affinecomponent'
enabled = True
@staticmethod
def extract(node: Node):
read_learning_info(node.parameters)
return FixedAffineComponentFrontExtractor.extract(node)
| true | true |
7900641bd721f65da6126568388a4cf1c97d0c2e | 7,465 | py | Python | tests/test_load.py | tiosgz/herbstluftwm | 2f337ab3d73431b02f31e7d3cfee3a60fe77cdb9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_load.py | tiosgz/herbstluftwm | 2f337ab3d73431b02f31e7d3cfee3a60fe77cdb9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/test_load.py | tiosgz/herbstluftwm | 2f337ab3d73431b02f31e7d3cfee3a60fe77cdb9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import pytest
from test_layout import verify_frame_objects_via_dump
@pytest.mark.parametrize("invalid_layout,error_pos", [
    ('(', 1),
    ('()', 1),
    ('foo baar', 0),
    ('(foo baar', 1),
    ('((clients max:0 ))', 1),
    ('(clients)', 8),
    ('(clients )', 9),
    ('(split max:0.5:1)', 7),
    ('(split horizontal:0.05:1)', 7),
    ('(split horizontal:0.95:1)', 7),
    ('(split horizontal:x:1)', 7),
    ('(split horizontal:0.5:x)', 7),
    ('(split horizontal:0.5:-1)', 7),
    ('(split horizontal:0.5:2)', 7),
    ('(split horizontal:0.3)', 7),
    ('(split horizontal:0.3:0:0)', 7),
    ('(split horizonta:0.5:0 )', 8),
    ('(clients max )', 9),
    ('(clients max:0:0 )', 9),
    ('(clients ma:0 )', 9),
    ('(clients max:-1 )', 9),
    ('(clients grid:0 asdf )', 16),
    ('(clients grid:0 0xx0)', 16),
    ('(clients grid:0 09)', 16),
    ('(clients grid:0 0x)', 16),
    ('(clients grid:0 x)', 16),
    ('(split horizontal:0.5:0 x)', 24),
    ('(split horizontal:0.5:0 (split horizontal:0.5:1', 47),
    ('(split horizontal:0.5:0 (split horizontal:0.5:1 ', 48),
    ('(split horizontal:0.5:0 (split horizontal:0.5:1 )', 49),
    ('(split horizontal:0.5:0 (split horizontal:0.5:1 )))', 50),
    ('(split horizontal:0.5:0 (clients max:1', 38),
    ])
def test_syntax_errors_position(hlwm, invalid_layout, error_pos):
    """Malformed layouts are rejected, and the error message reports
    the character offset at which parsing failed."""
    proc = hlwm.call_xfail(['load', invalid_layout])
    proc.expect_stderr(rf'^load: Syntax error at {error_pos}: ')
def is_subseq(x, y):
    """Return True if x is a subsequence (not necessarily contiguous
    substring) of y."""
    # A single shared iterator over y guarantees that matches are found
    # in order: each search resumes where the previous one stopped.
    remaining = iter(y)
    for wanted in x:
        if not any(item == wanted for item in remaining):
            return False
    return True
@pytest.mark.parametrize("layout", [
    "(clients max:0)",
    "(clients grid:0)",
    " ( clients vertical:0 )",
    "(split horizontal:0.3:0)",
    "(split vertical:0.3:0 (clients horizontal:0))",
    "(split vertical:0.3:0 (split vertical:0.4:1))",
])
@pytest.mark.parametrize('num_splits_before', [0, 1, 2])
def test_valid_layout_syntax_partial_layouts(hlwm, layout, num_splits_before):
    """Layouts that only partially describe the frame tree are accepted
    and merged into the existing tree."""
    for _ in range(num_splits_before):
        hlwm.call('split explode')

    # load a layout that defines the frame tree only partially
    hlwm.call(['load', layout])

    # The resulting tree is the loaded one with additional '(clients ...)'
    # (and theoretically even '(split ...)') subtrees inserted, so the
    # whitespace-free loaded layout must appear as a subsequence of dump.
    assert is_subseq(layout.replace(' ', ''), hlwm.call('dump').stdout)
@pytest.mark.parametrize(
    "layout", [
        # 'W' is a placeholder that is substituted by a real window ID below
        "(clients max:0 W)",
        "(clients max:1 W W)",
        "(split horizontal:0.9:0 (split vertical:0.5:1 (clients max:0) (clients grid:0)) (clients horizontal:0))",
        "(split vertical:0.4:1 (clients max:2 W W W) (clients grid:0 W))",
    ])
def test_full_layouts(hlwm, layout):
    """A fully specified layout (window IDs included) round-trips
    through load and dump unchanged."""
    clients = [hlwm.create_client() for _ in range(layout.count('W'))]
    for winid, _ in clients:
        # substitute the leftmost remaining placeholder
        layout = layout.replace('W', winid, 1)

    proc = hlwm.call(['load', layout])

    assert proc.stdout == ''
    assert hlwm.call('dump').stdout == layout
    verify_frame_objects_via_dump(hlwm)
@pytest.mark.parametrize("layout", [
    "(clients horizontal:0 0234)",
    "(clients vertical:0 0x2343)",
    "(clients vertical:0 1713)",
])
def test_load_invalid_winids(hlwm, layout):
    """Window IDs that do not exist are reported as a warning,
    but the load itself still succeeds."""
    output = hlwm.call(['load', layout]).stdout
    assert output.startswith("Warning: Unknown window IDs")
@pytest.mark.parametrize(
    "running_clients_num,focus",
    [(n, f) for n in [1, 3] for f in range(0, n)])
def test_focus_client_via_load(hlwm, running_clients, running_clients_num, focus):
    """The focus index encoded in a loaded layout determines which
    client receives the focus."""
    winids = ' '.join(running_clients)
    layout = f'(clients horizontal:{focus} {winids})'

    hlwm.call(['load', layout])

    assert hlwm.call('dump').stdout == layout
    assert hlwm.get_attr('clients.focus.winid') == running_clients[focus]
@pytest.mark.parametrize(
    "running_clients_num,num_bring",
    [(n, f) for n in [1, 3] for f in range(0, n + 1)])
def test_load_brings_windows(hlwm, running_clients, running_clients_num, num_bring):
    """Clients mentioned in the loaded layout are moved to the target tag."""
    hlwm.call('add other')
    winids = ' '.join(running_clients[0:num_bring])
    separator = ' ' if num_bring > 0 else ''
    layout = f'(clients horizontal:0{separator}{winids})'
    # initially, all clients sit on the first tag
    assert int(hlwm.get_attr('tags.0.client_count')) == len(running_clients)
    assert int(hlwm.get_attr('tags.1.client_count')) == 0

    hlwm.call(['load', 'other', layout])

    # the named clients moved over to 'other'
    assert int(hlwm.get_attr('tags.0.client_count')) \
        == len(running_clients) - num_bring
    assert int(hlwm.get_attr('tags.1.client_count')) == num_bring
    assert hlwm.call('dump other').stdout == layout
def test_load_invalid_tag(hlwm):
    """Loading a layout onto a non-existent tag name is rejected."""
    call = hlwm.call_xfail(['load', 'invalidtagname', '(clients vertical:0)'])
    call.expect_stderr(r'Tag.*not found')
def test_fraction_precision(hlwm):
    """Split ratios round-trip through load/dump without losing
    decimal precision."""
    layout_template = '(split horizontal:{}:0 (clients max:0) (clients max:0))'
    for fraction in ['0.4', '0.305', '0.8987', '0.5', '0.4001']:
        layout = layout_template.format(fraction)
        hlwm.call(['load', layout])
        assert hlwm.call('dump').stdout == layout
def test_fraction_precision_outside_range(hlwm):
    """Split ratios outside the allowed range are echoed back verbatim
    in the error message.

    This tests decimal i/o for values outside the allowed
    frame-split-ratio. It only makes sense because we know that in
    FrameParser::buildTree(), the already parsed decimal is used for
    the error message.
    """
    layout_template = '(split horizontal:{}:0 (clients max:0) (clients max:0))'
    for fraction in ['0.098', '-0.098', '-0.5', '12.43', '-110.01']:
        hlwm.call_xfail(['load', layout_template.format(fraction)]) \
            .expect_stderr('but actually is ' + fraction)
def test_load_floating_client(hlwm):
    """Naming a floating client in a loaded layout pulls it back
    into the tiling layer and clears its floating flag."""
    winid, _ = hlwm.create_client()
    hlwm.call(f'set_attr clients.{winid}.floating true')
    hlwm.call('set_layout max')
    # the floating client is not part of the frame tree
    assert hlwm.call('dump').stdout.rstrip() == '(clients max:0)'

    # suck the client into the frame tree
    expected_layout = f'(clients max:0 {winid})'
    hlwm.call(['load', expected_layout])

    assert hlwm.call('dump').stdout.rstrip() == expected_layout
    assert hlwm.get_attr(f'clients.{winid}.floating') == 'false'
@pytest.mark.parametrize("othertag,minimized", [
    # all combinations where at least one of the flags is True
    # such that it is not in the tiling layer of the first tag yet
    # and such that it is invisible initially
    (True, True), (True, False), (False, True)
])
@pytest.mark.parametrize("floating", [True, False])
def test_load_minimized_client(hlwm, othertag, minimized, floating):
    """Loading a layout that names an invisible client (minimized,
    floating, or on another tag) makes it a visible tiled client."""
    if othertag:
        hlwm.call('add othertag')
        hlwm.call('rule tag=othertag')
    winid, _ = hlwm.create_client()
    hlwm.call(f'set_attr clients.{winid}.minimized {hlwm.bool(minimized)}')
    hlwm.call(f'set_attr clients.{winid}.floating {hlwm.bool(floating)}')
    assert hlwm.get_attr(f'clients.{winid}.visible') == 'false'
    # ensure the client is not yet in the tiling layer
    hlwm.call('set_layout max')
    assert hlwm.call('dump').stdout.rstrip() == '(clients max:0)'

    layout = f'(clients max:0 {winid})'
    hlwm.call(['load', layout])

    assert hlwm.call('dump').stdout.rstrip() == layout
    # the client is now a plain visible tiling client
    for attr, expected in [('visible', 'true'),
                           ('minimized', 'false'),
                           ('floating', 'false')]:
        assert hlwm.get_attr(f'clients.{winid}.{attr}') == expected
| 35.046948 | 114 | 0.626256 | import pytest
from test_layout import verify_frame_objects_via_dump
@pytest.mark.parametrize("invalid_layout,error_pos", [
('(', 1),
('()', 1),
('foo baar', 0),
('(foo baar', 1),
('((clients max:0 ))', 1),
('(clients)', 8),
('(clients )', 9),
('(split max:0.5:1)', 7),
('(split horizontal:0.05:1)', 7),
('(split horizontal:0.95:1)', 7),
('(split horizontal:x:1)', 7),
('(split horizontal:0.5:x)', 7),
('(split horizontal:0.5:-1)', 7),
('(split horizontal:0.5:2)', 7),
('(split horizontal:0.3)', 7),
('(split horizontal:0.3:0:0)', 7),
('(split horizonta:0.5:0 )', 8),
('(clients max )', 9),
('(clients max:0:0 )', 9),
('(clients ma:0 )', 9),
('(clients max:-1 )', 9),
('(clients grid:0 asdf )', 16),
('(clients grid:0 0xx0)', 16),
('(clients grid:0 09)', 16),
('(clients grid:0 0x)', 16),
('(clients grid:0 x)', 16),
('(split horizontal:0.5:0 x)', 24),
('(split horizontal:0.5:0 (split horizontal:0.5:1', 47),
('(split horizontal:0.5:0 (split horizontal:0.5:1 ', 48),
('(split horizontal:0.5:0 (split horizontal:0.5:1 )', 49),
('(split horizontal:0.5:0 (split horizontal:0.5:1 )))', 50),
('(split horizontal:0.5:0 (clients max:1', 38),
])
def test_syntax_errors_position(hlwm, invalid_layout, error_pos):
c = hlwm.call_xfail(['load', invalid_layout])
c.expect_stderr(r'^load: Syntax error at {}: '.format(error_pos))
def is_subseq(x, y):
it = iter(y)
return all(c in it for c in x)
@pytest.mark.parametrize("layout", [
"(clients max:0)",
"(clients grid:0)",
" ( clients vertical:0 )",
"(split horizontal:0.3:0)",
"(split vertical:0.3:0 (clients horizontal:0))",
"(split vertical:0.3:0 (split vertical:0.4:1))",
])
@pytest.mark.parametrize('num_splits_before', [0, 1, 2])
def test_valid_layout_syntax_partial_layouts(hlwm, layout, num_splits_before):
for i in range(0, num_splits_before):
hlwm.call('split explode')
hlwm.call(['load', layout])
assert is_subseq(layout.replace(' ', ''), hlwm.call('dump').stdout)
@pytest.mark.parametrize(
"layout", [
"(clients max:0 W)",
"(clients max:1 W W)",
"(split horizontal:0.9:0 (split vertical:0.5:1 (clients max:0) (clients grid:0)) (clients horizontal:0))",
"(split vertical:0.4:1 (clients max:2 W W W) (clients grid:0 W))",
])
def test_full_layouts(hlwm, layout):
clients = [hlwm.create_client() for k in range(0, layout.count('W'))]
for winid, _ in clients:
layout = layout.replace('W', winid, 1)
p = hlwm.call(['load', layout])
assert p.stdout == ''
assert layout == hlwm.call('dump').stdout
verify_frame_objects_via_dump(hlwm)
@pytest.mark.parametrize("layout", [
"(clients horizontal:0 0234)",
"(clients vertical:0 0x2343)",
"(clients vertical:0 1713)",
])
def test_load_invalid_winids(hlwm, layout):
p = hlwm.call(['load', layout])
assert p.stdout.startswith("Warning: Unknown window IDs")
@pytest.mark.parametrize(
"running_clients_num,focus",
[(n, f) for n in [1, 3] for f in range(0, n)])
def test_focus_client_via_load(hlwm, running_clients, running_clients_num, focus):
layout = '(clients horizontal:{} {})'.format(
focus, ' '.join(running_clients))
hlwm.call(['load', layout])
assert hlwm.call('dump').stdout == layout
assert hlwm.get_attr('clients.focus.winid') == running_clients[focus]
@pytest.mark.parametrize(
"running_clients_num,num_bring",
[(n, f) for n in [1, 3] for f in range(0, n + 1)])
def test_load_brings_windows(hlwm, running_clients, running_clients_num, num_bring):
hlwm.call('add other')
layout = '(clients horizontal:0{}{})'.format(
(' ' if num_bring > 0 else ''),
' '.join(running_clients[0:num_bring]))
assert int(hlwm.get_attr('tags.0.client_count')) \
== len(running_clients)
assert int(hlwm.get_attr('tags.1.client_count')) == 0
hlwm.call(['load', 'other', layout])
assert int(hlwm.get_attr('tags.0.client_count')) == \
len(running_clients) - num_bring
assert int(hlwm.get_attr('tags.1.client_count')) == num_bring
assert hlwm.call('dump other').stdout == layout
def test_load_invalid_tag(hlwm):
hlwm.call_xfail(['load', 'invalidtagname', '(clients vertical:0)']) \
.expect_stderr(r'Tag.*not found')
def test_fraction_precision(hlwm):
values = [
'0.4', '0.305', '0.8987',
'0.5', '0.4001'
]
layout_format = '(split horizontal:{}:0 (clients max:0) (clients max:0))'
for v in values:
layout = layout_format.format(v)
hlwm.call(['load', layout])
assert hlwm.call('dump').stdout == layout
def test_fraction_precision_outside_range(hlwm):
values = [
'0.098',
'-0.098',
'-0.5',
'12.43',
'-110.01',
]
layout_format = '(split horizontal:{}:0 (clients max:0) (clients max:0))'
for v in values:
layout = layout_format.format(v)
hlwm.call_xfail(['load', layout]) \
.expect_stderr('but actually is ' + v)
def test_load_floating_client(hlwm):
winid, _ = hlwm.create_client()
hlwm.call(f'set_attr clients.{winid}.floating true')
hlwm.call('set_layout max')
assert hlwm.call('dump').stdout.rstrip() == '(clients max:0)'
layout = f'(clients max:0 {winid})'
hlwm.call(['load', layout])
assert hlwm.call('dump').stdout.rstrip() == layout
assert hlwm.get_attr(f'clients.{winid}.floating') == 'false'
@pytest.mark.parametrize("othertag,minimized", [
(True, True), (True, False), (False, True)
])
@pytest.mark.parametrize("floating", [True, False])
def test_load_minimized_client(hlwm, othertag, minimized, floating):
if othertag:
hlwm.call('add othertag')
hlwm.call('rule tag=othertag')
winid, _ = hlwm.create_client()
hlwm.call(f'set_attr clients.{winid}.minimized {hlwm.bool(minimized)}')
hlwm.call(f'set_attr clients.{winid}.floating {hlwm.bool(floating)}')
assert hlwm.get_attr(f'clients.{winid}.visible') == 'false'
hlwm.call('set_layout max')
assert hlwm.call('dump').stdout.rstrip() == '(clients max:0)'
layout = f'(clients max:0 {winid})'
hlwm.call(['load', layout])
assert hlwm.call('dump').stdout.rstrip() == layout
assert hlwm.get_attr(f'clients.{winid}.visible') == 'true'
assert hlwm.get_attr(f'clients.{winid}.minimized') == 'false'
assert hlwm.get_attr(f'clients.{winid}.floating') == 'false'
| true | true |
7900645ad73aaae7ac7a8634dff8e4f26be3750a | 1,432 | py | Python | Question_11_20/answers/answer_19.py | OverHall27/Gasyori100knock | 341c528eb4c0789034898ee1f7d0a4b2f8b23eff | [
"MIT"
] | null | null | null | Question_11_20/answers/answer_19.py | OverHall27/Gasyori100knock | 341c528eb4c0789034898ee1f7d0a4b2f8b23eff | [
"MIT"
] | null | null | null | Question_11_20/answers/answer_19.py | OverHall27/Gasyori100knock | 341c528eb4c0789034898ee1f7d0a4b2f8b23eff | [
"MIT"
] | null | null | null | import cv2
import numpy as np
# Gray scale
def BGR2GRAY(img):
    """Convert a BGR uint8 image of shape (H, W, 3) to grayscale.

    Uses the ITU-R BT.709 luma weights (0.2126 R + 0.7152 G + 0.0722 B)
    and truncates back to uint8.
    """
    blue = img[:, :, 0].copy()
    green = img[:, :, 1].copy()
    red = img[:, :, 2].copy()

    # Weighted channel sum, then cast down to 8-bit.
    luma = 0.2126 * red + 0.7152 * green + 0.0722 * blue
    return luma.astype(np.uint8)
# LoG filter
def LoG_filter(img, K_size=5, sigma=3):
    """Apply a Laplacian-of-Gaussian filter to a grayscale image.

    Args:
        img: 2D grayscale image (arrays with extra trailing axes are sized
             by their first two dimensions).
        K_size: side length of the square LoG kernel.
        sigma: standard deviation of the Gaussian component.

    Returns:
        Filtered image as uint8 with the same height/width as the input.
    """
    # Bug fix: the original unpacked `H, W, C = img.shape`, which raises
    # ValueError for the 2D grayscale array this function is called with.
    H, W = img.shape[:2]

    # Zero padding so the kernel can be applied at the borders.
    # (np.float was removed in NumPy >= 1.24; use the builtin float.)
    pad = K_size // 2
    out = np.zeros((H + pad * 2, W + pad * 2), dtype=float)
    # Bug fix: the original read the module-level `gray` here instead of
    # the `img` parameter, silently ignoring the argument.
    out[pad: pad + H, pad: pad + W] = img.copy().astype(float)
    tmp = out.copy()

    # LoG kernel: (x^2 + y^2 - sigma^2) * exp(-(x^2 + y^2) / (2 sigma^2))
    K = np.zeros((K_size, K_size), dtype=float)
    for x in range(-pad, -pad + K_size):
        for y in range(-pad, -pad + K_size):
            K[y + pad, x + pad] = (x ** 2 + y ** 2 - sigma ** 2) * np.exp(-(x ** 2 + y ** 2) / (2 * (sigma ** 2)))
    K /= (2 * np.pi * (sigma ** 6))
    K /= K.sum()  # normalize so the kernel sums to 1

    # Cross-correlate the padded image with the kernel (removed the
    # leftover debug print of the kernel).
    for y in range(H):
        for x in range(W):
            out[pad + y, pad + x] = np.sum(K * tmp[y: y + K_size, x: x + K_size])

    out = np.clip(out, 0, 255)
    return out[pad: pad + H, pad: pad + W].astype(np.uint8)
# Read the noisy input image (BGR, uint8) from the working directory.
img = cv2.imread("imori_noise.jpg")

# Convert to a single-channel grayscale image.
gray = BGR2GRAY(img)

# LoG (Laplacian of Gaussian) filtering for noise suppression + edge emphasis.
# NOTE(review): `gray` is 2D here -- confirm LoG_filter accepts a 2D array.
out = LoG_filter(gray, K_size=5, sigma=3)

# Save the result to disk, then display it until any key is pressed.
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 23.096774 | 119 | 0.511872 | import cv2
import numpy as np
def BGR2GRAY(img):
b = img[:, :, 0].copy()
g = img[:, :, 1].copy()
r = img[:, :, 2].copy()
out = 0.2126 * r + 0.7152 * g + 0.0722 * b
out = out.astype(np.uint8)
return out
def LoG_filter(img, K_size=5, sigma=3):
H, W, C = img.shape
pad = K_size // 2
out = np.zeros((H + pad * 2, W + pad * 2), dtype=np.float)
out[pad: pad + H, pad: pad + W] = gray.copy().astype(np.float)
tmp = out.copy()
K = np.zeros((K_size, K_size), dtype=np.float)
for x in range(-pad, -pad + K_size):
for y in range(-pad, -pad + K_size):
K[y + pad, x + pad] = (x ** 2 + y ** 2 - sigma ** 2) * np.exp( -(x ** 2 + y ** 2) / (2 * (sigma ** 2)))
K /= (2 * np.pi * (sigma ** 6))
K /= K.sum()
print(K)
for y in range(H):
for x in range(W):
out[pad + y, pad + x] = np.sum(K * tmp[y: y + K_size, x: x + K_size])
out = np.clip(out, 0, 255)
out = out[pad: pad + H, pad: pad + W].astype(np.uint8)
return out
img = cv2.imread("imori_noise.jpg")
gray = BGR2GRAY(img)
out = LoG_filter(gray, K_size=5, sigma=3)
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| false | true |
790065560969580d0130bee7b7abb58c557f2eb3 | 27,857 | py | Python | src/dirbs/utils.py | nealmadhu/DIRBS-Core | 5afac86233c56d28e1c76d1291c2a1fec302be6f | [
"BSD-3-Clause-Clear"
] | null | null | null | src/dirbs/utils.py | nealmadhu/DIRBS-Core | 5afac86233c56d28e1c76d1291c2a1fec302be6f | [
"BSD-3-Clause-Clear"
] | null | null | null | src/dirbs/utils.py | nealmadhu/DIRBS-Core | 5afac86233c56d28e1c76d1291c2a1fec302be6f | [
"BSD-3-Clause-Clear"
] | 1 | 2022-02-09T10:55:13.000Z | 2022-02-09T10:55:13.000Z | """
DIRBS module for utility classes and functions.
Copyright (c) 2018 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import datetime
import logging
import hashlib
import json
import time
import copy
import io
import contextlib
import psycopg2
from psycopg2 import sql
from psycopg2.extras import NamedTupleCursor
from dirbs import db_schema_version as code_db_schema_version
import dirbs.metadata as metadata
from dirbs.config import ConfigParseException
class DatabaseSchemaException(Exception):
    """Exception raised when a DB schema validation check fails.

    The original message is prefixed so log output clearly identifies
    schema-check failures.
    """

    def __init__(self, msg):
        """Prefix *msg* and delegate to Exception."""
        prefixed_message = 'DB schema check failure: {0}'.format(msg)
        super().__init__(prefixed_message)
class DatabaseRoleCheckException(Exception):
    """Exception raised when the DB user lacks the role a job requires.

    The original message is prefixed so log output clearly identifies
    role-check failures.
    """

    def __init__(self, msg):
        """Prefix *msg* and delegate to Exception."""
        prefixed_message = 'DB role check failure: {0}'.format(msg)
        super().__init__(prefixed_message)
class JSONEncoder(json.JSONEncoder):
    """Custom JSONEncoder that serializes date/datetime values in ISO-8601 format."""

    def default(self, obj):
        """Overrides json.JSONEncoder.default.

        Returns an ISO-formatted string for datetime.date (and datetime,
        its subclass); defers to the base class for anything else, which
        raises TypeError for unsupported types.
        """
        if isinstance(obj, datetime.date):
            return obj.isoformat()
        # Bug fix: the original called JSONEncoder.default(self, obj), which
        # resolves to *this* class at call time and recurses infinitely for
        # unsupported types. Delegate to the stdlib base class instead.
        return super().default(obj)
class LoggingNamedTupleCursor(NamedTupleCursor):
    """psycopg2 NamedTupleCursor that echoes every executed query to the 'dirbs.sql' logger."""

    def __init__(self, *args, **kwargs):
        """Constructor. Passes all arguments through to NamedTupleCursor."""
        super().__init__(*args, **kwargs)
        if self.name is not None:
            # Default itersize to 100000 for named (server-side) cursors to
            # limit the number of round trips when iterating large results.
            self.itersize = 100000

    def execute(self, query, params=None):
        """Overrides NamedTupleCursor.execute to log the executed SQL at DEBUG level.

        Logging happens in the finally block so the query is recorded even
        when execution raises.
        """
        try:
            return super(LoggingNamedTupleCursor, self).execute(query, params)
        finally:
            # self.query is the byte string psycopg2 actually sent to the server
            if self.query is not None:
                logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))

    def callproc(self, procname, params=None):
        """Overrides NamedTupleCursor.callproc to log the executed SQL at DEBUG level."""
        try:
            return super(LoggingNamedTupleCursor, self).callproc(procname, params)
        finally:
            if self.query is not None:
                logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))
@contextlib.contextmanager
def db_role_setter(conn, *, role_name):
    """Context manager that temporarily switches the session's DB role.

    Yields the new role name; the previously active role is restored when
    the with-block completes. NOTE: the old role is only restored on the
    success path (no finally), matching the original behavior.
    """
    with conn.cursor() as cursor:
        # Remember whichever role is currently active.
        cursor.execute('SHOW ROLE')
        previous_role = cursor.fetchone()[0]
        cursor.execute('SET ROLE %s', [role_name])
        yield role_name
        # Switch back to the role that was active before entering.
        cursor.execute('SET ROLE %s', [previous_role])
class CodeProfiler(object):
    """Context manager measuring the wall-clock duration of a with-block.

    After the block exits, ``duration`` holds the elapsed time in whole
    milliseconds.
    """

    def __enter__(self):
        """Record the start timestamp and return self for `as` binding."""
        self.start = time.time()
        return self

    def __exit__(self, *args):
        """Store elapsed milliseconds; runs even if the block raised."""
        elapsed_seconds = time.time() - self.start
        self.duration = int(elapsed_seconds * 1000)
def compute_md5_hash(file, buf_size=65536):
    """Return the hex MD5 digest of a file-like object, reading in chunks.

    Args:
        file: object with a read(size) method, positioned at the start.
        buf_size: chunk size in bytes for each read.
    """
    digest = hashlib.md5()
    chunk = file.read(buf_size)
    # Keep reading until read() returns an empty (falsy) chunk.
    while chunk:
        digest.update(chunk)
        chunk = file.read(buf_size)
    return digest.hexdigest()
def cachebusted_filename_from_contents(byte_array):
    """Derive a short cache-busting filename component from content bytes.

    Returns the first 8 hex characters of the MD5 digest of *byte_array*,
    so the name changes whenever the content does.
    """
    digest = compute_md5_hash(io.BytesIO(byte_array))
    return digest[:8]
def cli_db_params_from_dsn(dsn, user=None, database=None, port=None, host=None):
    """Convert DSN-style DB settings into dirbs CLI '--db-*' argument strings.

    Explicit keyword values take precedence over the corresponding DSN
    entries. Returns the arguments in user, name, port, host order.
    """
    resolved = [
        ('--db-user={0}', user if user is not None else dsn.get('user')),
        ('--db-name={0}', database if database is not None else dsn.get('database')),
        ('--db-port={0}', port if port is not None else dsn.get('port')),
        ('--db-host={0}', host if host is not None else dsn.get('host')),
    ]
    return [template.format(value) for template, value in resolved]
def create_db_connection(db_config, readonly=False, autocommit=False):
    """Creates a DB connection to the database.

    Imports the config module, which results in the config being read from disk.
    Changes to the config file made after this method has been called will not be read.
    Calling entity should handle connection errors as appropriate.

    Args:
        db_config: object providing .database, .host and .connection_string.
        readonly: open the session in read-only mode.
        autocommit: enable psycopg2 autocommit on the session.

    Returns:
        An open psycopg2 connection using LoggingNamedTupleCursor cursors.
    """
    logger = logging.getLogger('dirbs.sql')
    logger.debug('Attempting to connect to the database {0} on host {1}'.format(db_config.database, db_config.host))
    # We hard-code 4 minutes idle keepalives, which is fairly aggressive, to avoid disconnections on VPNs, etc.
    conn = psycopg2.connect('{0} keepalives=1 keepalives_idle=240'.format(db_config.connection_string),
                            cursor_factory=LoggingNamedTupleCursor)
    conn.set_session(readonly=readonly, autocommit=autocommit)
    logger.debug('Connection to database successful.')
    return conn
def verify_db_schema(conn, required_role):
    """Function that runs all DB verification checks.

    Raises DatabaseSchemaException or DatabaseRoleCheckException on the
    first failing check; returns None if all checks pass.
    """
    warn_if_db_superuser(conn)
    verify_db_roles_installed(conn)
    verify_db_role_for_job(conn, required_role)
    verify_db_schema_version(conn)
    verify_db_ownership(conn)
    verify_hll_schema(conn)
    verify_core_schema(conn)
    verify_db_search_path(conn)
def warn_if_db_superuser(conn):
    """Log a warning if the current DB user is a PostgreSQL superuser.

    Running DIRBS jobs as a superuser works but is discouraged for security,
    so this only warns and never raises.
    """
    logger = logging.getLogger('dirbs.db')
    if is_db_user_superuser(conn):
        # Fix: logger.warn() is a deprecated alias; use warning().
        logger.warning('Running as PostgreSQL superuser -- for security reasons, we recommend running all '
                       'DIRBS tasks as a normal user')
def verify_db_roles_installed(conn):
    """Function used to verify whether roles have been installed in the DB.

    Raises DatabaseSchemaException if the dirbs_core_power_user role cannot
    be found in pg_roles.
    """
    # The below is not a guaranteed check, but a heuristic: we only probe for
    # one of the expected roles.
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cursor:
        cursor.execute('SELECT 1 AS res FROM pg_roles WHERE rolname = \'dirbs_core_power_user\'')
        if cursor.fetchone() is None:
            logger.error('DIRBS Core roles have not been installed - run \'dirbs-db install_roles\' before '
                         'running \'dirbs-db install\'')
            raise DatabaseSchemaException('DIRBS Core database roles have not been installed')
def verify_db_role_for_job(conn, expected_role):
    """Function used to verify that the current DB user is in the role expected for this job.

    Raises DatabaseRoleCheckException (including the GRANT command needed to
    fix the problem) if membership is missing.
    """
    if not is_db_user_dirbs_role(conn, expected_role):
        role = conn.get_dsn_parameters().get('user')
        raise DatabaseRoleCheckException('Current DB user {0} does not have required role: {1}. To fix this:'
                                         '\n\t1. GRANT {1} TO {0};'.format(role, expected_role))
def verify_db_schema_version(conn):
    """Function used to check whether the DB schema version matches the code schema version.

    Raises DatabaseSchemaException when no schema is installed or when the
    installed version differs from code_db_schema_version.
    """
    logger = logging.getLogger('dirbs.db')
    version = query_db_schema_version(conn)
    if version != code_db_schema_version:
        if version is None:
            # query_db_schema_version returns None when the schema_version
            # table is missing, i.e. install never ran
            logger.error('DB schema has not been installed via dirbs-db install!')
            raise DatabaseSchemaException('No DB schema installed - perform a dirbs-db install first!')
        else:
            logger.error('DB schema version does not match code!')
            logger.error('Code schema version: %d', code_db_schema_version)
            logger.error('DB schema version: %d', version)
            raise DatabaseSchemaException('Mismatch between code and DB schema versions - perform a dirbs-db upgrade!')
def verify_db_ownership(conn):
    """Function used to check whether DB ownership matches what we expect.

    Raises DatabaseSchemaException unless the database is owned by the
    dirbs_core_power_user role.
    """
    logger = logging.getLogger('dirbs.db')
    if query_db_ownership(conn) != 'dirbs_core_power_user':
        logger.error('Database is not owned by the dirbs_core_power_user group! Please the '
                     'following as the current DB owner (whilst logged into the database):'
                     '\n\tALTER DATABASE <database> OWNER TO dirbs_core_power_user;')
        raise DatabaseSchemaException('Incorrect database ownership!')
def verify_core_schema(conn):
    """Function used to check whether Core schema exists and has correct ownership.

    Raises DatabaseSchemaException if the 'core' schema is missing or is not
    owned by dirbs_core_power_user.
    """
    if not query_schema_existence(conn, 'core'):
        raise DatabaseSchemaException('Missing schema \'core\' in DB. Was dirbs-db install run successfully?')

    if query_schema_ownership(conn, 'core') != 'dirbs_core_power_user':
        raise DatabaseSchemaException('Schema \'core\' is not owned by dirbs_core_power_user!')
def verify_hll_schema(conn):
    """Function used to check whether HLL schema exists and that extension is installed correctly.

    Raises DatabaseSchemaException with remediation instructions when the
    'hll' schema is missing or the hll extension is not installed into it.
    """
    logger = logging.getLogger('dirbs.db')
    if not query_schema_existence(conn, 'hll'):
        logger.error('Schema \'hll\' does not exist. Please ensure the hll extension is installed and run the '
                     'following as a superuser whilst connected to this DB: '
                     '\n\t1. CREATE SCHEMA hll;'
                     '\n\t2. GRANT USAGE ON SCHEMA hll TO dirbs_core_base;'
                     '\n\t3. CREATE EXTENSION hll SCHEMA hll;')
        raise DatabaseSchemaException('HLL schema not created!')

    # Check if extension installed correctly by looking for hll.hll_print;
    # pg_get_functiondef raises ProgrammingError if the function is unknown
    with conn.cursor() as cursor:
        try:
            cursor.execute('SELECT pg_get_functiondef(\'hll.hll_print(hll.hll)\'::regprocedure)')
        except psycopg2.ProgrammingError:
            logger.error('The HLL extension is not installed correctly. Please issue the following as a superuser '
                         'whilst connected to this DB: '
                         '\n\tCREATE EXTENSION hll SCHEMA hll;')
            raise DatabaseSchemaException('DB search_path does not include hll or extension not installed!')
def verify_db_search_path(conn):
    """Function used to check whether db_search_path is correct by looking for objects.

    Probes for the core schema_version table and an hll function without a
    schema qualifier; if either lookup fails, the configured search_path
    does not cover core/hll and DatabaseSchemaException is raised.
    """
    logger = logging.getLogger('dirbs.db')
    is_search_path_valid = True
    with conn.cursor() as cursor:
        # to_regclass returns NULL rather than raising if the name is unknown
        cursor.execute('SELECT to_regclass(\'schema_version\')')
        res = cursor.fetchone()[0]
        if res is None:
            is_search_path_valid = False
        try:
            cursor.execute('SELECT pg_get_functiondef(\'hll_print(hll)\'::regprocedure)')
        except psycopg2.ProgrammingError:
            is_search_path_valid = False
    if not is_search_path_valid:
        logger.error('The search_path for the database is not set correctly. Please issue the following '
                     'whilst connected to this DB: '
                     '\n\tALTER DATABASE <database> SET search_path TO core, hll;')
        raise DatabaseSchemaException('DB search_path not set correctly!')
def query_db_schema_version(conn):
    """Function to fetch the DB version number from the database.

    Returns the max version from the schema_version table, or None when the
    table does not exist (schema never installed).
    """
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        try:
            cur.execute('SELECT MAX(version) FROM schema_version')  # noqa: Q440
            return cur.fetchone()[0]
        except psycopg2.ProgrammingError as ex:
            # Missing table -> treat as "no schema installed"
            logger.error(str(ex).strip())
            return None
def set_db_schema_version(conn, new_version):
    """Function to set the DB version number in the database.

    Updates the single schema_version row in place, or inserts it if the
    table is empty. Assumes the table never holds more than one row.
    """
    with conn.cursor() as cur:
        cur.execute('SELECT COUNT(*) FROM schema_version')
        num_rows = cur.fetchone()[0]
        assert num_rows <= 1
        if num_rows > 0:
            cur.execute('UPDATE schema_version SET version = %s', [new_version])  # noqa: Q440
        else:
            cur.execute('INSERT INTO schema_version(version) VALUES(%s)', [new_version])
def is_db_user_superuser(conn):
    """Return True if the current DB user is a PostgreSQL superuser.

    Falls back to False (with a warning) if CURRENT_USER is unexpectedly
    absent from pg_roles.
    """
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        cur.execute("""SELECT rolsuper
                       FROM pg_roles
                       WHERE rolname = CURRENT_USER""")
        res = cur.fetchone()
        if res is None:
            # Fix: logger.warn() is a deprecated alias; use warning().
            logger.warning('Failed to find CURRENT_USER in pg_roles table')
            return False
        return res[0]
def is_db_user_dirbs_role(conn, role_name):
    """Function to test whether the current DB user is in a DIRBS role.

    Returns the boolean result of PostgreSQL's pg_has_role() for membership
    (direct or inherited) in *role_name*.
    """
    with conn.cursor() as cur:
        cur.execute("""SELECT pg_has_role(%s, 'MEMBER')""", [role_name])
        return cur.fetchone()[0]
def is_db_user_dirbs_poweruser(conn):
    """Function to test whether the current DB user is a DIRBS power user.

    Convenience wrapper around is_db_user_dirbs_role for the
    dirbs_core_power_user role.
    """
    return is_db_user_dirbs_role(conn, 'dirbs_core_power_user')
def can_db_user_create_roles(conn):
    """Return True if the current DB user has the CREATEROLE privilege.

    Falls back to False (with a warning) if CURRENT_USER is unexpectedly
    absent from pg_roles.
    """
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        cur.execute("""SELECT rolcreaterole
                       FROM pg_roles
                       WHERE rolname = CURRENT_USER""")
        res = cur.fetchone()
        if res is None:
            # Fix: logger.warn() is a deprecated alias; use warning().
            logger.warning('Failed to find CURRENT_USER in pg_roles table')
            return False
        return res[0]
def query_db_ownership(conn):
    """Return the role name that owns the current database, or None if not determinable."""
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        cur.execute("""SELECT rolname
                       FROM pg_roles
                       JOIN pg_database
                       ON (pg_database.datdba = pg_roles.oid)
                       WHERE datname = current_database()""")
        res = cur.fetchone()
        if res is None:
            # Fixes: 'determing' typo in the log message and the deprecated
            # logger.warn() alias.
            logger.warning('Failed to determine DB owner for current_database')
            return None
        return res[0]
def query_schema_existence(conn, schema_name):
    """Return True if *schema_name* exists in the current database.

    Checks information_schema.schemata via EXISTS, so the result is a plain
    boolean regardless of how many matches there are.
    """
    with conn.cursor() as cur:
        cur.execute('SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)',
                    [schema_name])
        return cur.fetchone().exists
def query_schema_ownership(conn, schema_name):
    """Return the role name owning the given schema, or None if not determinable."""
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        cur.execute("""SELECT rolname
                       FROM pg_roles
                       JOIN pg_namespace
                       ON (pg_namespace.nspowner = pg_roles.oid)
                       WHERE nspname = %s""", [schema_name])
        res = cur.fetchone()
        if res is None:
            # Fixes: 'determing' typo in the log message and the deprecated
            # logger.warn() alias.
            logger.warning('Failed to determine owner for current_schema')
            return None
        return res[0]
def compute_analysis_end_date(conn, curr_date):
    """Function to get the end of the analysis window based on current operator data.

    If curr_date is given, the window ends the day after it. Otherwise the
    latest last_seen date in the newest monthly_network_triplets_country
    partition is used; if no operator data exists at all, today's date.
    Returns an exclusive end date (last day + 1).
    """
    end_date = curr_date
    if end_date is None:
        # If current date is None, set analysis end date as the last day for which operator data exists
        with conn.cursor() as cursor:
            monthly_country_child_tbl_list = child_table_names(conn, 'monthly_network_triplets_country')
            year_month_list_in_child_tbls_records = table_invariants_list(conn, monthly_country_child_tbl_list,
                                                                          ['triplet_year', 'triplet_month'])
            year_month_tuple_list = [(x.triplet_year, x.triplet_month) for x in year_month_list_in_child_tbls_records]
            if len(year_month_tuple_list) > 0:
                # Sort newest (year, month) first and query only that partition's data
                year_month_tuple_list.sort(key=lambda x: (x[0], x[1]), reverse=True)
                latest_year, latest_month = year_month_tuple_list[0]
                cursor.execute(sql.SQL("""SELECT MAX(last_seen)
                                            FROM monthly_network_triplets_country
                                           WHERE triplet_year = %s
                                             AND triplet_month = %s"""), [latest_year, latest_month])
                end_date = cursor.fetchone()[0]

    # If there was no operator data imported, this can be None
    if end_date is None:
        end_date = datetime.date.today()

    return end_date + datetime.timedelta(days=1)
def hash_string_64bit(s):
    """Deterministic 64-bit string hash (Java-style 31-multiplier scheme).

    Starts from the prime 7, folds in each UTF-8 byte with a multiply-by-31
    step, and reduces modulo 2**63 - 1 so the result fits a signed bigint.
    """
    result = 7
    for byte_value in s.encode('utf-8'):
        result = result * 31 + byte_value
    return result % (pow(2, 63) - 1)  # noqa: S001 Make sure it fits into a 64-bit bigint
def child_table_names(conn, parent_name):
    """Return a list of table names for a parent table name.

    Uses pg_inherits to find child (partition) tables of *parent_name*,
    restricted to the current schema for both parent and children.
    """
    with conn.cursor() as cursor:
        cursor.execute("""SELECT c.relname AS child_tblname
                            FROM pg_inherits
                            JOIN pg_class AS c
                              ON (c.oid = inhrelid)
                            JOIN pg_class AS p
                              ON (p.oid = inhparent)
                            JOIN pg_catalog.pg_namespace nc
                              ON nc.oid = c.relnamespace
                            JOIN pg_catalog.pg_namespace np
                              ON np.oid = p.relnamespace
                           WHERE p.relname = %s
                             AND np.nspname = current_schema()
                             AND nc.nspname = current_schema()""",
                       [parent_name])
        return [res.child_tblname for res in cursor]
def table_invariants_list(conn, table_names, invariant_col_names):
    """Gets a list of tuples containing the values for common table invariant columns across a list table names.

    Reads one sample row (LIMIT 1) per table and UNION ALLs the results, so
    the named columns must hold the same value for every row of a table
    (i.e. be invariants, such as partition key columns).
    """
    if len(table_names) == 0:
        # Need to return an empty list to avoid doing an empty query and generating an error
        return []

    with conn.cursor() as cursor:
        table_queries = []
        for tblname in table_names:
            table_queries.append(sql.SQL("""SELECT * FROM (SELECT {0} FROM {1} LIMIT 1) {2}""")
                                 .format(sql.SQL(', ').join(map(sql.Identifier, invariant_col_names)),
                                         sql.Identifier(tblname),
                                         sql.Identifier('tmp_{0}'.format(tblname))))
        cursor.execute(sql.SQL(' UNION ALL ').join(table_queries))
        return cursor.fetchall()
def most_recently_run_condition_info(conn, cond_names, successful_only=False):
    """For a list of condition names, return a dict of cond_name -> (run_id, cond_config) for the most recent results.

    If a particular condition has never completed successfully, the value of the dict will be None, unless the
    successful_only parameter is set to True, in which case the key will not exist in the returned dict.
    """
    conditions_to_find = copy.copy(cond_names)
    rv = {}

    # Get list of metadata for dirbs-classify, sorted in reverse order (most recent first)
    job_metadata_list = metadata.query_for_command_runs(conn, 'dirbs-classify')
    for job_metadata in job_metadata_list:
        # Loop back through recent dirbs-classify runs looking for the last time a classification
        # ran successfully. This is indicated in the metadata by the presence of an entry in the
        # matched_imei_counts. This can happen even though the overall dirbs-classify job failed.
        extra_metadata = job_metadata.extra_metadata
        metadata_conditions = extra_metadata.get('conditions', {})
        matched_imei_counts = extra_metadata.get('matched_imei_counts', {})
        conditions_lookup = {c['label']: c for c in metadata_conditions}
        for req_cond_name in copy.copy(conditions_to_find):  # We modify the list in the loop, so take a copy
            if req_cond_name in matched_imei_counts:
                # If the name was in matched_imei_counts, it should always be in conditions as well
                rv[req_cond_name] = {
                    'run_id': job_metadata.run_id,
                    'config': conditions_lookup[req_cond_name],
                    'last_successful_run': job_metadata.start_time
                }
                # Remove this req_cond_name from conditions_to_find since we already found latest metadata
                conditions_to_find.remove(req_cond_name)

    # Any items in conditions_to_find at this point are conditions for which we never ran a successful condition
    # run
    if not successful_only:
        for missing_cond_name in conditions_to_find:
            rv[missing_cond_name] = None

    return rv
def filter_imei_list_sql_by_device_type(conn, exempted_device_types, imei_list_sql):
    """Function to return SQL filtering out exempted device types.

    Wraps *imei_list_sql* (a query producing imei_norm values) so that IMEIs
    whose GSMA device type is in *exempted_device_types* are excluded.
    Returns the composed query as a plain SQL string.
    """
    # If certain device types are exempted, first select the IMEIs passed in imei_list_sql query.
    # These IMEIs are then joined against GSMA TAC db (on the first 8 digits, the TAC)
    # to get their device type. Finally, any IMEIs that belong to exempted device types are excluded.
    return sql.SQL("""SELECT imei_norm
                        FROM (SELECT imei_norm,
                                     SUBSTRING(imei_norm, 1, 8) AS tac
                                FROM ({0}) imeis) imeis_with_tac
                        JOIN gsma_data
                       USING (tac)
                       WHERE device_type NOT IN {1}
                   """).format(sql.SQL(imei_list_sql),
                               sql.Literal(tuple(exempted_device_types))).as_string(conn)
def format_datetime_for_report(timestamp_with_tz):
    """Render a timestamp as 'date locale-time' for reports; None passes through.

    Replace this function with datetime.isoformat(sep=' ', timespec='seconds')
    after we update the minimum Python version to 3.6.
    """
    if timestamp_with_tz is None:
        return None
    return timestamp_with_tz.strftime('%Y-%m-%d %X')
def validate_exempted_device_types(conn, config):
    """Method to validate exempted device types specified in config.

    Checks each configured exempted device type against the distinct device
    types in the imported GSMA TAC data. Warns (and skips validation) when no
    GSMA data is present; raises ConfigParseException for unknown types.
    """
    with conn.cursor() as cursor:
        logger = logging.getLogger('dirbs.config')
        exempted_device_types = config.region_config.exempted_device_types
        if len(exempted_device_types) > 0:
            cursor.execute('SELECT DISTINCT device_type FROM gsma_data')
            all_device_types = [x.device_type for x in cursor]
            if len(all_device_types) == 0:
                # No GSMA data imported yet: cannot validate, so only warn
                logger.warning('RegionConfig: Ignoring setting exempted_device_types={0} as GSMA TAC database '
                               'not imported or no device types found.'.format(exempted_device_types))
            else:
                invalid_device_types = set(exempted_device_types) - set(all_device_types)
                if len(invalid_device_types) > 0:
                    msg = 'RegionConfig: exempted_device_types \'{0}\' is/are not valid device type(s). ' \
                          'The valid GSMA device types are: \'{1}\''.format(invalid_device_types, all_device_types)
                    logger.error(msg)
                    raise ConfigParseException(msg)
def log_analysis_window(logger, analysis_start_date, analysis_end_date, start_message='',
                        start_date_inclusive=True, end_date_inclusive=False):
    """Log the analysis window at DEBUG level using interval notation.

    Inclusive bounds are rendered with square brackets, exclusive bounds
    with parentheses, e.g. '[2020-01-01, 2020-02-01)'.
    """
    open_bracket = '[' if start_date_inclusive else '('
    close_bracket = ']' if end_date_inclusive else ')'
    message = '{0} {1}{2}, {3}{4}'.format(start_message, open_bracket, analysis_start_date,
                                          analysis_end_date, close_bracket)
    logger.debug(message)
def registration_list_status_filter_sql():
    """SQL to filter for whitelisted or null registration_list statuses.

    Returns a psycopg2 sql.SQL fragment suitable for composition into a
    larger WHERE clause.
    """
    return sql.SQL('(status IS NULL OR status = \'whitelist\')')
def compute_amnesty_flags(app_config, curr_date):
    """Return (in_eval_period, in_amnesty_period) for *curr_date*.

    Both flags are False when amnesty is disabled. Otherwise the evaluation
    period runs up to and including its end date; the amnesty period covers
    the days after that, up to and including the amnesty end date.
    """
    amnesty_cfg = app_config.amnesty_config
    if not amnesty_cfg.amnesty_enabled:
        return False, False
    in_eval_period = curr_date <= amnesty_cfg.evaluation_period_end_date
    # The two periods are disjoint: amnesty starts the day after evaluation ends.
    in_amnesty_period = (not in_eval_period) and curr_date <= amnesty_cfg.amnesty_period_end_date
    return in_eval_period, in_amnesty_period
def table_exists_sql(any_schema=False):
    """SQL to check for existence of a table. Note that for temp tables, any_schema should be set to True.

    Returns a parameterized sql.SQL query expecting the table name as the
    single query parameter; by default the check is restricted to the
    current schema.
    """
    if not any_schema:
        schema_filter_sql = sql.SQL('AND schemaname = current_schema()')
    else:
        schema_filter_sql = sql.SQL('')

    return sql.SQL("""SELECT EXISTS (SELECT 1
                                       FROM pg_tables
                                      WHERE tablename = %s
                                            {schema_filter_sql})""").format(schema_filter_sql=schema_filter_sql)
def is_table_partitioned(conn, tbl_name):
    """Function to determine whether a table is partitioned.

    :param conn: open psycopg2 connection (cursor rows expose named attributes)
    :param tbl_name: relation name to check
    :return: True if tbl_name is a declaratively-partitioned table
    """
    with conn.cursor() as cursor:
        # pg_partitioned_table has one row per partitioned table, so a join hit
        # means the relation is partitioned.
        cursor.execute("""SELECT EXISTS (SELECT 1
                                           FROM pg_class
                                           JOIN pg_partitioned_table
                                             ON pg_partitioned_table.partrelid = pg_class.oid
                                          WHERE pg_class.relname = %s)""", [tbl_name])
        return cursor.fetchone().exists
| 46.044628 | 119 | 0.651219 |
import datetime
import logging
import hashlib
import json
import time
import copy
import io
import contextlib
import psycopg2
from psycopg2 import sql
from psycopg2.extras import NamedTupleCursor
from dirbs import db_schema_version as code_db_schema_version
import dirbs.metadata as metadata
from dirbs.config import ConfigParseException
class DatabaseSchemaException(Exception):
    """Raised when the database schema fails a verification check."""

    def __init__(self, msg):
        # Prefix every message so log lines are immediately attributable.
        message = 'DB schema check failure: {0}'.format(msg)
        super().__init__(message)
class DatabaseRoleCheckException(Exception):
    """Raised when the current database role lacks a required DIRBS role."""

    def __init__(self, msg):
        # Prefix every message so log lines are immediately attributable.
        message = 'DB role check failure: {0}'.format(msg)
        super().__init__(message)
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes date/datetime objects as ISO-8601 strings."""

    def default(self, obj):
        """Return a serializable form of obj, or defer to the base encoder.

        Bug fix: the original fell back to ``JSONEncoder.default(self, obj)``,
        which recursed into *this* method forever (RecursionError) for any
        unsupported type. Deferring to the base class raises the documented
        TypeError instead.
        """
        if isinstance(obj, datetime.date):
            # datetime.datetime is a subclass of date, so both are handled.
            return obj.isoformat()
        return super().default(obj)
class LoggingNamedTupleCursor(NamedTupleCursor):
    """NamedTupleCursor that logs every executed query to the 'dirbs.sql' logger."""

    def __init__(self, *args, **kwargs):
        """Create the cursor; named (server-side) cursors get a large fetch batch."""
        super().__init__(*args, **kwargs)
        if self.name is not None:
            # Server-side cursor: fetch rows in big batches to cut round-trips.
            self.itersize = 100000

    def execute(self, query, params=None):
        """Execute a query, logging the fully-interpolated SQL afterwards (even on error)."""
        try:
            return super(LoggingNamedTupleCursor, self).execute(query, params)
        finally:
            # self.query is the byte string psycopg2 actually sent, if any.
            if self.query is not None:
                logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))

    def callproc(self, procname, params=None):
        """Call a stored procedure, logging the generated SQL afterwards (even on error)."""
        try:
            return super(LoggingNamedTupleCursor, self).callproc(procname, params)
        finally:
            if self.query is not None:
                logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))
@contextlib.contextmanager
def db_role_setter(conn, *, role_name):
    """Context manager that temporarily switches the session's database role.

    Bug fix: the original did not restore the previous role if the managed
    block raised, leaking the elevated role onto the (possibly pooled)
    connection. The restore now runs in a finally block.

    :param conn: open DB-API connection whose cursor supports SHOW/SET ROLE
    :param role_name: role to assume inside the with-block
    :yield: the role name that was set
    """
    with conn.cursor() as cursor:
        cursor.execute('SHOW ROLE')
        old_role = cursor.fetchone()[0]
        cursor.execute('SET ROLE %s', [role_name])
        try:
            yield role_name
        finally:
            # Always restore, even if the managed block raised.
            cursor.execute('SET ROLE %s', [old_role])
class CodeProfiler(object):
    """Context manager that records the elapsed wall-clock time of a block.

    After the with-block exits, ``duration`` holds the elapsed time in
    whole milliseconds.
    """

    def __enter__(self):
        # Remember when the block was entered.
        self.start = time.time()
        return self

    def __exit__(self, *args):
        elapsed_seconds = time.time() - self.start
        self.duration = int(elapsed_seconds * 1000)
def compute_md5_hash(file, buf_size=65536):
    """Return the hex MD5 digest of a binary file-like object, read in chunks.

    :param file: file-like object opened in binary mode
    :param buf_size: chunk size in bytes for each read
    :return: hex digest string
    """
    digest = hashlib.md5()
    # iter() with a b'' sentinel stops at EOF, exactly like the manual loop
    # (md5 only accepts bytes, so the stream is necessarily binary).
    for chunk in iter(lambda: file.read(buf_size), b''):
        digest.update(chunk)
    return digest.hexdigest()
def cachebusted_filename_from_contents(byte_array):
    """Return a short (8-hex-char) cache-busting name derived from the content's MD5."""
    full_digest = compute_md5_hash(io.BytesIO(byte_array))
    return full_digest[:8]
def cli_db_params_from_dsn(dsn, user=None, database=None, port=None, host=None):
    """Build the dirbs CLI ``--db-*`` arguments from a DSN dict.

    Each explicit keyword argument, when not None, overrides the
    corresponding DSN entry.
    """
    resolved = [
        ('--db-user', user if user is not None else dsn.get('user')),
        ('--db-name', database if database is not None else dsn.get('database')),
        ('--db-port', port if port is not None else dsn.get('port')),
        ('--db-host', host if host is not None else dsn.get('host')),
    ]
    return ['{0}={1}'.format(flag, value) for flag, value in resolved]
def create_db_connection(db_config, readonly=False, autocommit=False):
logger = logging.getLogger('dirbs.sql')
logger.debug('Attempting to connect to the database {0} on host {1}'.format(db_config.database, db_config.host))
conn = psycopg2.connect('{0} keepalives=1 keepalives_idle=240'.format(db_config.connection_string),
cursor_factory=LoggingNamedTupleCursor)
conn.set_session(readonly=readonly, autocommit=autocommit)
logger.debug('Connection to database successful.')
return conn
def verify_db_schema(conn, required_role):
warn_if_db_superuser(conn)
verify_db_roles_installed(conn)
verify_db_role_for_job(conn, required_role)
verify_db_schema_version(conn)
verify_db_ownership(conn)
verify_hll_schema(conn)
verify_core_schema(conn)
verify_db_search_path(conn)
def warn_if_db_superuser(conn):
    """Emit a warning if the current DB user is a PostgreSQL superuser.

    Running DIRBS as a superuser works but is discouraged for security reasons.
    """
    logger = logging.getLogger('dirbs.db')
    if is_db_user_superuser(conn):
        # Fix: logger.warn() is a deprecated alias of logger.warning().
        logger.warning('Running as PostgreSQL superuser -- for security reasons, we recommend running all '
                       'DIRBS tasks as a normal user')
def verify_db_roles_installed(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cursor:
cursor.execute('SELECT 1 AS res FROM pg_roles WHERE rolname = \'dirbs_core_power_user\'')
if cursor.fetchone() is None:
logger.error('DIRBS Core roles have not been installed - run \'dirbs-db install_roles\' before '
'running \'dirbs-db install\'')
raise DatabaseSchemaException('DIRBS Core database roles have not been installed')
def verify_db_role_for_job(conn, expected_role):
if not is_db_user_dirbs_role(conn, expected_role):
role = conn.get_dsn_parameters().get('user')
raise DatabaseRoleCheckException('Current DB user {0} does not have required role: {1}. To fix this:'
'\n\t1. GRANT {1} TO {0};'.format(role, expected_role))
def verify_db_schema_version(conn):
logger = logging.getLogger('dirbs.db')
version = query_db_schema_version(conn)
if version != code_db_schema_version:
if version is None:
logger.error('DB schema has not been installed via dirbs-db install!')
raise DatabaseSchemaException('No DB schema installed - perform a dirbs-db install first!')
else:
logger.error('DB schema version does not match code!')
logger.error('Code schema version: %d', code_db_schema_version)
logger.error('DB schema version: %d', version)
raise DatabaseSchemaException('Mismatch between code and DB schema versions - perform a dirbs-db upgrade!')
def verify_db_ownership(conn):
logger = logging.getLogger('dirbs.db')
if query_db_ownership(conn) != 'dirbs_core_power_user':
logger.error('Database is not owned by the dirbs_core_power_user group! Please the '
'following as the current DB owner (whilst logged into the database):'
'\n\tALTER DATABASE <database> OWNER TO dirbs_core_power_user;')
raise DatabaseSchemaException('Incorrect database ownership!')
def verify_core_schema(conn):
if not query_schema_existence(conn, 'core'):
raise DatabaseSchemaException('Missing schema \'core\' in DB. Was dirbs-db install run successfully?')
if query_schema_ownership(conn, 'core') != 'dirbs_core_power_user':
raise DatabaseSchemaException('Schema \'core\' is not owned by dirbs_core_power_user!')
def verify_hll_schema(conn):
logger = logging.getLogger('dirbs.db')
if not query_schema_existence(conn, 'hll'):
logger.error('Schema \'hll\' does not exist. Please ensure the hll extension is installed and run the '
'following as a superuser whilst connected to this DB: '
'\n\t1. CREATE SCHEMA hll;'
'\n\t2. GRANT USAGE ON SCHEMA hll TO dirbs_core_base;'
'\n\t3. CREATE EXTENSION hll SCHEMA hll;')
raise DatabaseSchemaException('HLL schema not created!')
with conn.cursor() as cursor:
try:
cursor.execute('SELECT pg_get_functiondef(\'hll.hll_print(hll.hll)\'::regprocedure)')
except psycopg2.ProgrammingError:
logger.error('The HLL extension is not installed correctly. Please issue the following as a superuser '
'whilst connected to this DB: '
'\n\tCREATE EXTENSION hll SCHEMA hll;')
raise DatabaseSchemaException('DB search_path does not include hll or extension not installed!')
def verify_db_search_path(conn):
logger = logging.getLogger('dirbs.db')
is_search_path_valid = True
with conn.cursor() as cursor:
cursor.execute('SELECT to_regclass(\'schema_version\')')
res = cursor.fetchone()[0]
if res is None:
is_search_path_valid = False
try:
cursor.execute('SELECT pg_get_functiondef(\'hll_print(hll)\'::regprocedure)')
except psycopg2.ProgrammingError:
is_search_path_valid = False
if not is_search_path_valid:
logger.error('The search_path for the database is not set correctly. Please issue the following '
'whilst connected to this DB: '
'\n\tALTER DATABASE <database> SET search_path TO core, hll;')
raise DatabaseSchemaException('DB search_path not set correctly!')
def query_db_schema_version(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
try:
cur.execute('SELECT MAX(version) FROM schema_version')
return cur.fetchone()[0]
except psycopg2.ProgrammingError as ex:
logger.error(str(ex).strip())
return None
def set_db_schema_version(conn, new_version):
with conn.cursor() as cur:
cur.execute('SELECT COUNT(*) FROM schema_version')
num_rows = cur.fetchone()[0]
assert num_rows <= 1
if num_rows > 0:
cur.execute('UPDATE schema_version SET version = %s', [new_version])
else:
cur.execute('INSERT INTO schema_version(version) VALUES(%s)', [new_version])
def is_db_user_superuser(conn):
    """Return True if the current DB user has the PostgreSQL superuser attribute.

    Returns False (after logging a warning) if CURRENT_USER cannot be found
    in pg_roles.
    """
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        cur.execute("""SELECT rolsuper
                       FROM pg_roles
                       WHERE rolname = CURRENT_USER""")
        res = cur.fetchone()
        if res is None:
            # Fix: logger.warn() is a deprecated alias of logger.warning().
            logger.warning('Failed to find CURRENT_USER in pg_roles table')
            return False
        return res[0]
def is_db_user_dirbs_role(conn, role_name):
with conn.cursor() as cur:
cur.execute("""SELECT pg_has_role(%s, 'MEMBER')""", [role_name])
return cur.fetchone()[0]
def is_db_user_dirbs_poweruser(conn):
return is_db_user_dirbs_role(conn, 'dirbs_core_power_user')
def can_db_user_create_roles(conn):
    """Return True if the current DB user has the CREATEROLE privilege.

    Returns False (after logging a warning) if CURRENT_USER cannot be found
    in pg_roles.
    """
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        cur.execute("""SELECT rolcreaterole
                       FROM pg_roles
                       WHERE rolname = CURRENT_USER""")
        res = cur.fetchone()
        if res is None:
            # Fix: logger.warn() is a deprecated alias of logger.warning().
            logger.warning('Failed to find CURRENT_USER in pg_roles table')
            return False
        return res[0]
def query_db_ownership(conn):
    """Return the role name that owns the current database, or None if unknown.

    Fixes: deprecated logger.warn() -> logger.warning(); typo 'determing'
    corrected in the log message.
    """
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        cur.execute("""SELECT rolname
                       FROM pg_roles
                       JOIN pg_database
                       ON (pg_database.datdba = pg_roles.oid)
                       WHERE datname = current_database()""")
        res = cur.fetchone()
        if res is None:
            logger.warning('Failed to determine DB owner for current_database')
            return None
        return res[0]
def query_schema_existence(conn, schema_name):
with conn.cursor() as cur:
cur.execute('SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)',
[schema_name])
return cur.fetchone().exists
def query_schema_ownership(conn, schema_name):
    """Return the role name that owns the given schema, or None if unknown.

    Fixes: deprecated logger.warn() -> logger.warning(); typo 'determing'
    corrected in the log message.
    """
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        cur.execute("""SELECT rolname
                       FROM pg_roles
                       JOIN pg_namespace
                       ON (pg_namespace.nspowner = pg_roles.oid)
                       WHERE nspname = %s""", [schema_name])
        res = cur.fetchone()
        if res is None:
            logger.warning('Failed to determine owner for current_schema')
            return None
        return res[0]
def compute_analysis_end_date(conn, curr_date):
end_date = curr_date
if end_date is None:
with conn.cursor() as cursor:
monthly_country_child_tbl_list = child_table_names(conn, 'monthly_network_triplets_country')
year_month_list_in_child_tbls_records = table_invariants_list(conn, monthly_country_child_tbl_list,
['triplet_year', 'triplet_month'])
year_month_tuple_list = [(x.triplet_year, x.triplet_month) for x in year_month_list_in_child_tbls_records]
if len(year_month_tuple_list) > 0:
year_month_tuple_list.sort(key=lambda x: (x[0], x[1]), reverse=True)
latest_year, latest_month = year_month_tuple_list[0]
cursor.execute(sql.SQL("""SELECT MAX(last_seen)
FROM monthly_network_triplets_country
WHERE triplet_year = %s
AND triplet_month = %s"""), [latest_year, latest_month])
end_date = cursor.fetchone()[0]
# If there was no operator data imported, this can be None
if end_date is None:
end_date = datetime.date.today()
return end_date + datetime.timedelta(days=1)
def hash_string_64bit(s):
    """Deterministic polynomial string hash (base 31, seed 7) reduced to 63 bits."""
    acc = 7
    # str.encode yields the same byte values the original bytearray produced.
    for byte in s.encode('utf-8'):
        acc = acc * 31 + byte
    return acc % (2 ** 63 - 1)  # noqa: S001 Make sure it fits into a 64-bit bigint
def child_table_names(conn, parent_name):
with conn.cursor() as cursor:
cursor.execute("""SELECT c.relname AS child_tblname
FROM pg_inherits
JOIN pg_class AS c
ON (c.oid = inhrelid)
JOIN pg_class AS p
ON (p.oid = inhparent)
JOIN pg_catalog.pg_namespace nc
ON nc.oid = c.relnamespace
JOIN pg_catalog.pg_namespace np
ON np.oid = p.relnamespace
WHERE p.relname = %s
AND np.nspname = current_schema()
AND nc.nspname = current_schema()""",
[parent_name])
return [res.child_tblname for res in cursor]
def table_invariants_list(conn, table_names, invariant_col_names):
if len(table_names) == 0:
# Need to return an empty list to avoid doing an empty query and generating an error
return []
with conn.cursor() as cursor:
table_queries = []
for tblname in table_names:
table_queries.append(sql.SQL("""SELECT * FROM (SELECT {0} FROM {1} LIMIT 1) {2}""")
.format(sql.SQL(', ').join(map(sql.Identifier, invariant_col_names)),
sql.Identifier(tblname),
sql.Identifier('tmp_{0}'.format(tblname))))
cursor.execute(sql.SQL(' UNION ALL ').join(table_queries))
return cursor.fetchall()
def most_recently_run_condition_info(conn, cond_names, successful_only=False):
conditions_to_find = copy.copy(cond_names)
rv = {}
# Get list of metadata for dirbs-classify, sorted in reverse order
job_metadata_list = metadata.query_for_command_runs(conn, 'dirbs-classify')
for job_metadata in job_metadata_list:
# Loop back through recent dirbs-classify runs looking for the last time a classification
# ran successfully. This is indicates in the metadata by the presence of an entry in the matched_imei_counts.
# This can happen even though the overall dirbs-classify job failed
extra_metadata = job_metadata.extra_metadata
metadata_conditions = extra_metadata.get('conditions', {})
matched_imei_counts = extra_metadata.get('matched_imei_counts', {})
conditions_lookup = {c['label']: c for c in metadata_conditions}
for req_cond_name in copy.copy(conditions_to_find): # We modify the list in the loop, so take a copy
if req_cond_name in matched_imei_counts:
# If the name was in matched_imei_counts, it should always be in conditions as well
rv[req_cond_name] = {
'run_id': job_metadata.run_id,
'config': conditions_lookup[req_cond_name],
'last_successful_run': job_metadata.start_time
}
# Remove this req_cond_name from conditions_to_find since we already found latest metadata
conditions_to_find.remove(req_cond_name)
# Any items in conditions_to_find at this point are conditions for which we never ran a successful condition
# run
if not successful_only:
for missing_cond_name in conditions_to_find:
rv[missing_cond_name] = None
return rv
def filter_imei_list_sql_by_device_type(conn, exempted_device_types, imei_list_sql):
# If certain device types are exempted, first select the IMEIs passed in imei_list_sql query.
# These IMEIs are then joined against GSMA TAC db to get their device type.
# Finally, any IMEIs that belong to exempted device types are excluded.
return sql.SQL("""SELECT imei_norm
FROM (SELECT imei_norm,
SUBSTRING(imei_norm, 1, 8) AS tac
FROM ({0}) imeis) imeis_with_tac
JOIN gsma_data
USING (tac)
WHERE device_type NOT IN {1}
""").format(sql.SQL(imei_list_sql),
sql.Literal(tuple(exempted_device_types))).as_string(conn)
def format_datetime_for_report(timestamp_with_tz):
    """Format a timestamp as 'YYYY-MM-DD <locale time>' for reports, or None if absent."""
    if timestamp_with_tz is None:
        return None
    # %X is the locale-appropriate time representation.
    return timestamp_with_tz.strftime('%Y-%m-%d %X')
def validate_exempted_device_types(conn, config):
with conn.cursor() as cursor:
logger = logging.getLogger('dirbs.config')
exempted_device_types = config.region_config.exempted_device_types
if len(exempted_device_types) > 0:
cursor.execute('SELECT DISTINCT device_type FROM gsma_data')
all_device_types = [x.device_type for x in cursor]
if len(all_device_types) == 0:
logger.warning('RegionConfig: Ignoring setting exempted_device_types={0} as GSMA TAC database '
'not imported or no device types found.'.format(exempted_device_types))
else:
invalid_device_types = set(exempted_device_types) - set(all_device_types)
if len(invalid_device_types) > 0:
msg = 'RegionConfig: exempted_device_types \'{0}\' is/are not valid device type(s). ' \
'The valid GSMA device types are: \'{1}\''.format(invalid_device_types, all_device_types)
logger.error(msg)
raise ConfigParseException(msg)
def log_analysis_window(logger, analysis_start_date, analysis_end_date, start_message='',
start_date_inclusive=True, end_date_inclusive=False):
start_date_interval_notation = '[' if start_date_inclusive else '('
end_date_interval_notation = ']' if end_date_inclusive else ')'
logger.debug('{0} {sd_interval_notation}{start_date}, '
'{end_date}{ed_interval_notation}'.format(start_message,
sd_interval_notation=start_date_interval_notation,
start_date=analysis_start_date,
end_date=analysis_end_date,
ed_interval_notation=end_date_interval_notation))
def registration_list_status_filter_sql():
return sql.SQL('(status IS NULL OR status = \'whitelist\')')
def compute_amnesty_flags(app_config, curr_date):
in_amnesty_eval_period = True if app_config.amnesty_config.amnesty_enabled and \
curr_date <= app_config.amnesty_config.evaluation_period_end_date else False
in_amnesty_period = True if app_config.amnesty_config.amnesty_enabled and \
curr_date > app_config.amnesty_config.evaluation_period_end_date and \
curr_date <= app_config.amnesty_config.amnesty_period_end_date else False
return in_amnesty_eval_period, in_amnesty_period
def table_exists_sql(any_schema=False):
if not any_schema:
schema_filter_sql = sql.SQL('AND schemaname = current_schema()')
else:
schema_filter_sql = sql.SQL('')
return sql.SQL("""SELECT EXISTS (SELECT 1
FROM pg_tables
WHERE tablename = %s
{schema_filter_sql})""").format(schema_filter_sql=schema_filter_sql)
def is_table_partitioned(conn, tbl_name):
with conn.cursor() as cursor:
cursor.execute("""SELECT EXISTS (SELECT 1
FROM pg_class
JOIN pg_partitioned_table
ON pg_partitioned_table.partrelid = pg_class.oid
WHERE pg_class.relname = %s)""", [tbl_name])
return cursor.fetchone().exists
| true | true |
7900669253c901425b542ac7d6c9655a99875a86 | 351 | py | Python | code/blog/tests/test_models.py | elinguiuriel/djangoTDD | 67b09f6d345ca085fefa42ac11a6a1c2ee5d03dc | [
"BSD-2-Clause"
] | null | null | null | code/blog/tests/test_models.py | elinguiuriel/djangoTDD | 67b09f6d345ca085fefa42ac11a6a1c2ee5d03dc | [
"BSD-2-Clause"
] | null | null | null | code/blog/tests/test_models.py | elinguiuriel/djangoTDD | 67b09f6d345ca085fefa42ac11a6a1c2ee5d03dc | [
"BSD-2-Clause"
] | null | null | null | from django.test import TestCase
from blog.models import Entry
class EntryModelTest(TestCase):
    """Unit tests for the blog Entry model."""
    def test_string_representation(self):
        """str(entry) should be the entry's title."""
        entry = Entry(title="My entry title")
        self.assertEqual(str(entry), entry.title)
    def test_verbose_name_plural(self):
        """The admin-facing plural should be 'entries', not the default 'entrys'."""
        self.assertEqual(str(Entry._meta.verbose_name_plural), "entries")
| 27 | 73 | 0.735043 | from django.test import TestCase
from blog.models import Entry
class EntryModelTest(TestCase):
def test_string_representation(self):
entry = Entry(title="My entry title")
self.assertEqual(str(entry), entry.title)
def test_verbose_name_plural(self):
self.assertEqual(str(Entry._meta.verbose_name_plural), "entries")
| true | true |
79006754031830659990309ab74928cab97b7b2d | 1,625 | py | Python | utest/preferences/test_settings.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-06-27T08:48:24.000Z | 2019-06-27T08:48:24.000Z | utest/preferences/test_settings.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | utest/preferences/test_settings.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import unittest
from robotide.preferences.settings import SettingsMigrator
from robotide.utils import overrides
class SettingsMigrationTestCase(SettingsMigrator, unittest.TestCase):
    """Tests SettingsMigrator.migrate() by overriding the per-version migration hooks.

    The class subclasses both the migrator (so migrate() runs against it) and
    TestCase (so the overridden hooks can assert); boolean flags record which
    hooks fired.
    """
    def __init__(self, methodName='runTest'):
        # Skip SettingsMigrator.__init__ on purpose; only TestCase setup is wanted.
        unittest.TestCase.__init__(self, methodName=methodName)
    def setUp(self):
        """Reset migrator state and the called-flags before every test."""
        self._old_settings = {}
        self._default_settings = lambda:0
        self._from_0_to_1_called = False
        self._from_1_to_2_called = False
        self._merge_called = False
    def test_migration_from_0_to_2(self):
        """Starting at version 0, both migration steps and the merge must run."""
        self._old_settings[SettingsMigrator.SETTINGS_VERSION] = 0
        self.migrate()
        self.assertTrue(self._from_0_to_1_called)
        self.assertTrue(self._from_1_to_2_called)
        self.assertTrue(self._merge_called)
    def test_migration_from_1_to_2(self):
        """Starting at version 1, only the 1->2 step and the merge must run."""
        self._old_settings[SettingsMigrator.SETTINGS_VERSION] = 1
        self.migrate()
        self.assertFalse(self._from_0_to_1_called)
        self.assertTrue(self._from_1_to_2_called)
        self.assertTrue(self._merge_called)
    @overrides(SettingsMigrator)
    def migrate_from_0_to_1(self, settings):
        """Stub hook: record the call and bump the stored version to 1."""
        self.assertEqual(self._old_settings, settings)
        self._from_0_to_1_called = True
        settings[SettingsMigrator.SETTINGS_VERSION] = 1
    @overrides(SettingsMigrator)
    def migrate_from_1_to_2(self, settings):
        """Stub hook: record the call and bump the stored version to 2."""
        self.assertEqual(self._old_settings, settings)
        self._from_1_to_2_called = True
        settings[SettingsMigrator.SETTINGS_VERSION] = 2
    def merge(self):
        """Stub merge: just record that migrate() reached the merge phase."""
        self._merge_called = True
if __name__ == '__main__':
unittest.main()
| 33.163265 | 69 | 0.721231 | import unittest
from robotide.preferences.settings import SettingsMigrator
from robotide.utils import overrides
class SettingsMigrationTestCase(SettingsMigrator, unittest.TestCase):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
def setUp(self):
self._old_settings = {}
self._default_settings = lambda:0
self._from_0_to_1_called = False
self._from_1_to_2_called = False
self._merge_called = False
def test_migration_from_0_to_2(self):
self._old_settings[SettingsMigrator.SETTINGS_VERSION] = 0
self.migrate()
self.assertTrue(self._from_0_to_1_called)
self.assertTrue(self._from_1_to_2_called)
self.assertTrue(self._merge_called)
def test_migration_from_1_to_2(self):
self._old_settings[SettingsMigrator.SETTINGS_VERSION] = 1
self.migrate()
self.assertFalse(self._from_0_to_1_called)
self.assertTrue(self._from_1_to_2_called)
self.assertTrue(self._merge_called)
@overrides(SettingsMigrator)
def migrate_from_0_to_1(self, settings):
self.assertEqual(self._old_settings, settings)
self._from_0_to_1_called = True
settings[SettingsMigrator.SETTINGS_VERSION] = 1
@overrides(SettingsMigrator)
def migrate_from_1_to_2(self, settings):
self.assertEqual(self._old_settings, settings)
self._from_1_to_2_called = True
settings[SettingsMigrator.SETTINGS_VERSION] = 2
def merge(self):
self._merge_called = True
if __name__ == '__main__':
unittest.main()
| true | true |
790068413a8a60f051fb4e9e6b1e00b0cfcde5e2 | 633 | py | Python | lexicon.py | philipok-1/raspberry-slack | 269ad6221e09d22deeb842073c53e33d0edb9c34 | [
"MIT"
] | 5 | 2019-04-03T05:55:40.000Z | 2020-03-01T13:46:28.000Z | lexicon.py | philipok-1/raspberry-slack | 269ad6221e09d22deeb842073c53e33d0edb9c34 | [
"MIT"
] | 1 | 2017-05-17T13:13:12.000Z | 2017-05-17T15:01:15.000Z | lexicon.py | philipok-1/raspberry-slack | 269ad6221e09d22deeb842073c53e33d0edb9c34 | [
"MIT"
] | 4 | 2017-12-28T07:48:00.000Z | 2020-03-01T13:46:33.000Z | import logging
import sys
import re
import time
import random
import utils
logger = utils.loggerMaster('slack.lexicon')
def response(type):
    """Return a random canned phrase of the given type.

    :param type: phrase category key, e.g. 'greetings' or 'farewells'
        (name kept for backward compatibility although it shadows the builtin)
    :return: a randomly chosen phrase, or an error string for unknown types
    """
    phrases={'greetings':[", welcome back", "Hi there", "Good to see you again", "Hello again", "hi"],
             'farewells':['bye']
            }
    try:
        # random.choice replaces the manual len()/randint indexing.
        return random.choice(phrases[type])
    except KeyError:
        logger.error('lexicon read error')
        return ('There is an error in the lexicon file you idiot')
def main():
    """Print a short usage notice when the module is run directly."""
    # Parenthesised print is valid under both Python 2 and Python 3; the
    # original Python-2-only print statement broke py3 interpreters.
    print("This is a module designed to be used with RaspiSlack")
if __name__ == "__main__":
main()
| 18.617647 | 102 | 0.649289 | import logging
import sys
import re
import time
import random
import utils
logger = utils.loggerMaster('slack.lexicon')
def response(type):
phrases={'greetings':[", welcome back", "Hi there", "Good to see you again", "Hello again", "hi"],
'farewells':['bye']
}
try:
length=len(phrases[type])
return phrases[type][(random.randint(0,length-1))]
except KeyError:
logger.error('lexicon read error')
return ('There is an error in the lexicon file you idiot')
def main():
print "This is a module designed to be used with RaspiSlack"
if __name__ == "__main__":
main()
| false | true |
7900684fdd94602e440c03daa9e2ca792e7b0e55 | 1,408 | py | Python | examples/drawing-circuits/designIFFL2.py | PhilippBoeing/synbioweaver | 23efdf79a325885a43e82ba13e6ccefb8eb3d733 | [
"MIT"
] | null | null | null | examples/drawing-circuits/designIFFL2.py | PhilippBoeing/synbioweaver | 23efdf79a325885a43e82ba13e6ccefb8eb3d733 | [
"MIT"
] | null | null | null | examples/drawing-circuits/designIFFL2.py | PhilippBoeing/synbioweaver | 23efdf79a325885a43e82ba13e6ccefb8eb3d733 | [
"MIT"
] | null | null | null | from synbioweaver.core import *
from synbioweaver.aspects.designRulesAspect import *
from synbioweaver.aspects.printStackAspect import *
from synbioweaver.aspects.pigeonOutputAspect import *
declareNewMolecule('A')
declareNewMolecule('B')
declareNewMolecule('C')
declareNewMolecule('In')
declareNewPart('t1',Terminator)
declareNewPart('t2',Terminator)
declareNewPart('t3',Terminator)
declareNewPart('r1',RBS )
declareNewPart('r2',RBS )
declareNewPart('r3',RBS )
declareNewPart('cA',CodingRegion,moleculesAfter=[A])
declareNewPart('cB',CodingRegion,moleculesAfter=[B])
declareNewPart('cC',CodingRegion,moleculesAfter=[C])
declareNewPart('Pin', PositivePromoter, [In])
declareNewPart('Pb', NegativePromoter, [A] )
declareNewPart('Pc', HybridPromoter, [A,B], regulatorInfoMap={A:False,B:False} )
class simpleCircuit(Circuit):
    """IFFL-style circuit: In activates A; A represses B; A and B both repress C.

    Built from the parts declared above: Pin (positive promoter for In),
    Pb (negative promoter regulated by A) and Pc (hybrid promoter with A and
    B both acting as repressors).
    """
    def mainCircuit(self):
        """Assemble three transcription units (promoter, RBS, CDS, terminator)."""
        self.createMolecule(In)
        self.createMolecule(B)
        # Unit 1: In-inducible expression of A.
        self.addPart(Pin)
        self.addPart(r1)
        self.addPart(cA)
        self.addPart(t1)
        # Unit 2: A-repressed expression of B.
        self.addPart(Pb)
        self.addPart(r2)
        self.addPart(cB)
        self.addPart(t2)
        # Unit 3: expression of C repressed by both A and B.
        self.addPart(Pc)
        self.addPart(r3)
        self.addPart(cC)
        self.addPart(t3)
#compiledDesign = Weaver(constGFP, DesignRules, PrintStack, PigeonOutput).output()
compiledDesign = Weaver(simpleCircuit, PigeonOutput).output()
compiledDesign.printPigeonOutput()
| 30.608696 | 82 | 0.725142 | from synbioweaver.core import *
from synbioweaver.aspects.designRulesAspect import *
from synbioweaver.aspects.printStackAspect import *
from synbioweaver.aspects.pigeonOutputAspect import *
declareNewMolecule('A')
declareNewMolecule('B')
declareNewMolecule('C')
declareNewMolecule('In')
declareNewPart('t1',Terminator)
declareNewPart('t2',Terminator)
declareNewPart('t3',Terminator)
declareNewPart('r1',RBS )
declareNewPart('r2',RBS )
declareNewPart('r3',RBS )
declareNewPart('cA',CodingRegion,moleculesAfter=[A])
declareNewPart('cB',CodingRegion,moleculesAfter=[B])
declareNewPart('cC',CodingRegion,moleculesAfter=[C])
declareNewPart('Pin', PositivePromoter, [In])
declareNewPart('Pb', NegativePromoter, [A] )
declareNewPart('Pc', HybridPromoter, [A,B], regulatorInfoMap={A:False,B:False} )
class simpleCircuit(Circuit):
def mainCircuit(self):
self.createMolecule(In)
self.createMolecule(B)
self.addPart(Pin)
self.addPart(r1)
self.addPart(cA)
self.addPart(t1)
self.addPart(Pb)
self.addPart(r2)
self.addPart(cB)
self.addPart(t2)
self.addPart(Pc)
self.addPart(r3)
self.addPart(cC)
self.addPart(t3)
compiledDesign = Weaver(simpleCircuit, PigeonOutput).output()
compiledDesign.printPigeonOutput()
| true | true |
7900699d2665ed42b62df5e1307db3aeb998a0a8 | 7,429 | py | Python | Repeat/CoMPT/utils_node.py | jcchan23/SAIL | 878c59e9f1b4e6df3e2424c8213c1df25459e950 | [
"MIT"
] | 7 | 2022-01-11T14:09:31.000Z | 2022-03-22T05:39:14.000Z | Repeat/CoMPT/utils_node.py | jcchan23/SAIL | 878c59e9f1b4e6df3e2424c8213c1df25459e950 | [
"MIT"
] | 2 | 2022-03-01T08:32:56.000Z | 2022-03-09T02:58:01.000Z | Repeat/CoMPT/utils_node.py | jcchan23/SAIL | 878c59e9f1b4e6df3e2424c8213c1df25459e950 | [
"MIT"
] | 1 | 2022-03-09T19:07:45.000Z | 2022-03-09T19:07:45.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : utils_node.py
@Time : 2022/03/08 14:35:13
@Author : Jianwen Chen
@Version : 1.0
@Contact : chenjw48@mail2.sysu.edu.cn
@License : (C)Copyright 2021-2022, SAIL-Lab
'''
######################################## import area ########################################
# common library
import os
import random
import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm
from sklearn import metrics
from torch.optim.lr_scheduler import _LRScheduler
######################################## function area ########################################
def seed_everything(seed=2021):
    """Make runs reproducible by seeding Python, NumPy and PyTorch RNGs.

    :param seed: integer seed applied to every random source
    """
    seed_text = str(seed)
    os.environ['PYTHONHASHSEED'] = seed_text
    # Seed every random source in one pass.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seed_fn(seed)
    # Trade cuDNN autotuning for reproducible kernel selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def initialize_weights(model):
    """Initialize a model's weight matrices in place with Xavier-normal values.

    Parameters with fewer than two dimensions (e.g. biases) are left untouched.

    :param model: An nn.Module.
    """
    weight_tensors = (p for p in model.parameters() if p.dim() > 1)
    for weight in weight_tensors:
        nn.init.xavier_normal_(weight)
def loop(data_loader, model, optimizer, scheduler, device):
    """Run one epoch over data_loader; trains when optimizer is given, else evaluates.

    :param data_loader: yields (smiles, mols, node_features, edge_features,
        distance_matrix, labels) batches
    :param model: network called as model(nodes, edges, distances, masks, device)
    :param optimizer: torch optimizer, or None for evaluation mode
    :param scheduler: per-batch LR scheduler (e.g. NoamLR), or None
    :param device: torch device to move tensors to
    :return: dict with 'mae', 'r2' and averaged 'loss'

    NOTE(review): labels equal to 0.0 are treated as padding/missing and are
    excluded from the collected metrics (see cal_loss and the inner filter
    below) — confirm 0.0 is never a real target value.
    """
    batch_size = data_loader.batch_size
    # Show a progress bar only during training.
    data_loader = tqdm(data_loader) if optimizer is not None else data_loader
    loss_sum, y_true, y_pred = 0.0, list(), list()
    for batch in data_loader:
        smiles, mols, batch_node_features, batch_edge_features, batch_distance_matrix, labels = batch
        # add mask: a node is real iff its feature vector is not all-zero
        batch_masks = torch.sum(torch.abs(batch_node_features), dim=-1) != 0
        # (batch, max_length, node_dim)
        batch_node_features = batch_node_features.to(device)
        # (batch, max_length, max_length, edge_dim)
        batch_edge_features = batch_edge_features.to(device)
        # (batch, max_length, max_length)
        batch_distance_matrix = batch_distance_matrix.to(device)
        # (batch, max_length)
        batch_masks = batch_masks.to(device)
        # (batch, max_length, 1)
        labels = labels.to(device)
        # (batch, max_length, 1)
        outputs = model(batch_node_features, batch_edge_features, batch_distance_matrix, batch_masks, device)
        # loss calculation (masked MAE; zero labels are ignored)
        loss = cal_loss(y_true=labels, y_pred=outputs, device=device)
        loss_sum += loss.item()
        if optimizer is not None:
            # clear gradients for this training step
            optimizer.zero_grad()
            # back propagation, compute gradients
            loss.backward()
            # apply gradients
            optimizer.step()
        # NormLR need step every batch
        if scheduler is not None:
            scheduler.step()
        # collect result: keep only positions with non-zero (non-padding) labels
        labels = labels.detach().cpu().numpy()
        outputs = outputs.detach().cpu().numpy()
        y_true.append([])
        y_pred.append([])
        for label, output in zip(labels, outputs):
            label, output = label.flatten(), output.flatten()
            for l, o in zip(label, output):
                if l != 0.0:
                    y_true[-1].append(l)
                    y_pred[-1].append(o)
        # clear cuda cache
        torch.cuda.empty_cache()
    # metric calculation over all collected (non-padding) positions
    results = cal_metric(y_true=y_true, y_pred=y_pred)
    results['loss'] = loss_sum / (len(data_loader) * batch_size)
    return results
def cal_loss(y_true, y_pred, device):
    """Masked mean absolute error; positions where y_true == 0.0 are ignored.

    :param y_true: target tensor (0.0 marks padding/missing entries)
    :param y_pred: prediction tensor, same shape as y_true
    :param device: unused, kept for signature compatibility
    :return: scalar tensor with the masked MAE
    """
    true_flat = y_true.flatten()
    pred_flat = y_pred.flatten()
    # 1.0 where the label is real, 0.0 where it is padding.
    mask = (true_flat != 0.0).to(true_flat.dtype)
    return torch.sum(torch.abs(true_flat - pred_flat) * mask) / torch.sum(mask)
def cal_metric(y_true, y_pred):
    """Compute MAE and R^2 over the flattened per-sample value lists.

    :param y_true: list of per-sample lists of target values
    :param y_pred: list of per-sample lists of predicted values
    :return: dict with keys 'mae' and 'r2'
    """
    flat_true = np.concatenate(y_true, axis=-1)
    flat_pred = np.concatenate(y_pred, axis=-1)
    return {'mae': metrics.mean_absolute_error(flat_true, flat_pred),
            'r2': metrics.r2_score(flat_true, flat_pred)}
class NoamLR(_LRScheduler):
    """
    Noam learning rate scheduler with piecewise linear increase and exponential decay.
    The learning rate increases linearly from init_lr to max_lr over the course of
    the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch).
    Then the learning rate decreases exponentially from max_lr to final_lr over the
    course of the remaining total_steps - warmup_steps (where total_steps =
    total_epochs * steps_per_epoch). This is roughly based on the learning rate
    schedule from Attention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762).
    """
    def __init__(self, optimizer, warmup_epochs, total_epochs, steps_per_epoch, init_lr, max_lr, final_lr):
        """
        Initializes the learning rate scheduler.
        :param optimizer: A PyTorch optimizer.
        :param warmup_epochs: The number of epochs during which to linearly increase the learning rate.
        :param total_epochs: The total number of epochs.
        :param steps_per_epoch: The number of steps (batches) per epoch.
        :param init_lr: The initial learning rate.
        :param max_lr: The maximum learning rate (achieved after warmup_epochs).
        :param final_lr: The final learning rate (achieved after total_epochs).
        """
        # All schedule parameters are per-param-group sequences and must align.
        assert len(optimizer.param_groups) == len(warmup_epochs) == len(total_epochs) == len(init_lr) == len(max_lr) == len(final_lr)
        self.num_lrs = len(optimizer.param_groups)
        self.optimizer = optimizer
        self.warmup_epochs = np.array(warmup_epochs)
        self.total_epochs = np.array(total_epochs)
        self.steps_per_epoch = steps_per_epoch
        self.init_lr = np.array(init_lr)
        self.max_lr = np.array(max_lr)
        self.final_lr = np.array(final_lr)
        self.current_step = 0
        # NOTE(review): self.lr aliases the caller's init_lr list and step()
        # mutates it in place — confirm callers do not reuse that list.
        self.lr = init_lr
        self.warmup_steps = (self.warmup_epochs * self.steps_per_epoch).astype(int)
        self.total_steps = self.total_epochs * self.steps_per_epoch
        # Per-step LR delta during warmup, per param group.
        self.linear_increment = (self.max_lr - self.init_lr) / self.warmup_steps
        # Per-step decay factor so max_lr reaches final_lr at total_steps.
        self.exponential_gamma = (self.final_lr / self.max_lr) ** (1 / (self.total_steps - self.warmup_steps))
        super(NoamLR, self).__init__(optimizer)
    def get_lr(self):
        """Gets a list of the current learning rates."""
        return list(self.lr)
    def step(self, current_step: int = None):
        """
        Updates the learning rate by taking a step.
        :param current_step: Optionally specify what step to set the learning rate to.
        If None, current_step = self.current_step + 1.
        """
        if current_step is not None:
            self.current_step = current_step
        else:
            self.current_step += 1
        for i in range(self.num_lrs):
            # Warmup phase: linear ramp from init_lr to max_lr.
            if self.current_step <= self.warmup_steps[i]:
                self.lr[i] = self.init_lr[i] + self.current_step * self.linear_increment[i]
            # Decay phase: exponential decay from max_lr towards final_lr.
            elif self.current_step <= self.total_steps[i]:
                self.lr[i] = self.max_lr[i] * (self.exponential_gamma[i] ** (self.current_step - self.warmup_steps[i]))
            else:  # theoretically this case should never be reached since training should stop at total_steps
                self.lr[i] = self.final_lr[i]
            self.optimizer.param_groups[i]['lr'] = self.lr[i]
| 37.331658 | 133 | 0.63481 | true | true | |
79006a3ce2268eb089088ec3e60ec1ae5ecf4990 | 7,902 | py | Python | training.py | aasir22/tools_classification | f5a2606f5fa07c1ebc161c467d17f4e7a04c5ebb | [
"MIT"
] | null | null | null | training.py | aasir22/tools_classification | f5a2606f5fa07c1ebc161c467d17f4e7a04c5ebb | [
"MIT"
] | null | null | null | training.py | aasir22/tools_classification | f5a2606f5fa07c1ebc161c467d17f4e7a04c5ebb | [
"MIT"
] | null | null | null | from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from datetime import datetime
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from Logger.app_logger import App_logger
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
class Training:
    """Fine-tunes a 5-class VGG19 transfer-learning classifier from
    folder-per-class image datasets and appends progress/errors to
    Training_Logs/ModelTrainingLog.txt."""

    def __init__(self, train_path, test_path, val_path):
        """
        :param train_path: root folder of the training set (one sub-folder per class)
        :param test_path: root folder of the test set
        :param val_path: root folder of the validation set
        """
        self.train_path = train_path
        self.test_path = test_path
        self.val_path = val_path
        self.file_object = open("Training_Logs/ModelTrainingLog.txt", 'a+')
        self.log_object = App_logger()

    def _load_images(self, root_path):
        """Return a list of 224x224 BGR arrays for every readable image under
        root_path/<class>/.

        Unreadable files and images less than 224 pixels high are deleted from
        disk and skipped (same data-cleaning behaviour as before, previously
        copy-pasted three times for train/test/val).
        """
        images = []
        for folder in os.listdir(root_path):
            sub_path = root_path + "/" + folder
            for img in os.listdir(sub_path):
                image_path = sub_path + "/" + img
                img_arr = cv2.imread(image_path)
                if img_arr is None or img_arr.shape[0] < 224:
                    # Corrupt or too-small image: remove so later runs stay clean.
                    os.remove(image_path)
                    continue
                images.append(cv2.resize(img_arr, (224, 224)))
        return images

    def train(self):
        """Load the datasets, fine-tune VGG19, save the model, plot the learning
        curves and evaluate on the test set. Logs and re-raises on failure."""
        self.log_object.log(self.file_object, "Entered in to train method in Training class.Training started")
        try:
            # The three dataset splits share the same loading/cleaning logic.
            x_train = self._load_images(self.train_path)
            x_test = self._load_images(self.test_path)
            x_val = self._load_images(self.val_path)
            self.log_object.log(self.file_object, "Entered in to train method in Training class.train,test,val split successfull")

            # Normalise pixel values to [0, 1].
            train_x = np.array(x_train) / 255.0
            test_x = np.array(x_test) / 255.0
            val_x = np.array(x_val) / 255.0

            # The generators are only used to recover the sparse integer class
            # labels that match the folder ordering on disk.
            train_datagen = ImageDataGenerator(rescale=1. / 255)
            test_datagen = ImageDataGenerator(rescale=1. / 255)
            val_datagen = ImageDataGenerator(rescale=1. / 255)
            training_set = train_datagen.flow_from_directory(self.train_path,
                                                             target_size=(224, 224),
                                                             batch_size=32,
                                                             class_mode='sparse')
            test_set = test_datagen.flow_from_directory(self.test_path,
                                                        target_size=(224, 224),
                                                        batch_size=32,
                                                        class_mode='sparse')
            val_set = val_datagen.flow_from_directory(self.val_path,
                                                      target_size=(224, 224),
                                                      batch_size=32,
                                                      class_mode='sparse')
            train_y = training_set.classes
            test_y = test_set.classes
            val_y = val_set.classes

            # Frozen ImageNet VGG19 backbone + trainable 5-way softmax head.
            IMAGE_SIZE = [224, 224]
            vgg = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
            self.log_object.log(self.file_object, "Entered in to train method in Training class. Model successfully initialized")
            for layer in vgg.layers:
                layer.trainable = False
            x = Flatten()(vgg.output)
            prediction = Dense(5, activation='softmax')(x)
            model = Model(inputs=vgg.input, outputs=prediction)
            model.summary()
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer='adam', metrics=['accuracy'])
            self.log_object.log(self.file_object, "Entered in to train method in Training class.Model compile successfull")

            # Keep only the best checkpoint by validation accuracy.
            file_path = 'vgg19_model/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5'
            self.log_object.log(self.file_object, "check point directory created")
            check_point = ModelCheckpoint(file_path, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')

            start = datetime.now()
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.Training start time {start}")
            history = model.fit(train_x, train_y,
                                validation_data=(val_x, val_y),
                                epochs=20,
                                callbacks=[check_point],
                                batch_size=64, shuffle=True)
            duration = datetime.now() - start
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.Total time taken is {duration}")
            model.save('mech_tools_model.h5')
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.model saved successfully")

            # accuracies
            plt.plot(history.history['accuracy'], label='train acc')
            plt.plot(history.history['val_accuracy'], label='val acc')
            plt.legend()
            plt.savefig('vgg-acc-rps-1.png')
            # loss — on a fresh figure: previously the loss curves were drawn on
            # top of the accuracy chart because no new figure was started.
            plt.figure()
            plt.plot(history.history['loss'], label='train loss')
            plt.plot(history.history['val_loss'], label='val loss')
            plt.legend()
            plt.savefig('vgg-loss-rps-1.png')

            self.log_object.log(self.file_object, "Entered in to train method in Training class.model evaluation started")
            model.evaluate(test_x, test_y, batch_size=32)
            # Hard class predictions for the report and confusion matrix.
            y_pred = model.predict(test_x)
            y_pred = np.argmax(y_pred, axis=1)
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.classification report {classification_report(y_pred, test_y)}")
            self.log_object.log(self.file_object, f"Entered in to train method in Training class.confusion matrix is{confusion_matrix(y_pred, test_y)}")
        except Exception as e:
            # logging the unsuccessful Training
            self.log_object.log(self.file_object, 'Unsuccessful End of Training')
            self.log_object.log(self.file_object, f"exception occured.exception is {e}")
            # Re-raise the original exception: the old `raise Exception` threw a
            # bare Exception and discarded the real traceback.
            raise
        finally:
            # Close the log file on success AND failure; previously it was only
            # closed on the success path, leaking the handle after an error.
            self.file_object.close()
if __name__ == "__main__":
    # Folder-per-class dataset roots (one sub-folder per tool class).
    train_path = "final_dataset/train"
    test_path = "final_dataset/test"
    val_path = "final_dataset/val"
    train_model = Training(train_path, test_path, val_path)
    # Runs the full load -> fit -> evaluate pipeline; logs go to Training_Logs/.
    train_model.train()
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from datetime import datetime
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from Logger.app_logger import App_logger
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
class Training:
def __init__(self,train_path,test_path,val_path):
self.train_path = train_path
self.test_path = test_path
self.val_path = val_path
self.file_object = open("Training_Logs/ModelTrainingLog.txt", 'a+')
self.log_object = App_logger()
def train(self):
self.log_object.log(self.file_object,"Entered in to train method in Training class.Training started")
try:
x_train = []
for folder in os.listdir(self.train_path):
sub_path = self.train_path + "/" + folder
for img in os.listdir(sub_path):
image_path = sub_path + "/" + img
img_arr = cv2.imread(image_path)
if img_arr is None:
os.remove(image_path)
continue
elif img_arr.shape[0] < 224:
os.remove(image_path)
continue
else:
img_arr = cv2.resize(img_arr, (224, 224))
x_train.append(img_arr)
x_test = []
for folder in os.listdir(self.test_path):
sub_path = self.test_path + "/" + folder
for img in os.listdir(sub_path):
image_path = sub_path + "/" + img
img_arr = cv2.imread(image_path)
if img_arr is None:
os.remove(image_path)
continue
elif img_arr.shape[0] < 224:
os.remove(image_path)
continue
else:
img_arr = cv2.resize(img_arr, (224, 224))
x_test.append(img_arr)
x_val = []
for folder in os.listdir(self.val_path):
sub_path = self.val_path + "/" + folder
for img in os.listdir(sub_path):
image_path = sub_path + "/" + img
img_arr = cv2.imread(image_path)
if img_arr is None:
os.remove(image_path)
continue
elif img_arr.shape[0] < 224:
os.remove(image_path)
continue
else:
img_arr = cv2.resize(img_arr, (224, 224))
x_val.append(img_arr)
self.log_object.log(self.file_object, "Entered in to train method in Training class.train,test,val split successfull")
train_x = np.array(x_train) / 255.0
test_x = np.array(x_test) / 255.0
val_x = np.array(x_val) / 255.0
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)
val_datagen = ImageDataGenerator(rescale=1. / 255)
training_set = train_datagen.flow_from_directory(self.train_path,
target_size=(224, 224),
batch_size=32,
class_mode='sparse')
test_set = test_datagen.flow_from_directory(self.test_path,
target_size=(224, 224),
batch_size=32,
class_mode='sparse')
val_set = val_datagen.flow_from_directory(self.val_path,
target_size=(224, 224),
batch_size=32,
class_mode='sparse')
train_y = training_set.classes
test_y = test_set.classes
val_y = val_set.classes
IMAGE_SIZE = [224, 224]
vgg = VGG19(input_shape= IMAGE_SIZE + [3],weights='imagenet',include_top=False)
self.log_object.log(self.file_object, "Entered in to train method in Training class. Model successfully initialized")
for layer in vgg.layers:
layer.trainable = False
x = Flatten() (vgg.output)
prediction = Dense(5 ,activation='softmax') (x)
model = Model(inputs=vgg.input,outputs = prediction)
model.summary()
model.compile(loss = 'sparse_categorical_crossentropy',
optimizer='adam',metrics=['accuracy'])
self.log_object.log(self.file_object, "Entered in to train method in Training class.Model compile successfull")
file_path = 'vgg19_model/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5'
self.log_object.log(self.file_object,"check point directory created")
check_point = ModelCheckpoint(file_path,monitor='val_accuracy', verbose=1,save_best_only=True, mode='max')
start = datetime.now()
self.log_object.log(self.file_object, f"Entered in to train method in Training class.Training start time {start}")
history = model.fit(train_x,train_y,
validation_data= (val_x,val_y),
epochs=20,
callbacks = [check_point],
batch_size=64, shuffle=True)
duration = datetime.now() - start
self.log_object.log(self.file_object, f"Entered in to train method in Training class.Total time taken is {duration}")
model.save('mech_tools_model.h5')
self.log_object.log(self.file_object, f"Entered in to train method in Training class.model saved successfully")
plt.plot(history.history['accuracy'], label='train acc')
plt.plot(history.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('vgg-acc-rps-1.png')
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('vgg-loss-rps-1.png')
self.log_object.log(self.file_object, "Entered in to train method in Training class.model evaluation started")
model.evaluate(test_x, test_y, batch_size=32)
y_pred = model.predict(test_x)
y_pred = np.argmax(y_pred, axis=1)
self.log_object.log(self.file_object, f"Entered in to train method in Training class.classification report {classification_report(y_pred, test_y)}")
self.log_object.log(self.file_object, f"Entered in to train method in Training class.confusion matrix is{confusion_matrix(y_pred, test_y)}")
except Exception as e:
self.log_object.log(self.file_object, 'Unsuccessful End of Training')
self.log_object.log(self.file_object,f"exception occured.exception is {e}")
raise Exception
self.file_object.close()
if __name__ == "__main__":
train_path = "final_dataset/train"
test_path = "final_dataset/test"
val_path = "final_dataset/val"
train_model = Training(train_path, test_path, val_path)
train_model.train() | true | true |
79006ac2bd85c2402d452700b18cea1ff3b1b152 | 249 | py | Python | gehomesdk/erd/values/advantium/erd_advantium_kitchen_timer_min_max.py | willhayslett/gehome | 7e407a1d31cede1453656eaef948332e808484ea | [
"MIT"
] | 17 | 2021-05-18T01:58:06.000Z | 2022-03-22T20:49:32.000Z | gehomesdk/erd/values/advantium/erd_advantium_kitchen_timer_min_max.py | willhayslett/gehome | 7e407a1d31cede1453656eaef948332e808484ea | [
"MIT"
] | 29 | 2021-05-17T21:43:16.000Z | 2022-02-28T22:50:48.000Z | gehomesdk/erd/values/advantium/erd_advantium_kitchen_timer_min_max.py | willhayslett/gehome | 7e407a1d31cede1453656eaef948332e808484ea | [
"MIT"
] | 9 | 2021-05-17T04:40:58.000Z | 2022-02-02T17:26:13.000Z | from datetime import timedelta
from typing import NamedTuple, Optional
class ErdAdvantiumKitchenTimerMinMax(NamedTuple):
    """Defines min/max kitchen timer settings"""
    # Shortest timer duration accepted.
    min_time: timedelta
    # Longest timer duration accepted.
    max_time: timedelta
    # Unparsed source value, if any — presumably the raw ERD payload string;
    # NOTE(review): confirm against the converter that builds this tuple.
    raw_value: Optional[str]
| 24.9 | 49 | 0.7751 | from datetime import timedelta
from typing import NamedTuple, Optional
class ErdAdvantiumKitchenTimerMinMax(NamedTuple):
min_time: timedelta
max_time: timedelta
raw_value: Optional[str]
| true | true |
79006ae664ef405a71cf1ed65d120de34a6647ad | 4,060 | py | Python | burclar/__init__.py | The-Special/Burclar | 3f36085ff887bdd49c1838e03a8335079dee0e2d | [
"MIT"
] | 14 | 2021-04-04T12:20:35.000Z | 2021-11-10T23:59:49.000Z | burclar/__init__.py | The-Special/Burclar | 3f36085ff887bdd49c1838e03a8335079dee0e2d | [
"MIT"
] | 1 | 2021-04-03T18:56:21.000Z | 2021-04-03T18:56:21.000Z | burclar/__init__.py | The-Special/Burclar | 3f36085ff887bdd49c1838e03a8335079dee0e2d | [
"MIT"
] | 4 | 2021-04-04T12:36:40.000Z | 2021-09-17T09:18:16.000Z | import requests
from bs4 import BeautifulSoup
"""
Bu modül burçlar ile ilgilenen arkadaşlarımın işine yarayacaktır.
Çok basit bir kullanımı mevcuttur.
Bir sorunuz olursa seve seve yardım etmek isterim profilimdeki linklerden bana ulaşabilirsiniz.
"""
def makeAPIRequest(path: str, type: str) -> str:
    """Fetch the horoscope reading for sign *path* from mynet.com.

    *type* may be "gunluk" (daily) or "haftalik" (weekly); any other value
    falls back to "gunluk".
    """
    if type not in ("gunluk", "haftalik"):
        type = "gunluk"
    url = f"https://www.mynet.com/kadin/burclar-astroloji/{path}-burcu-{type}-yorumu.html"
    response = requests.get(url)
    page = BeautifulSoup(response.content, "html.parser")
    containers = page.find_all("div", {"class": "detail-content-inner"})
    children = containers[0].contents
    # The reading sits 5 elements from the end of the container's children.
    return children[-5].text
# URL slugs for the twelve zodiac signs as used on mynet.com.
burcList = ["yengec", "koc", "boga", "ikizler", "aslan",
            "basak", "terazi", "akrep", "yay", "oglak", "kova", "balik"]
# Comma-separated form, used in validation error messages.
burcStr = ",".join(burcList)
class burclar:
    """Daily ("gunluk") and weekly ("haftalik") horoscope readings.

    All methods are static and meant to be called on the class itself, e.g.
    ``burclar.koc()`` or ``burclar.burc("koc", "haftalik")``. The
    ``@staticmethod`` decorators were missing before: class-level calls worked,
    but calling through an instance would have bound the instance to ``type``
    and built a wrong URL.
    """

    @staticmethod
    def burc(name: str, type="gunluk") -> str:
        """Generic accessor: fetch the reading for any sign by slug name."""
        if name not in burcList:
            raise Exception(f"Geçerli bir burç giriniz. ({burcStr})")
        return makeAPIRequest(name, type)

    @staticmethod
    def yengec(type="gunluk") -> str:
        return makeAPIRequest("yengec", type)

    @staticmethod
    def koc(type="gunluk") -> str:
        return makeAPIRequest("koc", type)

    @staticmethod
    def boga(type="gunluk") -> str:
        return makeAPIRequest("boga", type)

    @staticmethod
    def ikizler(type="gunluk") -> str:
        return makeAPIRequest("ikizler", type)

    @staticmethod
    def aslan(type="gunluk") -> str:
        return makeAPIRequest("aslan", type)

    @staticmethod
    def basak(type="gunluk") -> str:
        return makeAPIRequest("basak", type)

    @staticmethod
    def terazi(type="gunluk") -> str:
        return makeAPIRequest("terazi", type)

    @staticmethod
    def akrep(type="gunluk") -> str:
        return makeAPIRequest("akrep", type)

    @staticmethod
    def yay(type="gunluk") -> str:
        return makeAPIRequest("yay", type)

    @staticmethod
    def oglak(type="gunluk") -> str:
        return makeAPIRequest("oglak", type)

    @staticmethod
    def kova(type="gunluk") -> str:
        return makeAPIRequest("kova", type)

    @staticmethod
    def balik(type="gunluk") -> str:
        return makeAPIRequest("balik", type)
"""Haftalik ve Günlük yorumların bitiş kısımı"""
def makeAPIRequestOz(path: str, type: str) -> str:
    """Fetch the personality-traits ("ozellikleri") text for sign *path*.

    *type* is accepted for interface symmetry with makeAPIRequest but ignored:
    the traits page has only one variant. (The old body normalised it to
    "ozellikleri" and then never used it — dead code, removed.)
    """
    y = requests.get(
        f"https://www.mynet.com/kadin/burclar-astroloji/{path}-burcu-ozellikleri.html")
    soupOz = BeautifulSoup(y.content, "html.parser")
    dataOz = soupOz.find_all("div", {"class": "medyanet-content"})
    # The traits text sits 12 elements from the end of the container's children.
    burcOz = dataOz[0].contents[len(dataOz[0].contents) - 12]
    return burcOz.text
class burclarOz:
    """Personality-traits ("ozellikleri") readings for each zodiac sign.

    All methods are static and meant to be called on the class itself, e.g.
    ``burclarOz.koc()`` or ``burclarOz.burcOz("koc")``. The ``@staticmethod``
    decorators were missing before: class-level calls worked, but calling
    through an instance would have bound the instance to ``type``.
    """

    @staticmethod
    def burcOz(name: str, type="ozellikleri") -> str:
        """Generic accessor: fetch the traits text for any sign by slug name."""
        if name not in burcList:
            raise Exception(f"Geçerli bir burç giriniz. ({burcStr})")
        return makeAPIRequestOz(name, type)

    @staticmethod
    def yengec(type="ozellikleri") -> str:
        return makeAPIRequestOz("yengec", type)

    @staticmethod
    def koc(type="ozellikleri") -> str:
        return makeAPIRequestOz("koc", type)

    @staticmethod
    def boga(type="ozellikleri") -> str:
        return makeAPIRequestOz("boga", type)

    @staticmethod
    def ikizler(type="ozellikleri") -> str:
        return makeAPIRequestOz("ikizler", type)

    @staticmethod
    def aslan(type="ozellikleri") -> str:
        return makeAPIRequestOz("aslan", type)

    @staticmethod
    def basak(type="ozellikleri") -> str:
        return makeAPIRequestOz("basak", type)

    @staticmethod
    def terazi(type="ozellikleri") -> str:
        return makeAPIRequestOz("terazi", type)

    @staticmethod
    def akrep(type="ozellikleri") -> str:
        return makeAPIRequestOz("akrep", type)

    @staticmethod
    def yay(type="ozellikleri") -> str:
        return makeAPIRequestOz("yay", type)

    @staticmethod
    def oglak(type="ozellikleri") -> str:
        return makeAPIRequestOz("oglak", type)

    @staticmethod
    def kova(type="ozellikleri") -> str:
        return makeAPIRequestOz("kova", type)

    @staticmethod
    def balik(type="ozellikleri") -> str:
        return makeAPIRequestOz("balik", type)
"""Burçların özelliklerinin çekildiği kısım"""
| 31.71875 | 96 | 0.619704 | import requests
from bs4 import BeautifulSoup
def makeAPIRequest(path: str, type: str) -> str:
type = type if (type == "gunluk") or (type == "haftalik") else "gunluk"
r = requests.get(
f"https://www.mynet.com/kadin/burclar-astroloji/{path}-burcu-{type}-yorumu.html")
soup = BeautifulSoup(r.content, "html.parser")
data = soup.find_all("div", {"class": "detail-content-inner"})
burc = (data[0].contents)[len(data[0].contents) - 5]
burcYorum = burc.text
return burcYorum
burcList = ["yengec", "koc", "boga", "ikizler", "aslan",
"basak", "terazi", "akrep", "yay", "oglak", "kova", "balik"]
burcStr = ",".join(burcList)
class burclar:
def burc(name: str, type="gunluk") -> str:
if name not in burcList:
raise Exception(f"Geçerli bir burç giriniz. ({burcStr})")
return makeAPIRequest(name, type)
def yengec(type="gunluk") -> str:
return makeAPIRequest("yengec", type)
def koc(type="gunluk") -> str:
return makeAPIRequest("koc", type)
def boga(type="gunluk") -> str:
return makeAPIRequest("boga", type)
def ikizler(type="gunluk") -> str:
return makeAPIRequest("ikizler", type)
def aslan(type="gunluk") -> str:
return makeAPIRequest("aslan", type)
def basak(type="gunluk") -> str:
return makeAPIRequest("basak", type)
def terazi(type="gunluk") -> str:
return makeAPIRequest("terazi", type)
def akrep(type="gunluk") -> str:
return makeAPIRequest("akrep", type)
def yay(type="gunluk") -> str:
return makeAPIRequest("yay", type)
def oglak(type="gunluk") -> str:
return makeAPIRequest("oglak", type)
def kova(type="gunluk") -> str:
return makeAPIRequest("kova", type)
def balik(type="gunluk") -> str:
return makeAPIRequest("balik", type)
def makeAPIRequestOz(path: str, type: str) -> str:
type = type if (type == "ozellikleri") else "ozellikleri"
y = requests.get(
f"https://www.mynet.com/kadin/burclar-astroloji/{path}-burcu-ozellikleri.html")
soupOz = BeautifulSoup(y.content, "html.parser")
dataOz = soupOz.find_all("div", {"class": "medyanet-content"})
burcOz = (dataOz[0].contents)[len(dataOz[0].contents) - 12]
burcYorumOz = burcOz.text
return burcYorumOz
class burclarOz:
def burcOz(name: str, type="ozellikleri") -> str:
if name not in burcList:
raise Exception(f"Geçerli bir burç giriniz. ({burcStr})")
return makeAPIRequestOz(name, type)
def yengec(type="ozellikleri") -> str:
return makeAPIRequestOz("yengec", type)
def koc(type="ozellikleri") -> str:
return makeAPIRequestOz("koc", type)
def boga(type="ozellikleri") -> str:
return makeAPIRequestOz("boga", type)
def ikizler(type="ozellikleri") -> str:
return makeAPIRequestOz("ikizler", type)
def aslan(type="ozellikleri") -> str:
return makeAPIRequestOz("aslan", type)
def basak(type="ozellikleri") -> str:
return makeAPIRequestOz("basak", type)
def terazi(type="ozellikleri") -> str:
return makeAPIRequestOz("terazi", type)
def akrep(type="ozellikleri") -> str:
return makeAPIRequestOz("akrep", type)
def yay(type="ozellikleri") -> str:
return makeAPIRequestOz("yay", type)
def oglak(type="ozellikleri") -> str:
return makeAPIRequestOz("oglak", type)
def kova(type="ozellikleri") -> str:
return makeAPIRequestOz("kova", type)
def balik(type="ozellikleri") -> str:
return makeAPIRequestOz("balik", type)
| true | true |
79006bc4d4b1710981f6b6bbeacc52389a5792f7 | 3,130 | py | Python | MxShop/MxShop/urls.py | ScorpioDoctor/DjangoVueShop | a26380e30232f93fead0ebc6b7608b3d7ed2baf4 | [
"BSD-2-Clause"
] | null | null | null | MxShop/MxShop/urls.py | ScorpioDoctor/DjangoVueShop | a26380e30232f93fead0ebc6b7608b3d7ed2baf4 | [
"BSD-2-Clause"
] | null | null | null | MxShop/MxShop/urls.py | ScorpioDoctor/DjangoVueShop | a26380e30232f93fead0ebc6b7608b3d7ed2baf4 | [
"BSD-2-Clause"
] | null | null | null | """MxShop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# from django.contrib import admin
import xadmin
from MxShop.settings import MEDIA_ROOT
from django.views.static import serve
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
from rest_framework.authtoken import views
from rest_framework_jwt.views import obtain_jwt_token
from goods.views import GoodsListViewSet, CategoryViewset, HotSearchsViewset, BannerViewset
from goods.views import IndexCategoryViewset
from users.views import SmsCodeViewset, UserViewset
from user_operation.views import UserFavViewset, LeavingMessageViewset, AddressViewset
from trade.views import ShoppingCartViewset, OrderViewset
router = DefaultRouter()
# Goods list endpoints
router.register(r'goods', GoodsListViewSet, base_name="goods")
# Goods category endpoints
router.register(r'categorys', CategoryViewset, base_name="categorys")
router.register(r'codes', SmsCodeViewset, base_name="codes")
router.register(r'hotsearchs', HotSearchsViewset, base_name="hotsearchs")
router.register(r'users', UserViewset, base_name="users")
# User favourites
router.register(r'userfavs', UserFavViewset, base_name="userfavs")
# Leave-a-message endpoints
router.register(r'messages', LeavingMessageViewset, base_name="messages")
# Shipping addresses
router.register(r'address', AddressViewset, base_name="address")
# Shopping-cart endpoints
router.register(r'shopcarts', ShoppingCartViewset, base_name="shopcarts")
# Order endpoints
router.register(r'orders', OrderViewset, base_name="orders")
# Carousel banner endpoints
router.register(r'banners', BannerViewset, base_name="banners")
# Index-page goods-by-category data
router.register(r'indexgoods', IndexCategoryViewset, base_name="indexgoods")
goods_list = GoodsListViewSet.as_view({
    'get': 'list',
})
from trade.views import AlipayView
from django.views.generic import TemplateView
urlpatterns = [
    url(r'^xadmin/', xadmin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^media/(?P<path>.*)$', serve, {"document_root": MEDIA_ROOT}),
    url('', include('social_django.urls', namespace='social')),
    url(r'^', include(router.urls)),
    url(r'^index/', TemplateView.as_view(template_name="index.html"), name="index"),
    url(r'docs/', include_docs_urls(title="慕学生鲜")),
    # DRF's built-in token authentication
    url(r'^api-token-auth/', views.obtain_auth_token),
    # JWT authentication endpoint
    url(r'^login/', obtain_jwt_token),
    url(r'^alipay/return/', AlipayView.as_view(), name="alipay"),
    url(r'^ueditor/',include('DjangoUeditor.urls' )),
]
| 31.938776 | 91 | 0.755911 | from django.conf.urls import url, include
import xadmin
from MxShop.settings import MEDIA_ROOT
from django.views.static import serve
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
from rest_framework.authtoken import views
from rest_framework_jwt.views import obtain_jwt_token
from goods.views import GoodsListViewSet, CategoryViewset, HotSearchsViewset, BannerViewset
from goods.views import IndexCategoryViewset
from users.views import SmsCodeViewset, UserViewset
from user_operation.views import UserFavViewset, LeavingMessageViewset, AddressViewset
from trade.views import ShoppingCartViewset, OrderViewset
router = DefaultRouter()
router.register(r'goods', GoodsListViewSet, base_name="goods")
router.register(r'categorys', CategoryViewset, base_name="categorys")
router.register(r'codes', SmsCodeViewset, base_name="codes")
router.register(r'hotsearchs', HotSearchsViewset, base_name="hotsearchs")
router.register(r'users', UserViewset, base_name="users")
router.register(r'userfavs', UserFavViewset, base_name="userfavs")
router.register(r'messages', LeavingMessageViewset, base_name="messages")
router.register(r'address', AddressViewset, base_name="address")
router.register(r'shopcarts', ShoppingCartViewset, base_name="shopcarts")
router.register(r'orders', OrderViewset, base_name="orders")
router.register(r'banners', BannerViewset, base_name="banners")
router.register(r'indexgoods', IndexCategoryViewset, base_name="indexgoods")
goods_list = GoodsListViewSet.as_view({
'get': 'list',
})
from trade.views import AlipayView
from django.views.generic import TemplateView
urlpatterns = [
url(r'^xadmin/', xadmin.site.urls),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^media/(?P<path>.*)$', serve, {"document_root": MEDIA_ROOT}),
url('', include('social_django.urls', namespace='social')),
url(r'^', include(router.urls)),
url(r'^index/', TemplateView.as_view(template_name="index.html"), name="index"),
url(r'docs/', include_docs_urls(title="慕学生鲜")),
url(r'^api-token-auth/', views.obtain_auth_token),
url(r'^login/', obtain_jwt_token),
url(r'^alipay/return/', AlipayView.as_view(), name="alipay"),
url(r'^ueditor/',include('DjangoUeditor.urls' )),
]
| true | true |
79006ded31fe7e9dee49df7e1f8421903387d741 | 1,935 | py | Python | ml-agents/mlagents/trainers/tests/test_barracuda_converter.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 1 | 2020-10-15T01:28:20.000Z | 2020-10-15T01:28:20.000Z | ml-agents/mlagents/trainers/tests/test_barracuda_converter.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | null | null | null | ml-agents/mlagents/trainers/tests/test_barracuda_converter.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | null | null | null | import os
import tempfile
import pytest
import mlagents.trainers.tensorflow_to_barracuda as tf2bc
from mlagents.trainers.tests.test_nn_policy import create_policy_mock
from mlagents.trainers.settings import TrainerSettings
from mlagents.tf_utils import tf
from mlagents.model_serialization import SerializationSettings, export_policy_model
def test_barracuda_converter():
    """Convert a small frozen TF graph and check a plausible .nn file appears."""
    path_prefix = os.path.dirname(os.path.abspath(__file__))
    # Use the public tempfile API instead of the private
    # tempfile._get_default_tempdir() / _get_candidate_names() internals;
    # a brand-new directory also cannot contain left-over files, so the old
    # pre-existence check is unnecessary.
    tmpdir = tempfile.mkdtemp()
    tmpfile = os.path.join(tmpdir, "BasicLearning.nn")
    try:
        tf2bc.convert(path_prefix + "/BasicLearning.pb", tmpfile)
        # test if file exists after conversion
        assert os.path.isfile(tmpfile)
        # currently converter produces small output file even if input file is empty
        # 100 bytes is high enough to prove that conversion was successful
        assert os.path.getsize(tmpfile) > 100
    finally:
        # cleanup even when conversion or an assertion fails
        if os.path.isfile(tmpfile):
            os.remove(tmpfile)
        os.rmdir(tmpdir)
@pytest.mark.parametrize("discrete", [True, False], ids=["discrete", "continuous"])
@pytest.mark.parametrize("visual", [True, False], ids=["visual", "vector"])
@pytest.mark.parametrize("rnn", [True, False], ids=["rnn", "no_rnn"])
def test_policy_conversion(tmpdir, rnn, visual, discrete):
    """End-to-end check: a mock policy saves and exports to a Barracuda .nn file
    for every combination of rnn/visual/discrete options."""
    # Start from a clean graph so the parametrized runs don't share TF state.
    tf.reset_default_graph()
    dummy_config = TrainerSettings(output_path=os.path.join(tmpdir, "test"))
    policy = create_policy_mock(
        dummy_config, use_rnn=rnn, use_discrete=discrete, use_visual=visual
    )
    policy.save_model(1000)
    settings = SerializationSettings(
        policy.model_path, os.path.join(tmpdir, policy.brain.brain_name)
    )
    export_policy_model(settings, policy.graph, policy.sess)
    # These checks taken from test_barracuda_converter
    assert os.path.isfile(os.path.join(tmpdir, "test.nn"))
    assert os.path.getsize(os.path.join(tmpdir, "test.nn")) > 100
| 37.211538 | 86 | 0.740052 | import os
import tempfile
import pytest
import mlagents.trainers.tensorflow_to_barracuda as tf2bc
from mlagents.trainers.tests.test_nn_policy import create_policy_mock
from mlagents.trainers.settings import TrainerSettings
from mlagents.tf_utils import tf
from mlagents.model_serialization import SerializationSettings, export_policy_model
def test_barracuda_converter():
path_prefix = os.path.dirname(os.path.abspath(__file__))
tmpfile = os.path.join(
tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()) + ".nn"
)
if os.path.isfile(tmpfile):
os.remove(tmpfile)
tf2bc.convert(path_prefix + "/BasicLearning.pb", tmpfile)
assert os.path.isfile(tmpfile)
assert os.path.getsize(tmpfile) > 100
os.remove(tmpfile)
@pytest.mark.parametrize("discrete", [True, False], ids=["discrete", "continuous"])
@pytest.mark.parametrize("visual", [True, False], ids=["visual", "vector"])
@pytest.mark.parametrize("rnn", [True, False], ids=["rnn", "no_rnn"])
def test_policy_conversion(tmpdir, rnn, visual, discrete):
tf.reset_default_graph()
dummy_config = TrainerSettings(output_path=os.path.join(tmpdir, "test"))
policy = create_policy_mock(
dummy_config, use_rnn=rnn, use_discrete=discrete, use_visual=visual
)
policy.save_model(1000)
settings = SerializationSettings(
policy.model_path, os.path.join(tmpdir, policy.brain.brain_name)
)
export_policy_model(settings, policy.graph, policy.sess)
assert os.path.isfile(os.path.join(tmpdir, "test.nn"))
assert os.path.getsize(os.path.join(tmpdir, "test.nn")) > 100
| true | true |
79006e10d84cb5a0a7213d5ac1823b6efbb6760c | 6,254 | py | Python | BiblioAlly/ieee.py | gambit4348/BiblioAlly | c04ac378770a3cdcbba863799383103049df22f3 | [
"MIT"
] | null | null | null | BiblioAlly/ieee.py | gambit4348/BiblioAlly | c04ac378770a3cdcbba863799383103049df22f3 | [
"MIT"
] | null | null | null | BiblioAlly/ieee.py | gambit4348/BiblioAlly | c04ac378770a3cdcbba863799383103049df22f3 | [
"MIT"
] | null | null | null | from BiblioAlly import catalog as cat, domain, translator as bibtex
class IeeeXTranslator(bibtex.Translator):
    """Translator between IEEE Xplore BibTeX entries and BiblioAlly documents.

    Converts parsed BibTeX "proto-documents" into ``domain.Document``
    instances and back, and renders a proto-document as BibTeX text.
    Registered in the BiblioAlly catalog under the ``IeeeXplore`` key.
    """

    def _document_from_proto_document(self, proto_document):
        """Build a ``domain.Document`` from a parsed BibTeX proto-document.

        :param proto_document: dict with 'type', 'id' and 'field' entries as
            produced by the BibTeX parser.
        :returns: the populated ``domain.Document``.
        """
        bibtex.Translator._translate_kind(proto_document)
        kind = proto_document['type']
        # NOTE(review): the parser side stores fields under 'field', while
        # _proto_document_from_document/_as_bibtex use 'fields' -- confirm
        # this asymmetry is intended by the base Translator.
        fields = proto_document['field']
        if 'title' in fields:
            title = self._unbroken(self._uncurlied(fields['title']))
        else:
            title = ''
        if 'abstract' in fields:
            abstract = self._unbroken(self._uncurlied(fields['abstract']))
        else:
            abstract = ''
        year = int(fields['year'])
        author_field = ''
        if 'author' in fields:
            # IEEE exports sometimes emit '}and' between curly-wrapped names.
            author_field = self._unbroken(self._all_uncurly(fields['author'].replace('}and', ' and')))
        if author_field == '':
            author_field = 'Author, Unamed'
        authors = self._authors_from_field(author_field)
        affiliations = self._expand_affiliations(None, authors)
        keywords = []
        if 'keywords' in fields:
            # Keywords are ';'-separated and each entry may itself be a
            # comma-separated list; normalize case and deduplicate.
            all_keywords = self._all_uncurly(fields['keywords']).split(';')
            keyword_names = set()
            for keyword_name in all_keywords:
                for sub_keyword_name in keyword_name.split(','):
                    keyword_names.add(sub_keyword_name.strip().capitalize())
            keywords = [domain.Keyword(name=name) for name in keyword_names]
        document = domain.Document(proto_document['id'].strip(), kind, title, abstract, keywords, year, affiliations)
        document.generator = "IEEE Xplore"
        if 'doi' in fields:
            document.doi = self._uncurlied(fields['doi'])
        if 'journal' in fields:
            document.journal = self._uncurlied(fields['journal'])
        elif 'booktitle' in fields and kind == 'inproceedings':
            document.journal = self._uncurlied(fields['booktitle'])
        # Simple optional attributes copied verbatim when present and non-empty.
        for attribute in ('number', 'pages', 'url', 'volume'):
            self._copy_optional_field(document, fields, attribute)
        return document

    def _copy_optional_field(self, document, fields, name):
        """Copy BibTeX field *name* onto *document* when present and non-empty."""
        if name in fields:
            value = self._uncurlied(fields[name])
            if len(value) > 0:
                setattr(document, name, value)

    def _proto_document_from_document(self, document: domain.Document):
        """Serialize *document* back into a proto-document dict.

        :returns: dict with 'type' and 'fields' keys suitable for _as_bibtex.
        """
        kind = document.kind
        if kind == 'proceedings':
            kind = 'inproceedings'
        fields = dict()
        fields['external_key'] = document.external_key
        # Sort by the .first flag and reverse so first authors come out first.
        doc_authors = document.authors
        doc_authors.sort(key=lambda doc_author: doc_author.first)
        doc_authors.reverse()
        all_authors = [(doc_author.author.long_name if doc_author.author.long_name is not None
                        else doc_author.author.short_name) for doc_author in doc_authors]
        fields['author'] = self._curly(all_authors, separator=' and ')
        if document.journal is not None:
            if document.kind == 'article':
                fields['journal'] = self._curly(str(document.journal))
            else:
                fields['booktitle'] = self._curly(str(document.journal))
        fields['title'] = self._curly(document.title)
        affiliations = []
        for doc_author in doc_authors:
            institution = doc_author.institution
            if institution is not None:
                affiliations.append(', '.join([institution.name, institution.country]))
        if len(affiliations) > 0:
            fields['affiliation'] = self._curly(affiliations, '; ')
        fields['year'] = self._curly(str(document.year))
        if document.international_number is not None:
            fields['ISSN'] = self._curly(str(document.international_number))
        if document.publisher is not None:
            fields['publisher'] = self._curly(str(document.publisher))
        if document.address is not None:
            fields['address'] = self._curly(str(document.address))
        if document.doi is not None:
            fields['doi'] = self._curly(str(document.doi))
        # BUGFIX: this guard previously tested document.international_number
        # (copy/paste from the ISSN block above), so documents without an
        # ISSN never exported their URL and documents with an ISSN but no
        # URL exported the string 'None'.
        if document.url is not None:
            fields['url'] = self._curly(str(document.url))
        fields['abstract'] = self._curly(document.abstract)
        if document.pages is not None:
            fields['pages'] = self._curly(str(document.pages))
        if document.volume is not None:
            fields['volume'] = self._curly(str(document.volume))
        if document.number is not None:
            fields['number'] = self._curly(str(document.number))
        if document.language is not None:
            fields['language'] = self._curly(str(document.language))
        keywords = [keyword.name for keyword in document.keywords]
        fields['keywords'] = self._curly(keywords, ';')
        if len(document.references) > 0:
            fields['references'] = self._curly('; '.join(document.references))
        if document.document_type is not None:
            fields['document_type'] = self._curly(document.document_type)
        fields['source'] = self._curly(document.generator)
        proto_document = {
            'type': kind,
            'fields': fields
        }
        return proto_document

    def _as_bibtex(self, proto_document):
        """Render a proto-document (as built above) as a BibTeX entry string."""
        kind = proto_document['type'].upper()
        fields = proto_document['fields']
        external_key = fields['external_key']
        # The key is emitted in the entry header, not as a field.
        del fields['external_key']
        key_value = [f'{key}={value}' for key, value in fields.items()]
        # Local renamed from 'bibtex' to avoid shadowing the imported module.
        entry = f'@{kind}' + '{' + f'{external_key},\n' + ',\n'.join(key_value) + '\n}'
        return entry
# Catalog key under which this translator is registered.
IeeeXplore = "IeeeXplore"
# Register the translator so the BiblioAlly catalog can find it by key.
cat.Catalog.translators[IeeeXplore] = IeeeXTranslator
| 44.992806 | 117 | 0.605373 | from BiblioAlly import catalog as cat, domain, translator as bibtex
class IeeeXTranslator(bibtex.Translator):
def _document_from_proto_document(self, proto_document):
bibtex.Translator._translate_kind(proto_document)
kind = proto_document['type']
fields = proto_document['field']
if 'title' in fields:
title = self._unbroken(self._uncurlied(fields['title']))
else:
title = ''
if 'abstract' in fields:
abstract = self._unbroken(self._uncurlied(fields['abstract']))
else:
abstract = ''
year = int(fields['year'])
author_field = ''
if 'author' in fields:
author_field = self._unbroken(self._all_uncurly(fields['author'].replace('}and', ' and')))
if author_field == '':
author_field = 'Author, Unamed'
authors = self._authors_from_field(author_field)
affiliations = self._expand_affiliations(None, authors)
keywords = []
if 'keywords' in fields:
all_keywords = self._all_uncurly(fields['keywords']).split(';')
keyword_names = set()
for keyword_name in all_keywords:
sub_keyword_names = keyword_name.split(',')
for sub_keyword_name in sub_keyword_names:
name = sub_keyword_name.strip().capitalize()
if name not in keyword_names:
keyword_names.add(name)
keyword_names = list(keyword_names)
for keyword_name in keyword_names:
keywords.append(domain.Keyword(name=keyword_name))
document = domain.Document(proto_document['id'].strip(), kind, title, abstract, keywords, year, affiliations)
document.generator = "IEEE Xplore"
if 'doi' in fields:
document.doi = self._uncurlied(fields['doi'])
if 'journal' in fields:
document.journal = self._uncurlied(fields['journal'])
elif 'booktitle' in fields and kind == 'inproceedings':
document.journal = self._uncurlied(fields['booktitle'])
if 'number' in fields:
if len(self._uncurlied(fields['number'])) > 0:
document.number = self._uncurlied(fields['number'])
if 'pages' in fields:
if len(self._uncurlied(fields['pages'])) > 0:
document.pages = self._uncurlied(fields['pages'])
if 'url' in fields:
if len(self._uncurlied(fields['url'])) > 0:
document.url = self._uncurlied(fields['url'])
if 'volume' in fields:
if len(self._uncurlied(fields['volume'])) > 0:
document.volume = self._uncurlied(fields['volume'])
return document
def _proto_document_from_document(self, document: domain.Document):
kind = document.kind
if kind == 'proceedings':
kind = 'inproceedings'
fields = dict()
fields['external_key'] = document.external_key
doc_authors = document.authors
doc_authors.sort(key=lambda doc_author: doc_author.first)
doc_authors.reverse()
all_authors = [(doc_author.author.long_name if doc_author.author.long_name is not None
else doc_author.author.short_name) for doc_author in doc_authors]
fields['author'] = self._curly(all_authors, separator=' and ')
if document.journal is not None:
if document.kind == 'article':
fields['journal'] = self._curly(str(document.journal))
else:
fields['booktitle'] = self._curly(str(document.journal))
fields['title'] = self._curly(document.title)
affiliations = []
for doc_author in doc_authors:
institution = doc_author.institution
if institution is not None:
affiliation = ', '.join([institution.name, institution.country])
affiliations.append(affiliation)
if len(affiliations) > 0:
fields['affiliation'] = self._curly(affiliations, '; ')
fields['year'] = self._curly(str(document.year))
if document.international_number is not None:
fields['ISSN'] = self._curly(str(document.international_number))
if document.publisher is not None:
fields['publisher'] = self._curly(str(document.publisher))
if document.address is not None:
fields['address'] = self._curly(str(document.address))
if document.doi is not None:
fields['doi'] = self._curly(str(document.doi))
if document.international_number is not None:
fields['url'] = self._curly(str(document.url))
fields['abstract'] = self._curly(document.abstract)
if document.pages is not None:
fields['pages'] = self._curly(str(document.pages))
if document.volume is not None:
fields['volume'] = self._curly(str(document.volume))
if document.number is not None:
fields['number'] = self._curly(str(document.number))
if document.language is not None:
fields['language'] = self._curly(str(document.language))
keywords = [keyword.name for keyword in document.keywords]
fields['keywords'] = self._curly(keywords, ';')
if len(document.references) > 0:
fields['references'] = self._curly('; '.join(document.references))
if document.document_type is not None:
fields['document_type'] = self._curly(document.document_type)
fields['source'] = self._curly(document.generator)
proto_document = {
'type': kind,
'fields': fields
}
return proto_document
def _as_bibtex(self, proto_document):
kind = proto_document['type'].upper()
fields = proto_document['fields']
external_key = fields['external_key']
del fields['external_key']
key_value = []
for key, value in fields.items():
key_value.append(f'{key}={value}')
bibtex = f'@{kind}' + '{' + f'{external_key},\n' + ',\n'.join(key_value) + '\n}'
return bibtex
IeeeXplore = "IeeeXplore"
cat.Catalog.translators[IeeeXplore] = IeeeXTranslator
| true | true |
79006e17c04d20eacccb5c41aa9087fdb8d3d8af | 7,071 | py | Python | clients/keto/python/ory_keto_client/model/delete_ory_access_control_policy_internal_server_error.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | [
"Apache-2.0"
] | null | null | null | clients/keto/python/ory_keto_client/model/delete_ory_access_control_policy_internal_server_error.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | [
"Apache-2.0"
] | null | null | null | clients/keto/python/ory_keto_client/model/delete_ory_access_control_policy_internal_server_error.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | [
"Apache-2.0"
] | null | null | null | """
ORY Keto
A cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs. # noqa: E501
The version of the OpenAPI document: v0.0.0
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from ory_keto_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the Payload model lazily and publish it in module globals.

    Deferred to first use (see ``openapi_types``) to avoid circular imports
    between generated model modules.
    """
    from ory_keto_client.model.delete_ory_access_control_policy_internal_server_error_body import DeleteOryAccessControlPolicyInternalServerErrorBody
    globals()['DeleteOryAccessControlPolicyInternalServerErrorBody'] = DeleteOryAccessControlPolicyInternalServerErrorBody
class DeleteOryAccessControlPolicyInternalServerError(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-restricted attributes on this model.
    allowed_values = {
    }
    # No length/range/regex validations on this model.
    validations = {
    }
    # The schema accepts no additional (free-form) properties.
    additional_properties_type = None
    # The model value itself may not be null.
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'payload': (DeleteOryAccessControlPolicyInternalServerErrorBody,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # This model takes part in no oneOf/anyOf discriminator mapping.
        return None
    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'payload': 'Payload',  # noqa: E501
    }
    # Not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}
    # Internal bookkeeping attributes that must never be treated as model
    # properties during (de)serialization.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """DeleteOryAccessControlPolicyInternalServerError - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            payload (DeleteOryAccessControlPolicyInternalServerErrorBody): [optional]  # noqa: E501
        """
        # Pop the framework-control kwargs first so only model properties remain.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Model properties must be passed by keyword only.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 40.405714 | 161 | 0.606845 |
import re
import sys
import nulltype
from ory_keto_client.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from ory_keto_client.model.delete_ory_access_control_policy_internal_server_error_body import DeleteOryAccessControlPolicyInternalServerErrorBody
globals()['DeleteOryAccessControlPolicyInternalServerErrorBody'] = DeleteOryAccessControlPolicyInternalServerErrorBody
class DeleteOryAccessControlPolicyInternalServerError(ModelNormal):
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'payload': (DeleteOryAccessControlPolicyInternalServerErrorBody,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'payload': 'Payload',
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
| true | true |
79006f0736af46df78fa4da6843cc9be0bcfc3f4 | 4,038 | py | Python | webcrawler/__init__.py | debugtalk/WebCrawler | 256707392a10c71033c4d45e9cb28ea60b3f8014 | [
"MIT"
] | 37 | 2017-02-08T13:36:03.000Z | 2021-12-23T06:26:43.000Z | webcrawler/__init__.py | debugtalk/WebCrawler | 256707392a10c71033c4d45e9cb28ea60b3f8014 | [
"MIT"
] | 3 | 2017-03-15T07:09:06.000Z | 2022-01-18T15:30:03.000Z | webcrawler/__init__.py | debugtalk/WebCrawler | 256707392a10c71033c4d45e9cb28ea60b3f8014 | [
"MIT"
] | 18 | 2017-06-09T08:57:18.000Z | 2022-01-12T06:59:58.000Z | __version__ = '0.3.3'
import os
import sys
import logging
import argparse
from .core import WebCrawler
from .helpers import color_logging
def main():
    """Parse command line options and run the crawler.

    Builds the argument parser, optionally wires in the Mailgun helper
    (which adds its own jenkins/mail arguments when ``jenkins_mail_py`` is
    installed), configures logging and hands control to ``main_crawler``.
    """
    parser = argparse.ArgumentParser(
        description='A web crawler for testing website links validation.')
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--config-file', help="Specify config file path.")
    parser.add_argument(
        '--seeds', default='http://debugtalk.com',
        help="Specify crawl seed url(s), several urls can be specified with pipe; \
            if auth needed, seeds can be specified like user1:pwd1@url1|user2:pwd2@url2")
    parser.add_argument(
        '--include-hosts', help="Specify extra hosts to be crawled.")
    parser.add_argument(
        '--cookies', help="Specify cookies, several cookies can be joined by '|'. \
            e.g. 'lang:en,country:us|lang:zh,country:cn'")
    parser.add_argument(
        '--crawl-mode', default='BFS', help="Specify crawl mode, BFS or DFS.")
    parser.add_argument(
        '--max-depth', default=5, type=int, help="Specify max crawl depth.")
    parser.add_argument(
        '--concurrency', help="Specify concurrent workers number.")
    parser.add_argument(
        '--save-results', default='NO', help="Specify if save results, default is NO.")
    parser.add_argument("--grey-user-agent",
                        help="Specify grey environment header User-Agent.")
    parser.add_argument("--grey-traceid",
                        help="Specify grey environment cookie traceid.")
    parser.add_argument("--grey-view-grey",
                        help="Specify grey environment cookie view_gray.")

    try:
        # Optional dependency: registers mail/jenkins arguments on the parser.
        from jenkins_mail_py import MailgunHelper
        mailer = MailgunHelper(parser)
    except ImportError:
        mailer = None

    args = parser.parse_args()

    if args.version:
        print("WebCrawler version: {}".format(__version__))
        # Use sys.exit instead of the site-injected exit(), which is not
        # guaranteed to exist outside the interactive interpreter.
        sys.exit(0)

    log_level = getattr(logging, args.log_level.upper())
    logging.basicConfig(level=log_level)
    color_logging("args: %s" % args)
    main_crawler(args, mailer)
def main_crawler(args, mailer=None):
    """Run the crawler once per cookies combination and report the results.

    :param args: parsed command line namespace produced by ``main()``.
    :param mailer: optional ``MailgunHelper``; when configured, a summary
        mail is sent after each crawl run.
    """
    include_hosts = args.include_hosts.split(',') if args.include_hosts else []
    # Each '|'-separated group is a full cookie set for one crawl run; an
    # empty string means one run without cookies.
    cookies_list = args.cookies.split('|') if args.cookies else ['']
    # NOTE(review): jenkins_build_number is only added to the parser by
    # MailgunHelper; when jenkins_mail_py is not installed this attribute
    # access raises AttributeError -- confirm that is intended.
    jenkins_build_number = args.jenkins_build_number
    logs_folder = os.path.join(os.getcwd(), "logs", '{}'.format(jenkins_build_number))
    web_crawler = WebCrawler(args.seeds, include_hosts, logs_folder, args.config_file)

    # set grey environment
    if args.grey_user_agent and args.grey_traceid and args.grey_view_grey:
        web_crawler.set_grey_env(args.grey_user_agent, args.grey_traceid, args.grey_view_grey)

    canceled = False
    try:
        for cookies_str in cookies_list:
            # Parse 'key:value,key:value' pairs; entries without ':' are skipped.
            cookies_str_list = cookies_str.split(',')
            cookies = {}
            for cookie_str in cookies_str_list:
                if ':' not in cookie_str:
                    continue
                key, value = cookie_str.split(':')
                cookies[key.strip()] = value.strip()
            web_crawler.start(
                cookies,
                args.crawl_mode,
                args.max_depth,
                args.concurrency
            )
            if mailer and mailer.config_ready:
                subject = "%s" % args.seeds
                mail_content_ordered_dict, flag_code = web_crawler.get_mail_content_ordered_dict()
                mailer.send_mail(subject, mail_content_ordered_dict, flag_code)
    except KeyboardInterrupt:
        canceled = True
        color_logging("Canceling...", color='red')
    finally:
        # Print (and optionally persist) results even on cancel/exception.
        save_results = False if args.save_results.upper() == "NO" else True
        web_crawler.print_result(canceled, save_results)
| 37.388889 | 94 | 0.63794 | __version__ = '0.3.3'
import os
import sys
import logging
import argparse
from .core import WebCrawler
from .helpers import color_logging
def main():
parser = argparse.ArgumentParser(
description='A web crawler for testing website links validation.')
parser.add_argument(
'-V', '--version', dest='version', action='store_true',
help="show version")
parser.add_argument(
'--log-level', default='INFO',
help="Specify logging level, default is INFO.")
parser.add_argument(
'--config-file', help="Specify config file path.")
parser.add_argument(
'--seeds', default='http://debugtalk.com',
help="Specify crawl seed url(s), several urls can be specified with pipe; \
if auth needed, seeds can be specified like user1:pwd1@url1|user2:pwd2@url2")
parser.add_argument(
'--include-hosts', help="Specify extra hosts to be crawled.")
parser.add_argument(
'--cookies', help="Specify cookies, several cookies can be joined by '|'. \
e.g. 'lang:en,country:us|lang:zh,country:cn'")
parser.add_argument(
'--crawl-mode', default='BFS', help="Specify crawl mode, BFS or DFS.")
parser.add_argument(
'--max-depth', default=5, type=int, help="Specify max crawl depth.")
parser.add_argument(
'--concurrency', help="Specify concurrent workers number.")
parser.add_argument(
'--save-results', default='NO', help="Specify if save results, default is NO.")
parser.add_argument("--grey-user-agent",
help="Specify grey environment header User-Agent.")
parser.add_argument("--grey-traceid",
help="Specify grey environment cookie traceid.")
parser.add_argument("--grey-view-grey",
help="Specify grey environment cookie view_gray.")
try:
from jenkins_mail_py import MailgunHelper
mailer = MailgunHelper(parser)
except ImportError:
mailer = None
args = parser.parse_args()
if args.version:
print("WebCrawler version: {}".format(__version__))
exit(0)
log_level = getattr(logging, args.log_level.upper())
logging.basicConfig(level=log_level)
color_logging("args: %s" % args)
main_crawler(args, mailer)
def main_crawler(args, mailer=None):
include_hosts = args.include_hosts.split(',') if args.include_hosts else []
cookies_list = args.cookies.split('|') if args.cookies else ['']
jenkins_build_number = args.jenkins_build_number
logs_folder = os.path.join(os.getcwd(), "logs", '{}'.format(jenkins_build_number))
web_crawler = WebCrawler(args.seeds, include_hosts, logs_folder, args.config_file)
if args.grey_user_agent and args.grey_traceid and args.grey_view_grey:
web_crawler.set_grey_env(args.grey_user_agent, args.grey_traceid, args.grey_view_grey)
canceled = False
try:
for cookies_str in cookies_list:
cookies_str_list = cookies_str.split(',')
cookies = {}
for cookie_str in cookies_str_list:
if ':' not in cookie_str:
continue
key, value = cookie_str.split(':')
cookies[key.strip()] = value.strip()
web_crawler.start(
cookies,
args.crawl_mode,
args.max_depth,
args.concurrency
)
if mailer and mailer.config_ready:
subject = "%s" % args.seeds
mail_content_ordered_dict, flag_code = web_crawler.get_mail_content_ordered_dict()
mailer.send_mail(subject, mail_content_ordered_dict, flag_code)
except KeyboardInterrupt:
canceled = True
color_logging("Canceling...", color='red')
finally:
save_results = False if args.save_results.upper() == "NO" else True
web_crawler.print_result(canceled, save_results)
| true | true |
79006f3d0cfbe4b7e107610605c8491f90670bd9 | 28,985 | py | Python | app/utils/onelogin/saml2/settings.py | nycrecords/intranet | 33b11d21246eac983d82483483f9d5257b226e12 | [
"MIT"
] | 2 | 2018-07-12T19:14:13.000Z | 2022-01-31T03:19:34.000Z | app/utils/onelogin/saml2/settings.py | nycrecords/intranet | 33b11d21246eac983d82483483f9d5257b226e12 | [
"MIT"
] | 19 | 2018-07-13T15:01:00.000Z | 2021-03-31T19:01:30.000Z | app/utils/onelogin/saml2/settings.py | nycrecords/intranet | 33b11d21246eac983d82483483f9d5257b226e12 | [
"MIT"
] | 1 | 2019-04-10T19:46:00.000Z | 2019-04-10T19:46:00.000Z | # -*- coding: utf-8 -*-
""" OneLogin_Saml2_Settings class
Copyright (c) 2010-2018 OneLogin, Inc.
MIT License
Setting class of OneLogin's Python Toolkit.
"""
from time import time
import re
from os.path import dirname, exists, join, sep
from app.utils.onelogin.saml2 import compat
from app.utils.onelogin.saml2.constants import OneLogin_Saml2_Constants
from app.utils.onelogin.saml2.errors import OneLogin_Saml2_Error
from app.utils.onelogin.saml2.metadata import OneLogin_Saml2_Metadata
from app.utils.onelogin.saml2.utils import OneLogin_Saml2_Utils
from app.utils.onelogin.saml2.xml_utils import OneLogin_Saml2_XML
try:
    # Prefer the faster ujson when it is installed; fall back to stdlib json.
    import ujson as json
except ImportError:
    import json
try:
    # Python 2/3 compatibility: ``basestring`` only exists on Python 2.
    basestring
except NameError:
    basestring = str
# Regex from Django Software Foundation and individual contributors.
# Released under a BSD 3-Clause License
url_regex = re.compile(
    r'^(?:[a-z0-9\.\-]*)://'  # scheme is validated separately
    r'(?:(?:[A-Z0-9_](?:[A-Z0-9-_]{0,61}[A-Z0-9_])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or ipv4
    r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or ipv6
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# Schemes accepted by validate_url() below.
url_schemes = ['http', 'https', 'ftp', 'ftps']
def validate_url(url):
    """Check that *url* has an accepted scheme and a well-formed authority.

    :param url: the URL string to validate
    :type url: string

    :returns: True when both the scheme and the overall shape are valid
    :rtype: bool
    """
    scheme = url.split('://')[0].lower()
    return scheme in url_schemes and url_regex.search(url) is not None
class OneLogin_Saml2_Settings(object):
"""
Handles the settings of the Python toolkits.
"""
def __init__(self, settings=None, custom_base_path=None, sp_validation_only=False):
"""
Initializes the settings:
- Sets the paths of the different folders
- Loads settings info from settings file or array/object provided
:param settings: SAML Toolkit Settings
:type settings: dict
:param custom_base_path: Path where are stored the settings file and the cert folder
:type custom_base_path: string
:param sp_validation_only: Avoid the IdP validation
:type sp_validation_only: boolean
"""
self.__sp_validation_only = sp_validation_only
self.__paths = {}
self.__strict = False
self.__debug = False
self.__sp = {}
self.__idp = {}
self.__security = {}
self.__contacts = {}
self.__organization = {}
self.__errors = []
self.__load_paths(base_path=custom_base_path)
self.__update_paths(settings)
if settings is None:
try:
valid = self.__load_settings_from_file()
except Exception as e:
raise e
if not valid:
raise OneLogin_Saml2_Error(
'Invalid dict settings at the file: %s',
OneLogin_Saml2_Error.SETTINGS_INVALID,
','.join(self.__errors)
)
elif isinstance(settings, dict):
if not self.__load_settings_from_dict(settings):
raise OneLogin_Saml2_Error(
'Invalid dict settings: %s',
OneLogin_Saml2_Error.SETTINGS_INVALID,
','.join(self.__errors)
)
else:
raise OneLogin_Saml2_Error(
'Unsupported settings object',
OneLogin_Saml2_Error.UNSUPPORTED_SETTINGS_OBJECT
)
self.format_idp_cert()
if 'x509certMulti' in self.__idp:
self.format_idp_cert_multi()
self.format_sp_cert()
if 'x509certNew' in self.__sp:
self.format_sp_cert_new()
self.format_sp_key()
def __load_paths(self, base_path=None):
"""
Set the paths of the different folders
"""
if base_path is None:
base_path = dirname(dirname(dirname(__file__)))
if not base_path.endswith(sep):
base_path += sep
self.__paths = {
'base': base_path,
'cert': base_path + 'certs' + sep,
'lib': base_path + 'lib' + sep,
'extlib': base_path + 'extlib' + sep,
}
def __update_paths(self, settings):
"""
Set custom paths if necessary
"""
if not isinstance(settings, dict):
return
if 'custom_base_path' in settings:
base_path = settings['custom_base_path']
base_path = join(dirname(__file__), base_path)
self.__load_paths(base_path)
def get_base_path(self):
"""
Returns base path
:return: The base toolkit folder path
:rtype: string
"""
return self.__paths['base']
def get_cert_path(self):
"""
Returns cert path
:return: The cert folder path
:rtype: string
"""
return self.__paths['cert']
def get_lib_path(self):
"""
Returns lib path
:return: The library folder path
:rtype: string
"""
return self.__paths['lib']
def get_ext_lib_path(self):
"""
Returns external lib path
:return: The external library folder path
:rtype: string
"""
return self.__paths['extlib']
def get_schemas_path(self):
"""
Returns schema path
:return: The schema folder path
:rtype: string
"""
return self.__paths['lib'] + 'schemas/'
def __load_settings_from_dict(self, settings):
"""
Loads settings info from a settings Dict
:param settings: SAML Toolkit Settings
:type settings: dict
:returns: True if the settings info is valid
:rtype: boolean
"""
errors = self.check_settings(settings)
if len(errors) == 0:
self.__errors = []
self.__sp = settings['sp']
self.__idp = settings.get('idp', {})
self.__strict = settings.get('strict', False)
self.__debug = settings.get('debug', False)
self.__security = settings.get('security', {})
self.__contacts = settings.get('contactPerson', {})
self.__organization = settings.get('organization', {})
self.__add_default_values()
return True
self.__errors = errors
return False
    def __load_settings_from_file(self):
        """
        Loads settings info from the settings json file

        :returns: True if the settings info is valid
        :rtype: boolean

        :raises OneLogin_Saml2_Error: when settings.json does not exist in
            the base path
        """
        filename = self.get_base_path() + 'settings.json'
        if not exists(filename):
            raise OneLogin_Saml2_Error(
                'Settings file not found: %s',
                OneLogin_Saml2_Error.SETTINGS_FILE_NOT_FOUND,
                filename
            )
        # In the php toolkit instead of being a json file it is a php file and
        # it is directly included
        with open(filename, 'r') as json_data:
            settings = json.loads(json_data.read())
        # Optional advanced settings are merged over the base settings when
        # advanced_settings.json exists next to settings.json.
        advanced_filename = self.get_base_path() + 'advanced_settings.json'
        if exists(advanced_filename):
            with open(advanced_filename, 'r') as json_data:
                settings.update(json.loads(json_data.read()))  # Merge settings
        return self.__load_settings_from_dict(settings)
def __add_default_values(self):
"""
Add default values if the settings info is not complete
"""
self.__sp.setdefault('assertionConsumerService', {})
self.__sp['assertionConsumerService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_POST)
self.__sp.setdefault('attributeConsumingService', {})
self.__sp.setdefault('singleLogoutService', {})
self.__sp['singleLogoutService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT)
self.__idp.setdefault('singleLogoutService', {})
# Related to nameID
self.__sp.setdefault('NameIDFormat', OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED)
self.__security.setdefault('nameIdEncrypted', False)
# Metadata format
self.__security.setdefault('metadataValidUntil', None) # None means use default
self.__security.setdefault('metadataCacheDuration', None) # None means use default
# Sign provided
self.__security.setdefault('authnRequestsSigned', False)
self.__security.setdefault('logoutRequestSigned', False)
self.__security.setdefault('logoutResponseSigned', False)
self.__security.setdefault('signMetadata', False)
# Sign expected
self.__security.setdefault('wantMessagesSigned', False)
self.__security.setdefault('wantAssertionsSigned', False)
# NameID element expected
self.__security.setdefault('wantNameId', True)
# Encrypt expected
self.__security.setdefault('wantAssertionsEncrypted', False)
self.__security.setdefault('wantNameIdEncrypted', False)
# Signature Algorithm
self.__security.setdefault('signatureAlgorithm', OneLogin_Saml2_Constants.RSA_SHA1)
# Digest Algorithm
self.__security.setdefault('digestAlgorithm', OneLogin_Saml2_Constants.SHA1)
# AttributeStatement required by default
self.__security.setdefault('wantAttributeStatement', True)
self.__idp.setdefault('x509cert', '')
self.__idp.setdefault('certFingerprint', '')
self.__idp.setdefault('certFingerprintAlgorithm', 'sha1')
self.__sp.setdefault('x509cert', '')
self.__sp.setdefault('privateKey', '')
self.__security.setdefault('requestedAuthnContext', True)
self.__security.setdefault('failOnAuthnContextMismatch', False)
def check_settings(self, settings):
"""
Checks the settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the settings data
:rtype: list
"""
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or len(settings) == 0:
errors.append('invalid_syntax')
else:
if not self.__sp_validation_only:
errors += self.check_idp_settings(settings)
sp_errors = self.check_sp_settings(settings)
errors += sp_errors
return errors
    def check_idp_settings(self, settings):
        """
        Checks the IdP settings info.

        Validates that the IdP section declares an entityId and a
        well-formed SSO URL, and that whatever signing/encryption the
        'security' section expects is backed by cert material.

        :param settings: Dict with settings data
        :type settings: dict

        :returns: Errors found on the IdP settings data
        :rtype: list
        """
        assert isinstance(settings, dict)

        errors = []
        if not isinstance(settings, dict) or len(settings) == 0:
            errors.append('invalid_syntax')
        else:
            if not settings.get('idp'):
                errors.append('idp_not_found')
            else:
                idp = settings['idp']
                if not idp.get('entityId'):
                    errors.append('idp_entityId_not_found')

                # SSO endpoint is mandatory and must be a well-formed URL.
                if not idp.get('singleSignOnService', {}).get('url'):
                    errors.append('idp_sso_not_found')
                elif not validate_url(idp['singleSignOnService']['url']):
                    errors.append('idp_sso_url_invalid')

                # SLO endpoint is optional but must be valid when given.
                slo_url = idp.get('singleLogoutService', {}).get('url')
                if slo_url and not validate_url(slo_url):
                    errors.append('idp_slo_url_invalid')

                if 'security' in settings:
                    security = settings['security']

                    # Signature checking can be satisfied by a full x509
                    # cert, a fingerprint, or a multi-cert 'signing' list;
                    # NameID decryption needs a full cert.
                    exists_x509 = bool(idp.get('x509cert'))
                    exists_fingerprint = bool(idp.get('certFingerprint'))

                    exists_multix509sign = 'x509certMulti' in idp and \
                        'signing' in idp['x509certMulti'] and \
                        idp['x509certMulti']['signing']
                    exists_multix509enc = 'x509certMulti' in idp and \
                        'encryption' in idp['x509certMulti'] and \
                        idp['x509certMulti']['encryption']

                    want_assert_sign = bool(security.get('wantAssertionsSigned'))
                    want_mes_signed = bool(security.get('wantMessagesSigned'))
                    nameid_enc = bool(security.get('nameIdEncrypted'))

                    if (want_assert_sign or want_mes_signed) and \
                            not(exists_x509 or exists_fingerprint or exists_multix509sign):
                        errors.append('idp_cert_or_fingerprint_not_found_and_required')
                    if nameid_enc and not (exists_x509 or exists_multix509enc):
                        errors.append('idp_cert_not_found_and_required')
        return errors
    def check_sp_settings(self, settings):
        """
        Checks the SP settings info.

        Validates the 'sp' section (entityId, ACS/SLS URLs,
        attributeConsumingService shape), checks that any requested
        signing/encryption in 'security' is backed by SP cert material,
        and validates the optional contactPerson/organization sections.

        :param settings: Dict with settings data
        :type settings: dict

        :returns: Errors found on the SP settings data
        :rtype: list
        """
        assert isinstance(settings, dict)

        errors = []
        if not isinstance(settings, dict) or not settings:
            errors.append('invalid_syntax')
        else:
            if not settings.get('sp'):
                errors.append('sp_not_found')
            else:
                # check_sp_certs uses self.__sp so I add it
                # (temporarily swap in the sp being validated; restored below)
                old_sp = self.__sp
                self.__sp = settings['sp']

                sp = settings['sp']
                security = settings.get('security', {})

                if not sp.get('entityId'):
                    errors.append('sp_entityId_not_found')

                # ACS endpoint is mandatory and must be a well-formed URL.
                if not sp.get('assertionConsumerService', {}).get('url'):
                    errors.append('sp_acs_not_found')
                elif not validate_url(sp['assertionConsumerService']['url']):
                    errors.append('sp_acs_url_invalid')

                if sp.get('attributeConsumingService'):
                    attributeConsumingService = sp['attributeConsumingService']
                    if 'serviceName' not in attributeConsumingService:
                        errors.append('sp_attributeConsumingService_serviceName_not_found')
                    elif not isinstance(attributeConsumingService['serviceName'], basestring):
                        errors.append('sp_attributeConsumingService_serviceName_type_invalid')

                    if 'requestedAttributes' not in attributeConsumingService:
                        errors.append('sp_attributeConsumingService_requestedAttributes_not_found')
                    elif not isinstance(attributeConsumingService['requestedAttributes'], list):
                        # NOTE(review): this error key repeats the serviceName
                        # one above -- looks like a copy-paste slip; a
                        # 'requestedAttributes'-specific key was probably
                        # intended. Kept as-is since consumers may match on it.
                        errors.append('sp_attributeConsumingService_serviceName_type_invalid')
                    else:
                        for req_attrib in attributeConsumingService['requestedAttributes']:
                            if 'name' not in req_attrib:
                                errors.append('sp_attributeConsumingService_requestedAttributes_name_not_found')
                            if 'name' in req_attrib and not req_attrib['name'].strip():
                                errors.append('sp_attributeConsumingService_requestedAttributes_name_invalid')
                            if 'attributeValue' in req_attrib and type(req_attrib['attributeValue']) != list:
                                errors.append('sp_attributeConsumingService_requestedAttributes_attributeValue_type_invalid')
                            if 'isRequired' in req_attrib and type(req_attrib['isRequired']) != bool:
                                errors.append('sp_attributeConsumingService_requestedAttributes_isRequired_type_invalid')

                    if "serviceDescription" in attributeConsumingService and not isinstance(attributeConsumingService['serviceDescription'], basestring):
                        errors.append('sp_attributeConsumingService_serviceDescription_type_invalid')

                # SLS endpoint is optional but must be valid when given.
                slo_url = sp.get('singleLogoutService', {}).get('url')
                if slo_url and not validate_url(slo_url):
                    errors.append('sp_sls_url_invalid')

                if 'signMetadata' in security and isinstance(security['signMetadata'], dict):
                    # A dict form of signMetadata must name both key files.
                    if 'keyFileName' not in security['signMetadata'] or \
                            'certFileName' not in security['signMetadata']:
                        errors.append('sp_signMetadata_invalid')

                # Any requested signing/encryption requires SP cert + key.
                authn_sign = bool(security.get('authnRequestsSigned'))
                logout_req_sign = bool(security.get('logoutRequestSigned'))
                logout_res_sign = bool(security.get('logoutResponseSigned'))
                want_assert_enc = bool(security.get('wantAssertionsEncrypted'))
                want_nameid_enc = bool(security.get('wantNameIdEncrypted'))

                if not self.check_sp_certs():
                    if authn_sign or logout_req_sign or logout_res_sign or \
                       want_assert_enc or want_nameid_enc:
                        errors.append('sp_cert_not_found_and_required')

            if 'contactPerson' in settings:
                types = settings['contactPerson']
                valid_types = ['technical', 'support', 'administrative', 'billing', 'other']
                for c_type in types:
                    if c_type not in valid_types:
                        errors.append('contact_type_invalid')
                        break

                for c_type in settings['contactPerson']:
                    contact = settings['contactPerson'][c_type]
                    if ('givenName' not in contact or len(contact['givenName']) == 0) or \
                            ('emailAddress' not in contact or len(contact['emailAddress']) == 0):
                        errors.append('contact_not_enought_data')
                        break

            if 'organization' in settings:
                for org in settings['organization']:
                    organization = settings['organization'][org]
                    if ('name' not in organization or len(organization['name']) == 0) or \
                            ('displayname' not in organization or len(organization['displayname']) == 0) or \
                            ('url' not in organization or len(organization['url']) == 0):
                        errors.append('organization_not_enought_data')
                        break
        # Restores the value that had the self.__sp
        # ('old_sp' only exists when the sp-present branch above ran).
        if 'old_sp' in locals():
            self.__sp = old_sp

        return errors
def check_sp_certs(self):
"""
Checks if the x509 certs of the SP exists and are valid.
:returns: If the x509 certs of the SP exists and are valid
:rtype: boolean
"""
key = self.get_sp_key()
cert = self.get_sp_cert()
return key is not None and cert is not None
def get_sp_key(self):
"""
Returns the x509 private key of the SP.
:returns: SP private key
:rtype: string or None
"""
key = self.__sp.get('privateKey')
key_file_name = self.__paths['cert'] + 'sp.key'
if not key and exists(key_file_name):
with open(key_file_name) as f:
key = f.read()
return key or None
def get_sp_cert(self):
"""
Returns the x509 public cert of the SP.
:returns: SP public cert
:rtype: string or None
"""
cert = self.__sp.get('x509cert')
cert_file_name = self.__paths['cert'] + 'sp.crt'
if not cert and exists(cert_file_name):
with open(cert_file_name) as f:
cert = f.read()
return cert or None
def get_sp_cert_new(self):
"""
Returns the x509 public of the SP planned
to be used soon instead the other public cert
:returns: SP public cert new
:rtype: string or None
"""
cert = self.__sp.get('x509certNew')
cert_file_name = self.__paths['cert'] + 'sp_new.crt'
if not cert and exists(cert_file_name):
with open(cert_file_name) as f:
cert = f.read()
return cert or None
def get_idp_cert(self):
"""
Returns the x509 public cert of the IdP.
:returns: IdP public cert
:rtype: string
"""
return self.__idp.get('x509cert')
def get_idp_data(self):
"""
Gets the IdP data.
:returns: IdP info
:rtype: dict
"""
return self.__idp
def get_sp_data(self):
"""
Gets the SP data.
:returns: SP info
:rtype: dict
"""
return self.__sp
def get_security_data(self):
"""
Gets security data.
:returns: Security info
:rtype: dict
"""
return self.__security
def get_contacts(self):
"""
Gets contact data.
:returns: Contacts info
:rtype: dict
"""
return self.__contacts
def get_organization(self):
"""
Gets organization data.
:returns: Organization info
:rtype: dict
"""
return self.__organization
    def get_sp_metadata(self):
        """
        Gets the SP metadata. The XML representation.

        Builds the EntityDescriptor from the SP/security settings, attaches
        the SP cert(s) as KeyDescriptors and, when 'signMetadata' is
        enabled, signs the document.

        :returns: SP metadata (xml)
        :rtype: string

        :raises OneLogin_Saml2_Error: when signing is requested but the
            required key/cert material is missing or unreadable.
        """
        metadata = OneLogin_Saml2_Metadata.builder(
            self.__sp, self.__security['authnRequestsSigned'],
            self.__security['wantAssertionsSigned'],
            self.__security['metadataValidUntil'],
            self.__security['metadataCacheDuration'],
            self.get_contacts(), self.get_organization()
        )

        # KeyDescriptors carry use="encryption" too only when the settings
        # expect encrypted NameIDs or assertions.
        add_encryption = self.__security['wantNameIdEncrypted'] or self.__security['wantAssertionsEncrypted']

        # The rollover cert (if any) is added first, then the current one.
        cert_new = self.get_sp_cert_new()
        metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert_new, add_encryption)

        cert = self.get_sp_cert()
        metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert, add_encryption)

        # Sign metadata
        # signMetadata may be True (use the SP key pair) or a dict naming
        # dedicated key/cert files inside the certs folder.
        if 'signMetadata' in self.__security and self.__security['signMetadata'] is not False:
            if self.__security['signMetadata'] is True:
                # Use the SP's normal key to sign the metadata:
                if not cert:
                    raise OneLogin_Saml2_Error(
                        'Cannot sign metadata: missing SP public key certificate.',
                        OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND
                    )
                cert_metadata = cert
                key_metadata = self.get_sp_key()
                if not key_metadata:
                    raise OneLogin_Saml2_Error(
                        'Cannot sign metadata: missing SP private key.',
                        OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND
                    )
            else:
                # Use a custom key to sign the metadata:
                if ('keyFileName' not in self.__security['signMetadata'] or
                        'certFileName' not in self.__security['signMetadata']):
                    raise OneLogin_Saml2_Error(
                        'Invalid Setting: signMetadata value of the sp is not valid',
                        OneLogin_Saml2_Error.SETTINGS_INVALID_SYNTAX
                    )
                key_file_name = self.__security['signMetadata']['keyFileName']
                cert_file_name = self.__security['signMetadata']['certFileName']
                key_metadata_file = self.__paths['cert'] + key_file_name
                cert_metadata_file = self.__paths['cert'] + cert_file_name

                try:
                    with open(key_metadata_file, 'r') as f_metadata_key:
                        key_metadata = f_metadata_key.read()
                except IOError:
                    raise OneLogin_Saml2_Error(
                        'Private key file not readable: %s',
                        OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND,
                        key_metadata_file
                    )

                try:
                    with open(cert_metadata_file, 'r') as f_metadata_cert:
                        cert_metadata = f_metadata_cert.read()
                except IOError:
                    raise OneLogin_Saml2_Error(
                        'Public cert file not readable: %s',
                        OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND,
                        cert_metadata_file
                    )

            signature_algorithm = self.__security['signatureAlgorithm']
            digest_algorithm = self.__security['digestAlgorithm']

            metadata = OneLogin_Saml2_Metadata.sign_metadata(metadata, key_metadata, cert_metadata, signature_algorithm, digest_algorithm)

        return metadata
    def validate_metadata(self, xml):
        """
        Validates an XML SP Metadata.

        Checks the document against the SAML 2.0 metadata schema, verifies
        it is a single-SP EntityDescriptor, and checks it has not expired
        according to validUntil/cacheDuration.

        :param xml: Metadata's XML that will be validate
        :type xml: string

        :returns: The list of found errors
        :rtype: list

        :raises Exception: if the supplied string is empty.
        """
        assert isinstance(xml, compat.text_types)

        if len(xml) == 0:
            raise Exception('Empty string supplied as input')

        errors = []
        # validate_xml returns the parsed root on success, or an error
        # string describing the schema failure.
        root = OneLogin_Saml2_XML.validate_xml(xml, 'saml-schema-metadata-2.0.xsd', self.__debug)
        if isinstance(root, str):
            errors.append(root)
        else:
            if root.tag != '{%s}EntityDescriptor' % OneLogin_Saml2_Constants.NS_MD:
                errors.append('noEntityDescriptor_xml')
            else:
                if (len(root.findall('.//md:SPSSODescriptor', namespaces=OneLogin_Saml2_Constants.NSMAP))) != 1:
                    errors.append('onlySPSSODescriptor_allowed_xml')
                else:
                    valid_until, cache_duration = root.get('validUntil'), root.get('cacheDuration')

                    if valid_until:
                        valid_until = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until)
                    expire_time = OneLogin_Saml2_Utils.get_expire_time(cache_duration, valid_until)
                    if expire_time is not None and int(time()) > int(expire_time):
                        errors.append('expired_xml')

                    # TODO: Validate Sign
        return errors
def format_idp_cert(self):
"""
Formats the IdP cert.
"""
self.__idp['x509cert'] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509cert'])
def format_idp_cert_multi(self):
"""
Formats the Multple IdP certs.
"""
if 'x509certMulti' in self.__idp:
if 'signing' in self.__idp['x509certMulti']:
for idx in range(len(self.__idp['x509certMulti']['signing'])):
self.__idp['x509certMulti']['signing'][idx] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509certMulti']['signing'][idx])
if 'encryption' in self.__idp['x509certMulti']:
for idx in range(len(self.__idp['x509certMulti']['encryption'])):
self.__idp['x509certMulti']['encryption'][idx] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509certMulti']['encryption'][idx])
def format_sp_cert(self):
"""
Formats the SP cert.
"""
self.__sp['x509cert'] = OneLogin_Saml2_Utils.format_cert(self.__sp['x509cert'])
def format_sp_cert_new(self):
"""
Formats the SP cert.
"""
self.__sp['x509certNew'] = OneLogin_Saml2_Utils.format_cert(self.__sp['x509certNew'])
def format_sp_key(self):
"""
Formats the private key.
"""
self.__sp['privateKey'] = OneLogin_Saml2_Utils.format_private_key(self.__sp['privateKey'])
def get_errors(self):
"""
Returns an array with the errors, the array is empty when the settings is ok.
:returns: Errors
:rtype: list
"""
return self.__errors
def set_strict(self, value):
"""
Activates or deactivates the strict mode.
:param value: Strict parameter
:type value: boolean
"""
assert isinstance(value, bool)
self.__strict = value
def is_strict(self):
"""
Returns if the 'strict' mode is active.
:returns: Strict parameter
:rtype: boolean
"""
return self.__strict
def is_debug_active(self):
"""
Returns if the debug is active.
:returns: Debug parameter
:rtype: boolean
"""
return self.__debug
| 36.322055 | 153 | 0.583198 |
from time import time
import re
from os.path import dirname, exists, join, sep
from app.utils.onelogin.saml2 import compat
from app.utils.onelogin.saml2.constants import OneLogin_Saml2_Constants
from app.utils.onelogin.saml2.errors import OneLogin_Saml2_Error
from app.utils.onelogin.saml2.metadata import OneLogin_Saml2_Metadata
from app.utils.onelogin.saml2.utils import OneLogin_Saml2_Utils
from app.utils.onelogin.saml2.xml_utils import OneLogin_Saml2_XML
try:
import ujson as json
except ImportError:
import json
try:
basestring
except NameError:
basestring = str
url_regex = re.compile(
r'^(?:[a-z0-9\.\-]*)://'
r'(?:(?:[A-Z0-9_](?:[A-Z0-9-_]{0,61}[A-Z0-9_])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
url_schemes = ['http', 'https', 'ftp', 'ftps']
def validate_url(url):
scheme = url.split('://')[0].lower()
if scheme not in url_schemes:
return False
if not bool(url_regex.search(url)):
return False
return True
class OneLogin_Saml2_Settings(object):
def __init__(self, settings=None, custom_base_path=None, sp_validation_only=False):
self.__sp_validation_only = sp_validation_only
self.__paths = {}
self.__strict = False
self.__debug = False
self.__sp = {}
self.__idp = {}
self.__security = {}
self.__contacts = {}
self.__organization = {}
self.__errors = []
self.__load_paths(base_path=custom_base_path)
self.__update_paths(settings)
if settings is None:
try:
valid = self.__load_settings_from_file()
except Exception as e:
raise e
if not valid:
raise OneLogin_Saml2_Error(
'Invalid dict settings at the file: %s',
OneLogin_Saml2_Error.SETTINGS_INVALID,
','.join(self.__errors)
)
elif isinstance(settings, dict):
if not self.__load_settings_from_dict(settings):
raise OneLogin_Saml2_Error(
'Invalid dict settings: %s',
OneLogin_Saml2_Error.SETTINGS_INVALID,
','.join(self.__errors)
)
else:
raise OneLogin_Saml2_Error(
'Unsupported settings object',
OneLogin_Saml2_Error.UNSUPPORTED_SETTINGS_OBJECT
)
self.format_idp_cert()
if 'x509certMulti' in self.__idp:
self.format_idp_cert_multi()
self.format_sp_cert()
if 'x509certNew' in self.__sp:
self.format_sp_cert_new()
self.format_sp_key()
def __load_paths(self, base_path=None):
if base_path is None:
base_path = dirname(dirname(dirname(__file__)))
if not base_path.endswith(sep):
base_path += sep
self.__paths = {
'base': base_path,
'cert': base_path + 'certs' + sep,
'lib': base_path + 'lib' + sep,
'extlib': base_path + 'extlib' + sep,
}
def __update_paths(self, settings):
if not isinstance(settings, dict):
return
if 'custom_base_path' in settings:
base_path = settings['custom_base_path']
base_path = join(dirname(__file__), base_path)
self.__load_paths(base_path)
def get_base_path(self):
return self.__paths['base']
def get_cert_path(self):
return self.__paths['cert']
def get_lib_path(self):
return self.__paths['lib']
def get_ext_lib_path(self):
return self.__paths['extlib']
def get_schemas_path(self):
return self.__paths['lib'] + 'schemas/'
def __load_settings_from_dict(self, settings):
errors = self.check_settings(settings)
if len(errors) == 0:
self.__errors = []
self.__sp = settings['sp']
self.__idp = settings.get('idp', {})
self.__strict = settings.get('strict', False)
self.__debug = settings.get('debug', False)
self.__security = settings.get('security', {})
self.__contacts = settings.get('contactPerson', {})
self.__organization = settings.get('organization', {})
self.__add_default_values()
return True
self.__errors = errors
return False
def __load_settings_from_file(self):
filename = self.get_base_path() + 'settings.json'
if not exists(filename):
raise OneLogin_Saml2_Error(
'Settings file not found: %s',
OneLogin_Saml2_Error.SETTINGS_FILE_NOT_FOUND,
filename
)
with open(filename, 'r') as json_data:
settings = json.loads(json_data.read())
advanced_filename = self.get_base_path() + 'advanced_settings.json'
if exists(advanced_filename):
with open(advanced_filename, 'r') as json_data:
settings.update(json.loads(json_data.read()))
return self.__load_settings_from_dict(settings)
def __add_default_values(self):
self.__sp.setdefault('assertionConsumerService', {})
self.__sp['assertionConsumerService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_POST)
self.__sp.setdefault('attributeConsumingService', {})
self.__sp.setdefault('singleLogoutService', {})
self.__sp['singleLogoutService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT)
self.__idp.setdefault('singleLogoutService', {})
self.__sp.setdefault('NameIDFormat', OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED)
self.__security.setdefault('nameIdEncrypted', False)
self.__security.setdefault('metadataValidUntil', None)
self.__security.setdefault('metadataCacheDuration', None)
self.__security.setdefault('authnRequestsSigned', False)
self.__security.setdefault('logoutRequestSigned', False)
self.__security.setdefault('logoutResponseSigned', False)
self.__security.setdefault('signMetadata', False)
self.__security.setdefault('wantMessagesSigned', False)
self.__security.setdefault('wantAssertionsSigned', False)
self.__security.setdefault('wantNameId', True)
self.__security.setdefault('wantAssertionsEncrypted', False)
self.__security.setdefault('wantNameIdEncrypted', False)
self.__security.setdefault('signatureAlgorithm', OneLogin_Saml2_Constants.RSA_SHA1)
self.__security.setdefault('digestAlgorithm', OneLogin_Saml2_Constants.SHA1)
self.__security.setdefault('wantAttributeStatement', True)
self.__idp.setdefault('x509cert', '')
self.__idp.setdefault('certFingerprint', '')
self.__idp.setdefault('certFingerprintAlgorithm', 'sha1')
self.__sp.setdefault('x509cert', '')
self.__sp.setdefault('privateKey', '')
self.__security.setdefault('requestedAuthnContext', True)
self.__security.setdefault('failOnAuthnContextMismatch', False)
def check_settings(self, settings):
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or len(settings) == 0:
errors.append('invalid_syntax')
else:
if not self.__sp_validation_only:
errors += self.check_idp_settings(settings)
sp_errors = self.check_sp_settings(settings)
errors += sp_errors
return errors
def check_idp_settings(self, settings):
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or len(settings) == 0:
errors.append('invalid_syntax')
else:
if not settings.get('idp'):
errors.append('idp_not_found')
else:
idp = settings['idp']
if not idp.get('entityId'):
errors.append('idp_entityId_not_found')
if not idp.get('singleSignOnService', {}).get('url'):
errors.append('idp_sso_not_found')
elif not validate_url(idp['singleSignOnService']['url']):
errors.append('idp_sso_url_invalid')
slo_url = idp.get('singleLogoutService', {}).get('url')
if slo_url and not validate_url(slo_url):
errors.append('idp_slo_url_invalid')
if 'security' in settings:
security = settings['security']
exists_x509 = bool(idp.get('x509cert'))
exists_fingerprint = bool(idp.get('certFingerprint'))
exists_multix509sign = 'x509certMulti' in idp and \
'signing' in idp['x509certMulti'] and \
idp['x509certMulti']['signing']
exists_multix509enc = 'x509certMulti' in idp and \
'encryption' in idp['x509certMulti'] and \
idp['x509certMulti']['encryption']
want_assert_sign = bool(security.get('wantAssertionsSigned'))
want_mes_signed = bool(security.get('wantMessagesSigned'))
nameid_enc = bool(security.get('nameIdEncrypted'))
if (want_assert_sign or want_mes_signed) and \
not(exists_x509 or exists_fingerprint or exists_multix509sign):
errors.append('idp_cert_or_fingerprint_not_found_and_required')
if nameid_enc and not (exists_x509 or exists_multix509enc):
errors.append('idp_cert_not_found_and_required')
return errors
def check_sp_settings(self, settings):
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or not settings:
errors.append('invalid_syntax')
else:
if not settings.get('sp'):
errors.append('sp_not_found')
else:
old_sp = self.__sp
self.__sp = settings['sp']
sp = settings['sp']
security = settings.get('security', {})
if not sp.get('entityId'):
errors.append('sp_entityId_not_found')
if not sp.get('assertionConsumerService', {}).get('url'):
errors.append('sp_acs_not_found')
elif not validate_url(sp['assertionConsumerService']['url']):
errors.append('sp_acs_url_invalid')
if sp.get('attributeConsumingService'):
attributeConsumingService = sp['attributeConsumingService']
if 'serviceName' not in attributeConsumingService:
errors.append('sp_attributeConsumingService_serviceName_not_found')
elif not isinstance(attributeConsumingService['serviceName'], basestring):
errors.append('sp_attributeConsumingService_serviceName_type_invalid')
if 'requestedAttributes' not in attributeConsumingService:
errors.append('sp_attributeConsumingService_requestedAttributes_not_found')
elif not isinstance(attributeConsumingService['requestedAttributes'], list):
errors.append('sp_attributeConsumingService_serviceName_type_invalid')
else:
for req_attrib in attributeConsumingService['requestedAttributes']:
if 'name' not in req_attrib:
errors.append('sp_attributeConsumingService_requestedAttributes_name_not_found')
if 'name' in req_attrib and not req_attrib['name'].strip():
errors.append('sp_attributeConsumingService_requestedAttributes_name_invalid')
if 'attributeValue' in req_attrib and type(req_attrib['attributeValue']) != list:
errors.append('sp_attributeConsumingService_requestedAttributes_attributeValue_type_invalid')
if 'isRequired' in req_attrib and type(req_attrib['isRequired']) != bool:
errors.append('sp_attributeConsumingService_requestedAttributes_isRequired_type_invalid')
if "serviceDescription" in attributeConsumingService and not isinstance(attributeConsumingService['serviceDescription'], basestring):
errors.append('sp_attributeConsumingService_serviceDescription_type_invalid')
slo_url = sp.get('singleLogoutService', {}).get('url')
if slo_url and not validate_url(slo_url):
errors.append('sp_sls_url_invalid')
if 'signMetadata' in security and isinstance(security['signMetadata'], dict):
if 'keyFileName' not in security['signMetadata'] or \
'certFileName' not in security['signMetadata']:
errors.append('sp_signMetadata_invalid')
authn_sign = bool(security.get('authnRequestsSigned'))
logout_req_sign = bool(security.get('logoutRequestSigned'))
logout_res_sign = bool(security.get('logoutResponseSigned'))
want_assert_enc = bool(security.get('wantAssertionsEncrypted'))
want_nameid_enc = bool(security.get('wantNameIdEncrypted'))
if not self.check_sp_certs():
if authn_sign or logout_req_sign or logout_res_sign or \
want_assert_enc or want_nameid_enc:
errors.append('sp_cert_not_found_and_required')
if 'contactPerson' in settings:
types = settings['contactPerson']
valid_types = ['technical', 'support', 'administrative', 'billing', 'other']
for c_type in types:
if c_type not in valid_types:
errors.append('contact_type_invalid')
break
for c_type in settings['contactPerson']:
contact = settings['contactPerson'][c_type]
if ('givenName' not in contact or len(contact['givenName']) == 0) or \
('emailAddress' not in contact or len(contact['emailAddress']) == 0):
errors.append('contact_not_enought_data')
break
if 'organization' in settings:
for org in settings['organization']:
organization = settings['organization'][org]
if ('name' not in organization or len(organization['name']) == 0) or \
('displayname' not in organization or len(organization['displayname']) == 0) or \
('url' not in organization or len(organization['url']) == 0):
errors.append('organization_not_enought_data')
break
if 'old_sp' in locals():
self.__sp = old_sp
return errors
def check_sp_certs(self):
key = self.get_sp_key()
cert = self.get_sp_cert()
return key is not None and cert is not None
def get_sp_key(self):
key = self.__sp.get('privateKey')
key_file_name = self.__paths['cert'] + 'sp.key'
if not key and exists(key_file_name):
with open(key_file_name) as f:
key = f.read()
return key or None
def get_sp_cert(self):
cert = self.__sp.get('x509cert')
cert_file_name = self.__paths['cert'] + 'sp.crt'
if not cert and exists(cert_file_name):
with open(cert_file_name) as f:
cert = f.read()
return cert or None
def get_sp_cert_new(self):
cert = self.__sp.get('x509certNew')
cert_file_name = self.__paths['cert'] + 'sp_new.crt'
if not cert and exists(cert_file_name):
with open(cert_file_name) as f:
cert = f.read()
return cert or None
def get_idp_cert(self):
return self.__idp.get('x509cert')
def get_idp_data(self):
return self.__idp
def get_sp_data(self):
return self.__sp
def get_security_data(self):
return self.__security
def get_contacts(self):
return self.__contacts
def get_organization(self):
return self.__organization
def get_sp_metadata(self):
metadata = OneLogin_Saml2_Metadata.builder(
self.__sp, self.__security['authnRequestsSigned'],
self.__security['wantAssertionsSigned'],
self.__security['metadataValidUntil'],
self.__security['metadataCacheDuration'],
self.get_contacts(), self.get_organization()
)
add_encryption = self.__security['wantNameIdEncrypted'] or self.__security['wantAssertionsEncrypted']
cert_new = self.get_sp_cert_new()
metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert_new, add_encryption)
cert = self.get_sp_cert()
metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert, add_encryption)
if 'signMetadata' in self.__security and self.__security['signMetadata'] is not False:
if self.__security['signMetadata'] is True:
if not cert:
raise OneLogin_Saml2_Error(
'Cannot sign metadata: missing SP public key certificate.',
OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND
)
cert_metadata = cert
key_metadata = self.get_sp_key()
if not key_metadata:
raise OneLogin_Saml2_Error(
'Cannot sign metadata: missing SP private key.',
OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND
)
else:
# Use a custom key to sign the metadata:
if ('keyFileName' not in self.__security['signMetadata'] or
'certFileName' not in self.__security['signMetadata']):
raise OneLogin_Saml2_Error(
'Invalid Setting: signMetadata value of the sp is not valid',
OneLogin_Saml2_Error.SETTINGS_INVALID_SYNTAX
)
key_file_name = self.__security['signMetadata']['keyFileName']
cert_file_name = self.__security['signMetadata']['certFileName']
key_metadata_file = self.__paths['cert'] + key_file_name
cert_metadata_file = self.__paths['cert'] + cert_file_name
try:
with open(key_metadata_file, 'r') as f_metadata_key:
key_metadata = f_metadata_key.read()
except IOError:
raise OneLogin_Saml2_Error(
'Private key file not readable: %s',
OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND,
key_metadata_file
)
try:
with open(cert_metadata_file, 'r') as f_metadata_cert:
cert_metadata = f_metadata_cert.read()
except IOError:
raise OneLogin_Saml2_Error(
'Public cert file not readable: %s',
OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND,
cert_metadata_file
)
signature_algorithm = self.__security['signatureAlgorithm']
digest_algorithm = self.__security['digestAlgorithm']
metadata = OneLogin_Saml2_Metadata.sign_metadata(metadata, key_metadata, cert_metadata, signature_algorithm, digest_algorithm)
return metadata
def validate_metadata(self, xml):
assert isinstance(xml, compat.text_types)
if len(xml) == 0:
raise Exception('Empty string supplied as input')
errors = []
root = OneLogin_Saml2_XML.validate_xml(xml, 'saml-schema-metadata-2.0.xsd', self.__debug)
if isinstance(root, str):
errors.append(root)
else:
if root.tag != '{%s}EntityDescriptor' % OneLogin_Saml2_Constants.NS_MD:
errors.append('noEntityDescriptor_xml')
else:
if (len(root.findall('.//md:SPSSODescriptor', namespaces=OneLogin_Saml2_Constants.NSMAP))) != 1:
errors.append('onlySPSSODescriptor_allowed_xml')
else:
valid_until, cache_duration = root.get('validUntil'), root.get('cacheDuration')
if valid_until:
valid_until = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until)
expire_time = OneLogin_Saml2_Utils.get_expire_time(cache_duration, valid_until)
if expire_time is not None and int(time()) > int(expire_time):
errors.append('expired_xml')
# TODO: Validate Sign
return errors
def format_idp_cert(self):
self.__idp['x509cert'] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509cert'])
def format_idp_cert_multi(self):
if 'x509certMulti' in self.__idp:
if 'signing' in self.__idp['x509certMulti']:
for idx in range(len(self.__idp['x509certMulti']['signing'])):
self.__idp['x509certMulti']['signing'][idx] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509certMulti']['signing'][idx])
if 'encryption' in self.__idp['x509certMulti']:
for idx in range(len(self.__idp['x509certMulti']['encryption'])):
self.__idp['x509certMulti']['encryption'][idx] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509certMulti']['encryption'][idx])
def format_sp_cert(self):
self.__sp['x509cert'] = OneLogin_Saml2_Utils.format_cert(self.__sp['x509cert'])
def format_sp_cert_new(self):
self.__sp['x509certNew'] = OneLogin_Saml2_Utils.format_cert(self.__sp['x509certNew'])
def format_sp_key(self):
self.__sp['privateKey'] = OneLogin_Saml2_Utils.format_private_key(self.__sp['privateKey'])
    def get_errors(self):
        """
        Returns the list of errors recorded on this settings object.

        :returns: The stored errors (the internal list itself, not a copy).
        :rtype: list
        """
        return self.__errors
    def set_strict(self, value):
        """
        Activates or deactivates strict mode.

        :param value: Strict parameter value; must be a bool
                      (enforced by the assert, which is stripped under -O).
        :type value: bool
        """
        assert isinstance(value, bool)
        self.__strict = value
    def is_strict(self):
        """
        Returns whether strict mode is active.

        :rtype: bool
        """
        return self.__strict
    def is_debug_active(self):
        """
        Returns whether debug mode is active.

        :rtype: bool
        """
        return self.__debug
| true | true |
79006fa5a9c74f484a8bb010a5285a68f277a0a0 | 23,872 | py | Python | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NetworkVirtualApplianceArgs', 'NetworkVirtualAppliance']
# NOTE(review): This class is emitted by the Pulumi SDK Generator (see the
# file-header warning); keep hand edits limited to comments so the file can
# be regenerated safely. Code below is intentionally unchanged.
@pulumi.input_type
class NetworkVirtualApplianceArgs:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 cloud_init_configuration: Optional[pulumi.Input[str]] = None,
                 cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
                 nva_sku: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
                 virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None):
        """
        The set of arguments for constructing a NetworkVirtualAppliance resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
        :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input['ManagedServiceIdentityArgs'] identity: The service principal that has read access to cloud-init and config blob.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.
        :param pulumi.Input['VirtualApplianceSkuPropertiesArgs'] nva_sku: Network Virtual Appliance SKU.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
        :param pulumi.Input['SubResourceArgs'] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only recorded when explicitly supplied.
        if boot_strap_configuration_blobs is not None:
            pulumi.set(__self__, "boot_strap_configuration_blobs", boot_strap_configuration_blobs)
        if cloud_init_configuration is not None:
            pulumi.set(__self__, "cloud_init_configuration", cloud_init_configuration)
        if cloud_init_configuration_blobs is not None:
            pulumi.set(__self__, "cloud_init_configuration_blobs", cloud_init_configuration_blobs)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if identity is not None:
            pulumi.set(__self__, "identity", identity)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if network_virtual_appliance_name is not None:
            pulumi.set(__self__, "network_virtual_appliance_name", network_virtual_appliance_name)
        if nva_sku is not None:
            pulumi.set(__self__, "nva_sku", nva_sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if virtual_appliance_asn is not None:
            pulumi.set(__self__, "virtual_appliance_asn", virtual_appliance_asn)
        if virtual_hub is not None:
            pulumi.set(__self__, "virtual_hub", virtual_hub)
    # Each property below proxies Pulumi's internal argument storage via
    # pulumi.get/pulumi.set; the getter's `name=` is the wire (camelCase) name.
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="bootStrapConfigurationBlobs")
    def boot_strap_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        BootStrapConfigurationBlobs storage URLs.
        """
        return pulumi.get(self, "boot_strap_configuration_blobs")
    @boot_strap_configuration_blobs.setter
    def boot_strap_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "boot_strap_configuration_blobs", value)
    @property
    @pulumi.getter(name="cloudInitConfiguration")
    def cloud_init_configuration(self) -> Optional[pulumi.Input[str]]:
        """
        CloudInitConfiguration string in plain text.
        """
        return pulumi.get(self, "cloud_init_configuration")
    @cloud_init_configuration.setter
    def cloud_init_configuration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cloud_init_configuration", value)
    @property
    @pulumi.getter(name="cloudInitConfigurationBlobs")
    def cloud_init_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        CloudInitConfigurationBlob storage URLs.
        """
        return pulumi.get(self, "cloud_init_configuration_blobs")
    @cloud_init_configuration_blobs.setter
    def cloud_init_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "cloud_init_configuration_blobs", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
        """
        The service principal that has read access to cloud-init and config blob.
        """
        return pulumi.get(self, "identity")
    @identity.setter
    def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]):
        pulumi.set(self, "identity", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="networkVirtualApplianceName")
    def network_virtual_appliance_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of Network Virtual Appliance.
        """
        return pulumi.get(self, "network_virtual_appliance_name")
    @network_virtual_appliance_name.setter
    def network_virtual_appliance_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network_virtual_appliance_name", value)
    @property
    @pulumi.getter(name="nvaSku")
    def nva_sku(self) -> Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]:
        """
        Network Virtual Appliance SKU.
        """
        return pulumi.get(self, "nva_sku")
    @nva_sku.setter
    def nva_sku(self, value: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]):
        pulumi.set(self, "nva_sku", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="virtualApplianceAsn")
    def virtual_appliance_asn(self) -> Optional[pulumi.Input[float]]:
        """
        VirtualAppliance ASN.
        """
        return pulumi.get(self, "virtual_appliance_asn")
    @virtual_appliance_asn.setter
    def virtual_appliance_asn(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "virtual_appliance_asn", value)
    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The Virtual Hub where Network Virtual Appliance is being deployed.
        """
        return pulumi.get(self, "virtual_hub")
    @virtual_hub.setter
    def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "virtual_hub", value)
# NOTE(review): Auto-generated Pulumi resource class (see file-header warning);
# hand edits are restricted to comments so regeneration stays safe.
class NetworkVirtualAppliance(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 cloud_init_configuration: Optional[pulumi.Input[str]] = None,
                 cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
                 nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
                 virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 __props__=None):
        """
        NetworkVirtualAppliance Resource.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
        :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The service principal that has read access to cloud-init and config blob.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.
        :param pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']] nva_sku: Network Virtual Appliance SKU.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: NetworkVirtualApplianceArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        NetworkVirtualAppliance Resource.
        :param str resource_name: The name of the resource.
        :param NetworkVirtualApplianceArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    # Dispatches between the (resource_name, args, opts) form and the
    # keyword-argument form declared by the two overloads above.
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(NetworkVirtualApplianceArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       cloud_init_configuration: Optional[pulumi.Input[str]] = None,
                       cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       id: Optional[pulumi.Input[str]] = None,
                       identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
                       nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
                       virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
            __props__.__dict__["boot_strap_configuration_blobs"] = boot_strap_configuration_blobs
            __props__.__dict__["cloud_init_configuration"] = cloud_init_configuration
            __props__.__dict__["cloud_init_configuration_blobs"] = cloud_init_configuration_blobs
            __props__.__dict__["id"] = id
            __props__.__dict__["identity"] = identity
            __props__.__dict__["location"] = location
            __props__.__dict__["network_virtual_appliance_name"] = network_virtual_appliance_name
            __props__.__dict__["nva_sku"] = nva_sku
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            __props__.__dict__["virtual_appliance_asn"] = virtual_appliance_asn
            __props__.__dict__["virtual_hub"] = virtual_hub
            # Output-only properties start as None and are filled in by the engine.
            __props__.__dict__["address_prefix"] = None
            __props__.__dict__["etag"] = None
            __props__.__dict__["inbound_security_rules"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
            __props__.__dict__["virtual_appliance_nics"] = None
            __props__.__dict__["virtual_appliance_sites"] = None
        # Aliases map this type to the same resource under other API versions
        # and under the legacy azure-nextgen provider name.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20201101:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210301:NetworkVirtualAppliance")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(NetworkVirtualAppliance, __self__).__init__(
            'azure-native:network/v20201101:NetworkVirtualAppliance',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkVirtualAppliance':
        """
        Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine hydrates them from the
        # existing resource identified by `id`.
        __props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
        __props__.__dict__["address_prefix"] = None
        __props__.__dict__["boot_strap_configuration_blobs"] = None
        __props__.__dict__["cloud_init_configuration"] = None
        __props__.__dict__["cloud_init_configuration_blobs"] = None
        __props__.__dict__["etag"] = None
        __props__.__dict__["identity"] = None
        __props__.__dict__["inbound_security_rules"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["nva_sku"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["virtual_appliance_asn"] = None
        __props__.__dict__["virtual_appliance_nics"] = None
        __props__.__dict__["virtual_appliance_sites"] = None
        __props__.__dict__["virtual_hub"] = None
        return NetworkVirtualAppliance(resource_name, opts=opts, __props__=__props__)
    # Read-only output properties resolved by the Pulumi engine.
    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> pulumi.Output[str]:
        """
        Address Prefix.
        """
        return pulumi.get(self, "address_prefix")
    @property
    @pulumi.getter(name="bootStrapConfigurationBlobs")
    def boot_strap_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        BootStrapConfigurationBlobs storage URLs.
        """
        return pulumi.get(self, "boot_strap_configuration_blobs")
    @property
    @pulumi.getter(name="cloudInitConfiguration")
    def cloud_init_configuration(self) -> pulumi.Output[Optional[str]]:
        """
        CloudInitConfiguration string in plain text.
        """
        return pulumi.get(self, "cloud_init_configuration")
    @property
    @pulumi.getter(name="cloudInitConfigurationBlobs")
    def cloud_init_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        CloudInitConfigurationBlob storage URLs.
        """
        return pulumi.get(self, "cloud_init_configuration_blobs")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
        """
        The service principal that has read access to cloud-init and config blob.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter(name="inboundSecurityRules")
    def inbound_security_rules(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """
        List of references to InboundSecurityRules.
        """
        return pulumi.get(self, "inbound_security_rules")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nvaSku")
    def nva_sku(self) -> pulumi.Output[Optional['outputs.VirtualApplianceSkuPropertiesResponse']]:
        """
        Network Virtual Appliance SKU.
        """
        return pulumi.get(self, "nva_sku")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="virtualApplianceAsn")
    def virtual_appliance_asn(self) -> pulumi.Output[Optional[float]]:
        """
        VirtualAppliance ASN.
        """
        return pulumi.get(self, "virtual_appliance_asn")
    @property
    @pulumi.getter(name="virtualApplianceNics")
    def virtual_appliance_nics(self) -> pulumi.Output[Sequence['outputs.VirtualApplianceNicPropertiesResponse']]:
        """
        List of Virtual Appliance Network Interfaces.
        """
        return pulumi.get(self, "virtual_appliance_nics")
    @property
    @pulumi.getter(name="virtualApplianceSites")
    def virtual_appliance_sites(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """
        List of references to VirtualApplianceSite.
        """
        return pulumi.get(self, "virtual_appliance_sites")
    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The Virtual Hub where Network Virtual Appliance is being deployed.
        """
        return pulumi.get(self, "virtual_hub")
| 47.744 | 1,682 | 0.680379 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NetworkVirtualApplianceArgs', 'NetworkVirtualAppliance']
@pulumi.input_type
class NetworkVirtualApplianceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cloud_init_configuration: Optional[pulumi.Input[str]] = None,
cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
nva_sku: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None):
pulumi.set(__self__, "resource_group_name", resource_group_name)
if boot_strap_configuration_blobs is not None:
pulumi.set(__self__, "boot_strap_configuration_blobs", boot_strap_configuration_blobs)
if cloud_init_configuration is not None:
pulumi.set(__self__, "cloud_init_configuration", cloud_init_configuration)
if cloud_init_configuration_blobs is not None:
pulumi.set(__self__, "cloud_init_configuration_blobs", cloud_init_configuration_blobs)
if id is not None:
pulumi.set(__self__, "id", id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if network_virtual_appliance_name is not None:
pulumi.set(__self__, "network_virtual_appliance_name", network_virtual_appliance_name)
if nva_sku is not None:
pulumi.set(__self__, "nva_sku", nva_sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_appliance_asn is not None:
pulumi.set(__self__, "virtual_appliance_asn", virtual_appliance_asn)
if virtual_hub is not None:
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="bootStrapConfigurationBlobs")
def boot_strap_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "boot_strap_configuration_blobs")
@boot_strap_configuration_blobs.setter
def boot_strap_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "boot_strap_configuration_blobs", value)
@property
@pulumi.getter(name="cloudInitConfiguration")
def cloud_init_configuration(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cloud_init_configuration")
@cloud_init_configuration.setter
def cloud_init_configuration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cloud_init_configuration", value)
@property
@pulumi.getter(name="cloudInitConfigurationBlobs")
def cloud_init_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "cloud_init_configuration_blobs")
@cloud_init_configuration_blobs.setter
def cloud_init_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "cloud_init_configuration_blobs", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="networkVirtualApplianceName")
def network_virtual_appliance_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "network_virtual_appliance_name")
@network_virtual_appliance_name.setter
def network_virtual_appliance_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_virtual_appliance_name", value)
@property
@pulumi.getter(name="nvaSku")
def nva_sku(self) -> Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]:
return pulumi.get(self, "nva_sku")
@nva_sku.setter
def nva_sku(self, value: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]):
pulumi.set(self, "nva_sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="virtualApplianceAsn")
def virtual_appliance_asn(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "virtual_appliance_asn")
@virtual_appliance_asn.setter
def virtual_appliance_asn(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "virtual_appliance_asn", value)
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]:
return pulumi.get(self, "virtual_hub")
@virtual_hub.setter
def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "virtual_hub", value)
class NetworkVirtualAppliance(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cloud_init_configuration: Optional[pulumi.Input[str]] = None,
cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: NetworkVirtualApplianceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NetworkVirtualApplianceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cloud_init_configuration: Optional[pulumi.Input[str]] = None,
cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
__props__.__dict__["boot_strap_configuration_blobs"] = boot_strap_configuration_blobs
__props__.__dict__["cloud_init_configuration"] = cloud_init_configuration
__props__.__dict__["cloud_init_configuration_blobs"] = cloud_init_configuration_blobs
__props__.__dict__["id"] = id
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["network_virtual_appliance_name"] = network_virtual_appliance_name
__props__.__dict__["nva_sku"] = nva_sku
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["virtual_appliance_asn"] = virtual_appliance_asn
__props__.__dict__["virtual_hub"] = virtual_hub
__props__.__dict__["address_prefix"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["inbound_security_rules"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_appliance_nics"] = None
__props__.__dict__["virtual_appliance_sites"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20201101:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210301:NetworkVirtualAppliance")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NetworkVirtualAppliance, __self__).__init__(
'azure-native:network/v20201101:NetworkVirtualAppliance',
resource_name,
__props__,
opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkVirtualAppliance':
        """
        Get an existing NetworkVirtualAppliance resource by name and provider ID.

        All output properties are initialized to ``None`` placeholders here;
        their actual values come from the looked-up resource state.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to look up.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
        __props__.__dict__["address_prefix"] = None
        __props__.__dict__["boot_strap_configuration_blobs"] = None
        __props__.__dict__["cloud_init_configuration"] = None
        __props__.__dict__["cloud_init_configuration_blobs"] = None
        __props__.__dict__["etag"] = None
        __props__.__dict__["identity"] = None
        __props__.__dict__["inbound_security_rules"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["nva_sku"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["virtual_appliance_asn"] = None
        __props__.__dict__["virtual_appliance_nics"] = None
        __props__.__dict__["virtual_appliance_sites"] = None
        __props__.__dict__["virtual_hub"] = None
        return NetworkVirtualAppliance(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> pulumi.Output[str]:
        """The ``address_prefix`` output property."""
        return pulumi.get(self, "address_prefix")
    @property
    @pulumi.getter(name="bootStrapConfigurationBlobs")
    def boot_strap_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """The ``boot_strap_configuration_blobs`` output property."""
        return pulumi.get(self, "boot_strap_configuration_blobs")
    @property
    @pulumi.getter(name="cloudInitConfiguration")
    def cloud_init_configuration(self) -> pulumi.Output[Optional[str]]:
        """The ``cloud_init_configuration`` output property."""
        return pulumi.get(self, "cloud_init_configuration")
    @property
    @pulumi.getter(name="cloudInitConfigurationBlobs")
    def cloud_init_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """The ``cloud_init_configuration_blobs`` output property."""
        return pulumi.get(self, "cloud_init_configuration_blobs")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """The ``etag`` output property."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
        """The ``identity`` output property."""
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter(name="inboundSecurityRules")
    def inbound_security_rules(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """The ``inbound_security_rules`` output property."""
        return pulumi.get(self, "inbound_security_rules")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """The ``location`` output property."""
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """The ``name`` output property."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nvaSku")
    def nva_sku(self) -> pulumi.Output[Optional['outputs.VirtualApplianceSkuPropertiesResponse']]:
        """The ``nva_sku`` output property."""
        return pulumi.get(self, "nva_sku")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """The ``provisioning_state`` output property."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """The ``tags`` output property."""
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """The ``type`` output property."""
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="virtualApplianceAsn")
    def virtual_appliance_asn(self) -> pulumi.Output[Optional[float]]:
        """The ``virtual_appliance_asn`` output property."""
        return pulumi.get(self, "virtual_appliance_asn")
    @property
    @pulumi.getter(name="virtualApplianceNics")
    def virtual_appliance_nics(self) -> pulumi.Output[Sequence['outputs.VirtualApplianceNicPropertiesResponse']]:
        """The ``virtual_appliance_nics`` output property."""
        return pulumi.get(self, "virtual_appliance_nics")
    @property
    @pulumi.getter(name="virtualApplianceSites")
    def virtual_appliance_sites(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """The ``virtual_appliance_sites`` output property."""
        return pulumi.get(self, "virtual_appliance_sites")
    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """The ``virtual_hub`` output property."""
        return pulumi.get(self, "virtual_hub")
| true | true |
790071082ba657366bf960086492ee9ce85e312b | 204 | py | Python | home/hairygael/GESTURES/giving.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
] | 63 | 2015-02-03T18:49:43.000Z | 2022-03-29T03:52:24.000Z | home/hairygael/GESTURES/giving.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
] | 16 | 2016-01-26T19:13:29.000Z | 2018-11-25T21:20:51.000Z | home/hairygael/GESTURES/giving.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
] | 151 | 2015-01-03T18:55:54.000Z | 2022-03-04T07:04:23.000Z | def giving()
i01.moveHead(44,82)
i01.moveArm("left",15,55,68,10)
i01.moveArm("right",13,40,74,13)
i01.moveHand("left",61,0,14,0,0,180)
i01.moveHand("right",0,24,24,19,21,25)
i01.moveTorso(90,90,90)
| 25.5 | 39 | 0.681373 | def giving()
i01.moveHead(44,82)
i01.moveArm("left",15,55,68,10)
i01.moveArm("right",13,40,74,13)
i01.moveHand("left",61,0,14,0,0,180)
i01.moveHand("right",0,24,24,19,21,25)
i01.moveTorso(90,90,90)
| false | true |
79007125142bd4d1bda8bc19fe8af633b6825101 | 2,582 | py | Python | ex19/ex19-sd.py | python-practice/lpthw | ad06dfe6a5d2351ee9216b365ff688db820cc035 | [
"MIT"
] | 1 | 2015-07-18T15:09:40.000Z | 2015-07-18T15:09:40.000Z | ex19/ex19-sd.py | python-practice/lpthw | ad06dfe6a5d2351ee9216b365ff688db820cc035 | [
"MIT"
] | null | null | null | ex19/ex19-sd.py | python-practice/lpthw | ad06dfe6a5d2351ee9216b365ff688db820cc035 | [
"MIT"
] | null | null | null | # define a function, which accepts 2 arguments
def cheese_and_crackers(cheese_count, boxes_of_crackers):
    """Print a short party-supply report for the given cheese/cracker counts."""
    # %d is for digit
    print "You have %d cheeses!" % cheese_count
    print "You have %d boxes of crackers!" % boxes_of_crackers
    print "Man that's enough for a party!"
    # go to a new line after the end
    print "Get a blanket.\n"
print "We can just give the function numbers directly:"
# call the function defined above
# by passing plain numbers,
# also called numeric constants
# or numeric literals
cheese_and_crackers(20, 30)
print "OR, we can use variables from our script:"
# a variable definition
# doesn't need a 'def' beforehand
amount_of_cheese = 10
amount_of_crackers = 50
# call (use, invoke, run) the function by passing the above variables
# or vars, for short
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print "We can even do math inside too:"
# the python interpreter first calculates the math,
# then passes the results as arguments
cheese_and_crackers(10 + 20, 5 + 6)
print "And we can combine the two, variables and math:"
# python substitutes the vars with their values, then does the math,
# and finally passes the calculated results to the function
# literals (consts), variables, math - all of those are called expressions;
# calculating math and substituting vars with their values is called 'expression evaluation'
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
#################################################################
# another way to call a function is using the result of calling another function,
# which could be a built-in or custom one
# also, don't forget about so-called "splats", when a function can accept any amount of args
def pass_any_two(*args):
    """Print and return the first two of any number of positional arguments."""
    print "There are %d arguments" % len(args)
    print "First: %r" % args[0]
    print "Second: %r" % args[1]
    return "%r %r" % (args[0], args[1])
# 1: constants
pass_any_two(1, 2)
# 2: variables
first = "f"
second = "s"
pass_any_two(first, second)
# 3: math of consts
pass_any_two(4 + 6, 5 + 8)
# 4: math of vars
a = 5
b = 6
pass_any_two(a + 8, b * 2)
# 5: more than two args
pass_any_two(1, 2, 3, 4)
# 6: built-in function call results
txt = "what is my length?"
pass_any_two(len(txt), txt)
# 7: custom (same) function call results
pass_any_two(0, pass_any_two)
# 8: call by alias (just another name)
pass_any_2 = pass_any_two
pass_any_2("alias", "called")
# 9: call by invoking the built-in __call__ method
pass_any_two.__call__("__call__", "invoked")
# 10: call by passing a list, converted to multiple arguments
pass_any_two(*["list", "converted", 3, 4])
| 27.178947 | 92 | 0.717661 |
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print "You have %d cheeses!" % cheese_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for a party!"
# go to a new line after the end
print "Get a blanket.\n"
print "We can just give the function numbers directly:"
# call the function defined above
# by passing plain numbers,
# also called numeric constants
# or numeric literals
cheese_and_crackers(20, 30)
print "OR, we can use variables from our script:"
# a variable definition
# doesn't need a'def' beforehand
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print "We can even do math inside too:"
cheese_and_crackers(10 + 20, 5 + 6)
print "And we can combine the two, variables and math:"
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
| false | true |
790071a7c9f8fb23f63ae74f647204d31255ad40 | 372 | py | Python | mypkg/scripts/twice.py | note032/ROS_robosys | 539fb5c2cb5c3da06f696c27d65ac7eb4efbef36 | [
"BSD-3-Clause"
] | null | null | null | mypkg/scripts/twice.py | note032/ROS_robosys | 539fb5c2cb5c3da06f696c27d65ac7eb4efbef36 | [
"BSD-3-Clause"
] | null | null | null | mypkg/scripts/twice.py | note032/ROS_robosys | 539fb5c2cb5c3da06f696c27d65ac7eb4efbef36 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
import rospy
from std_msgs.msg import Int32
def cb(message):
    """Subscriber callback: log twice the received value and print its parity.

    :param message: std_msgs/Int32 message; only ``message.data`` is read.
    """
    rospy.loginfo(message.data*2)
    # NOTE(review): this prints the loginfo *function object*, not a message —
    # presumably leftover debugging output; confirm before removing.
    print (rospy.loginfo)
    if message.data%2 == 0:
        print ('0')
    else:
        # Fixed: the original `elif message.data%2 != 0` was the exhaustive
        # complement of the `if`, so a plain `else` is equivalent and clearer.
        print('1')
if __name__ == '__main__':
    # Node setup: subscribe to the 'count_up' topic and hand each Int32 to cb().
    rospy.init_node('twice')
    sub = rospy.Subscriber('count_up', Int32, cb)
    rospy.spin()
| 18.6 | 49 | 0.612903 |
import rospy
from std_msgs.msg import Int32
def cb(message):
rospy.loginfo(message.data*2)
print (rospy.loginfo)
if message.data%2 == 0:
print ('0')
elif message.data%2 != 0 :
print('1')
if __name__ == '__main__':
rospy.init_node('twice')
sub = rospy.Subscriber('count_up', Int32, cb)
rospy.spin()
| true | true |
790071b65265c1427d41448669d14b8e49bfef73 | 13,744 | py | Python | 2018/15/helpme.py | mark-inderhees/aoc | ac40055faef4875c8446b27f54977105b65e41ad | [
"MIT"
] | null | null | null | 2018/15/helpme.py | mark-inderhees/aoc | ac40055faef4875c8446b27f54977105b65e41ad | [
"MIT"
] | null | null | null | 2018/15/helpme.py | mark-inderhees/aoc | ac40055faef4875c8446b27f54977105b65e41ad | [
"MIT"
def setup_inp(inp):
    """Parse the puzzle text into a 2-D grid.

    Walls/floor stay as single characters; each unit ('G' or 'E') becomes a
    (type, hit_points, moved_this_round) tuple starting at 200 HP.
    """
    def _cell(ch):
        return (ch, 200, False) if ch in ("G", "E") else ch

    return [[_cell(ch) for ch in line] for line in inp.split("\n")]
def print_board(inp):
    """Pretty-print the grid, appending each row's unit hit points."""
    for row in inp:
        chars = []
        hit_points = []
        for cell in row:
            if isinstance(cell, tuple):
                chars.append(cell[0])
                hit_points.append(str(cell[1]))
            else:
                chars.append(cell)
        print("".join(chars), " ", " ".join(hit_points))
def move_character(inp, from_row, from_col, to_row, to_col, char):
    """Relocate a unit on the board in place and mark it as having acted."""
    kind, hp, _ = char
    inp[from_row][from_col] = "."
    # Third tuple element True => this unit already took its turn this round
    inp[to_row][to_col] = (kind, hp, True)
    return inp
def attack(inp, row, col, enemy, damage=3):
    """Strike the weakest adjacent enemy; reading order breaks HP ties.

    Returns ``(board, died)`` where ``died`` is True when the target was
    killed and removed from the board.
    """
    if not adjacent_enemy(inp, row, col, enemy):
        return inp, False
    # Hit points of every adjacent enemy, keyed by coordinates
    targets = {}
    for rr, cc in ((row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1)):
        if inp[rr][cc][0] == enemy:
            targets[(rr, cc)] = inp[rr][cc][1]
    weakest = min(targets.values())
    # Among the weakest targets, min() on the coordinate tuples = reading order
    tr, tc = min(pos for pos, hp in targets.items() if hp == weakest)
    kind, hp, moved = inp[tr][tc]
    hp -= damage
    if hp <= 0:
        inp[tr][tc] = "."
        return inp, True
    inp[tr][tc] = (kind, hp, moved)
    return inp, False
def adjacent_enemy(inp, rowI, colI, enemy):
    """Return True if an enemy unit occupies one of the four orthogonal neighbours."""
    neighbours = (
        inp[rowI + 1][colI],
        inp[rowI - 1][colI],
        inp[rowI][colI + 1],
        inp[rowI][colI - 1],
    )
    return any(cell[0] == enemy for cell in neighbours)
def get_best_move(candidates):
    """Pick the single first step from BFS candidates.

    ``candidates`` is a list of (first_move, number_of_moves, tile_coordinates)
    tuples. Selection order: fewest moves, then first target tile in reading
    order, then first first-step in reading order. Returns None for no
    candidates.
    """
    if not candidates:
        return None
    # Fewest number of moves away
    fewest = min(steps for _, steps, _ in candidates)
    candidates = [c for c in candidates if c[1] == fewest]
    # Tie-break on the target tile in reading order (tuple min = reading order)
    target = min(c[2] for c in candidates)
    candidates = [c for c in candidates if c[2] == target]
    # Final tie-break: first step in reading order
    return min(c[0] for c in candidates)
def count_characters(inp):
    """Tally how many goblins ('G') and elves ('E') remain on the board."""
    tally = {"G": 0, "E": 0}
    for row in inp:
        for cell in row:
            # cell is either a single character or a (type, hp, moved) tuple;
            # indexing [0] yields the character in both cases.
            if cell[0] in ("G", "E"):
                tally[cell[0]] += 1
    return tally
def bfs_move(inp, rowI, colI, hero, enemy):
    """
    For every open tile adjacent to (rowI, colI), run a breadth-first search
    toward the nearest tile that is adjacent to an enemy, collecting
    (first_move, distance, target_tile) candidates.

    Returns the single first-step coordinate chosen by get_best_move(), or
    None when no move is needed (enemy already adjacent) or no candidate was
    found. Although not the most efficient, the approach is still fast and
    makes it easy to sort in such a way that satisfies all the conditions.
    """
    #If an enemy is located adjacent to our current location - no move!
    if adjacent_enemy(inp, rowI, colI, enemy):
        return None
    first_moves = [(rowI+1,colI),(rowI-1,colI),(rowI,colI-1),(rowI,colI+1)]
    #Filter down to valid first moves - must be a '.' there
    first_moves = [x for x in first_moves if inp[x[0]][x[1]]=="."]
    #Keep the list of tuples nearest tiles we've found, in format -
    #(first_move, number_of_moves, tile_coordinates)
    #At the end we'll need to use all these values to find the proper move
    best_moves = []
    for move in first_moves:
        r,c = move
        #We might immediately have an adjacent enemy and not need to search further
        if adjacent_enemy(inp, r, c, enemy):
            best_moves.append((move, 1, move))
            continue
        #We'll need to keep track of two things -
        #seen_coordinates - the tiles we've already visited
        #stack - the "new" tiles accessible from the current furthest points
        seen_coordinates = {(rowI,colI),(r,c)}
        stack = [(r+1,c),(r-1,c),(r,c-1),(r,c+1)]
        #Filter stack to only include "." tiles, which we haven't already seen
        stack = [x for x in stack if inp[x[0]][x[1]]=="." and (x[0],x[1]) not in seen_coordinates]
        #Now do the search -
        i=1 #Already have moved one tile at this point
        run = True
        while run:
            i+=1
            #Keep track of the new tiles here
            new_stack = []
            #Loop through and look for new tiles to add
            for tile in stack:
                if tile in seen_coordinates:
                    continue
                seen_coordinates.add(tile)
                r,c = tile
                if adjacent_enemy(inp, r, c, enemy):
                    best_moves.append((move,i,(r,c)))
                    #We want to complete this iteration to find all other reachable tiles at the same distance
                    run = False
                    continue
                #Add all newly accessible tiles to stack
                new_tiles = [(r+1,c),(r-1,c),(r,c-1),(r,c+1)]
                new_stack += [x for x in new_tiles if inp[x[0]][x[1]]=="." and (x[0],x[1]) not in seen_coordinates]
            stack = list(set(new_stack))
            #We might also need to end at this point if we have no more newly accessible tiles
            if not stack:
                run = False
    #Take our list of the best_moves from each starting point that we generated, and find the one move we'll take
    return get_best_move(best_moves)
def score_game(inp, rounds):
    """Outcome score = completed rounds * total remaining unit hit points."""
    total_hp = sum(
        cell[1]
        for row in inp
        for cell in row
        if cell[0] in ("G", "E")
    )
    return rounds * total_hp
def reset_moved_bools(inp):
    """Clear every unit's moved-this-round flag (third tuple element)."""
    for row in inp:
        for idx, cell in enumerate(row):
            if cell[0] in ("G", "E"):
                row[idx] = (cell[0], cell[1], False)
    return inp
# Sample battle maps; the assertions in __main__ pin their expected outcomes.
t0 = """#######
#.G...#
#...EG#
#.#.#G#
#..G#E#
#.....#
#######"""
t1 = """#######
#G..#E#
#E#E.E#
#G.##.#
#...#E#
#...E.#
#######"""
t2 = """#######
#E..EG#
#.#G.E#
#E.##E#
#G..#.#
#..E#.#
#######"""
t3 = """#######
#E.G#.#
#.#G..#
#G.#.G#
#G..#.#
#...E.#
#######"""
t4 = """#######
#.E...#
#.#..G#
#.###.#
#E#G#G#
#...#G#
#######"""
t5 = """#########
#G......#
#.E.#...#
#..##..G#
#...##..#
#...#...#
#.G...G.#
#.....G.#
#########"""
def problem1(inp, print_=False):
    """Simulate the battle until one whole side is dead; return the outcome.

    Outcome = (fully completed rounds) * (sum of remaining hit points).
    Units act in reading order: each moves via bfs_move(), then attacks the
    weakest adjacent enemy. Fix over the original: removed the unused local
    ``seen = {}``.

    :param inp: raw puzzle text
    :param print_: when True, print the board after every round
    """
    grid = setup_inp(inp)
    rounds = 0
    while True:
        # How many units of each side still have to act this round; used to
        # decide whether the battle ended mid-round (affects scoring).
        counts = count_characters(grid)
        for rowI, row in enumerate(grid):
            for colI, col in enumerate(row):
                char = grid[rowI][colI]
                if isinstance(char, tuple):
                    # Third tuple element True => already acted this round
                    if char[2]:
                        continue
                    r, c = rowI, colI  # current coordinates; updated if we move
                    hero = char[0]
                    enemy = "G" if hero == "E" else "E"
                    counts[hero] -= 1
                    move_to = bfs_move(grid, rowI, colI, hero, enemy)
                    if move_to:
                        r, c = move_to  # attack from the new position
                        grid = move_character(grid, rowI, colI, r, c, char)
                    grid, death = attack(grid, r, c, enemy)
                    if death:
                        # A unit died: check whether one whole side is wiped out
                        current_counts = count_characters(grid)
                        game_over = any(x == 0 for x in current_counts.values())
                        if game_over:
                            if counts[hero] > 0:
                                # Units of this side were still waiting to act,
                                # so the round did NOT complete.
                                final_score = score_game(grid, rounds)
                            else:
                                rounds += 1
                                final_score = score_game(grid, rounds)
                            if print_:
                                print("GAME ENDED", rounds)
                                print_board(grid)
                            return final_score
        # Round complete: clear every unit's moved-this-round flag
        grid = reset_moved_bools(grid)
        rounds += 1
        if print_:
            print(rounds)
            print_board(grid)
def problem2_loop(inp, damage_dict, print_=False):
    """Run one battle with per-side attack power taken from ``damage_dict``.

    Returns False as soon as any elf dies; otherwise returns the final
    outcome score (same scoring as problem1) once all goblins are dead.
    Fix over the original: removed the unused local ``seen = {}``.

    :param inp: raw puzzle text
    :param damage_dict: {"G": goblin_damage, "E": elf_damage}
    :param print_: when True, print the board after every round
    """
    grid = setup_inp(inp)
    rounds = 0
    while True:
        # Units of each side still to act this round (mid-round detection)
        counts = count_characters(grid)
        for rowI, row in enumerate(grid):
            for colI, col in enumerate(row):
                char = grid[rowI][colI]
                if isinstance(char, tuple):
                    # Third tuple element True => already acted this round
                    if char[2]:
                        continue
                    r, c = rowI, colI  # current coordinates; updated if we move
                    hero = char[0]
                    enemy = "G" if hero == "E" else "E"
                    counts[hero] -= 1
                    move_to = bfs_move(grid, rowI, colI, hero, enemy)
                    if move_to:
                        r, c = move_to  # attack from the new position
                        grid = move_character(grid, rowI, colI, r, c, char)
                    damage = damage_dict[hero]
                    grid, death = attack(grid, r, c, enemy, damage)
                    if death and enemy == "E":
                        # An elf died: this elf attack power is not enough
                        return False
                    elif death:
                        # A goblin died: check whether all goblins are gone
                        current_counts = count_characters(grid)
                        game_over = any(x == 0 for x in current_counts.values())
                        if game_over:
                            if counts[hero] > 0:
                                # Ended mid-round: round does not count
                                final_score = score_game(grid, rounds)
                            else:
                                rounds += 1
                                final_score = score_game(grid, rounds)
                            if print_:
                                print("GAME ENDED", rounds)
                                print_board(grid)
                            return final_score
        # Round complete: clear every unit's moved-this-round flag
        grid = reset_moved_bools(grid)
        rounds += 1
        if print_:
            print(rounds)
            print_board(grid)
def problem2(inp, print_=False):
    """Raise elf attack power until a battle ends with zero elf deaths.

    Starts at 3 and increments by 1 before each attempt; returns the first
    truthy outcome score from problem2_loop().
    """
    damage_dict = {"G": 3, "E": 3}
    while True:
        damage_dict["E"] += 1
        print("Elf power", damage_dict["E"])
        outcome = problem2_loop(inp, damage_dict, print_)
        if outcome:
            return outcome
if __name__=="__main__":
    # Read the puzzle input and echo it for a visual sanity check
    with open("input15.txt","r") as f:
        data = f.read().strip()
    for row in data.split("\n"):
        print(row)
    # Regression checks: expected outcomes for the sample boards t0-t5
    assert problem1(t0)==27730
    assert problem1(t1)==36334
    assert problem1(t2)==39514
    assert problem1(t3)==27755
    assert problem1(t4)==28944
    assert problem1(t5)==18740
    print(problem1(data))
print(problem2(data)) | 33.359223 | 117 | 0.511641 | def setup_inp(inp):
grid = []
for rowI,row in enumerate(inp.split("\n")):
grid.append([x for x in row])
for colI,col in enumerate(row):
if col in ["G","E"]:
char_tup = (col, 200, False)
grid[rowI][colI] = char_tup
return grid
def print_board(inp):
for row in inp:
extra = []
print_row = []
for char in row:
if isinstance(char,tuple):
print_row.append(char[0])
extra.append(str(char[1]))
else:
print_row.append(char)
print("".join(print_row)," ", " ".join(extra))
def move_character(inp, from_row, from_col, to_row, to_col, char):
inp[from_row][from_col] = "."
inp[to_row][to_col] = (char[0],char[1],True)
return inp
def attack(inp, row, col, enemy, damage=3):
if not adjacent_enemy(inp, row, col, enemy):
return inp, False
enemies = {}
for coords in [(row-1,col), (row+1,col), (row,col-1), (row,col+1)]:
if inp[coords[0]][coords[1]][0] == enemy:
enemies[coords] = inp[coords[0]][coords[1]][1]
min_hp = min(enemies.values())
enemies = [x for x in enemies if enemies[x]==min_hp]
enemies.sort()
coords = enemies[0]
enemy = inp[coords[0]][coords[1]]
enemy_pts = enemy[1] - damage
enemy_tup = (enemy[0], enemy_pts, enemy[2])
if enemy_pts <= 0:
inp[coords[0]][coords[1]] = "."
return inp, True
else:
inp[coords[0]][coords[1]] = enemy_tup
return inp, False
def adjacent_enemy(inp, rowI, colI, enemy):
if any(x[0]==enemy for x in [inp[rowI+1][colI], inp[rowI-1][colI], inp[rowI][colI+1], inp[rowI][colI-1]]):
return True
return False
def get_best_move(best_moves):
if not best_moves:
return None
min_steps = min([x[1] for x in best_moves])
best_moves = [x for x in best_moves if x[1]==min_steps]
best_moves.sort(key = lambda x:x[2])
best_moves = [x for x in best_moves if x[2]==best_moves[0][2]]
best_moves.sort(key = lambda x:x[0])
best_moves = [x for x in best_moves if x[0]==best_moves[0][0]]
return best_moves[0][0]
def count_characters(inp):
seen = {"G":0,"E":0}
for row in inp:
for col in row:
if col[0] in ["G","E"]:
seen[col[0]]+=1
return seen
def bfs_move(inp, rowI, colI, hero, enemy):
if adjacent_enemy(inp, rowI, colI, enemy):
return None
first_moves = [(rowI+1,colI),(rowI-1,colI),(rowI,colI-1),(rowI,colI+1)]
first_moves = [x for x in first_moves if inp[x[0]][x[1]]=="."]
#(first_move, number_of_moves, tile_coordinates)
#At the end we'll need to use all these values to find the proper move
best_moves = []
for move in first_moves:
r,c = move
if adjacent_enemy(inp, r, c, enemy):
best_moves.append((move, 1, move))
continue
#seen_coordinates - the tiles we've already visited
seen_coordinates = {(rowI,colI),(r,c)}
stack = [(r+1,c),(r-1,c),(r,c-1),(r,c+1)]
stack = [x for x in stack if inp[x[0]][x[1]]=="." and (x[0],x[1]) not in seen_coordinates]
#Now do the search -
i=1 #Already have moved one tile at this point
run = True
while run:
i+=1
#Keep track of the new tiles here
new_stack = []
#Loop through and look for new tiles to add
for tile in stack:
if tile in seen_coordinates:
continue
seen_coordinates.add(tile)
r,c = tile
if adjacent_enemy(inp, r, c, enemy):
best_moves.append((move,i,(r,c)))
#We want to complete this iteration to find all other reachable tiles at the same distance
run = False
continue
#Add all newly accessible tiles to stack
new_tiles = [(r+1,c),(r-1,c),(r,c-1),(r,c+1)]
new_stack += [x for x in new_tiles if inp[x[0]][x[1]]=="." and (x[0],x[1]) not in seen_coordinates]
stack = list(set(new_stack))
#We might also need to end at this point if we have no more newly accessible tiles
if not stack:
run = False
#Take our list of the best_moves from each starting point that we generated, and find the one move we'll take
return get_best_move(best_moves)
def score_game(inp, rounds):
pts = 0
for rowI,row in enumerate(inp):
for colI,col in enumerate(row):
if col[0] in ["G","E"]:
pts+=col[1]
return rounds*pts
def reset_moved_bools(inp):
for rowI,row in enumerate(inp):
for colI,col in enumerate(row):
if col[0] in ["G","E"]:
char_tup = (col[0],col[1],False)
inp[rowI][colI] = char_tup
return inp
t0 = """#######
#.G...#
#...EG#
#.#.#G#
#..G#E#
#.....#
#######"""
t1 = """#######
#G..#E#
#E#E.E#
#G.##.#
#...#E#
#...E.#
#######"""
t2 = """#######
#E..EG#
#.#G.E#
#E.##E#
#G..#.#
#..E#.#
#######"""
t3 = """#######
#E.G#.#
#.#G..#
#G.#.G#
#G..#.#
#...E.#
#######"""
t4 = """#######
#.E...#
#.#..G#
#.###.#
#E#G#G#
#...#G#
#######"""
t5 = """#########
#G......#
#.E.#...#
#..##..G#
#...##..#
#...#...#
#.G...G.#
#.....G.#
#########"""
def problem1(inp, print_=False):
grid = setup_inp(inp)
rounds = 0
while True:
counts = count_characters(grid)
seen = {}
for rowI,row in enumerate(grid):
for colI,col in enumerate(row):
char = grid[rowI][colI]
if isinstance(char, tuple):
if char[2]:
continue
r,c = rowI,colI
hero = char[0]
enemy = "G" if hero=="E" else "E"
counts[hero]-=1
move_to = bfs_move(grid, rowI, colI, hero, enemy)
if move_to:
r,c = move_to
grid = move_character(grid, rowI, colI, r, c, char)
grid, death = attack(grid, r, c, enemy)
if death:
current_counts = count_characters(grid)
game_over = any(x==0 for x in current_counts.values())
#If game is over, we need to see if the round is complete or not
if game_over:
#Means we ended midround
if counts[hero]>0:
final_score = score_game(grid, rounds)
#Otherwise round is complete- add 1 to rounds when calculating
else:
rounds+=1
final_score = score_game(grid, rounds)
if print_:
print("GAME ENDED",rounds)
print_board(grid)
return final_score
#Reset the variable that tracks whether a character has moved in a round
grid = reset_moved_bools(grid)
rounds += 1
if print_:
print(rounds)
print_board(grid)
def problem2_loop(inp, damage_dict, print_=False):
grid = setup_inp(inp)
rounds = 0
while True:
#Count the current number of each character type
#We can use this to determine if the game has ended in the middle or end of a round
counts = count_characters(grid)
seen = {}
for rowI,row in enumerate(grid):
for colI,col in enumerate(row):
char = grid[rowI][colI]
if isinstance(char, tuple):
#Indicates we already moved it this round
if char[2]:
continue
r,c = rowI,colI #Keep track of our current coordinates in case we move
hero = char[0]
enemy = "G" if hero=="E" else "E"
counts[hero]-=1
move_to = bfs_move(grid, rowI, colI, hero, enemy)
if move_to:
r,c = move_to #Need to update our current coordinates for the impending attack
grid = move_character(grid, rowI, colI, r, c, char)
damage = damage_dict[hero]
grid, death = attack(grid, r, c, enemy, damage)
if death and enemy=="E":
#FAILED
return False
#If goblin death, same logic as before
elif death:
#Check to see if it's over - all of one side dead
current_counts = count_characters(grid)
game_over = any(x==0 for x in current_counts.values())
if game_over:
if counts[hero]>0:
final_score = score_game(grid, rounds)
else:
rounds+=1
final_score = score_game(grid, rounds)
if print_:
print("GAME ENDED",rounds)
print_board(grid)
return final_score
grid = reset_moved_bools(grid)
rounds += 1
if print_:
print(rounds)
print_board(grid)
def problem2(inp, print_=False):
score = False
damage_dict = {"G":3, "E":3}
while not score:
damage_dict["E"] += 1
print("Elf power", damage_dict["E"])
score = problem2_loop(inp, damage_dict, print_)
return score
if __name__=="__main__":
with open("input15.txt","r") as f:
data = f.read().strip()
for row in data.split("\n"):
print(row)
assert problem1(t0)==27730
assert problem1(t1)==36334
assert problem1(t2)==39514
assert problem1(t3)==27755
assert problem1(t4)==28944
assert problem1(t5)==18740
print(problem1(data))
print(problem2(data)) | true | true |
790071b75654e0a02778afc0a9aaf770843324b1 | 5,772 | py | Python | ghostwriter/shepherd/filters.py | studebacon/Ghostwriter | 1cefcaa4859707ee11b2c3617bc03f8b3b74f57d | [
"BSD-3-Clause"
] | null | null | null | ghostwriter/shepherd/filters.py | studebacon/Ghostwriter | 1cefcaa4859707ee11b2c3617bc03f8b3b74f57d | [
"BSD-3-Clause"
] | null | null | null | ghostwriter/shepherd/filters.py | studebacon/Ghostwriter | 1cefcaa4859707ee11b2c3617bc03f8b3b74f57d | [
"BSD-3-Clause"
] | null | null | null | """This contains all of the model filters used by the Shepherd application."""
# Django & Other 3rd Party Libraries
import django_filters
from crispy_forms.bootstrap import (
Accordion,
AccordionGroup,
InlineCheckboxes,
PrependedText,
)
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, ButtonHolder, Column, Layout, Row, Submit
from django import forms
from django.forms.widgets import TextInput
# Ghostwriter Libraries
from .models import Domain, DomainStatus, HealthStatus, ServerStatus
class DomainFilter(django_filters.FilterSet):
    """
    Filter :model:`shepherd.Domain` model for searching.

    **Fields**

    ``name``
        Case insensitive search of the name field contents
    ``all_cat``
        Case insensitive search of the all_cat field
    ``health_status``
        Checkbox choice filter using :model:`shepherd.HealthStatus`
    ``domain_status``
        Checkbox choice filter using :model:`shepherd.DomainStatus`
    ``expiration_status``
        Boolean field to filter expired domains
    """

    name = django_filters.CharFilter(
        lookup_expr="icontains",
        label="Domain Name Contains",
        widget=TextInput(attrs={"placeholder": "specterops.io", "autocomplete": "off"}),
    )
    all_cat = django_filters.CharFilter(
        lookup_expr="icontains",
        label="Categories Contain",
        widget=TextInput(attrs={"placeholder": "Category", "autocomplete": "off"}),
    )
    health_status = django_filters.ModelMultipleChoiceFilter(
        queryset=HealthStatus.objects.all(),
        widget=forms.CheckboxSelectMultiple,
        label="",
    )
    domain_status = django_filters.ModelMultipleChoiceFilter(
        queryset=DomainStatus.objects.all(),
        widget=forms.CheckboxSelectMultiple,
        label="",
    )

    STATUS_CHOICES = (
        (0, "Active"),
        (1, "Expired"),
    )

    expiration_status = django_filters.ChoiceFilter(
        field_name="expired", choices=STATUS_CHOICES, label="Expiration Status"
    )

    class Meta:
        model = Domain
        fields = ["name", "all_cat", "health_status", "domain_status"]

    def __init__(self, *args, **kwargs):
        # Python 3 zero-argument super() (the file is Django/py3 code)
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = "get"
        self.helper.form_class = "newitem"
        self.helper.form_show_labels = False
        # Bootstrap layout: two text filters side by side, collapsible status
        # checkbox groups, then Filter/Reset buttons
        self.helper.layout = Layout(
            Row(
                Column(
                    PrependedText("name", '<i class="fas fa-filter"></i>'),
                    css_class="col-md-4 offset-md-2",
                ),
                Column(
                    PrependedText("all_cat", '<i class="fas fa-filter"></i>'),
                    css_class=" col-md-4",
                ),
                css_class="form-row",
            ),
            Accordion(
                AccordionGroup("Domain Statuses", InlineCheckboxes("domain_status")),
                AccordionGroup("Health Statuses", InlineCheckboxes("health_status")),
            ),
            ButtonHolder(
                Submit("submit", "Filter", css_class="btn btn-primary col-md-2"),
                HTML(
                    """
                    <a class="btn btn-outline-secondary col-md-2" role="button" href="{% url 'shepherd:domains' %}">Reset</a>
                    """
                ),
            ),
        )
class ServerFilter(django_filters.FilterSet):
    """
    Filter :model:`shepherd.StaticServer` model for searching.

    **Fields**

    ``ip_address``
        Case insensitive search of the ip_address field contents
    ``name``
        Case insensitive search of the name field contents
    ``server_status``
        Checkbox choice filter using :model:`shepherd.ServerStatus`
    """

    ip_address = django_filters.CharFilter(
        lookup_expr="icontains",
        label="IP Address Contains",
        widget=TextInput(attrs={"placeholder": "104.31.5.75", "autocomplete": "off"}),
    )
    name = django_filters.CharFilter(
        lookup_expr="icontains",
        label="Server Name Contains",
        widget=TextInput(attrs={"placeholder": "Hostname", "autocomplete": "off"}),
    )
    server_status = django_filters.ModelMultipleChoiceFilter(
        queryset=ServerStatus.objects.all(),
        widget=forms.CheckboxSelectMultiple,
        label="Server Status",
    )

    class Meta:
        # Fixed: this FilterSet filters StaticServer records (per the docstring
        # and the ip_address field); the previous `model = Domain` was a
        # copy-paste from DomainFilter.
        model = StaticServer
        fields = ["ip_address", "name", "server_status"]

    def __init__(self, *args, **kwargs):
        # Python 3 zero-argument super() (the file is Django/py3 code)
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = "get"
        self.helper.form_class = "newitem"
        self.helper.form_show_labels = False
        # Bootstrap layout: two text filters side by side, a collapsible status
        # checkbox group, then Filter/Reset buttons
        self.helper.layout = Layout(
            Row(
                Column(
                    PrependedText("ip_address", '<i class="fas fa-filter"></i>'),
                    css_class="col-md-4 offset-md-2",
                ),
                Column(
                    PrependedText("name", '<i class="fas fa-filter"></i>'),
                    css_class=" col-md-4",
                ),
                css_class="form-row",
            ),
            Accordion(
                AccordionGroup("Server Status", InlineCheckboxes("server_status")),
            ),
            ButtonHolder(
                Submit("submit", "Filter", css_class="btn btn-primary col-md-2"),
                HTML(
                    """
                    <a class="btn btn-outline-secondary col-md-2" role="button" href="{% url 'shepherd:servers' %}">Reset</a>
                    """
                ),
            ),
        )
| 33.952941 | 126 | 0.580388 |
import django_filters
from crispy_forms.bootstrap import (
Accordion,
AccordionGroup,
InlineCheckboxes,
PrependedText,
)
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, ButtonHolder, Column, Layout, Row, Submit
from django import forms
from django.forms.widgets import TextInput
from .models import Domain, DomainStatus, HealthStatus, ServerStatus
class DomainFilter(django_filters.FilterSet):
name = django_filters.CharFilter(
lookup_expr="icontains",
label="Domain Name Contains",
widget=TextInput(attrs={"placeholder": "specterops.io", "autocomplete": "off"}),
)
all_cat = django_filters.CharFilter(
lookup_expr="icontains",
label="Categories Contain",
widget=TextInput(attrs={"placeholder": "Category", "autocomplete": "off"}),
)
health_status = django_filters.ModelMultipleChoiceFilter(
queryset=HealthStatus.objects.all(),
widget=forms.CheckboxSelectMultiple,
label="",
)
domain_status = django_filters.ModelMultipleChoiceFilter(
queryset=DomainStatus.objects.all(),
widget=forms.CheckboxSelectMultiple,
label="",
)
STATUS_CHOICES = (
(0, "Active"),
(1, "Expired"),
)
expiration_status = django_filters.ChoiceFilter(
field_name="expired", choices=STATUS_CHOICES, label="Expiration Status"
)
class Meta:
model = Domain
fields = ["name", "all_cat", "health_status", "domain_status"]
def __init__(self, *args, **kwargs):
super(DomainFilter, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "get"
self.helper.form_class = "newitem"
self.helper.form_show_labels = False
self.helper.layout = Layout(
Row(
Column(
PrependedText("name", '<i class="fas fa-filter"></i>'),
css_class="col-md-4 offset-md-2",
),
Column(
PrependedText("all_cat", '<i class="fas fa-filter"></i>'),
css_class=" col-md-4",
),
css_class="form-row",
),
Accordion(
AccordionGroup("Domain Statuses", InlineCheckboxes("domain_status")),
AccordionGroup("Health Statuses", InlineCheckboxes("health_status")),
),
ButtonHolder(
Submit("submit", "Filter", css_class="btn btn-primary col-md-2"),
HTML(
"""
<a class="btn btn-outline-secondary col-md-2" role="button" href="{% url 'shepherd:domains' %}">Reset</a>
"""
),
),
)
class ServerFilter(django_filters.FilterSet):
ip_address = django_filters.CharFilter(
lookup_expr="icontains",
label="IP Address Contains",
widget=TextInput(attrs={"placeholder": "104.31.5.75", "autocomplete": "off"}),
)
name = django_filters.CharFilter(
lookup_expr="icontains",
label="Server Name Contains",
widget=TextInput(attrs={"placeholder": "Hostname", "autocomplete": "off"}),
)
server_status = django_filters.ModelMultipleChoiceFilter(
queryset=ServerStatus.objects.all(),
widget=forms.CheckboxSelectMultiple,
label="Server Status",
)
class Meta:
model = Domain
fields = ["ip_address", "name", "server_status"]
def __init__(self, *args, **kwargs):
super(ServerFilter, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "get"
self.helper.form_class = "newitem"
self.helper.form_show_labels = False
self.helper.layout = Layout(
Row(
Column(
PrependedText("ip_address", '<i class="fas fa-filter"></i>'),
css_class="col-md-4 offset-md-2",
),
Column(
PrependedText("name", '<i class="fas fa-filter"></i>'),
css_class=" col-md-4",
),
css_class="form-row",
),
Accordion(
AccordionGroup("Server Status", InlineCheckboxes("server_status")),
),
ButtonHolder(
Submit("submit", "Filter", css_class="btn btn-primary col-md-2"),
HTML(
"""
<a class="btn btn-outline-secondary col-md-2" role="button" href="{% url 'shepherd:servers' %}">Reset</a>
"""
),
),
)
| true | true |
790072f6a0e325433211f8f193bdf2307baf415d | 1,708 | py | Python | model/polar_inst.py | Tenvence/polar-inst | 95b2ef2fbc666469b031367e6aeb471d0465c272 | [
"Apache-2.0"
] | null | null | null | model/polar_inst.py | Tenvence/polar-inst | 95b2ef2fbc666469b031367e6aeb471d0465c272 | [
"Apache-2.0"
] | null | null | null | model/polar_inst.py | Tenvence/polar-inst | 95b2ef2fbc666469b031367e6aeb471d0465c272 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from model.modules.stage_backbone import StageBackbone
from model.modules.feature_pyramid_net import FeaturePyramidNet
from model.modules.polar_head import PolarHead
class PolarInst(nn.Module):
def __init__(self, num_polars, num_channels, num_classes):
super(PolarInst, self).__init__()
self.num_classes = num_classes
self.backbone = StageBackbone()
self.fpn = FeaturePyramidNet(num_channels)
self.polar_head = PolarHead(num_polars, num_channels, num_classes)
self.distance_scales = [nn.Parameter(torch.tensor(1., dtype=torch.float)) for _ in range(5)]
def forward(self, x):
batch_size = x.size(0)
backbone_outs = self.backbone(x)
fpn_outs = self.fpn(backbone_outs['c3'], backbone_outs['c4'], backbone_outs['c5'])
class_pred, distance_pred, centerness_pred = [], [], []
for idx, (distance_scale, fpn_out) in enumerate(zip(self.distance_scales, fpn_outs.values())):
head_out = self.polar_head(fpn_out)
head_out['distance'] *= distance_scale
head_out['distance'] = head_out['distance'].exp()
class_pred.append(head_out['cls'].permute(0, 2, 3, 1).reshape(batch_size, -1, self.num_classes))
distance_pred.append(head_out['distance'].permute(0, 2, 3, 1).reshape(batch_size, -1, 4))
centerness_pred.append(head_out['centerness'].permute(0, 2, 3, 1).reshape(batch_size, -1))
class_pred = torch.cat(class_pred, dim=1)
distance_pred = torch.cat(distance_pred, dim=1)
centerness_pred = torch.cat(centerness_pred, dim=1)
return class_pred, distance_pred, centerness_pred
| 39.72093 | 108 | 0.683255 | import torch
import torch.nn as nn
from model.modules.stage_backbone import StageBackbone
from model.modules.feature_pyramid_net import FeaturePyramidNet
from model.modules.polar_head import PolarHead
class PolarInst(nn.Module):
def __init__(self, num_polars, num_channels, num_classes):
super(PolarInst, self).__init__()
self.num_classes = num_classes
self.backbone = StageBackbone()
self.fpn = FeaturePyramidNet(num_channels)
self.polar_head = PolarHead(num_polars, num_channels, num_classes)
self.distance_scales = [nn.Parameter(torch.tensor(1., dtype=torch.float)) for _ in range(5)]
def forward(self, x):
batch_size = x.size(0)
backbone_outs = self.backbone(x)
fpn_outs = self.fpn(backbone_outs['c3'], backbone_outs['c4'], backbone_outs['c5'])
class_pred, distance_pred, centerness_pred = [], [], []
for idx, (distance_scale, fpn_out) in enumerate(zip(self.distance_scales, fpn_outs.values())):
head_out = self.polar_head(fpn_out)
head_out['distance'] *= distance_scale
head_out['distance'] = head_out['distance'].exp()
class_pred.append(head_out['cls'].permute(0, 2, 3, 1).reshape(batch_size, -1, self.num_classes))
distance_pred.append(head_out['distance'].permute(0, 2, 3, 1).reshape(batch_size, -1, 4))
centerness_pred.append(head_out['centerness'].permute(0, 2, 3, 1).reshape(batch_size, -1))
class_pred = torch.cat(class_pred, dim=1)
distance_pred = torch.cat(distance_pred, dim=1)
centerness_pred = torch.cat(centerness_pred, dim=1)
return class_pred, distance_pred, centerness_pred
| true | true |
7900742c8ad5a90154ee4e4191140fe9155766e9 | 3,742 | py | Python | stochastic_to_deterministic/hashing.py | kiss2u/google-research | 2cd66234656f9e2f4218ed90a2d8aa9cf3139093 | [
"Apache-2.0"
] | 7 | 2020-03-15T12:14:07.000Z | 2021-12-01T07:01:09.000Z | stochastic_to_deterministic/hashing.py | JustinDurham/google-research | 9049acf9246c1b75170f0c6757e62a8f619a9db6 | [
"Apache-2.0"
] | 18 | 2020-09-25T22:45:41.000Z | 2022-02-10T02:39:55.000Z | stochastic_to_deterministic/hashing.py | JustinDurham/google-research | 9049acf9246c1b75170f0c6757e62a8f619a9db6 | [
"Apache-2.0"
] | 4 | 2020-06-15T03:06:53.000Z | 2021-08-06T16:38:33.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hashing function to make a stochastic classifier deterministic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
from absl import app
import numpy as np
def compute_hash(features, hash_matrix, hash_vector):
"""Compute hash values for features using the hash function (A * x + c) mod 2.
Args:
features: NumPy float array of shape (n, d), the features to hash.
hash_matrix: NumPy float array of shape (num_feature_bits, num_hash_bits),
a random matrix A to construct the hash function.
hash_vector: NumPy float array of shape (1, num_hash_bits),
a random vector c to construct the hash function.
Returns:
NumPy float array of shape (n, 1) containing the hashed values in [0, 1].
"""
# Helper function to convert an int array to a bit string array.
def convert_int_to_bin(x, dimension):
# Converts x to an array of bit strings of size dimension.
return '{:b}'.format(x).zfill(dimension)[-dimension:]
convert_int_to_bin = np.vectorize(convert_int_to_bin)
# Helper function to convert a bit string array to an into array.
convert_bin_to_int = np.vectorize(lambda x: int(x, 2))
# Number of features and hash bits.
num_features = features.shape[0]
num_feature_bits, num_hash_bits = hash_matrix.shape
# Concatenate features and apply MD5 hash to get a fixed length encoding.
feature_sum_str = [''.join(x) for x in features.astype('str')]
feature_sum_hex = [hashlib.md5(s).hexdigest() for s in feature_sum_str]
feature_sum_int = [int(h, 16) for h in feature_sum_hex]
# Binarize features
feature_sum_bin = convert_int_to_bin(
feature_sum_int, dimension=num_feature_bits)
feature_sum_bin_matrix = np.array(
[[int(c) for c in s] for s in feature_sum_bin])
# Compute hash (Ax + c) mod 2.
feature_hashed = (
np.dot(feature_sum_bin_matrix, hash_matrix) +
np.repeat(hash_vector, repeats=num_features, axis=0))
feature_hashed_bits = np.mod(feature_hashed, 2)
# Convert hash to bit string.
feature_hashed_bit_char = convert_int_to_bin(feature_hashed_bits, 1)
feature_hashed_bit_str = [''.join(s) for s in feature_hashed_bit_char]
feature_hashed_int = convert_bin_to_int(feature_hashed_bit_str)
hashed_val = feature_hashed_int * 1. / 2 ** num_hash_bits
# Return normalized hashed values in [0, 1].
return hashed_val.reshape(-1, 1)
def main(argv):
"""Example usage of hash function."""
del argv
num_feature_bits = 128
num_hash_bits = 32
# Random hash matrix and vector to construct hash function.
hash_matrix = (np.random.rand(
num_feature_bits, num_hash_bits) > 0.5).astype('int')
hash_vector = (np.random.rand(1, num_hash_bits) > 0.5).astype('int')
# Generate random features.
num_examples = 10
dimension = 4
features = np.random.normal(size=(num_examples, dimension)).astype(np.float32)
# Compute hash.
hash_val = compute_hash(features, hash_matrix, hash_vector)
print('Feature matrix:')
print(features)
print('\nHashed values:')
print(hash_val)
if __name__ == '__main__':
app.run(main)
| 34.330275 | 80 | 0.736772 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
from absl import app
import numpy as np
def compute_hash(features, hash_matrix, hash_vector):
def convert_int_to_bin(x, dimension):
return '{:b}'.format(x).zfill(dimension)[-dimension:]
convert_int_to_bin = np.vectorize(convert_int_to_bin)
convert_bin_to_int = np.vectorize(lambda x: int(x, 2))
num_features = features.shape[0]
num_feature_bits, num_hash_bits = hash_matrix.shape
feature_sum_str = [''.join(x) for x in features.astype('str')]
feature_sum_hex = [hashlib.md5(s).hexdigest() for s in feature_sum_str]
feature_sum_int = [int(h, 16) for h in feature_sum_hex]
feature_sum_bin = convert_int_to_bin(
feature_sum_int, dimension=num_feature_bits)
feature_sum_bin_matrix = np.array(
[[int(c) for c in s] for s in feature_sum_bin])
feature_hashed = (
np.dot(feature_sum_bin_matrix, hash_matrix) +
np.repeat(hash_vector, repeats=num_features, axis=0))
feature_hashed_bits = np.mod(feature_hashed, 2)
feature_hashed_bit_char = convert_int_to_bin(feature_hashed_bits, 1)
feature_hashed_bit_str = [''.join(s) for s in feature_hashed_bit_char]
feature_hashed_int = convert_bin_to_int(feature_hashed_bit_str)
hashed_val = feature_hashed_int * 1. / 2 ** num_hash_bits
return hashed_val.reshape(-1, 1)
def main(argv):
del argv
num_feature_bits = 128
num_hash_bits = 32
hash_matrix = (np.random.rand(
num_feature_bits, num_hash_bits) > 0.5).astype('int')
hash_vector = (np.random.rand(1, num_hash_bits) > 0.5).astype('int')
num_examples = 10
dimension = 4
features = np.random.normal(size=(num_examples, dimension)).astype(np.float32)
hash_val = compute_hash(features, hash_matrix, hash_vector)
print('Feature matrix:')
print(features)
print('\nHashed values:')
print(hash_val)
if __name__ == '__main__':
app.run(main)
| true | true |
790074975de38fdef6156d6799de04bb8232b16c | 991 | py | Python | angel-ps/python/pyangel/running_mode.py | weien8899/angel | 829ce1a02e147d1f93b6375c2d07208ea31e53a2 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2017-11-24T07:40:30.000Z | 2017-11-24T07:40:30.000Z | angel-ps/python/pyangel/running_mode.py | weien8899/angel | 829ce1a02e147d1f93b6375c2d07208ea31e53a2 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | angel-ps/python/pyangel/running_mode.py | weien8899/angel | 829ce1a02e147d1f93b6375c2d07208ea31e53a2 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2018-09-28T00:31:04.000Z | 2018-09-28T00:31:04.000Z | #
# Tencent is pleased to support the open source community by making Angel available.
#
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
#
from enum import Enum, unique
@unique
class RunningMode(Enum):
"""
Enum for running mode
"""
# Run ParameterServer & ParameterServerAgent
ANGEL_PS_PSAGENT = 0
# Only Run ParameterServer
ANGEL_PS = 1
# Run ParameterServer & Worker(embedded ParameterServerAgent)
ANGEL_PS_WORKER = 2
| 31.967742 | 102 | 0.744702 |
from enum import Enum, unique
@unique
class RunningMode(Enum):
ANGEL_PS_PSAGENT = 0
ANGEL_PS = 1
ANGEL_PS_WORKER = 2
| true | true |
790074a6c9c67ccc8680a8288f75beaf820a25eb | 14,135 | py | Python | db/sql/dal/regions.py | Otamio/datamart-api | 728d5da0ce470a669be314c12c8226123a689aa0 | [
"MIT"
] | null | null | null | db/sql/dal/regions.py | Otamio/datamart-api | 728d5da0ce470a669be314c12c8226123a689aa0 | [
"MIT"
] | null | null | null | db/sql/dal/regions.py | Otamio/datamart-api | 728d5da0ce470a669be314c12c8226123a689aa0 | [
"MIT"
] | null | null | null | from typing import Dict, List, Optional
from db.sql.dal.general import sanitize
from db.sql.utils import query_to_dicts
class Region:
admin: str
admin_id: str
region_type: str
country: str
country_id: str
admin1: Optional[str]
admin1_id: Optional[str]
admin2: Optional[str]
admin2_id: Optional[str]
admin3: Optional[str]
admin3_id: Optional[str]
region_coordinate: Optional[str]
alias: Optional[str]
COUNTRY = 'Q6256'
ADMIN1 = 'Q10864048'
ADMIN2 = 'Q13220204'
ADMIN3 = 'Q13221722'
def __init__(self, **kwargs):
self.admin = kwargs['admin']
self.admin_id = kwargs['admin_id']
self.region_type = kwargs['region_type']
self.country = kwargs['country']
self.country_id = kwargs['country_id']
self.admin1 = kwargs.get('admin1')
self.admin1_id = kwargs.get('admin1_id')
self.admin2 = kwargs.get('admin2')
self.admin2_id = kwargs.get('admin2_id')
self.admin3 = kwargs.get('admin3')
self.admin3_id = kwargs.get('admin3_id')
self.region_coordinate = kwargs.get('region_coordinate')
self.alias = kwargs.get('alias')
# country, admin1 and admin2 queries return both admin and country,admin1,admin2 fields.
# admin3 queries do not, so we need to feel these fields ourselves
if self.region_type == Region.ADMIN3:
self.admin3_id, self.admin_3 = self.admin_id, self.admin
def __getitem__(self, key: str) -> str:
return getattr(self, key)
def query_country_qnodes(countries: List[str]) -> Dict[str, Optional[str]]:
# Translates countries to Q-nodes. Returns a dictionary of each input country and its QNode (None if not found)
# We look for countries in a case-insensitive fashion.
if not countries:
return {}
regions = query_countries(countries)
result_dict: Dict[str, Optional[str]] = {region.country: region.country_id for region in regions}
# The result dictionary contains all the countries we have found, we need to add those we did not find
found_countries = set([country.lower() for country in result_dict.keys()])
for country in countries:
if country.lower() not in found_countries:
result_dict[country] = None
return result_dict
def list_to_where(field: str, elements: List[str], lower=False) -> Optional[str]:
if not elements:
return None
if lower:
elements = [element.lower() for element in elements]
field = f"LOWER({field})"
santized = [sanitize(element) for element in elements]
quoted = [f"'{element}'" for element in santized]
joined = ', '.join(quoted)
return f"{field} IN ({joined})"
def region_where_clause(region_field: str, region_list: List[str], region_id_field: str,
region_id_list: List[str], alias_field: Optional[str] = None) -> str:
if not region_list and not region_id_list:
return "1=1"
region_where = list_to_where(region_field, region_list, lower=True) or "0=1"
if alias_field:
alias_where = list_to_where(alias_field, region_list, lower=True) or "0=1"
else:
alias_where = "0=1"
region_id_where = list_to_where(region_id_field, region_id_list) or "0=1"
return f'({region_where} OR {region_id_where} OR {alias_where})'
def _query_regions(query: str) -> List[Region]:
dicts = query_to_dicts(query)
return [Region(**d) for d in dicts]
def query_countries(countries: List[str] = [], country_ids: List[str] = []) -> List[Region]:
""" Returns a list of countries:
If countries or country_ids are not empty, only those countries are returned (all of those in both lists)
Otherwise, all countries are returned
"""
where = region_where_clause('s_country_label.text', countries, 'e_country.node1', country_ids)
query = f'''
SELECT e_country.node1 AS admin_id,
s_country_label.text AS admin,
'Q6256' AS region_type,
e_country.node1 AS country_id,
s_country_label.text AS country,
NULL as admin1_id,
NULL as admin1,
NULL as admin2_id,
NULL as admin2,
NULL as admin3_id,
NULL as admin3
FROM edges e_country
JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
ON (e_country.node1=e_country_label.node1 AND e_country_label.label='label')
WHERE e_country.label='P31' AND e_country.node2='Q6256' AND {where}
ORDER BY country
'''
return _query_regions(query)
def query_admin1s(country: Optional[str] = None, country_id: Optional[str] = None, admin1s: List[str] = [],
admin1_ids: List[str] = []) -> List[Region]:
"""
Returns a list of admin1s. If country or country_id is specified, return the admin1s only of that country.
If admin1s or admin1_ids are provided, only those admins are returned.
If all arguments are empty, all admin1s in the system are returned.
"""
if country and country_id:
raise ValueError('Only one of country, country_id may be specified')
if country_id:
country_where = f"e_country.node2='{country_id}'"
elif country: # We are certain country is not None here, but need an `elif` because mypy isn't certain
country_where = f"LOWER(s_country_label.text)='{country.lower()}'"
else:
country_where = "1=1"
admin1_where = region_where_clause('s_admin1_label.text', admin1s, 'e_admin1.node1', admin1_ids)
query = f'''
SELECT e_admin1.node1 AS admin_id,
s_admin1_label.text AS admin,
'Q10864048' AS region_type,
e_country.node2 AS country_id,
s_country_label.text AS country,
e_admin1.node1 as admin1_id,
s_admin1_label.text as admin1,
NULL as admin2_id,
NULL as admin2,
NULL as admin3_id,
NULL as admin3
FROM edges e_admin1
JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id)
ON (e_admin1.node1=e_admin1_label.node1 AND e_admin1_label.label='label')
JOIN edges e_country ON (e_country.node1=e_admin1.node1 AND e_country.label='P17')
JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
WHERE e_admin1.label='P31' AND e_admin1.node2='Q10864048' AND {country_where} AND {admin1_where}
ORDER BY admin1
'''
return _query_regions(query)
def query_admin2s(admin1: Optional[str] = None, admin1_id: Optional[str] = None, admin2s: List[str] = [],
admin2_ids: List[str] = []) -> List[Region]:
"""
Returns a list of admin2s. If admin1 or admin1_id is specified, return the admin2s only of that admin1.
If admin2s or admin2_ids are provided, only those admins are returned.
If all arguments are empty, all admin2s in the system are returned.
"""
if admin1 and admin1_id:
raise ValueError('Only one of admin1, admin1_id may be specified')
if admin1_id:
admin1_where = f"e_admin1.node2='{admin1_id}'"
elif admin1:
admin1_where = f"LOWER(s_admin1_label.text)=LOWER('{admin1}')"
else:
admin1_where = "1=1"
admin2_where = region_where_clause('s_admin2_label.text', admin2s, 'e_admin2.node1', admin2_ids)
query = f'''
SELECT e_admin2.node1 AS admin_id,
s_admin2_label.text AS admin,
'Q13220204' AS region_type,
e_country.node2 AS country_id,
s_country_label.text AS country,
e_admin1.node2 AS admin1_id,
s_admin1_label.text AS admin1,
e_admin2.node1 AS admin2_id,
s_admin2_label.text AS admin2,
NULL as admin3_id,
NULL as admin3
FROM edges e_admin2
JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id)
ON (e_admin2.node1=e_admin2_label.node1 AND e_admin2_label.label='label')
JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001')
JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id)
ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label')
JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17')
JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
WHERE e_admin2.label='P31' AND e_admin2.node2='Q13220204' AND {admin1_where} AND {admin2_where}
ORDER BY admin2
'''
return _query_regions(query)
def query_admin3s(admin2: Optional[str] = None, admin2_id: Optional[str] = None, admin3s: List[str] = [],
admin3_ids: List[str] = [], debug=False) -> List[Region]:
"""
Returns a list of admin3s. If admin2 or admin2_id is specified, return the admin3s only of that admin2.
If admin3s or admin3_ids are provided, only those admins are returned.
If all arguments are empty, all admin3s in the system are returned.
"""
if admin2 and admin2_id:
raise ValueError('Only one of admin2, admin2_id may be specified')
if admin2_id:
admin2_where = f"e_admin2.node2='{admin2_id}'"
elif admin2:
admin2_where = f"LOWER(s_admin2_label.text)=LOWER('{admin2}')"
else:
admin2_where = "1=1"
admin3_where = region_where_clause('s_admin3_label.text', admin3s, 'e_admin3.node1', admin3_ids)
query = f'''
SELECT e_admin3.node1 AS admin_id,
s_admin3_label.text AS admin,
'Q13221722' AS region_type,
e_country.node2 AS country_id,
s_country_label.text AS country,
e_admin1.node2 AS admin1_id,
s_admin1_label.text AS admin1,
e_admin2.node2 AS admin2_id,
s_admin2_label.text AS admin2,
e_admin2.node1 AS admin3_id,
s_admin3_label.text AS admin3
FROM
edges e_admin3
JOIN edges e_admin3_label JOIN strings s_admin3_label ON (e_admin3_label.id=s_admin3_label.edge_id)
ON (e_admin3.node1=e_admin3_label.node1 AND e_admin3_label.label='label')
JOIN edges e_admin2 ON (e_admin2.node1=e_admin3.node1 AND e_admin2.label='P2006190002')
JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id)
ON (e_admin2.node2=e_admin2_label.node1 AND e_admin2_label.label='label')
JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001')
JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id)
ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label')
JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17')
JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
WHERE e_admin3.label='P31' AND e_admin3.node2='Q13221722' AND {admin2_where} AND {admin3_where}
ORDER BY admin3
'''
if debug:
print(query)
return _query_regions(query)
def query_admins(admins: List[str] = [], admin_ids: List[str] = [], debug=False) -> List[Region]:
where = region_where_clause('s_region_label.text', admins, 'e_region.node1', admin_ids, 's_region_alias.text')
query = f'''
SELECT e_region.node1 AS admin_id, s_region_label.text AS admin, e_region.node2 AS region_type,
e_country.node2 AS country_id, s_country_label.text AS country,
e_admin1.node2 AS admin1_id, s_admin1_label.text AS admin1,
e_admin2.node2 AS admin2_id, s_admin2_label.text AS admin2,
'POINT(' || c_coordinate.longitude || ' ' || c_coordinate.latitude || ')' as region_coordinate,
s_region_alias.text AS alias
FROM edges e_region
JOIN edges e_region_label ON (e_region_label.node1=e_region.node1 AND e_region_label.label='label')
JOIN strings s_region_label ON (e_region_label.id=s_region_label.edge_id)
JOIN edges e_country
JOIN edges e_country_label
JOIN strings s_country_label
ON (s_country_label.edge_id=e_country_label.id)
ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
ON (e_region.node1=e_country.node1 AND e_country.label='P17')
LEFT JOIN edges e_admin1
JOIN edges e_admin1_label
JOIN strings s_admin1_label
ON (s_admin1_label.edge_id=e_admin1_label.id)
ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label')
ON (e_region.node1=e_admin1.node1 AND e_admin1.label='P2006190001')
LEFT JOIN edges e_admin2
JOIN edges e_admin2_label
JOIN strings s_admin2_label
ON (s_admin2_label.edge_id=e_admin2_label.id)
ON (e_admin2.node2=e_admin2_label.node1 AND e_admin2_label.label='label')
ON (e_region.node1=e_admin2.node1 AND e_admin2.label='P2006190002')
LEFT JOIN edges e_coordinate
JOIN coordinates c_coordinate
ON (c_coordinate.edge_id=e_coordinate.id)
ON (e_region.node1=e_coordinate.node1 AND e_coordinate.label='P625')
LEFT JOIN edges e_region_alias
JOIN strings s_region_alias
ON (s_region_alias.edge_id=e_region_alias.id)
ON (e_region.node1=e_region_alias.node1 AND e_region_alias.label='alias')
WHERE e_region.label='P31' AND e_region.node2 IN ('Q6256', 'Q10864048', 'Q13220204', 'Q13221722') AND {where}
'''
if debug:
print(query)
return _query_regions(query)
| 44.449686 | 115 | 0.678882 | from typing import Dict, List, Optional
from db.sql.dal.general import sanitize
from db.sql.utils import query_to_dicts
class Region:
admin: str
admin_id: str
region_type: str
country: str
country_id: str
admin1: Optional[str]
admin1_id: Optional[str]
admin2: Optional[str]
admin2_id: Optional[str]
admin3: Optional[str]
admin3_id: Optional[str]
region_coordinate: Optional[str]
alias: Optional[str]
COUNTRY = 'Q6256'
ADMIN1 = 'Q10864048'
ADMIN2 = 'Q13220204'
ADMIN3 = 'Q13221722'
def __init__(self, **kwargs):
self.admin = kwargs['admin']
self.admin_id = kwargs['admin_id']
self.region_type = kwargs['region_type']
self.country = kwargs['country']
self.country_id = kwargs['country_id']
self.admin1 = kwargs.get('admin1')
self.admin1_id = kwargs.get('admin1_id')
self.admin2 = kwargs.get('admin2')
self.admin2_id = kwargs.get('admin2_id')
self.admin3 = kwargs.get('admin3')
self.admin3_id = kwargs.get('admin3_id')
self.region_coordinate = kwargs.get('region_coordinate')
self.alias = kwargs.get('alias')
if self.region_type == Region.ADMIN3:
self.admin3_id, self.admin_3 = self.admin_id, self.admin
def __getitem__(self, key: str) -> str:
return getattr(self, key)
def query_country_qnodes(countries: List[str]) -> Dict[str, Optional[str]]:
if not countries:
return {}
regions = query_countries(countries)
result_dict: Dict[str, Optional[str]] = {region.country: region.country_id for region in regions}
found_countries = set([country.lower() for country in result_dict.keys()])
for country in countries:
if country.lower() not in found_countries:
result_dict[country] = None
return result_dict
def list_to_where(field: str, elements: List[str], lower=False) -> Optional[str]:
if not elements:
return None
if lower:
elements = [element.lower() for element in elements]
field = f"LOWER({field})"
santized = [sanitize(element) for element in elements]
quoted = [f"'{element}'" for element in santized]
joined = ', '.join(quoted)
return f"{field} IN ({joined})"
def region_where_clause(region_field: str, region_list: List[str], region_id_field: str,
region_id_list: List[str], alias_field: Optional[str] = None) -> str:
if not region_list and not region_id_list:
return "1=1"
region_where = list_to_where(region_field, region_list, lower=True) or "0=1"
if alias_field:
alias_where = list_to_where(alias_field, region_list, lower=True) or "0=1"
else:
alias_where = "0=1"
region_id_where = list_to_where(region_id_field, region_id_list) or "0=1"
return f'({region_where} OR {region_id_where} OR {alias_where})'
def _query_regions(query: str) -> List[Region]:
dicts = query_to_dicts(query)
return [Region(**d) for d in dicts]
def query_countries(countries: List[str] = [], country_ids: List[str] = []) -> List[Region]:
where = region_where_clause('s_country_label.text', countries, 'e_country.node1', country_ids)
query = f'''
SELECT e_country.node1 AS admin_id,
s_country_label.text AS admin,
'Q6256' AS region_type,
e_country.node1 AS country_id,
s_country_label.text AS country,
NULL as admin1_id,
NULL as admin1,
NULL as admin2_id,
NULL as admin2,
NULL as admin3_id,
NULL as admin3
FROM edges e_country
JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
ON (e_country.node1=e_country_label.node1 AND e_country_label.label='label')
WHERE e_country.label='P31' AND e_country.node2='Q6256' AND {where}
ORDER BY country
'''
return _query_regions(query)
def query_admin1s(country: Optional[str] = None, country_id: Optional[str] = None,
                  admin1s: Optional[List[str]] = None,
                  admin1_ids: Optional[List[str]] = None) -> List[Region]:
    """Return first-level administrative regions (Q10864048), optionally filtered.

    Args:
        country: restrict to this country label (case-insensitive); mutually exclusive with country_id.
        country_id: restrict to this country QNode id.
        admin1s: admin1 labels to match; None/empty means no name filter.
        admin1_ids: admin1 QNode ids to match; None/empty means no id filter.

    Raises:
        ValueError: if both country and country_id are given.
    """
    if country and country_id:
        raise ValueError('Only one of country, country_id may be specified')

    # Fix: default arguments were mutable lists; normalize None to fresh lists instead.
    admin1s = admin1s if admin1s is not None else []
    admin1_ids = admin1_ids if admin1_ids is not None else []

    # Fix: scalar filters are now passed through sanitize(), matching list_to_where,
    # so quotes in user-supplied names cannot break (or inject into) the SQL.
    if country_id:
        country_where = f"e_country.node2='{sanitize(country_id)}'"
    elif country:
        country_where = f"LOWER(s_country_label.text)='{sanitize(country.lower())}'"
    else:
        country_where = "1=1"

    admin1_where = region_where_clause('s_admin1_label.text', admin1s, 'e_admin1.node1', admin1_ids)
    query = f'''
    SELECT e_admin1.node1 AS admin_id,
           s_admin1_label.text AS admin,
           'Q10864048' AS region_type,
           e_country.node2 AS country_id,
           s_country_label.text AS country,
           e_admin1.node1 as admin1_id,
           s_admin1_label.text as admin1,
           NULL as admin2_id,
           NULL as admin2,
           NULL as admin3_id,
           NULL as admin3
    FROM edges e_admin1
        JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id)
            ON (e_admin1.node1=e_admin1_label.node1 AND e_admin1_label.label='label')
        JOIN edges e_country ON (e_country.node1=e_admin1.node1 AND e_country.label='P17')
        JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
            ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
    WHERE e_admin1.label='P31' AND e_admin1.node2='Q10864048' AND {country_where} AND {admin1_where}
    ORDER BY admin1
    '''
    return _query_regions(query)
def query_admin2s(admin1: Optional[str] = None, admin1_id: Optional[str] = None,
                  admin2s: Optional[List[str]] = None,
                  admin2_ids: Optional[List[str]] = None) -> List[Region]:
    """Return second-level administrative regions (Q13220204), optionally filtered.

    Args:
        admin1: restrict to this admin1 label (case-insensitive); mutually exclusive with admin1_id.
        admin1_id: restrict to this admin1 QNode id.
        admin2s: admin2 labels to match; None/empty means no name filter.
        admin2_ids: admin2 QNode ids to match; None/empty means no id filter.

    Raises:
        ValueError: if both admin1 and admin1_id are given.
    """
    if admin1 and admin1_id:
        raise ValueError('Only one of admin1, admin1_id may be specified')

    # Fix: default arguments were mutable lists; normalize None to fresh lists instead.
    admin2s = admin2s if admin2s is not None else []
    admin2_ids = admin2_ids if admin2_ids is not None else []

    # Fix: scalar filters are now sanitized like list_to_where's values.
    if admin1_id:
        admin1_where = f"e_admin1.node2='{sanitize(admin1_id)}'"
    elif admin1:
        admin1_where = f"LOWER(s_admin1_label.text)=LOWER('{sanitize(admin1)}')"
    else:
        admin1_where = "1=1"

    admin2_where = region_where_clause('s_admin2_label.text', admin2s, 'e_admin2.node1', admin2_ids)
    query = f'''
    SELECT e_admin2.node1 AS admin_id,
           s_admin2_label.text AS admin,
           'Q13220204' AS region_type,
           e_country.node2 AS country_id,
           s_country_label.text AS country,
           e_admin1.node2 AS admin1_id,
           s_admin1_label.text AS admin1,
           e_admin2.node1 AS admin2_id,
           s_admin2_label.text AS admin2,
           NULL as admin3_id,
           NULL as admin3
    FROM edges e_admin2
        JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id)
            ON (e_admin2.node1=e_admin2_label.node1 AND e_admin2_label.label='label')
        JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001')
        JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id)
            ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label')
        JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17')
        JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
            ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
    WHERE e_admin2.label='P31' AND e_admin2.node2='Q13220204' AND {admin1_where} AND {admin2_where}
    ORDER BY admin2
    '''
    return _query_regions(query)
def query_admin3s(admin2: Optional[str] = None, admin2_id: Optional[str] = None,
                  admin3s: Optional[List[str]] = None,
                  admin3_ids: Optional[List[str]] = None, debug=False) -> List[Region]:
    """Return third-level administrative regions (Q13221722), optionally filtered.

    Args:
        admin2: restrict to this admin2 label (case-insensitive); mutually exclusive with admin2_id.
        admin2_id: restrict to this admin2 QNode id.
        admin3s: admin3 labels to match; None/empty means no name filter.
        admin3_ids: admin3 QNode ids to match; None/empty means no id filter.
        debug: when True, print the generated SQL before executing it.

    Raises:
        ValueError: if both admin2 and admin2_id are given.
    """
    if admin2 and admin2_id:
        raise ValueError('Only one of admin2, admin2_id may be specified')

    # Fix: default arguments were mutable lists; normalize None to fresh lists instead.
    admin3s = admin3s if admin3s is not None else []
    admin3_ids = admin3_ids if admin3_ids is not None else []

    # Fix: scalar filters are now sanitized like list_to_where's values.
    if admin2_id:
        admin2_where = f"e_admin2.node2='{sanitize(admin2_id)}'"
    elif admin2:
        admin2_where = f"LOWER(s_admin2_label.text)=LOWER('{sanitize(admin2)}')"
    else:
        admin2_where = "1=1"

    admin3_where = region_where_clause('s_admin3_label.text', admin3s, 'e_admin3.node1', admin3_ids)
    # Consistency fix: admin3_id now selects e_admin3.node1 (was e_admin2.node1; the
    # two are equal through the e_admin2/e_admin3 join, but this reads correctly).
    query = f'''
    SELECT e_admin3.node1 AS admin_id,
           s_admin3_label.text AS admin,
           'Q13221722' AS region_type,
           e_country.node2 AS country_id,
           s_country_label.text AS country,
           e_admin1.node2 AS admin1_id,
           s_admin1_label.text AS admin1,
           e_admin2.node2 AS admin2_id,
           s_admin2_label.text AS admin2,
           e_admin3.node1 AS admin3_id,
           s_admin3_label.text AS admin3
    FROM edges e_admin3
        JOIN edges e_admin3_label JOIN strings s_admin3_label ON (e_admin3_label.id=s_admin3_label.edge_id)
            ON (e_admin3.node1=e_admin3_label.node1 AND e_admin3_label.label='label')
        JOIN edges e_admin2 ON (e_admin2.node1=e_admin3.node1 AND e_admin2.label='P2006190002')
        JOIN edges e_admin2_label JOIN strings s_admin2_label ON (e_admin2_label.id=s_admin2_label.edge_id)
            ON (e_admin2.node2=e_admin2_label.node1 AND e_admin2_label.label='label')
        JOIN edges e_admin1 ON (e_admin1.node1=e_admin2.node1 AND e_admin1.label='P2006190001')
        JOIN edges e_admin1_label JOIN strings s_admin1_label ON (e_admin1_label.id=s_admin1_label.edge_id)
            ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label')
        JOIN edges e_country ON (e_country.node1=e_admin1.node2 AND e_country.label='P17')
        JOIN edges e_country_label JOIN strings s_country_label ON (e_country_label.id=s_country_label.edge_id)
            ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
    WHERE e_admin3.label='P31' AND e_admin3.node2='Q13221722' AND {admin2_where} AND {admin3_where}
    ORDER BY admin3
    '''
    if debug:
        print(query)
    return _query_regions(query)
def query_admins(admins: Optional[List[str]] = None, admin_ids: Optional[List[str]] = None,
                 debug=False) -> List[Region]:
    """Return regions of any level (country/admin1/admin2/admin3), matched by
    label, alias, or QNode id.

    The LEFT JOINs allow higher-level regions (e.g. countries) that lack
    admin1/admin2 parents, coordinates, or aliases; those columns come back NULL.

    Args:
        admins: region labels to match (also matched against aliases); None/empty means no name filter.
        admin_ids: region QNode ids to match; None/empty means no id filter.
        debug: when True, print the generated SQL before executing it.
    """
    # Fix: default arguments were mutable lists; normalize None to fresh lists instead.
    admins = admins if admins is not None else []
    admin_ids = admin_ids if admin_ids is not None else []

    where = region_where_clause('s_region_label.text', admins, 'e_region.node1', admin_ids, 's_region_alias.text')
    query = f'''
    SELECT e_region.node1 AS admin_id, s_region_label.text AS admin, e_region.node2 AS region_type,
           e_country.node2 AS country_id, s_country_label.text AS country,
           e_admin1.node2 AS admin1_id, s_admin1_label.text AS admin1,
           e_admin2.node2 AS admin2_id, s_admin2_label.text AS admin2,
           'POINT(' || c_coordinate.longitude || ' ' || c_coordinate.latitude || ')' as region_coordinate,
           s_region_alias.text AS alias
    FROM edges e_region
        JOIN edges e_region_label ON (e_region_label.node1=e_region.node1 AND e_region_label.label='label')
        JOIN strings s_region_label ON (e_region_label.id=s_region_label.edge_id)
        JOIN edges e_country
            JOIN edges e_country_label
                JOIN strings s_country_label
                ON (s_country_label.edge_id=e_country_label.id)
            ON (e_country.node2=e_country_label.node1 AND e_country_label.label='label')
        ON (e_region.node1=e_country.node1 AND e_country.label='P17')
        LEFT JOIN edges e_admin1
            JOIN edges e_admin1_label
                JOIN strings s_admin1_label
                ON (s_admin1_label.edge_id=e_admin1_label.id)
            ON (e_admin1.node2=e_admin1_label.node1 AND e_admin1_label.label='label')
        ON (e_region.node1=e_admin1.node1 AND e_admin1.label='P2006190001')
        LEFT JOIN edges e_admin2
            JOIN edges e_admin2_label
                JOIN strings s_admin2_label
                ON (s_admin2_label.edge_id=e_admin2_label.id)
            ON (e_admin2.node2=e_admin2_label.node1 AND e_admin2_label.label='label')
        ON (e_region.node1=e_admin2.node1 AND e_admin2.label='P2006190002')
        LEFT JOIN edges e_coordinate
            JOIN coordinates c_coordinate
            ON (c_coordinate.edge_id=e_coordinate.id)
        ON (e_region.node1=e_coordinate.node1 AND e_coordinate.label='P625')
        LEFT JOIN edges e_region_alias
            JOIN strings s_region_alias
            ON (s_region_alias.edge_id=e_region_alias.id)
        ON (e_region.node1=e_region_alias.node1 AND e_region_alias.label='alias')
    WHERE e_region.label='P31' AND e_region.node2 IN ('Q6256', 'Q10864048', 'Q13220204', 'Q13221722') AND {where}
    '''
    if debug:
        print(query)
    return _query_regions(query)
# --- selfdrive/controls/lib/events.py (repo: gomtings/for_NEXO, license: MIT) ---
from enum import IntEnum
from typing import Dict, Union, Callable, List, Optional
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
    """Relative importance of an alert; a higher value wins (see Alert.__gt__)."""
    LOWEST = 0
    LOWER = 1
    LOW = 2
    MID = 3
    HIGH = 4
    HIGHEST = 5
# Event types
class ET:
    """Event-type keys.

    Each entry in EVENTS maps one of these strings to an Alert (or an alert
    callback); the type describes how the event affects the controls state
    (engaging, warning, disabling, or permanently displayed).
    """
    ENABLE = 'enable'
    PRE_ENABLE = 'preEnable'
    NO_ENTRY = 'noEntry'
    WARNING = 'warning'
    USER_DISABLE = 'userDisable'
    SOFT_DISABLE = 'softDisable'
    IMMEDIATE_DISABLE = 'immediateDisable'
    PERMANENT = 'permanent'
# Reverse mapping: event enum value -> event name string.
# Used by Events.create_alerts to build Alert.alert_type ("<event>/<type>").
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
    """Accumulates event names raised during a control cycle.

    Events added with static=True survive clear() and are re-seeded every
    cycle.  events_prev tracks, for every event defined in EVENTS, how many
    consecutive cycles it has been active (used for Alert.creation_delay).
    """

    def __init__(self):
        self.events: List[int] = []
        self.static_events: List[int] = []
        # One consecutive-cycle counter per known event, all starting at zero.
        self.events_prev = dict.fromkeys(EVENTS.keys(), 0)

    @property
    def names(self) -> List[int]:
        return self.events

    def __len__(self) -> int:
        return len(self.events)

    def add(self, event_name: int, static: bool = False) -> None:
        if static:
            self.static_events.append(event_name)
        self.events.append(event_name)

    def clear(self) -> None:
        # Bump counters for events active this cycle, zero the rest, then
        # start the next cycle from the static events only.
        active = set(self.events)
        self.events_prev = {name: (count + 1 if name in active else 0)
                            for name, count in self.events_prev.items()}
        self.events = self.static_events.copy()

    def any(self, event_type: str) -> bool:
        return any(event_type in EVENTS.get(name, {}) for name in self.events)

    def create_alerts(self, event_types: List[str], callback_args=None):
        if callback_args is None:
            callback_args = []

        alerts = []
        for name in self.events:
            event_alerts = EVENTS[name]
            for et in event_types:
                if et not in event_alerts:
                    continue
                alert = event_alerts[et]
                # Entries are either Alert instances or callbacks that build one.
                if not isinstance(alert, Alert):
                    alert = alert(*callback_args)
                # Only surface the alert once its event has been active long enough.
                if DT_CTRL * (self.events_prev[name] + 1) >= alert.creation_delay:
                    alert.alert_type = f"{EVENT_NAME[name]}/{et}"
                    alert.event_type = et
                    alerts.append(alert)
        return alerts

    def add_from_msg(self, events):
        self.events.extend(evt.name.raw for evt in events)

    def to_msg(self):
        msgs = []
        for name in self.events:
            event = car.CarEvent.new_message()
            event.name = name
            for event_type in EVENTS.get(name, {}):
                setattr(event, event_type, True)
            msgs.append(event)
        return msgs
class Alert:
    """A single UI alert: two text lines plus status/size/priority and
    visual/audible cues.

    `duration` is given in seconds and stored in control-loop frames.
    `creation_delay` (seconds) keeps the alert hidden until its event has been
    continuously active that long (checked in Events.create_alerts).
    """

    def __init__(self,
                 alert_text_1: str,
                 alert_text_2: str,
                 alert_status: log.ControlsState.AlertStatus,
                 alert_size: log.ControlsState.AlertSize,
                 priority: Priority,
                 visual_alert: car.CarControl.HUDControl.VisualAlert,
                 audible_alert: car.CarControl.HUDControl.AudibleAlert,
                 duration: float,
                 alert_rate: float = 0.,
                 creation_delay: float = 0.):
        self.alert_text_1 = alert_text_1
        self.alert_text_2 = alert_text_2
        self.alert_status = alert_status
        self.alert_size = alert_size
        self.priority = priority
        self.visual_alert = visual_alert
        self.audible_alert = audible_alert

        # Convert seconds to control frames (DT_CTRL is the controls period).
        self.duration = int(duration / DT_CTRL)

        self.alert_rate = alert_rate
        self.creation_delay = creation_delay

        # Filled in by Events.create_alerts ("<event name>/<event type>").
        self.alert_type = ""
        self.event_type: Optional[str] = None

    def __str__(self) -> str:
        return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"

    def __gt__(self, alert2) -> bool:
        # Ordering is by priority only; the higher-priority alert wins.
        return self.priority > alert2.priority
class NoEntryAlert(Alert):
    """Mid-size alert shown when openpilot refuses to engage.

    The first line is fixed Korean text ("openpilot unavailable"); the caller
    supplies the reason as the second line.
    """
    def __init__(self, alert_text_2: str, visual_alert: car.CarControl.HUDControl.VisualAlert=VisualAlert.none):
        super().__init__("오픈파일럿을 사용할 수 없음", alert_text_2, AlertStatus.normal,
                         AlertSize.mid, Priority.LOW, visual_alert,
                         AudibleAlert.refuse, 3.)
class SoftDisableAlert(Alert):
    """Full-screen user-prompt alert used while openpilot soft-disables.

    The first line is fixed Korean text ("take the wheel immediately"); the
    caller supplies the reason as the second line.
    """

    def __init__(self, alert_text_2: str):
        # Fix: dropped the stray trailing comma after the super() call, which
        # turned the statement into a pointless (None,) tuple expression.
        super().__init__("핸들을 즉시 잡아주세요", alert_text_2,
                         AlertStatus.userPrompt, AlertSize.full,
                         Priority.MID, VisualAlert.steerRequired,
                         AudibleAlert.warningSoft, 2.)
# less harsh version of SoftDisable, where the condition is user-triggered
class UserSoftDisableAlert(SoftDisableAlert):
    """Softer SoftDisable variant for user-triggered conditions; replaces the
    first text line ("openpilot will disengage" in Korean)."""

    def __init__(self, alert_text_2: str):
        # Fix: dropped the stray trailing commas that made both statements
        # pointless one-element tuple expressions.
        super().__init__(alert_text_2)
        self.alert_text_1 = "오픈파일럿이 해제됩니다."
class ImmediateDisableAlert(Alert):
    """Highest-priority full-screen alert used when openpilot disables
    immediately.  First line is fixed Korean text ("take the wheel
    immediately"); the caller supplies the reason as the second line.
    """

    def __init__(self, alert_text_2: str):
        # Fix: dropped the stray trailing comma after the super() call, which
        # turned the statement into a pointless (None,) tuple expression.
        super().__init__("핸들을 즉시 잡아주세요", alert_text_2,
                         AlertStatus.critical, AlertSize.full,
                         Priority.HIGHEST, VisualAlert.steerRequired,
                         AudibleAlert.warningImmediate, 4.)
class EngagementAlert(Alert):
    """Invisible alert that only plays the engage/disengage chime."""

    def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
        # Fix: dropped the stray trailing comma after the super() call, which
        # turned the statement into a pointless (None,) tuple expression.
        super().__init__("", "",
                         AlertStatus.normal, AlertSize.none,
                         Priority.MID, VisualAlert.none,
                         audible_alert, .2)
class NormalPermanentAlert(Alert):
    """Silent permanent alert; sized mid when a second text line is given,
    small otherwise."""

    def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2,
                 priority: Priority = Priority.LOWER, creation_delay: float = 0.):
        # Fix: dropped the stray trailing comma after the super() call, which
        # turned the statement into a pointless (None,) tuple expression.
        super().__init__(alert_text_1, alert_text_2,
                         AlertStatus.normal, AlertSize.mid if len(alert_text_2) else AlertSize.small,
                         priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay)
class StartupAlert(Alert):
    """Mid-size alert shown for 5s at startup; the default second line is
    Korean for "always keep your hands on the wheel and watch the road"."""

    def __init__(self, alert_text_1: str, alert_text_2: str = "항상 핸들을 잡고 도로를 주시하세요", alert_status=AlertStatus.normal):
        # Fix: dropped the stray trailing comma after the super() call, which
        # turned the statement into a pointless (None,) tuple expression.
        super().__init__(alert_text_1, alert_text_2,
                         alert_status, AlertSize.mid,
                         Priority.LOWER, VisualAlert.none, AudibleAlert.none, 5.)
# ********** helper functions **********
def get_display_speed(speed_ms: float, metric: bool) -> str:
    """Format a speed given in m/s as a rounded "<value> km/h" or "<value> mph" string."""
    if metric:
        factor, unit = CV.MS_TO_KPH, 'km/h'
    else:
        factor, unit = CV.MS_TO_MPH, 'mph'
    return f"{int(round(speed_ms * factor))} {unit}"
# ********** alert callback functions **********

# Signature shared by every alert callback stored in EVENTS:
# (CP, sm, metric, soft_disable_time) -> Alert.
AlertCallbackType = Callable[[car.CarParams, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
    """Return an alert callback that always produces a SoftDisableAlert with the given text."""
    def callback(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
        # Fork note: upstream escalates to ImmediateDisableAlert when
        # soft_disable_time is short; that escalation is disabled here.
        return SoftDisableAlert(alert_text_2)
    return callback
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
    """Return an alert callback that always produces a UserSoftDisableAlert with the given text."""
    def callback(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
        # Fork note: upstream escalates to ImmediateDisableAlert when
        # soft_disable_time is short; that escalation is disabled here.
        return UserSoftDisableAlert(alert_text_2)
    return callback
def below_engage_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    """No-entry alert shown when the car is below the minimum engage speed."""
    speed_str = get_display_speed(CP.minEnableSpeed, metric)
    return NoEntryAlert(f"Speed Below {speed_str}")
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    """Warning shown while driving below the car's minimum steering speed."""
    speed_str = get_display_speed(CP.minSteerSpeed, metric)
    return Alert(
        f"Steer Unavailable Below {speed_str}",
        "",
        AlertStatus.userPrompt, AlertSize.small,
        Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    """Status alert showing calibration progress and the speed needed to calibrate."""
    cal_perc = sm['liveCalibration'].calPerc
    speed_str = get_display_speed(MIN_SPEED_FILTER, metric)
    return Alert(
        "Calibration in Progress: %d%%" % cal_perc,
        f"Drive Above {speed_str}",
        AlertStatus.normal, AlertSize.mid,
        Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    """Alert for poor GPS reception; the hint depends on whether the panda has integrated GPS."""
    integrated_pandas = (log.PandaState.PandaType.uno, log.PandaState.PandaType.dos)
    if sm['peripheralState'].pandaType in integrated_pandas:
        hint = "If sky is visible, contact support"
    else:
        hint = "Check GPS antenna placement"
    return Alert(
        "Poor GPS reception",
        hint,
        AlertStatus.normal, AlertSize.mid,
        Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    """No-entry alert for a disabled cruise mode; Honda names it the main switch."""
    text = "Main Switch Off" if CP.carName == "honda" else "Cruise Mode Disabled"
    return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    """Permanent alert showing the current joystick gas/steer percentages."""
    axes = sm['testJoystick'].axes
    # First two axes are gas and steer; default to zero when no axes are present.
    gb, steer = list(axes)[:2] if len(axes) else (0., 0.)
    status = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
    return NormalPermanentAlert("Joystick Mode", status)
def auto_lane_change_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    """Countdown alert for the automatic lane change (Korean text: "starting
    lane change in (n)" / "check for vehicles in the lane")."""
    countdown = sm['lateralPlan'].autoLaneChangeTimer
    return Alert(
        "차선 변경을 시작합니다 in (%d)" % countdown,
        "차선의 차량을 확인하세요",
        AlertStatus.normal, AlertSize.mid,
        Priority.LOWER, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75)
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
# ********** events with no alerts **********
EventName.stockFcw: {},
EventName.lkasDisabled: {},
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("System Initializing"),
},
EventName.startup: {
ET.PERMANENT: StartupAlert("Be ready to take over at any time")
},
EventName.startupMaster: {
ET.PERMANENT: StartupAlert("WARNING: This branch is not tested",
alert_status=AlertStatus.userPrompt),
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: StartupAlert("Dashcam mode"),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
},
EventName.startupNoFw: {
ET.PERMANENT: StartupAlert("Car Unrecognized",
"Check comma power connections",
alert_status=AlertStatus.userPrompt),
},
EventName.dashcamMode: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
priority=Priority.LOWEST),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
"Turn off stock LKAS to engage"),
},
EventName.cruiseMismatch: {
#ET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"),
},
# Some features or cars are marked as community features. If openpilot
# detects the use of a community feature it switches to dashcam mode
# until these features are allowed using a toggle in settings.
EventName.communityFeatureDisallowed: {
ET.PERMANENT: NormalPermanentAlert("openpilot Unavailable",
"Enable Community Features in Settings"),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
"Car Unrecognized",
priority=Priority.LOWEST),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"브레이크!",
"추돌 위험",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"브레이크!",
"추돌 위험",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"핸들을 잡아주세요",
"차선이탈 감지됨",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"Release Gas Pedal to Engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
},
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
},
EventName.steerTempUnavailableSilent: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"조향제어 일시적으로 사용불가",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.prompt, 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"도로를 주시하세요 : 운전자 도로주시 불안",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"도로를 주시하세요",
"운전자 도로주시 불안",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"조향제어가 강제로 해제됩니다",
"운전자 도로주시 불안",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"핸들을 잡아주세요 : 운전자 인식 불가",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"운전자 응답하지않음",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"조향제어가 강제로 해제됩니다",
"운전자 응답하지않음",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"수동으로 재활성화하세요",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"앞차량 멈춤",
"앞차가 출발하면 자동 재출발",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"차선을 변경합니다",
"좌측차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"차선을 변경합니다",
"우측차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"후측방 차량감지",
"차선에 차량이 감지되니 대기하세요",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"차선을 변경합니다",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"조향제어 제한을 초과함",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Contact Support"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Contact Support"),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Contact Support"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Contact Support"),
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: soft_disable_alert("Steering Temporarily Unavailable"),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable"),
},
EventName.outOfSpace: {
ET.PERMANENT: NormalPermanentAlert("Out of Storage"),
ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: below_engage_speed_alert,
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.PERMANENT: NormalPermanentAlert("System Overheated"),
ET.SOFT_DISABLE: soft_disable_alert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: user_soft_disable_alert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes"),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Driving model lagging"),
ET.NO_ENTRY: NoEntryAlert("Driving model lagging"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: soft_disable_alert("Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
},
EventName.highCpuUsage: {
#ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
#ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
  # - CAN data is received, but some messages are not received at the right frequency
  # If you're not writing a new car port, this is usually caused by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
ET.SOFT_DISABLE: SoftDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
  # When this happens we can no longer control the car, so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.SOFT_DISABLE: SoftDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Model uncertain at this speed",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.turningIndicatorOn: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steer Unavailable while Turning",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.autoLaneChange: {
ET.WARNING: auto_lane_change_alert,
},
EventName.slowingDownSpeed: {
ET.PERMANENT: Alert("Slowing down","", AlertStatus.normal, AlertSize.small,
Priority.MID, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.slowingDownSpeedSound: {
ET.PERMANENT: Alert("Slowing down","", AlertStatus.normal, AlertSize.small,
Priority.HIGH, VisualAlert.none, AudibleAlert.slowingDownSpeed, 2.),
},
}
| 34.815668 | 152 | 0.691264 | from enum import IntEnum
from typing import Dict, Union, Callable, List, Optional
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
# Short module-level aliases for the capnp enums used throughout this file.
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
class Priority(IntEnum):
  """Arbitration order for alerts: a higher value wins when several alerts are active."""
  LOWEST = 0
  LOWER = 1
  LOW = 2
  MID = 3
  HIGH = 4
  HIGHEST = 5
class ET:
  """Event-type keys used inside the EVENTS dict to select which alert a state emits."""
  ENABLE = 'enable'
  PRE_ENABLE = 'preEnable'
  NO_ENTRY = 'noEntry'
  WARNING = 'warning'
  USER_DISABLE = 'userDisable'
  SOFT_DISABLE = 'softDisable'
  IMMEDIATE_DISABLE = 'immediateDisable'
  PERMANENT = 'permanent'
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
  """Collects event ids for one control cycle and turns them into Alert objects."""

  def __init__(self):
    self.events: List[int] = []
    self.static_events: List[int] = []
    # consecutive-frame counter per event, used to honor each alert's creation_delay
    self.events_prev = dict.fromkeys(EVENTS.keys(), 0)

  @property
  def names(self) -> List[int]:
    return self.events

  def __len__(self) -> int:
    return len(self.events)

  def add(self, event_name: int, static: bool=False) -> None:
    # static events survive clear() and are re-added every cycle
    if static:
      self.static_events.append(event_name)
    self.events.append(event_name)

  def clear(self) -> None:
    # bump the streak counter for events that stayed active, reset the rest
    active = self.events
    self.events_prev = {name: (count + 1 if name in active else 0)
                        for name, count in self.events_prev.items()}
    self.events = self.static_events.copy()

  def any(self, event_type: str) -> bool:
    # True if any currently-active event declares an alert of this type
    return any(event_type in EVENTS.get(event, {}) for event in self.events)

  def create_alerts(self, event_types: List[str], callback_args=None):
    callback_args = [] if callback_args is None else callback_args
    ret = []
    for event in self.events:
      event_alerts = EVENTS[event]
      for et in event_types:
        if et not in event_alerts:
          continue
        alert = event_alerts[et]
        # entries may be Alert instances or factory callbacks
        if not isinstance(alert, Alert):
          alert = alert(*callback_args)
        # only emit once the event has been active long enough
        if DT_CTRL * (self.events_prev[event] + 1) >= alert.creation_delay:
          alert.alert_type = f"{EVENT_NAME[event]}/{et}"
          alert.event_type = et
          ret.append(alert)
    return ret

  def add_from_msg(self, events):
    # ingest event ids from a received capnp message
    self.events.extend(e.name.raw for e in events)

  def to_msg(self):
    ret = []
    for event_name in self.events:
      msg = car.CarEvent.new_message()
      msg.name = event_name
      for event_type in EVENTS.get(event_name, {}):
        setattr(msg, event_type, True)
      ret.append(msg)
    return ret
class Alert:
  """One driver alert: display text, look, chime, and arbitration metadata."""
  def __init__(self,
               alert_text_1: str,
               alert_text_2: str,
               alert_status: log.ControlsState.AlertStatus,
               alert_size: log.ControlsState.AlertSize,
               priority: Priority,
               visual_alert: car.CarControl.HUDControl.VisualAlert,
               audible_alert: car.CarControl.HUDControl.AudibleAlert,
               duration: float,
               alert_rate: float = 0.,
               creation_delay: float = 0.):
    self.alert_text_1 = alert_text_1
    self.alert_text_2 = alert_text_2
    self.alert_status = alert_status
    self.alert_size = alert_size
    self.priority = priority
    self.visual_alert = visual_alert
    self.audible_alert = audible_alert
    # duration is given in seconds; stored as a count of 100Hz control frames
    self.duration = int(duration / DT_CTRL)
    self.alert_rate = alert_rate
    # seconds the event must stay active before the alert is shown
    self.creation_delay = creation_delay
    # filled in by Events.create_alerts when the alert is emitted
    self.alert_type = ""
    self.event_type: Optional[str] = None

  def __str__(self) -> str:
    return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"

  def __gt__(self, alert2) -> bool:
    # comparison by priority only, so max()/sorted() pick the winning alert
    return self.priority > alert2.priority
class NoEntryAlert(Alert):
  """Mid-size alert refusing engagement; headline is a fixed "openpilot unavailable" string."""
  def __init__(self, alert_text_2: str, visual_alert: car.CarControl.HUDControl.VisualAlert=VisualAlert.none):
    super().__init__(
      alert_text_1="오픈파일럿을 사용할 수 없음",
      alert_text_2=alert_text_2,
      alert_status=AlertStatus.normal,
      alert_size=AlertSize.mid,
      priority=Priority.LOW,
      visual_alert=visual_alert,
      audible_alert=AudibleAlert.refuse,
      duration=3.,
    )
class SoftDisableAlert(Alert):
  """Full-screen "take control" alert shown while openpilot soft-disables."""
  def __init__(self, alert_text_2: str):
    # fix: dropped the stray trailing comma that wrapped this call's None
    # result in a throwaway one-element tuple
    super().__init__("핸들을 즉시 잡아주세요", alert_text_2,
                     AlertStatus.userPrompt, AlertSize.full,
                     Priority.MID, VisualAlert.steerRequired,
                     AudibleAlert.warningSoft, 2.)
class UserSoftDisableAlert(SoftDisableAlert):
  """Soft-disable alert for user-triggered causes; overrides the headline text."""
  def __init__(self, alert_text_2: str):
    # fix: removed stray trailing commas that turned both statements into
    # no-op tuple expressions
    super().__init__(alert_text_2)
    self.alert_text_1 = "오픈파일럿이 해제됩니다."
class ImmediateDisableAlert(Alert):
  """Critical full-screen alert used when openpilot must disengage immediately."""
  def __init__(self, alert_text_2: str):
    # fix: dropped the stray trailing comma (created a throwaway tuple)
    super().__init__("핸들을 즉시 잡아주세요", alert_text_2,
                     AlertStatus.critical, AlertSize.full,
                     Priority.HIGHEST, VisualAlert.steerRequired,
                     AudibleAlert.warningImmediate, 4.)
class EngagementAlert(Alert):
  """Invisible, sound-only alert played on engage/disengage transitions."""
  def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
    # fix: dropped the stray trailing comma (created a throwaway tuple)
    super().__init__("", "",
                     AlertStatus.normal, AlertSize.none,
                     Priority.MID, VisualAlert.none,
                     audible_alert, .2)
class NormalPermanentAlert(Alert):
  """Persistent informational alert; sized mid when body text is given, small otherwise."""
  def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2, priority: Priority = Priority.LOWER, creation_delay: float = 0.):
    # fixes: idiomatic truthiness instead of len(), and dropped the stray
    # trailing comma that wrapped the call's result in a throwaway tuple
    super().__init__(alert_text_1, alert_text_2,
                     AlertStatus.normal, AlertSize.mid if alert_text_2 else AlertSize.small,
                     priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay)
class StartupAlert(Alert):
  """Five-second banner shown once at startup; default body reminds the driver to pay attention."""
  def __init__(self, alert_text_1: str, alert_text_2: str = "항상 핸들을 잡고 도로를 주시하세요", alert_status=AlertStatus.normal):
    # fix: dropped the stray trailing comma (created a throwaway tuple)
    super().__init__(alert_text_1, alert_text_2,
                     alert_status, AlertSize.mid,
                     Priority.LOWER, VisualAlert.none, AudibleAlert.none, 5.)
def get_display_speed(speed_ms: float, metric: bool) -> str:
  """Format a speed given in m/s for display in the user's preferred units."""
  if metric:
    factor, unit = CV.MS_TO_KPH, 'km/h'
  else:
    factor, unit = CV.MS_TO_MPH, 'mph'
  return f"{int(round(speed_ms * factor))} {unit}"
AlertCallbackType = Callable[[car.CarParams, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
  """Return an EVENTS callback that always produces a SoftDisableAlert with this text."""
  def make_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    return SoftDisableAlert(alert_text_2)
  return make_alert
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
  """Return an EVENTS callback that always produces a UserSoftDisableAlert with this text."""
  def make_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    return UserSoftDisableAlert(alert_text_2)
  return make_alert
def below_engage_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """No-entry alert telling the driver they are under the minimum engagement speed."""
  limit = get_display_speed(CP.minEnableSpeed, metric)
  return NoEntryAlert("Speed Below " + limit)
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Small prompt shown while driving under the car's minimum steering speed."""
  speed_str = get_display_speed(CP.minSteerSpeed, metric)
  return Alert(
    "Steer Unavailable Below " + speed_str,
    "",
    AlertStatus.userPrompt, AlertSize.small,
    Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Progress alert shown while camera calibration is still collecting data."""
  # %d (truncating) formatting kept deliberately; f-string :.0f would round instead
  progress_line = "Calibration in Progress: %d%%" % sm['liveCalibration'].calPerc
  hint_line = f"Drive Above {get_display_speed(MIN_SPEED_FILTER, metric)}"
  return Alert(
    progress_line,
    hint_line,
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Long-delay warning for persistently poor GPS reception; hint depends on hardware."""
  # uno/dos pandas have the GPS antenna integrated on the board
  integrated_gps_pandas = (log.PandaState.PandaType.uno, log.PandaState.PandaType.dos)
  if sm['peripheralState'].pandaType in integrated_gps_pandas:
    hint = "If sky is visible, contact support"
  else:
    hint = "Check GPS antenna placement"
  return Alert(
    "Poor GPS reception",
    hint,
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """No-entry alert for the cruise main switch being off; Honda uses different wording."""
  text = "Main Switch Off" if CP.carName == "honda" else "Cruise Mode Disabled"
  return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Permanent alert showing live gas/steer commands while in joystick debug mode.

  Bug fix: the original `gb, steer = list(axes)[:2] if len(axes) else (0., 0.)`
  raised ValueError whenever the testJoystick message carried exactly one
  axis. Missing axes are now treated as 0.
  """
  axes = list(sm['testJoystick'].axes)
  axes += [0.] * (2 - len(axes))  # pad so a short axes list can't break the unpack
  gb, steer = axes[:2]
  vals = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
  return NormalPermanentAlert("Joystick Mode", vals)
def auto_lane_change_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Countdown alert shown while an automatic lane change is pending."""
  timer = sm['lateralPlan'].autoLaneChangeTimer
  # %d formatting kept: it truncates the timer the same way the original did
  headline = "차선 변경을 시작합니다 in (%d)" % timer
  return Alert(
    headline,
    "차선의 차량을 확인하세요",
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWER, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75)
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
EventName.stockFcw: {},
EventName.lkasDisabled: {},
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("System Initializing"),
},
EventName.startup: {
ET.PERMANENT: StartupAlert("Be ready to take over at any time")
},
EventName.startupMaster: {
ET.PERMANENT: StartupAlert("WARNING: This branch is not tested",
alert_status=AlertStatus.userPrompt),
},
EventName.startupNoControl: {
ET.PERMANENT: StartupAlert("Dashcam mode"),
},
EventName.startupNoCar: {
ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
},
EventName.startupNoFw: {
ET.PERMANENT: StartupAlert("Car Unrecognized",
"Check comma power connections",
alert_status=AlertStatus.userPrompt),
},
EventName.dashcamMode: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
priority=Priority.LOWEST),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
"Turn off stock LKAS to engage"),
},
EventName.cruiseMismatch: {
},
EventName.communityFeatureDisallowed: {
ET.PERMANENT: NormalPermanentAlert("openpilot Unavailable",
"Enable Community Features in Settings"),
},
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
"Car Unrecognized",
priority=Priority.LOWEST),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"브레이크!",
"추돌 위험",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"브레이크!",
"추돌 위험",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"핸들을 잡아주세요",
"차선이탈 감지됨",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"Release Gas Pedal to Engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
},
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
},
EventName.steerTempUnavailableSilent: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"조향제어 일시적으로 사용불가",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.prompt, 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"도로를 주시하세요 : 운전자 도로주시 불안",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"도로를 주시하세요",
"운전자 도로주시 불안",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"조향제어가 강제로 해제됩니다",
"운전자 도로주시 불안",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"핸들을 잡아주세요 : 운전자 인식 불가",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"운전자 응답하지않음",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"조향제어가 강제로 해제됩니다",
"운전자 응답하지않음",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"수동으로 재활성화하세요",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"앞차량 멈춤",
"앞차가 출발하면 자동 재출발",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"차선을 변경합니다",
"좌측차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"차선을 변경합니다",
"우측차선의 차량을 확인하세요",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"후측방 차량감지",
"차선에 차량이 감지되니 대기하세요",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"차선을 변경합니다",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"핸들을 잡아주세요",
"조향제어 제한을 초과함",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Contact Support"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Contact Support"),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Contact Support"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Contact Support"),
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: soft_disable_alert("Steering Temporarily Unavailable"),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable"),
},
EventName.outOfSpace: {
ET.PERMANENT: NormalPermanentAlert("Out of Storage"),
ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: below_engage_speed_alert,
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.PERMANENT: NormalPermanentAlert("System Overheated"),
ET.SOFT_DISABLE: soft_disable_alert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: user_soft_disable_alert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes"),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Driving model lagging"),
ET.NO_ENTRY: NoEntryAlert("Driving model lagging"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: soft_disable_alert("Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
},
EventName.highCpuUsage: {
#ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
#ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
  # - CAN data is received, but some messages are not received at the right frequency
  # If you're not writing a new car port, this is usually caused by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
ET.SOFT_DISABLE: SoftDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
EventName.plannerError: {
ET.SOFT_DISABLE: SoftDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Model uncertain at this speed",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.turningIndicatorOn: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steer Unavailable while Turning",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.autoLaneChange: {
ET.WARNING: auto_lane_change_alert,
},
EventName.slowingDownSpeed: {
ET.PERMANENT: Alert("Slowing down","", AlertStatus.normal, AlertSize.small,
Priority.MID, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.slowingDownSpeedSound: {
ET.PERMANENT: Alert("Slowing down","", AlertStatus.normal, AlertSize.small,
Priority.HIGH, VisualAlert.none, AudibleAlert.slowingDownSpeed, 2.),
},
}
| true | true |
790075bc25c92d36cee667f523411b0518d70602 | 6,466 | py | Python | selfdrive/car/hyundai/hyundaican.py | zzune/openpilot | 9f8c6e4b61d00efaaefbe4d2b2c0a65a7976d656 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/hyundaican.py | zzune/openpilot | 9f8c6e4b61d00efaaefbe4d2b2c0a65a7976d656 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/hyundaican.py | zzune/openpilot | 9f8c6e4b61d00efaaefbe4d2b2c0a65a7976d656 | [
"MIT"
] | null | null | null | import crcmod
from selfdrive.car.hyundai.values import CAR, CHECKSUM
hyundai_checksum = crcmod.mkCrcFun(0x11D, initCrc=0xFD, rev=False, xorOut=0xdf)
def create_lkas11(packer, car_fingerprint, bus, apply_steer, steer_req, cnt, enabled, lkas11, hud_alert,
                  lane_visible, left_lane_depart, right_lane_depart, keep_stock=False):
  """Build the LKAS11 steering-command CAN message.

  Args:
    packer: CAN packer for the car's DBC.
    car_fingerprint: CAR.* identifier; a few platforms need tweaked option bits.
    bus: CAN bus the final message is sent on.
    apply_steer: steering torque command (CR_Lkas_StrToqReq).
    steer_req: 1 while openpilot actively requests steering.
    cnt: rolling message counter (CF_Lkas_MsgCount).
    enabled: openpilot engaged flag; drives the LDWS system-state display.
    lkas11: last stock LKAS11 message, used as the pass-through source.
    hud_alert: HUD warning code (CF_Lkas_SysWarning).
    lane_visible: unused here; kept for call-site compatibility.
    left_lane_depart, right_lane_depart: lane-departure warning flags.
    keep_stock: when True, forward the car's own values for the cosmetic
      option bits instead of forcing our defaults.

  Returns:
    The packed LKAS11 message for `bus`, with its checksum filled in.
  """
  def stock(signal, default):
    # Forward the car's own value when keep_stock is set, otherwise force `default`.
    return lkas11[signal] if keep_stock else default

  values = {
    "CF_Lkas_Bca_R": stock("CF_Lkas_Bca_R", 3),
    "CF_Lkas_LdwsSysState": 3 if enabled else 1,
    "CF_Lkas_SysWarning": hud_alert,
    "CF_Lkas_LdwsLHWarning": left_lane_depart,
    "CF_Lkas_LdwsRHWarning": right_lane_depart,
    "CF_Lkas_HbaLamp": stock("CF_Lkas_HbaLamp", 0),
    "CF_Lkas_FcwBasReq": stock("CF_Lkas_FcwBasReq", 0),
    "CR_Lkas_StrToqReq": apply_steer,
    "CF_Lkas_ActToi": steer_req,
    "CF_Lkas_ToiFlt": 0,
    "CF_Lkas_HbaSysState": stock("CF_Lkas_HbaSysState", 1),
    "CF_Lkas_FcwOpt": stock("CF_Lkas_FcwOpt", 0),
    "CF_Lkas_HbaOpt": stock("CF_Lkas_HbaOpt", 3),
    "CF_Lkas_MsgCount": cnt,
    "CF_Lkas_FcwSysState": stock("CF_Lkas_FcwSysState", 0),
    "CF_Lkas_FcwCollisionWarning": stock("CF_Lkas_FcwCollisionWarning", 0),
    "CF_Lkas_FusionState": stock("CF_Lkas_FusionState", 0),
    "CF_Lkas_Chksum": 0,  # placeholder; computed from the packed payload below
    "CF_Lkas_FcwOpt_USM": stock("CF_Lkas_FcwOpt_USM", 2),
    "CF_Lkas_LdwsOpt_USM": stock("CF_Lkas_LdwsOpt_USM", 3),
  }

  # Platform-specific option bits.
  if car_fingerprint == CAR.GENESIS:
    values["CF_Lkas_Bca_R"] = 2
    values["CF_Lkas_HbaSysState"] = stock("CF_Lkas_HbaSysState", 0)
    values["CF_Lkas_HbaOpt"] = stock("CF_Lkas_HbaOpt", 1)
    values["CF_Lkas_FcwOpt_USM"] = stock("CF_Lkas_FcwOpt_USM", 2)
    values["CF_Lkas_LdwsOpt_USM"] = stock("CF_Lkas_LdwsOpt_USM", 0)
  if car_fingerprint == CAR.KIA_OPTIMA:
    values["CF_Lkas_Bca_R"] = 0
    values["CF_Lkas_HbaOpt"] = stock("CF_Lkas_HbaOpt", 1)
    values["CF_Lkas_FcwOpt_USM"] = stock("CF_Lkas_FcwOpt_USM", 0)
  if car_fingerprint == CAR.KIA_CARDENZA:
    # FcwOpt_USM HUD legend: 0 = no car, 1 = white, 2 = green, 3 = green
    # blinking, 4 = orange, 5 = orange blinking car + lanes.
    # SysWarning 4/5/6 = "keep hands on wheel" variants (hidden while
    # the turn signals are on).
    values["CF_Lkas_Bca_R"] = 0
    values["CF_Lkas_FcwOpt_USM"] = 1
    values["CF_Lkas_LdwsOpt_USM"] = 3

  # Pack once with Chksum = 0, then fill in the platform's checksum flavor.
  dat = packer.make_can_msg("LKAS11", 0, values)[2]

  if car_fingerprint in CHECKSUM["crc8"]:
    # CRC-8 over bytes 0-5 and 7 (skipping the checksum byte), as seen on
    # the 2019 Hyundai Santa Fe.
    dat = dat[:6] + dat[7:8]
    checksum = hyundai_checksum(dat)
  elif car_fingerprint in CHECKSUM["6B"]:
    # Sum of the first 6 bytes, as seen on the 2018 Kia Sorento.
    checksum = sum(dat[:6]) % 256
  else:
    # Sum of the first 6 bytes plus the last byte, as seen on the 2018 Kia Stinger.
    checksum = (sum(dat[:6]) + dat[7]) % 256

  values["CF_Lkas_Chksum"] = checksum

  return packer.make_can_msg("LKAS11", bus, values)
def create_clu11(packer, bus, clu11, button, speed, cnt):
  """Re-emit the stock CLU11 cluster message, overriding only the cruise
  button state, the displayed speed and the alive counter."""
  passthrough = (
    "CF_Clu_CruiseSwMain", "CF_Clu_SldMainSW", "CF_Clu_ParityBit1",
    "CF_Clu_VanzDecimal", "CF_Clu_SPEED_UNIT", "CF_Clu_DetentOut",
    "CF_Clu_RheostatLevel", "CF_Clu_CluInfo", "CF_Clu_AmpInfo",
  )
  values = {signal: clu11[signal] for signal in passthrough}
  values["CF_Clu_CruiseSwState"] = button
  values["CF_Clu_Vanz"] = speed
  values["CF_Clu_AliveCnt1"] = cnt
  return packer.make_can_msg("CLU11", bus, values)
def create_scc12(packer, apply_accel, enabled, cnt, scc12):
  """Forward the stock SCC12 message, substituting our acceleration command
  while openpilot is engaged and stock ACC is active (ACCMode == 1)."""
  passthrough = (
    "CF_VSM_Prefill", "CF_VSM_DecCmdAct", "CF_VSM_HBACmd", "CF_VSM_Warn",
    "CF_VSM_Stat", "CF_VSM_BeltCmd", "ACCFailInfo", "ACCMode", "StopReq",
    "CR_VSM_DecCmd", "TakeOverReq", "PreFill", "CF_VSM_ConfMode",
    "AEB_Failinfo", "AEB_Status", "AEB_CmdAct", "AEB_StopReq",
  )
  values = {signal: scc12[signal] for signal in passthrough}

  use_op_accel = enabled and scc12["ACCMode"] == 1
  values["aReqMax"] = apply_accel if use_op_accel else scc12["aReqMax"]
  values["aReqMin"] = apply_accel if use_op_accel else scc12["aReqMin"]
  values["CR_VSM_Alive"] = cnt
  values["CR_VSM_ChkSum"] = 0  # placeholder; computed from packed payload

  # 4-bit checksum: complement of the nibble-wise byte sum of the payload
  # (note: binds as 16 - (nibble_sum % 16), matching the stock ECU).
  dat = packer.make_can_msg("SCC12", 0, values)[2]
  nibble_sum = sum(sum(divmod(b, 16)) for b in dat)
  values["CR_VSM_ChkSum"] = 16 - nibble_sum % 16

  return packer.make_can_msg("SCC12", 0, values)
def create_mdps12(packer, car_fingerprint, cnt, mdps12):
  """Re-emit the stock MDPS12 message on bus 2 with torque-override status
  forced off and a fresh counter/checksum. `car_fingerprint` is accepted
  for call-site symmetry but not used."""
  passthrough = (
    "CR_Mdps_StrColTq", "CF_Mdps_Def", "CF_Mdps_ToiFlt", "CF_Mdps_SErr",
    "CR_Mdps_StrTq", "CF_Mdps_FailStat", "CR_Mdps_OutTq",
  )
  values = {signal: mdps12[signal] for signal in passthrough}
  values["CF_Mdps_ToiActive"] = 0
  values["CF_Mdps_ToiUnavail"] = 1
  values["CF_Mdps_MsgCount2"] = cnt
  values["CF_Mdps_Chksum2"] = 0  # placeholder; computed below

  # Checksum is the simple byte sum of the packed payload, mod 256.
  dat = packer.make_can_msg("MDPS12", 2, values)[2]
  values["CF_Mdps_Chksum2"] = sum(dat) % 256

  return packer.make_can_msg("MDPS12", 2, values)
| 43.395973 | 104 | 0.689453 | import crcmod
from selfdrive.car.hyundai.values import CAR, CHECKSUM
hyundai_checksum = crcmod.mkCrcFun(0x11D, initCrc=0xFD, rev=False, xorOut=0xdf)
def create_lkas11(packer, car_fingerprint, bus, apply_steer, steer_req, cnt, enabled, lkas11, hud_alert,
lane_visible, left_lane_depart, right_lane_depart, keep_stock=False):
values = {
"CF_Lkas_Bca_R": lkas11["CF_Lkas_Bca_R"] if keep_stock else 3,
"CF_Lkas_LdwsSysState": 3 if enabled else 1,
"CF_Lkas_SysWarning": hud_alert,
"CF_Lkas_LdwsLHWarning": left_lane_depart,
"CF_Lkas_LdwsRHWarning": right_lane_depart,
"CF_Lkas_HbaLamp": lkas11["CF_Lkas_HbaLamp"] if keep_stock else 0,
"CF_Lkas_FcwBasReq": lkas11["CF_Lkas_FcwBasReq"] if keep_stock else 0,
"CR_Lkas_StrToqReq": apply_steer,
"CF_Lkas_ActToi": steer_req,
"CF_Lkas_ToiFlt": 0,
"CF_Lkas_HbaSysState": lkas11["CF_Lkas_HbaSysState"] if keep_stock else 1,
"CF_Lkas_FcwOpt": lkas11["CF_Lkas_FcwOpt"] if keep_stock else 0,
"CF_Lkas_HbaOpt": lkas11["CF_Lkas_HbaOpt"] if keep_stock else 3,
"CF_Lkas_MsgCount": cnt,
"CF_Lkas_FcwSysState": lkas11["CF_Lkas_FcwSysState"] if keep_stock else 0,
"CF_Lkas_FcwCollisionWarning": lkas11["CF_Lkas_FcwCollisionWarning"] if keep_stock else 0,
"CF_Lkas_FusionState": lkas11["CF_Lkas_FusionState"] if keep_stock else 0,
"CF_Lkas_Chksum": 0,
"CF_Lkas_FcwOpt_USM": lkas11["CF_Lkas_FcwOpt_USM"] if keep_stock else 2,
"CF_Lkas_LdwsOpt_USM": lkas11["CF_Lkas_LdwsOpt_USM"] if keep_stock else 3,
}
if car_fingerprint == CAR.GENESIS:
values["CF_Lkas_Bca_R"] = 2
values["CF_Lkas_HbaSysState"] = lkas11["CF_Lkas_HbaSysState"] if keep_stock else 0
values["CF_Lkas_HbaOpt"] = lkas11["CF_Lkas_HbaOpt"] if keep_stock else 1
values["CF_Lkas_FcwOpt_USM"] = lkas11["CF_Lkas_FcwOpt_USM"] if keep_stock else 2
values["CF_Lkas_LdwsOpt_USM"] = lkas11["CF_Lkas_LdwsOpt_USM"] if keep_stock else 0
if car_fingerprint == CAR.KIA_OPTIMA:
values["CF_Lkas_Bca_R"] = 0
values["CF_Lkas_HbaOpt"] = lkas11["CF_Lkas_HbaOpt"] if keep_stock else 1
values["CF_Lkas_FcwOpt_USM"] = lkas11["CF_Lkas_FcwOpt_USM"] if keep_stock else 0
if car_fingerprint == CAR.KIA_CARDENZA:
hksum2"] = checksum
return packer.make_can_msg("MDPS12", 2, values)
| true | true |
79007603346930c0b565c6c3b79c52882716d78e | 7,293 | py | Python | utils/datasets.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | 19 | 2018-06-08T05:33:47.000Z | 2021-04-26T16:19:32.000Z | utils/datasets.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | null | null | null | utils/datasets.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | 13 | 2018-09-24T21:52:06.000Z | 2021-02-26T10:40:25.000Z | import os
import numpy as np
import pandas as pd
from sklearn.datasets.samples_generator import make_swiss_roll
import torch
import torchvision
from torchvision import transforms
import glob
import random
import config as cfg
import utils.metadata as meta
from . import csv_loader
from . import img_loader
# Datasets
# pytorch.org/docs/master/torchvision/datasets.html
# https://github.com/bfortuner/pytorch-cheatsheet/blob/master/pytorch-cheatsheet.ipynb
def get_iris_data():
    """Return the classic iris dataset as a DataFrame, downloading it to
    ../data/iris.csv on first use."""
    url = ("https://raw.githubusercontent.com/pydata/pandas/master/"
           "pandas/tests/data/iris.csv")
    return csv_loader.load_or_download_df("../data/iris.csv", url)
def get_sin_data():
    """Generate a reproducible noisy-sine regression toy set.

    Returns (X, y): X is an (80, 1) array of sorted inputs in [0, 5);
    y = sin(X) with every fifth target perturbed by uniform noise.
    """
    rng = np.random.RandomState(1)  # fixed seed -> identical data each call
    inputs = np.sort(5 * rng.rand(80, 1), axis=0)
    targets = np.sin(inputs).ravel()
    noise = 3 * (0.5 - rng.rand(16))
    targets[::5] += noise
    return inputs, targets
def get_housing_data():
    """Return the Boston housing dataset as a DataFrame, downloading it to
    ../data/housing.csv on first use.

    Dataset description: https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html
    """
    url = ("https://raw.githubusercontent.com/ggallo/boston-housing/"
           "master/housing.csv")
    return csv_loader.load_or_download_df("../data/housing.csv", url)
def get_advertising_data():
    """Return the ISLR Advertising dataset, minus its CSV row-index column."""
    url = "http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv"
    df = csv_loader.load_or_download_df("../data/advertising.csv", url)
    # The first column is only the CSV row index; discard it.
    return df.drop(df.columns[0], axis=1)
def get_swiss_roll_data(n_samples=1000):
    """Sample the noisy swiss-roll manifold, keeping only the (x, z) plane.

    Returns (points, t): points is an (n_samples, 2) float32 array; t is
    the manifold parameter returned by sklearn's generator.
    """
    points, t = make_swiss_roll(n_samples, 0.2)
    # Drop the y coordinate so the roll becomes a 2-D spiral.
    return points.astype('float32')[:, [0, 2]], t
def get_swiss_roll_loader(n_samples=1000):
    """Wrap swiss-roll samples in a shuffling DataLoader (batch size 100)."""
    points, t = get_swiss_roll_data(n_samples)
    tensors = torch.utils.data.dataset.TensorDataset(
        torch.FloatTensor(points), torch.FloatTensor(t))
    return torch.utils.data.dataloader.DataLoader(
        tensors, batch_size=100, shuffle=True)
def get_mnist_loader():
    """Return (train_loader, test_loader) for MNIST.

    Both splits use the same ToTensor + per-channel normalization; only the
    training loader shuffles. Data is downloaded to ../data on first use.
    """
    # Standard MNIST channel statistics.
    normalize = transforms.Normalize(np.array([0.1307, ]), np.array([0.3081, ]))
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])

    def make_loader(train):
        dataset = torchvision.datasets.MNIST(
            root='../data', train=train, download=True, transform=transform)
        return torch.utils.data.DataLoader(
            dataset, batch_size=128, shuffle=train, num_workers=2)

    return make_loader(True), make_loader(False)
def get_cifar_loader():
    """Return (train_loader, test_loader, classes) for CIFAR-10.

    Training data gets random-crop + horizontal-flip augmentation; both
    splits are normalized with the standard CIFAR channel statistics.
    Data is downloaded to ../data on first use.
    """
    normalize = transforms.Normalize(
        np.array([0.49139968, 0.48215827, 0.44653124]),
        np.array([0.24703233, 0.24348505, 0.26158768]))
    train_tf = transforms.Compose([
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])
    test_tf = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])

    def make_loader(train, tf):
        dataset = torchvision.datasets.CIFAR10(
            root='../data', train=train, download=True, transform=tf)
        return torch.utils.data.DataLoader(
            dataset, batch_size=64, shuffle=train, num_workers=2)

    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    return make_loader(True, train_tf), make_loader(False, test_tf), classes
def get_catsdogs_loader(imgs_dir):
    """Build (train_loader, val_loader, classes) for Kaggle cats-vs-dogs.

    Expects ALL competition images to sit flat in `imgs_dir` with names
    like ``cat.123.jpg`` / ``dog.456.jpg``; the label is parsed from each
    file name. The first 20000 shuffled images form the training split and
    the remainder the validation split.

    NOTE(review): the split depends on the global `random` state, so it
    differs between runs unless the caller seeds `random` first.
    """
    classes = ['cat', 'dog']
    class_to_idx, idx_to_class = meta.get_key_int_maps(classes)

    def get_targs_from_fpaths(fpaths):
        # Parse the class name from the file name. os.path.basename keeps
        # this correct on Windows paths, unlike splitting on '/'.
        targs = []
        for fpath in fpaths:
            classname = os.path.basename(fpath).split('.')[0]
            targs.append(class_to_idx[classname])
        return targs

    # ImageNet channel statistics, matching pretrained torchvision models.
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])
    trainTransform = transforms.Compose([
        transforms.RandomSizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])
    testTransform = transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])

    fpaths = glob.glob(imgs_dir + '*.jpg')
    random.shuffle(fpaths)
    trn_fpaths = fpaths[:20000]
    val_fpaths = fpaths[20000:]

    trn_targs = get_targs_from_fpaths(trn_fpaths)
    val_targs = get_targs_from_fpaths(val_fpaths)

    img_reader = 'pil'
    trn_dataset = FileDataset(
        trn_fpaths, img_reader, trn_targs, trainTransform)
    val_dataset = FileDataset(
        val_fpaths, img_reader, val_targs, testTransform)

    trn_loader = torch.utils.data.DataLoader(
        trn_dataset, batch_size=64,
        shuffle=True, num_workers=4)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=64,
        shuffle=False, num_workers=2)

    return trn_loader, val_loader, classes
# Registry mapping a short reader key to the image-loading callable from
# utils.img_loader; FileDataset resolves its `img_loader` argument here.
loaders = {
    'pil': img_loader.pil_loader,
    'tns': img_loader.tensor_loader,
    'npy': img_loader.numpy_loader,
    'io': img_loader.io_loader
}
class FileDataset(torch.utils.data.Dataset):
    """Dataset over a list of image file paths with optional targets.

    Each item is an ``(input, target, path)`` triple. When no targets are
    supplied, a constant dummy target of 1 is returned (useful for
    unlabeled test sets).
    """

    def __init__(self, fpaths,
                 img_loader='pil',
                 targets=None,
                 transform=None,
                 target_transform=None):
        self.fpaths = fpaths
        self.loader = self._get_loader(img_loader)
        self.targets = targets
        self.transform = transform
        self.target_transform = target_transform

    def _get_loader(self, loader_type):
        # Resolve the reader key against the module-level `loaders` registry.
        return loaders[loader_type]

    def _get_target(self, index):
        if self.targets is None:
            return 1  # dummy label for unlabeled data
        target = self.targets[index]
        if self.target_transform is None:
            return int(target)
        return self.target_transform(target)

    def _get_input(self, index):
        img = self.loader(self.fpaths[index])
        return img if self.transform is None else self.transform(img)

    def __getitem__(self, index):
        return (self._get_input(index),
                self._get_target(index),
                self.fpaths[index])

    def __len__(self):
        return len(self.fpaths)
| 31.571429 | 96 | 0.639106 | import os
import numpy as np
import pandas as pd
from sklearn.datasets.samples_generator import make_swiss_roll
import torch
import torchvision
from torchvision import transforms
import glob
import random
import config as cfg
import utils.metadata as meta
from . import csv_loader
from . import img_loader
def get_iris_data():
fpath = "../data/iris.csv"
url = "https://raw.githubusercontent.com/pydata/pandas/master/pandas/tests/data/iris.csv"
df = csv_loader.load_or_download_df(fpath, url)
return df
def get_sin_data():
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
return X,y
def get_housing_data():
fpath = "../data/housing.csv"
url = "https://raw.githubusercontent.com/ggallo/boston-housing/master/housing.csv"
df = csv_loader.load_or_download_df(fpath, url)
return df
def get_advertising_data():
fpath = "../data/advertising.csv"
url = "http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv"
df = csv_loader.load_or_download_df(fpath, url)
df = df.drop(df.columns[0], axis=1)
return df
def get_swiss_roll_data(n_samples=1000):
noise = 0.2
X, _ = make_swiss_roll(n_samples, noise)
X = X.astype('float32')[:, [0, 2]]
return X, _
def get_swiss_roll_loader(n_samples=1000):
X, _ = get_swiss_roll_data(n_samples)
dataset = torch.utils.data.dataset.TensorDataset(
torch.FloatTensor(X), torch.FloatTensor(_))
loader = torch.utils.data.dataloader.DataLoader(
dataset, batch_size=100, shuffle=True)
return loader
def get_mnist_loader():
MNIST_MEAN = np.array([0.1307,])
MNIST_STD = np.array([0.3081,])
normTransform = transforms.Normalize(MNIST_MEAN, MNIST_STD)
trainTransform = transforms.Compose([
transforms.ToTensor(),
normTransform
])
testTransform = transforms.Compose([
transforms.ToTensor(),
normTransform
])
trainset = torchvision.datasets.MNIST(root='../data', train=True,
download=True, transform=trainTransform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
shuffle=True, num_workers=2)
testset = torchvision.datasets.MNIST(root='../data', train=False,
download=True, transform=testTransform)
testloader = torch.utils.data.DataLoader(testset, batch_size=128,
shuffle=False, num_workers=2)
return trainloader, testloader
def get_cifar_loader():
CIFAR_MEAN = np.array([0.49139968, 0.48215827, 0.44653124])
CIFAR_STD = np.array([0.24703233, 0.24348505, 0.26158768])
normTransform = transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
trainTransform = transforms.Compose([
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normTransform
])
testTransform = transforms.Compose([
transforms.ToTensor(),
normTransform
])
trainset = torchvision.datasets.CIFAR10(root='../data', train=True,
download=True, transform=trainTransform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='../data', train=False,
download=True, transform=testTransform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
return trainloader, testloader, classes
def get_catsdogs_loader(imgs_dir):
classes = ['cat','dog']
class_to_idx, idx_to_class = meta.get_key_int_maps(classes)
def get_targs_from_fpaths(fpaths):
targs = []
for fpath in fpaths:
classname = fpath.split('/')[-1].split('.')[0]
targs.append(class_to_idx[classname])
return targs
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
trainTransform = transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
testTransform = transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
fpaths = glob.glob(imgs_dir + '*.jpg')
random.shuffle(fpaths)
trn_fpaths = fpaths[:20000]
val_fpaths = fpaths[20000:]
trn_targs = get_targs_from_fpaths(trn_fpaths)
val_targs = get_targs_from_fpaths(val_fpaths)
img_reader = 'pil'
trn_dataset = FileDataset(
trn_fpaths, img_reader, trn_targs, trainTransform)
val_dataset = FileDataset(
val_fpaths, img_reader, val_targs, testTransform)
trn_loader = torch.utils.data.DataLoader(
trn_dataset, batch_size=64,
shuffle=True, num_workers=4)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=64,
shuffle=False, num_workers=2)
return trn_loader, val_loader, classes
loaders = {
'pil': img_loader.pil_loader,
'tns': img_loader.tensor_loader,
'npy': img_loader.numpy_loader,
'io': img_loader.io_loader
}
class FileDataset(torch.utils.data.Dataset):
def __init__(self, fpaths,
img_loader='pil',
targets=None,
transform=None,
target_transform=None):
self.fpaths = fpaths
self.loader = self._get_loader(img_loader)
self.targets = targets
self.transform = transform
self.target_transform = target_transform
def _get_loader(self, loader_type):
return loaders[loader_type]
def _get_target(self, index):
if self.targets is None:
return 1
target = self.targets[index]
if self.target_transform is not None:
return self.target_transform(target)
return int(target)
def _get_input(self, index):
img_path = self.fpaths[index]
img = self.loader(img_path)
if self.transform is not None:
img = self.transform(img)
return img
def __getitem__(self, index):
input_ = self._get_input(index)
target = self._get_target(index)
img_path = self.fpaths[index]
return input_, target, img_path
def __len__(self):
return len(self.fpaths)
| true | true |
7900766802225ae53d0725336087e8a594ca609e | 2,870 | py | Python | imcsdk/mometa/chassis/ChassisPowerMonitor.py | ecoen66/imcsdk | b10eaa926a5ee57cea7182ae0adc8dd1c818b0ab | [
"Apache-2.0"
] | 31 | 2016-06-14T07:23:59.000Z | 2021-09-12T17:17:26.000Z | imcsdk/mometa/chassis/ChassisPowerMonitor.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 109 | 2016-05-25T03:56:56.000Z | 2021-10-18T02:58:12.000Z | imcsdk/mometa/chassis/ChassisPowerMonitor.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 67 | 2016-05-17T05:53:56.000Z | 2022-03-24T15:52:53.000Z | """This module contains the general information for ChassisPowerMonitor ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ChassisPowerMonitorConsts:
    """Constant namespace for ChassisPowerMonitor; this MO defines none,
    the empty class is kept for SDK-wide uniformity (generated code)."""
    pass
class ChassisPowerMonitor(ManagedObject):
    """Read-only managed object exposing chassis power statistics
    (generated code; metadata tables must match the IMC model)."""

    consts = ChassisPowerMonitorConsts()
    # This MO has no naming properties; its RN is the fixed "pwrmonitor".
    naming_props = set([])

    # Access is "OutputOnly" and the only verb is "Get"; parent is equipmentChassis.
    mo_meta = {
        "modular": MoMeta("ChassisPowerMonitor", "chassisPowerMonitor", "pwrmonitor", VersionMeta.Version2013e, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], ['equipmentChassis'], [], ["Get"])
    }

    # Per-property metadata: every power statistic is READ_ONLY.
    prop_meta = {
        "modular": {
            "average": MoPropertyMeta("average", "average", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "current": MoPropertyMeta("current", "current", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
            "maximum": MoPropertyMeta("maximum", "maximum", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "minimum": MoPropertyMeta("minimum", "minimum", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "period": MoPropertyMeta("period", "period", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        },
    }

    # XML attribute name -> Python attribute name mapping used by the (de)serializer.
    prop_map = {
        "modular": {
            "average": "average",
            "childAction": "child_action",
            "current": "current",
            "dn": "dn",
            "maximum": "maximum",
            "minimum": "minimum",
            "period": "period",
            "rn": "rn",
            "status": "status",
        },
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the MO under `parent_mo_or_dn`; property values may be
        supplied via keyword arguments."""
        self._dirty_mask = 0
        self.average = None       # average chassis power over the period
        self.child_action = None  # internal child-action bookkeeping
        self.current = None       # most recent power reading
        self.maximum = None       # peak power over the period
        self.minimum = None       # lowest power over the period
        self.period = None        # sampling period of the statistics
        self.status = None        # created/modified/removed/deleted marker

        ManagedObject.__init__(self, "ChassisPowerMonitor", parent_mo_or_dn, **kwargs)
| 42.835821 | 234 | 0.619164 |
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ChassisPowerMonitorConsts:
pass
class ChassisPowerMonitor(ManagedObject):
consts = ChassisPowerMonitorConsts()
naming_props = set([])
mo_meta = {
"modular": MoMeta("ChassisPowerMonitor", "chassisPowerMonitor", "pwrmonitor", VersionMeta.Version2013e, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], ['equipmentChassis'], [], ["Get"])
}
prop_meta = {
"modular": {
"average": MoPropertyMeta("average", "average", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"current": MoPropertyMeta("current", "current", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"maximum": MoPropertyMeta("maximum", "maximum", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"minimum": MoPropertyMeta("minimum", "minimum", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"period": MoPropertyMeta("period", "period", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
},
}
prop_map = {
"modular": {
"average": "average",
"childAction": "child_action",
"current": "current",
"dn": "dn",
"maximum": "maximum",
"minimum": "minimum",
"period": "period",
"rn": "rn",
"status": "status",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.average = None
self.child_action = None
self.current = None
self.maximum = None
self.minimum = None
self.period = None
self.status = None
ManagedObject.__init__(self, "ChassisPowerMonitor", parent_mo_or_dn, **kwargs)
| true | true |
790076a9d9e3131be50c6f478d1bb94c1b83f4b7 | 4,320 | py | Python | gluoncv/model_zoo/model_zoo.py | Ellinier/gluon-cv | 66d0efee41faf93a40bc53d890b9b7a5ff8e0d8e | [
"Apache-2.0"
] | null | null | null | gluoncv/model_zoo/model_zoo.py | Ellinier/gluon-cv | 66d0efee41faf93a40bc53d890b9b7a5ff8e0d8e | [
"Apache-2.0"
] | null | null | null | gluoncv/model_zoo/model_zoo.py | Ellinier/gluon-cv | 66d0efee41faf93a40bc53d890b9b7a5ff8e0d8e | [
"Apache-2.0"
] | null | null | null | # pylint: disable=wildcard-import, unused-wildcard-import
"""Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
"""
from mxnet import gluon
from .ssd import *
from .faster_rcnn import *
from .fcn import *
from .pspnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .se_resnet import *
from .yolo import *
__all__ = ['get_model']
def get_model(name, **kwargs):
    """Returns a pre-defined model by name.

    The name is first tried against mxnet.gluon.model_zoo.vision; on a
    miss, it is looked up (case-insensitively) in gluoncv's own registry
    below. A ValueError listing all supported names is raised when neither
    registry knows the model.

    Parameters
    ----------
    name : str
        Name of the model.
    pretrained : bool
        Whether to load the pretrained weights for model.
    classes : int
        Number of classes for the output layer.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    HybridBlock
        The model.
    """
    # gluoncv-specific registry: model name -> constructor.
    models = {
        'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
        'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
        'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
        'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
        'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
        'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
        'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
        'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
        'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
        'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc,
        'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco,
        'faster_rcnn_resnet50_v2a_voc': faster_rcnn_resnet50_v2a_voc,
        'faster_rcnn_resnet50_v2a_coco': faster_rcnn_resnet50_v2a_coco,
        'cifar_resnet20_v1': cifar_resnet20_v1,
        'cifar_resnet56_v1': cifar_resnet56_v1,
        'cifar_resnet110_v1': cifar_resnet110_v1,
        'cifar_resnet20_v2': cifar_resnet20_v2,
        'cifar_resnet56_v2': cifar_resnet56_v2,
        'cifar_resnet110_v2': cifar_resnet110_v2,
        'cifar_wideresnet16_10': cifar_wideresnet16_10,
        'cifar_wideresnet28_10': cifar_wideresnet28_10,
        'cifar_wideresnet40_8': cifar_wideresnet40_8,
        'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
        'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
        'fcn_resnet50_voc' : get_fcn_voc_resnet50,
        'fcn_resnet101_voc' : get_fcn_voc_resnet101,
        'fcn_resnet50_ade' : get_fcn_ade_resnet50,
        'psp_resnet50_ade' : get_psp_ade_resnet50,
        'resnet18_v1b' : resnet18_v1b,
        'resnet34_v1b' : resnet34_v1b,
        'resnet50_v1b' : resnet50_v1b,
        'resnet101_v1b' : resnet101_v1b,
        'resnet152_v1b' : resnet152_v1b,
        'resnet50_v2a': resnet50_v2a,
        'resnext50_32x4d' : resnext50_32x4d,
        'resnext101_32x4d' : resnext101_32x4d,
        'resnext101_64x4d' : resnext101_64x4d,
        'se_resnext50_32x4d' : se_resnext50_32x4d,
        'se_resnext101_32x4d' : se_resnext101_32x4d,
        'se_resnext101_64x4d' : se_resnext101_64x4d,
        'senet_52' : senet_52,
        'senet_103' : senet_103,
        'senet_154' : senet_154,
        'se_resnet18_v1' : se_resnet18_v1,
        'se_resnet34_v1' : se_resnet34_v1,
        'se_resnet50_v1' : se_resnet50_v1,
        'se_resnet101_v1' : se_resnet101_v1,
        'se_resnet152_v1' : se_resnet152_v1,
        'se_resnet18_v2' : se_resnet18_v2,
        'se_resnet34_v2' : se_resnet34_v2,
        'se_resnet50_v2' : se_resnet50_v2,
        'se_resnet101_v2' : se_resnet101_v2,
        'se_resnet152_v2' : se_resnet152_v2,
        'darknet53': darknet53,
        'yolo3_416_darknet53_voc': yolo3_416_darknet53_voc,
        'yolo3_416_darknet53_coco': yolo3_416_darknet53_coco,
    }
    # Prefer the upstream mxnet model zoo; remember its error message so a
    # final miss can report BOTH registries' supported names.
    try:
        net = gluon.model_zoo.vision.get_model(name, **kwargs)
        return net
    except ValueError as e:
        upstream_supported = str(e)
        # avoid raising inside which cause a bit messy error message
    name = name.lower()
    if name not in models:
        raise ValueError('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(models.keys()))))
    net = models[name](**kwargs)
    return net
| 38.918919 | 95 | 0.693287 |
from mxnet import gluon
from .ssd import *
from .faster_rcnn import *
from .fcn import *
from .pspnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .se_resnet import *
from .yolo import *
__all__ = ['get_model']
def get_model(name, **kwargs):
models = {
'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc,
'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco,
'faster_rcnn_resnet50_v2a_voc': faster_rcnn_resnet50_v2a_voc,
'faster_rcnn_resnet50_v2a_coco': faster_rcnn_resnet50_v2a_coco,
'cifar_resnet20_v1': cifar_resnet20_v1,
'cifar_resnet56_v1': cifar_resnet56_v1,
'cifar_resnet110_v1': cifar_resnet110_v1,
'cifar_resnet20_v2': cifar_resnet20_v2,
'cifar_resnet56_v2': cifar_resnet56_v2,
'cifar_resnet110_v2': cifar_resnet110_v2,
'cifar_wideresnet16_10': cifar_wideresnet16_10,
'cifar_wideresnet28_10': cifar_wideresnet28_10,
'cifar_wideresnet40_8': cifar_wideresnet40_8,
'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
'fcn_resnet50_voc' : get_fcn_voc_resnet50,
'fcn_resnet101_voc' : get_fcn_voc_resnet101,
'fcn_resnet50_ade' : get_fcn_ade_resnet50,
'psp_resnet50_ade' : get_psp_ade_resnet50,
'resnet18_v1b' : resnet18_v1b,
'resnet34_v1b' : resnet34_v1b,
'resnet50_v1b' : resnet50_v1b,
'resnet101_v1b' : resnet101_v1b,
'resnet152_v1b' : resnet152_v1b,
'resnet50_v2a': resnet50_v2a,
'resnext50_32x4d' : resnext50_32x4d,
'resnext101_32x4d' : resnext101_32x4d,
'resnext101_64x4d' : resnext101_64x4d,
'se_resnext50_32x4d' : se_resnext50_32x4d,
'se_resnext101_32x4d' : se_resnext101_32x4d,
'se_resnext101_64x4d' : se_resnext101_64x4d,
'senet_52' : senet_52,
'senet_103' : senet_103,
'senet_154' : senet_154,
'se_resnet18_v1' : se_resnet18_v1,
'se_resnet34_v1' : se_resnet34_v1,
'se_resnet50_v1' : se_resnet50_v1,
'se_resnet101_v1' : se_resnet101_v1,
'se_resnet152_v1' : se_resnet152_v1,
'se_resnet18_v2' : se_resnet18_v2,
'se_resnet34_v2' : se_resnet34_v2,
'se_resnet50_v2' : se_resnet50_v2,
'se_resnet101_v2' : se_resnet101_v2,
'se_resnet152_v2' : se_resnet152_v2,
'darknet53': darknet53,
'yolo3_416_darknet53_voc': yolo3_416_darknet53_voc,
'yolo3_416_darknet53_coco': yolo3_416_darknet53_coco,
}
try:
net = gluon.model_zoo.vision.get_model(name, **kwargs)
return net
except ValueError as e:
upstream_supported = str(e)
name = name.lower()
if name not in models:
raise ValueError('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(models.keys()))))
net = models[name](**kwargs)
return net
| true | true |
79007a6d9fdbe416ec0c8aa48e4291efd2ef1356 | 17,973 | py | Python | fairseq/tasks/translation.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 33 | 2021-01-06T18:03:55.000Z | 2022-03-28T12:07:44.000Z | fairseq/tasks/translation.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 8 | 2021-06-11T03:11:37.000Z | 2022-03-08T19:15:42.000Z | fairseq/tasks/translation.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 14 | 2021-05-17T06:55:01.000Z | 2022-03-28T12:07:42.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
    data_path,
    split,
    src,
    src_dict,
    tgt,
    tgt_dict,
    combine,
    dataset_impl,
    upsample_primary,
    left_pad_source,
    left_pad_target,
    max_source_positions,
    max_target_positions,
    prepend_bos=False,
    load_alignments=False,
    truncate_source=False,
    append_source_id=False,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
):
    """Load a parallel (source/target) split as a :class:`LanguagePairDataset`.

    Looks under ``data_path`` for binarized files named
    ``<split>.<src>-<tgt>.<lang>`` (the reversed ``<tgt>-<src>`` naming is
    also accepted).  When ``combine`` is True, numbered shards (``train``,
    ``train1``, ...) are concatenated, with the primary shard repeated
    ``upsample_primary`` times.

    Optional transforms: truncate source sentences to fit
    ``max_source_positions`` (``truncate_source``), prepend BOS
    (``prepend_bos``), append a ``[lang]`` language-id token
    (``append_source_id``), and load word alignments (``load_alignments``).
    """
    def split_exists(split, src, tgt, lang, data_path):
        # True iff the binarized file for this split/direction/language exists.
        filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
    src_datasets = []
    tgt_datasets = []
    # k enumerates shard suffixes: "", "1", "2", ... until a shard is missing.
    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else "")
        # infer langcode
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
        else:
            if k > 0:
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {} ({})".format(split, data_path)
                )
        src_dataset = data_utils.load_indexed_dataset(
            prefix + src, src_dict, dataset_impl
        )
        if truncate_source:
            # Strip EOS, truncate to max_source_positions - 1, then re-append
            # EOS so every example still ends with the end-of-sentence symbol.
            src_dataset = AppendTokenDataset(
                TruncateDataset(
                    StripTokenDataset(src_dataset, src_dict.eos()),
                    max_source_positions - 1,
                ),
                src_dict.eos(),
            )
        src_datasets.append(src_dataset)
        tgt_dataset = data_utils.load_indexed_dataset(
            prefix + tgt, tgt_dict, dataset_impl
        )
        if tgt_dataset is not None:
            tgt_datasets.append(tgt_dataset)
        logger.info(
            "{} {} {}-{} {} examples".format(
                data_path, split_k, src, tgt, len(src_datasets[-1])
            )
        )
        if not combine:
            break
    # Either every shard has a target side, or none does (monolingual source).
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        # Multiple shards: concatenate, upsampling only the primary shard.
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None
    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if tgt_dataset is not None:
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
    eos = None
    if append_source_id:
        # Append a language-id token (e.g. "[en]") to each sentence; the
        # target-side token also becomes the EOS used during generation.
        src_dataset = AppendTokenDataset(
            src_dataset, src_dict.index("[{}]".format(src))
        )
        if tgt_dataset is not None:
            tgt_dataset = AppendTokenDataset(
                tgt_dataset, tgt_dict.index("[{}]".format(tgt))
            )
        eos = tgt_dict.index("[{}]".format(tgt))
    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(
                align_path, None, dataset_impl
            )
    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return LanguagePairDataset(
        src_dataset,
        src_dataset.sizes,
        src_dict,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        align_dataset=align_dataset,
        eos=eos,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
    )
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
    """
    Translate from one (source) language to another (target) language.
    Args:
        src_dict (~fairseq.data.Dictionary): dictionary for the source language
        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
    .. note::
        The translation task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.
    The translation task provides the following additional command-line
    arguments:
    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', help='colon separated path to data directories list, \
                            will be iterated upon during epochs in round-robin manner; \
                            however, valid and test data are always in the first directory to \
                            avoid the need for repeating them in all directories')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language')
        parser.add_argument('--load-alignments', action='store_true',
                            help='load the binarized alignments')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        parser.add_argument('--truncate-source', action='store_true', default=False,
                            help='truncate source to max-source-positions')
        parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
                            help='if >0, then bucket source and target lengths into N '
                                 'buckets and pad accordingly; this is useful on TPUs '
                                 'to minimize the number of compilations')
        # options for reporting BLEU during validation
        parser.add_argument('--eval-bleu', action='store_true',
                            help='evaluation with BLEU scores')
        parser.add_argument('--eval-bleu-detok', type=str, default="space",
                            help='detokenize before computing BLEU (e.g., "moses"); '
                                 'required if using --eval-bleu; use "space" to '
                                 'disable detokenization; see fairseq.data.encoders '
                                 'for other options')
        parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
                            help='args for building the tokenizer, if needed')
        parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
                            help='compute tokenized BLEU instead of sacrebleu')
        parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
                            help='remove BPE before computing BLEU')
        parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLUE scoring, '
                                 'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
        parser.add_argument('--eval-bleu-print-samples', action='store_true',
                            help='print sample generations during validation')
        # fmt: on
    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).
        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        args.left_pad_source = utils.eval_bool(args.left_pad_source)
        args.left_pad_target = utils.eval_bool(args.left_pad_target)
        paths = utils.split_paths(args.data)
        assert len(paths) > 0
        # find language pair automatically
        if args.source_lang is None or args.target_lang is None:
            args.source_lang, args.target_lang = data_utils.infer_language_pair(
                paths[0]
            )
        if args.source_lang is None or args.target_lang is None:
            raise Exception(
                "Could not infer language pair, please provide it explicitly"
            )
        # load dictionaries
        src_dict = cls.load_dictionary(
            os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
        )
        tgt_dict = cls.load_dictionary(
            os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
        )
        # source and target dictionaries must agree on the special symbols
        assert src_dict.pad() == tgt_dict.pad()
        assert src_dict.eos() == tgt_dict.eos()
        assert src_dict.unk() == tgt_dict.unk()
        logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
        logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
        return cls(args, src_dict, tgt_dict)
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.
        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        if split != getattr(self.args, "train_subset", None):
            # if not training data set, use the first shard for valid and test
            paths = paths[:1]
        # round-robin over data shards across epochs
        data_path = paths[(epoch - 1) % len(paths)]
        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang
        self.datasets[split] = load_langpair_dataset(
            data_path,
            split,
            src,
            self.src_dict,
            tgt,
            self.tgt_dict,
            combine=combine,
            dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
            load_alignments=self.args.load_alignments,
            truncate_source=self.args.truncate_source,
            num_buckets=self.args.num_batch_buckets,
            shuffle=(split != "test"),
            pad_to_multiple=self.args.required_seq_len_multiple,
        )
    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Wrap raw source tokens/lengths in a LanguagePairDataset for inference."""
        return LanguagePairDataset(
            src_tokens,
            src_lengths,
            self.source_dictionary,
            tgt_dict=self.target_dictionary,
            constraints=constraints,
        )
    def build_model(self, args):
        """Build the model; when --eval-bleu is set, also build the detokenizer
        and the sequence generator used for validation-time BLEU scoring."""
        model = super().build_model(args)
        if getattr(args, "eval_bleu", False):
            assert getattr(args, "eval_bleu_detok", None) is not None, (
                "--eval-bleu-detok is required if using --eval-bleu; "
                "try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
                "to disable detokenization, e.g., when using sentencepiece)"
            )
            detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
            self.tokenizer = encoders.build_tokenizer(
                Namespace(
                    tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
                )
            )
            gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
            self.sequence_generator = self.build_generator(
                [model], Namespace(**gen_args)
            )
        return model
    def valid_step(self, sample, model, criterion):
        """Run a validation step; when --eval-bleu is set, also generate
        hypotheses and stash the per-order BLEU statistics in logging_output."""
        loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
        if self.args.eval_bleu:
            bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
            logging_output["_bleu_sys_len"] = bleu.sys_len
            logging_output["_bleu_ref_len"] = bleu.ref_len
            # we split counts into separate entries so that they can be
            # summed efficiently across workers using fast-stat-sync
            assert len(bleu.counts) == EVAL_BLEU_ORDER
            for i in range(EVAL_BLEU_ORDER):
                logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
                logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
        return loss, sample_size, logging_output
    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate logging outputs across workers and derive corpus BLEU."""
        super().reduce_metrics(logging_outputs, criterion)
        if self.args.eval_bleu:
            def sum_logs(key):
                # sum a scalar statistic over all workers' logging outputs
                return sum(log.get(key, 0) for log in logging_outputs)
            counts, totals = [], []
            for i in range(EVAL_BLEU_ORDER):
                counts.append(sum_logs("_bleu_counts_" + str(i)))
                totals.append(sum_logs("_bleu_totals_" + str(i)))
            if max(totals) > 0:
                # log counts as numpy arrays -- log_scalar will sum them correctly
                metrics.log_scalar("_bleu_counts", np.array(counts))
                metrics.log_scalar("_bleu_totals", np.array(totals))
                metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
                metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
                def compute_bleu(meters):
                    import inspect
                    import sacrebleu
                    # older sacrebleu versions use 'smooth' instead of 'smooth_method'
                    fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
                    if "smooth_method" in fn_sig:
                        smooth = {"smooth_method": "exp"}
                    else:
                        smooth = {"smooth": "exp"}
                    bleu = sacrebleu.compute_bleu(
                        correct=meters["_bleu_counts"].sum,
                        total=meters["_bleu_totals"].sum,
                        sys_len=meters["_bleu_sys_len"].sum,
                        ref_len=meters["_bleu_ref_len"].sum,
                        **smooth
                    )
                    return round(bleu.score, 2)
                metrics.log_derived("bleu", compute_bleu)
    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        return (self.args.max_source_positions, self.args.max_target_positions)
    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary`."""
        return self.src_dict
    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary`."""
        return self.tgt_dict
    def _inference_with_bleu(self, generator, sample, model):
        """Generate hypotheses for ``sample`` and score them against the
        references with sacrebleu, returning the sacrebleu BLEU object."""
        import sacrebleu
        def decode(toks, escape_unk=False):
            s = self.tgt_dict.string(
                toks.int().cpu(),
                self.args.eval_bleu_remove_bpe,
                # The default unknown string in fairseq is `<unk>`, but
                # this is tokenized by sacrebleu as `< unk >`, inflating
                # BLEU scores. Instead, we use a somewhat more verbose
                # alternative that is unlikely to appear in the real
                # reference, but doesn't get split into multiple tokens.
                unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
            )
            if self.tokenizer:
                s = self.tokenizer.decode(s)
            return s
        gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
        hyps, refs = [], []
        for i in range(len(gen_out)):
            hyps.append(decode(gen_out[i][0]["tokens"]))
            refs.append(
                decode(
                    utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
                    escape_unk=True, # don't count <unk> as matches to the hypo
                )
            )
        if self.args.eval_bleu_print_samples:
            logger.info("example hypothesis: " + hyps[0])
            logger.info("example reference: " + refs[0])
        if self.args.eval_tokenized_bleu:
            return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
        else:
            return sacrebleu.corpus_bleu(hyps, [refs])
| 40.028953 | 95 | 0.587492 |
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner; \
however, valid and test data are always in the first directory to \
avoid the need for repeating them in all directories')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
help='generation args for BLUE scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(
paths[0]
)
if args.source_lang is None or args.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
if split != getattr(self.args, "train_subset", None):
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.args.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.args.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
| true | true |
79007ab135d543a36f1977f20c5a713eb3864f01 | 12,134 | py | Python | video_pose_ed.py | hiepnth/people-counting-pose | 8cdaab5281847c296b305643842053d496e2e4e8 | [
"Apache-2.0"
] | 161 | 2018-02-22T15:15:47.000Z | 2022-02-10T16:40:06.000Z | video_pose_ed.py | hiepnth/people-counting-pose | 8cdaab5281847c296b305643842053d496e2e4e8 | [
"Apache-2.0"
] | 15 | 2018-03-01T23:18:00.000Z | 2021-05-15T06:23:15.000Z | video_pose_ed.py | hiepnth/people-counting-pose | 8cdaab5281847c296b305643842053d496e2e4e8 | [
"Apache-2.0"
] | 41 | 2018-03-01T13:03:54.000Z | 2022-02-17T14:32:22.000Z | import sys
import os
import math
import imageio
from moviepy.editor import *
import time
def read_video(video_name):
    """Open the clip stored as testset/<video_name> and return it."""
    # All input videos live under the testset/ directory.
    input_path = 'testset/' + video_name
    return VideoFileClip(input_path)
def video2frame(video_name):
    """Dump every frame of testset/<video_name> as a zero-padded JPEG."""
    video = read_video(video_name)
    # Total frames = duration in seconds * frames per second.
    frame_count = int(video.duration * video.fps)
    # Digits needed to zero-pad frame indices (e.g. 720 frames -> 3).
    digits = math.ceil(math.log(frame_count, 10))
    out_dir = 'testset/' + video_name
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for frame_i in range(0, frame_count):
        out_path = out_dir + '/frame_' + str(frame_i).zfill(digits) + '.jpg'
        video.save_frame(out_path, frame_i / video.fps)
def video2poseframe(video_name):
    """Run the multi-person pose detector on every frame of a video and save
    each frame with the detected joints drawn as colored dots.

    Output images are written to testset/<video_name>/frame_pose_<idx>.jpg.
    A detection is counted as a real person only if it has more than 5 of
    the 17 body points (otherwise it is discarded as noise).

    Fix: ``Image.save`` was called with format "JPG", which is not a
    registered Pillow format name and raises ``KeyError: 'JPG'``; the
    correct name is "JPEG".
    """
    import numpy as np
    sys.path.append(os.path.dirname(__file__) + "/../")
    from scipy.misc import imread, imsave
    from config import load_config
    from dataset.factory import create as create_dataset
    from nnet import predict
    from util import visualize
    from dataset.pose_dataset import data_to_input
    from multiperson.detections import extract_detections
    from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
    from multiperson.visualize import PersonDraw, visualize_detections
    import matplotlib.pyplot as plt
    from PIL import Image, ImageDraw
    import random
    cfg = load_config("demo/pose_cfg_multi.yaml")
    dataset = create_dataset(cfg)
    sm = SpatialModel(cfg)
    sm.load()
    # Load and setup CNN part detector
    sess, inputs, outputs = predict.setup_pose_prediction(cfg)
    video = read_video(video_name)
    video_frame_number = int(video.duration * video.fps)  # seconds * fps
    # Digits needed to zero-pad frame indices (e.g. 720 frames -> 3).
    video_frame_ciphers = math.ceil(math.log(video_frame_number, 10))
    if not os.path.exists('testset/' + video_name):
        os.makedirs('testset/' + video_name)
    point_num = 17  # the detector predicts 17 body points per person
    point_r = 5     # radius (px) of the drawn point markers
    for i in range(0, video_frame_number):
        image = video.get_frame(i / video.fps)
        image_batch = data_to_input(image)
        # Compute prediction with the CNN
        outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
        scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats)
        detections = extract_detections(cfg, scmap, locref, pairwise_diff)
        unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
        person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
        print('person_conf_multi: ')
        print(type(person_conf_multi))
        print(person_conf_multi)
        # Draw the detected points on top of the frame.
        image_img = Image.fromarray(image)
        draw = ImageDraw.Draw(image_img)
        print('person_conf_multi.size: ')
        print(person_conf_multi.size)
        # person_conf_multi is (people, point_num, 2), so size / (17 * 2)
        # gives the number of detected candidates.
        people_num = int(person_conf_multi.size // (point_num * 2))
        print('people_num: ')
        print(people_num)
        people_real_num = 0
        for people_i in range(0, people_num):
            # Random color per person so individuals are distinguishable.
            point_color = (
                random.randrange(0, 256),
                random.randrange(0, 256),
                random.randrange(0, 256),
                255,
            )
            # Count valid points; (0, 0) coordinates mean "not detected".
            point_count = 0
            for point_i in range(0, point_num):
                if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0:
                    point_count = point_count + 1
            # More than 5 valid points => treat this candidate as a real person.
            if point_count > 5:
                people_real_num = people_real_num + 1
                for point_i in range(0, point_num):
                    x = person_conf_multi[people_i][point_i][0]
                    y = person_conf_multi[people_i][point_i][1]
                    draw.ellipse((x - point_r, y - point_r, x + point_r, y + point_r), fill=point_color)
        print('people_real_num: ')
        print(people_real_num)
        video_name_result = 'testset/' + video_name + '/frame_pose_' + str(i).zfill(video_frame_ciphers) + '.jpg'
        # Pillow's registered format name is "JPEG"; "JPG" raises KeyError.
        image_img.save(video_name_result, "JPEG")
def video2posevideo(video_name):
    """Annotate every frame of ``testset/<video_name>`` with multi-person pose
    estimation and write the result to ``testset/<video_name>_pose.mp4``.

    For each frame, the CNN part detector proposes joint candidates, the
    spatial model groups them into persons (multicut), and each person's 17
    keypoints are drawn in a random per-person colour. A person is counted
    once it has at least ``point_min`` detected joints (point criterion) or
    at least ``part_min`` drawn limbs (part criterion); both counts plus
    frame progress and elapsed time are rendered onto the frame.

    Parameters
    ----------
    video_name : str
        File name of the input clip inside the ``testset/`` directory.
    """
    # time.clock() was deprecated since Python 3.3 and removed in 3.8;
    # perf_counter() is the documented replacement for elapsed-time measurement.
    time_start = time.perf_counter()
    import numpy as np
    sys.path.append(os.path.dirname(__file__) + "/../")
    # Deferred project imports: they pull in TensorFlow and the pose models,
    # which is only worth doing when this function is actually called.
    # NOTE(review): the original also imported scipy.misc.imread/imsave,
    # util.visualize and matplotlib.pyplot but never used them; scipy.misc's
    # image helpers were removed in SciPy >= 1.2, so those dead imports are dropped.
    from config import load_config
    from dataset.factory import create as create_dataset
    from nnet import predict
    from dataset.pose_dataset import data_to_input
    from multiperson.detections import extract_detections
    from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
    from multiperson.visualize import PersonDraw, visualize_detections
    from PIL import Image, ImageDraw, ImageFont
    import random

    font = ImageFont.truetype("./font/NotoSans-Bold.ttf", 24)

    cfg = load_config("demo/pose_cfg_multi.yaml")
    dataset = create_dataset(cfg)
    sm = SpatialModel(cfg)
    sm.load()
    # Kept although unused below: constructing it preserves the original's side effects.
    draw_multi = PersonDraw()

    # Load and set up the CNN part detector.
    sess, inputs, outputs = predict.setup_pose_prediction(cfg)

    video = read_video(video_name)
    video_frame_number = int(video.duration * video.fps)  # duration [s] * frames per second
    # Digits needed to zero-pad frame ids, e.g. 720 -> 3 (computed but unused here).
    video_frame_ciphers = math.ceil(math.log(video_frame_number, 10))

    pose_frame_list = []  # annotated frames, assembled into the output clip at the end

    point_r = 3     # radius (px) of each drawn joint marker
    point_min = 10  # joints required to count somebody as a real person (point criterion)
    part_min = 3    # limbs (arm/leg segments) required for the part criterion
    point_num = 17  # keypoints per person (COCO layout)

    def ellipse_set(person_conf_multi, people_i, point_i):
        # Bounding box (x0, y0, x1, y1) of the marker circle around one joint.
        return (person_conf_multi[people_i][point_i][0] - point_r, person_conf_multi[people_i][point_i][1] - point_r, person_conf_multi[people_i][point_i][0] + point_r, person_conf_multi[people_i][point_i][1] + point_r)

    def line_set(person_conf_multi, people_i, point_i, point_j):
        # Endpoints (x0, y0, x1, y1) of the limb segment joining two joints.
        return (person_conf_multi[people_i][point_i][0], person_conf_multi[people_i][point_i][1], person_conf_multi[people_i][point_j][0], person_conf_multi[people_i][point_j][1])

    def draw_ellipse_and_line(draw, person_conf_multi, people_i, a, b, c, point_color):
        # Draw the three joints a-b-c and the two segments connecting them
        # (e.g. shoulder-elbow-wrist).
        draw.ellipse(ellipse_set(person_conf_multi, people_i, a), fill=point_color)
        draw.ellipse(ellipse_set(person_conf_multi, people_i, b), fill=point_color)
        draw.ellipse(ellipse_set(person_conf_multi, people_i, c), fill=point_color)
        draw.line(line_set(person_conf_multi, people_i, a, b), fill=point_color, width=5)
        draw.line(line_set(person_conf_multi, people_i, b, c), fill=point_color, width=5)

    for i in range(0, video_frame_number):
        image = video.get_frame(i / video.fps)

        # Run the detector on this frame and group detections into persons.
        image_batch = data_to_input(image)
        outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
        scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats)
        detections = extract_detections(cfg, scmap, locref, pairwise_diff)
        unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
        person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
        # person_conf_multi holds per-person (x, y) joint coordinates;
        # (0, 0) marks a joint that was not found.

        image_img = Image.fromarray(image)
        draw = ImageDraw.Draw(image_img)

        people_real_num = 0  # persons passing the point criterion
        people_part_num = 0  # persons passing the part criterion
        people_num = int(person_conf_multi.size / (point_num * 2))  # flat (x, y) array -> person count
        print('people_num: ' + str(people_num))

        for people_i in range(0, people_num):
            # One random colour per detected person.
            point_color_r = random.randrange(0, 256)
            point_color_g = random.randrange(0, 256)
            point_color_b = random.randrange(0, 256)
            point_color = (point_color_r, point_color_g, point_color_b, 255)
            point_list = []  # indices of joints actually detected for this person
            point_count = 0  # number of detected joints
            part_count = 0   # number of limbs drawn for this person
            people_x = []    # x coordinates, for the person's bounding rectangle
            people_y = []    # y coordinates, for the person's bounding rectangle
            for point_i in range(0, point_num):
                if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0:  # (0, 0) == joint missing
                    point_count = point_count + 1
                    point_list.append(point_i)
            # Draw each limb whose three joints were all detected
            # (indices: 5/7/9 left arm, 6/8/10 right arm, 11/13/15 left leg, 12/14/16 right leg).
            if (5 in point_list) and (7 in point_list) and (9 in point_list):  # left arm
                draw_ellipse_and_line(draw, person_conf_multi, people_i, 5, 7, 9, point_color)
                part_count = part_count + 1
            if (6 in point_list) and (8 in point_list) and (10 in point_list):  # right arm
                draw_ellipse_and_line(draw, person_conf_multi, people_i, 6, 8, 10, point_color)
                part_count = part_count + 1
            if (11 in point_list) and (13 in point_list) and (15 in point_list):  # left leg
                draw_ellipse_and_line(draw, person_conf_multi, people_i, 11, 13, 15, point_color)
                part_count = part_count + 1
            if (12 in point_list) and (14 in point_list) and (16 in point_list):  # right leg
                draw_ellipse_and_line(draw, person_conf_multi, people_i, 12, 14, 16, point_color)
                part_count = part_count + 1
            if point_count >= point_min:
                people_real_num = people_real_num + 1
                for point_i in range(0, point_num):
                    if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0:
                        draw.ellipse(ellipse_set(person_conf_multi, people_i, point_i), fill=point_color)
                        people_x.append(person_conf_multi[people_i][point_i][0])
                        people_y.append(person_conf_multi[people_i][point_i][1])
                # Rectangle enclosing all of this person's joints.
                # NOTE(review): fill=point_color paints a SOLID box over the person,
                # and PIL expects a colour (not 5) for `outline`; this was probably
                # meant to be outline=point_color with no fill. Kept as-is to
                # preserve the original rendering -- confirm intent before changing.
                draw.rectangle([min(people_x), min(people_y), max(people_x), max(people_y)], fill=point_color, outline=5)
            if part_count >= part_min:
                people_part_num = people_part_num + 1

        # On-frame HUD: person counts, frame progress and elapsed wall time.
        draw.text((0, 0), 'People(by point): ' + str(people_real_num) + ' (threshold = ' + str(point_min) + ')', (0,0,0), font=font)
        draw.text((0, 32), 'People(by line): ' + str(people_part_num) + ' (threshold = ' + str(part_min) + ')', (0,0,0), font=font)
        draw.text((0, 64), 'Frame: ' + str(i) + '/' + str(video_frame_number), (0,0,0), font=font)
        draw.text((0, 96), 'Total time required: ' + str(round(time.perf_counter() - time_start, 1)) + 'sec', (0,0,0))
        print('people_real_num: ' + str(people_real_num))
        print('people_part_num: ' + str(people_part_num))
        print('frame: ' + str(i))

        pose_frame_list.append(np.asarray(image_img))

    # Assemble and write the annotated clip next to the input.
    video_pose = ImageSequenceClip(pose_frame_list, fps=video.fps)
    video_pose.write_videofile("testset/" + video_name + "_pose.mp4", fps=video.fps)
    print("Time(s): " + str(time.perf_counter() - time_start))
| 41.554795 | 256 | 0.659469 | import sys
import os
import math
import imageio
from moviepy.editor import *
import time
def read_video(video_name):
    """Open the clip stored under ``testset/`` and return it as a VideoFileClip."""
    return VideoFileClip('testset/' + video_name)
def video2frame(video_name):
video = read_video(video_name)
video_frame_number = int(video.duration * video.fps) log(video_frame_number, 10)) s.path.exists('testset/' + video_name):
os.makedirs('testset/' + video_name)
for i in range(0, video_frame_number):
video.save_frame('testset/' + video_name + '/frame_' + str(i).zfill(video_frame_ciphers) + '.jpg', i/video.fps)
def video2poseframe(video_name):
import numpy as np
sys.path.append(os.path.dirname(__file__) + "/../")
from scipy.misc import imread, imsave
from config import load_config
from dataset.factory import create as create_dataset
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input
from multiperson.detections import extract_detections
from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
from multiperson.visualize import PersonDraw, visualize_detections
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
import random
cfg = load_config("demo/pose_cfg_multi.yaml")
dataset = create_dataset(cfg)
sm = SpatialModel(cfg)
sm.load()
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
0)) s.path.exists('testset/' + video_name):
os.makedirs('testset/' + video_name)
for i in range(0, video_frame_number):
image = video.get_frame(i/video.fps)
s)
detections = extract_detections(cfg, scmap, locref, pairwise_diff)
unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
print('person_conf_multi: ')
print(type(person_conf_multi))
print(person_conf_multi)
image_img = Image.fromarray(image)
draw = ImageDraw.Draw(image_img)
people_num = 0
point_num = 17
print('person_conf_multi.size: ')
print(person_conf_multi.size)
people_num = person_conf_multi.size / (point_num * 2)
people_num = int(people_num)
print('people_num: ')
print(people_num)
point_i = 0
point_r = 5
people_real_num = 0
for people_i in range(0, people_num):
point_color_r = random.randrange(0, 256)
point_color_g = random.randrange(0, 256)
point_color_b = random.randrange(0, 256)
point_color = (point_color_r, point_color_g, point_color_b, 255)
point_count = 0
for point_i in range(0, point_num):
if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0:
point_count = point_count + 1
if point_count > 5:
people_real_num = people_real_num + 1
for point_i in range(0, point_num):
draw.ellipse((person_conf_multi[people_i][point_i][0] - point_r, person_conf_multi[people_i][point_i][1] - point_r, person_conf_multi[people_i][point_i][0] + point_r, person_conf_multi[people_i][point_i][1] + point_r), fill=point_color)
print('people_real_num: ')
print(people_real_num)
video_name_result = 'testset/' + video_name + '/frame_pose_' + str(i).zfill(video_frame_ciphers) + '.jpg'
image_img.save(video_name_result, "JPG")
def video2posevideo(video_name):
time_start = time.clock()
import numpy as np
sys.path.append(os.path.dirname(__file__) + "/../")
from scipy.misc import imread, imsave
from config import load_config
from dataset.factory import create as create_dataset
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input
from multiperson.detections import extract_detections
from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
from multiperson.visualize import PersonDraw, visualize_detections
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
font = ImageFont.truetype("./font/NotoSans-Bold.ttf", 24)
import random
cfg = load_config("demo/pose_cfg_multi.yaml")
dataset = create_dataset(cfg)
sm = SpatialModel(cfg)
sm.load()
draw_multi = PersonDraw()
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
0)) me_list = []
point_r = 3
point_min = 10
part_min = 3
point_num = 17
def ellipse_set(person_conf_multi, people_i, point_i):
return (person_conf_multi[people_i][point_i][0] - point_r, person_conf_multi[people_i][point_i][1] - point_r, person_conf_multi[people_i][point_i][0] + point_r, person_conf_multi[people_i][point_i][1] + point_r)
def line_set(person_conf_multi, people_i, point_i, point_j):
return (person_conf_multi[people_i][point_i][0], person_conf_multi[people_i][point_i][1], person_conf_multi[people_i][point_j][0], person_conf_multi[people_i][point_j][1])
def draw_ellipse_and_line(draw, person_conf_multi, people_i, a, b, c, point_color):
draw.ellipse(ellipse_set(person_conf_multi, people_i, a), fill=point_color)
draw.ellipse(ellipse_set(person_conf_multi, people_i, b), fill=point_color)
draw.ellipse(ellipse_set(person_conf_multi, people_i, c), fill=point_color)
draw.line(line_set(person_conf_multi, people_i, a, b), fill=point_color, width=5)
draw.line(line_set(person_conf_multi, people_i, b, c), fill=point_color, width=5)
for i in range(0, video_frame_number):
image = video.get_frame(i/video.fps)
s)
detections = extract_detections(cfg, scmap, locref, pairwise_diff)
unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
image_img = Image.fromarray(image)
draw = ImageDraw.Draw(image_img)
people_num = 0
people_real_num = 0
people_part_num = 0
people_num = person_conf_multi.size / (point_num * 2)
people_num = int(people_num)
print('people_num: ' + str(people_num))
for people_i in range(0, people_num):
point_color_r = random.randrange(0, 256)
point_color_g = random.randrange(0, 256)
point_color_b = random.randrange(0, 256)
point_color = (point_color_r, point_color_g, point_color_b, 255)
point_list = []
point_count = 0
point_i = 0
part_count = 0
people_x = []
people_y = []
for point_i in range(0, point_num):
if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0:
point_count = point_count + 1
point_list.append(point_i)
if (5 in point_list) and (7 in point_list) and (9 in point_list):
draw_ellipse_and_line(draw, person_conf_multi, people_i, 5, 7, 9, point_color)
part_count = part_count + 1
if (6 in point_list) and (8 in point_list) and (10 in point_list):
draw_ellipse_and_line(draw, person_conf_multi, people_i, 6, 8, 10, point_color)
part_count = part_count + 1
if (11 in point_list) and (13 in point_list) and (15 in point_list):
draw_ellipse_and_line(draw, person_conf_multi, people_i, 11, 13, 15, point_color)
part_count = part_count + 1
if (12 in point_list) and (14 in point_list) and (16 in point_list):
draw_ellipse_and_line(draw, person_conf_multi, people_i, 12, 14, 16, point_color)
part_count = part_count + 1
if point_count >= point_min:
people_real_num = people_real_num + 1
for point_i in range(0, point_num):
if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0:
draw.ellipse(ellipse_set(person_conf_multi, people_i, point_i), fill=point_color)
people_x.append(person_conf_multi[people_i][point_i][0])
people_y.append(person_conf_multi[people_i][point_i][1])
draw.rectangle([min(people_x), min(people_y), max(people_x), max(people_y)], fill=point_color, outline=5)
if part_count >= part_min:
people_part_num = people_part_num + 1
draw.text((0, 0), 'People(by point): ' + str(people_real_num) + ' (threshold = ' + str(point_min) + ')', (0,0,0), font=font)
draw.text((0, 32), 'People(by line): ' + str(people_part_num) + ' (threshold = ' + str(part_min) + ')', (0,0,0), font=font)
draw.text((0, 64), 'Frame: ' + str(i) + '/' + str(video_frame_number), (0,0,0), font=font)
draw.text((0, 96), 'Total time required: ' + str(round(time.clock() - time_start, 1)) + 'sec', (0,0,0))
print('people_real_num: ' + str(people_real_num))
print('people_part_num: ' + str(people_part_num))
print('frame: ' + str(i))
image_img_numpy = np.asarray(image_img)
pose_frame_list.append(image_img_numpy)
video_pose = ImageSequenceClip(pose_frame_list, fps=video.fps)
video_pose.write_videofile("testset/" + video_name + "_pose.mp4", fps=video.fps)
print("Time(s): " + str(time.clock() - time_start))
| true | true |
79007ac162f88b3758837ebcbe8797a0418c6cbd | 13,364 | py | Python | pipuchun/jsonuz.py | SarvarRaxmonov/december-2 | 2242504ebc3d80ef3c3574f360014854ae6b4191 | [
"BSD-2-Clause"
] | null | null | null | pipuchun/jsonuz.py | SarvarRaxmonov/december-2 | 2242504ebc3d80ef3c3574f360014854ae6b4191 | [
"BSD-2-Clause"
] | null | null | null | pipuchun/jsonuz.py | SarvarRaxmonov/december-2 | 2242504ebc3d80ef3c3574f360014854ae6b4191 | [
"BSD-2-Clause"
] | null | null | null |
from attr import dataclass
# Reciter channels backing the id columns ("teng" = "corresponds to"):
# s4 -> https://t.me/shuraim1/https:/   (link looks truncated in the original)
# s5 -> https://t.me/alquran30juzsaadalghamidi/5
# s6 -> https://t.me/bandar_abdulaziz_balilah/5
# s7 -> https://t.me/Idriss_Akbar/388
# s8 -> https://t.me/yasseraldosari_mp3/2
# Per-sura audio post ids: keys '0'-'114' map a sura index to the Telegram
# message id of that sura in eight reciter channels ('s1'..'s8'); entry '0'
# mirrors '1'. Every column grows linearly with the sura index, with a few
# gaps in the source channels: s1 skips two ids after sura 15, s6 skips one
# after sura 66, and s7 skips one after sura 12. (The original source noted
# that entries from '61' onward appear on a second inline-keyboard page.)
def _sura_ids(k):
    # Message ids (as strings) for sura *k* (1-114) in channels s1..s8.
    return {
        's1': str(k + (41 if k <= 15 else 43)),
        's2': str(k + 256),
        's3': str(k + 17),
        's4': str(k + 2),
        's5': str(k + 4),
        's6': str(k + (4 if k <= 66 else 5)),
        's7': str(k + (387 if k <= 12 else 388)),
        's8': str(k + 1),
    }

sura = {str(k): _sura_ids(k if k else 1) for k in range(115)}
# NOTE(review): the nested dict repeats the key '55' four times; Python keeps
# only the LAST value ('555'), so the first three entries are dead data.
# Preserved verbatim pending confirmation of the intended keys.
bbc = {
    '22': '11',
    'n': {'55': '56', '55': '58', '55': '59', '55': '555'},
}
# Message-id ranges keyed by 'hm<N>'. 'rep' and 'rep2' look like the start and
# end ids of a span; consecutive spans share a boundary (e.g. hm6 ends and hm7
# starts at 317), so 'rep2' is presumably exclusive -- TODO confirm with callers.
hmuchun = {
    'hm5': {'rep': 257, 'rep2': 287},
    'hm6': {'rep': 288, 'rep2': 317},
    'hm7': {'rep': 317, 'rep2': 347},
    'hm8': {'rep': 347, 'rep2': 371},
    'hm9': {'rep': 18, 'rep2': 48},
    'hm10': {'rep': 48, 'rep2': 78},
    'hm11': {'rep': 78, 'rep2': 108},
    'hm12': {'rep': 108, 'rep2': 137},
}
| 74.244444 | 112 | 0.348623 |
from attr import dataclass
sura = {
'0': {'s1':'42', 's2':'257', 's3':'18', 's4':'3', 's5':'5', 's6':'5', 's7':'388', 's8':'2',},
'1': {'s1':'42', 's2':'257', 's3':'18', 's4':'3', 's5':'5', 's6':'5', 's7':'388', 's8':'2',},
'2': {'s1':'43', 's2':'258', 's3':'19', 's4':'4', 's5':'6', 's6':'6', 's7':'389', 's8':'3',},
'3': {'s1':'44', 's2':'259', 's3':'20', 's4':'5', 's5':'7', 's6':'7', 's7':'390', 's8':'4',},
'4': {'s1':'45', 's2':'260', 's3':'21', 's4':'6', 's5':'8', 's6':'8', 's7':'391', 's8':'5',},
'5': {'s1':'46', 's2':'261', 's3':'22', 's4':'7', 's5':'9', 's6':'9', 's7':'392', 's8':'6',},
'6': {'s1':'47', 's2':'262', 's3':'23', 's4':'8', 's5':'10', 's6':'10', 's7':'393', 's8':'7',},
'7': {'s1':'48', 's2':'263', 's3':'24', 's4':'9', 's5':'11', 's6':'11', 's7':'394', 's8':'8',},
'8': {'s1':'49', 's2':'264', 's3':'25', 's4':'10', 's5':'12', 's6':'12', 's7':'395', 's8':'9',},
'9': {'s1':'50', 's2':'265', 's3':'26', 's4':'11', 's5':'13', 's6':'13', 's7':'396', 's8':'10',},
'10': {'s1':'51', 's2':'266', 's3':'27', 's4':'12', 's5':'14', 's6':'14', 's7':'397', 's8':'11',},
'11': {'s1': '52', 's2':'267', 's3':'28', 's4':'13', 's5':'15', 's6':'15', 's7':'398', 's8':'12',},
'12': {'s1':'53', 's2':'268', 's3':'29', 's4':'14', 's5':'16', 's6':'16', 's7':'399', 's8':'13',},
'13': {'s1': '54', 's2':'269', 's3':'30', 's4':'15', 's5':'17', 's6':'17', 's7':'401', 's8':'14',},
'14': {'s1':'55', 's2':'270', 's3':'31', 's4':'16', 's5':'18', 's6':'18', 's7':'402', 's8':'15',},
'15': {'s1':'56', 's2':'271', 's3':'32', 's4':'17', 's5':'19', 's6':'19', 's7':'403', 's8':'16',},
'16': {'s1':'59', 's2':'272', 's3':'33', 's4':'18', 's5':'20', 's6':'20', 's7':'404', 's8':'17',},
'17': {'s1':'60', 's2':'273', 's3':'34', 's4':'19', 's5':'21', 's6':'21', 's7':'405', 's8':'18',},
'18' : {'s1':'61', 's2':'274', 's3':'35', 's4':'20', 's5':'22', 's6':'22', 's7':'406', 's8':'19',},
'19': {'s1':'62', 's2':'275', 's3':'36', 's4':'21', 's5':'23', 's6':'23', 's7':'407', 's8':'20',},
'20': {'s1':'63', 's2':'276', 's3':'37', 's4':'22', 's5':'24', 's6':'24', 's7':'408', 's8':'21',},
'21': {'s1':'64', 's2':'277', 's3':'38', 's4':'23', 's5':'25', 's6':'25', 's7':'409', 's8':'22',},
'22': {'s1':'65', 's2':'278', 's3':'39', 's4':'24', 's5':'26', 's6':'26', 's7':'410', 's8':'23',},
'23': {'s1':'66', 's2':'279', 's3':'40', 's4':'25', 's5':'27', 's6':'27', 's7':'411', 's8':'24',},
'24': {'s1':'67', 's2':'280', 's3':'41', 's4':'26', 's5':'28', 's6':'28', 's7':'412', 's8':'25',},
'25': {'s1':'68', 's2':'281', 's3':'42', 's4':'27', 's5':'29', 's6':'29', 's7':'413', 's8':'26',},
'26': {'s1':'69', 's2':'282', 's3':'43', 's4':'28', 's5':'30', 's6':'30', 's7':'414', 's8':'27',},
'27': {'s1':'70', 's2':'283', 's3':'44', 's4':'29', 's5':'31', 's6':'31', 's7':'415', 's8':'28',},
'28': {'s1':'71', 's2':'284', 's3':'45', 's4':'30', 's5':'32', 's6':'32', 's7':'416', 's8':'29',},
'29': {'s1':'72', 's2':'285', 's3':'46', 's4':'31', 's5':'33', 's6':'33', 's7':'417', 's8':'30',},
'30': {'s1':'73', 's2':'286', 's3':'47', 's4':'32', 's5':'34', 's6':'34', 's7':'418', 's8':'31',},
'31': {'s1':'74', 's2':'287', 's3':'48', 's4':'33', 's5':'35', 's6':'35', 's7':'419', 's8':'32',},
'32': {'s1':'75', 's2':'288', 's3':'49', 's4':'34', 's5':'36', 's6':'36', 's7':'420', 's8':'33',},
'33': {'s1':'76', 's2':'289', 's3':'50', 's4':'35', 's5':'37', 's6':'37', 's7':'421', 's8':'34',},
'34': {'s1':'77', 's2':'290', 's3':'51', 's4':'36', 's5':'38', 's6':'38', 's7':'422', 's8':'35',},
'35': {'s1':'78', 's2':'291', 's3':'52', 's4':'37', 's5':'39', 's6':'39', 's7':'423', 's8':'36',},
'36': {'s1':'79', 's2':'292', 's3':'53', 's4':'38', 's5':'40', 's6':'40', 's7':'424', 's8':'37',},
'37': {'s1':'80', 's2':'293', 's3':'54', 's4':'39', 's5':'41', 's6':'41', 's7':'425', 's8':'38',},
'38': {'s1':'81', 's2':'294', 's3':'55', 's4':'40', 's5':'42', 's6':'42', 's7':'426', 's8':'39',},
'39': {'s1':'82', 's2':'295', 's3':'56', 's4':'41', 's5':'43', 's6':'43', 's7':'427', 's8':'40',},
'40': {'s1':'83', 's2':'296', 's3':'57', 's4':'42', 's5':'44', 's6':'44', 's7':'428', 's8':'41',},
'41': {'s1':'84', 's2':'297', 's3':'58', 's4':'43', 's5':'45', 's6':'45', 's7':'429', 's8':'42',},
'42': {'s1':'85', 's2':'298', 's3':'59', 's4':'44', 's5':'46', 's6':'46', 's7':'430', 's8':'43',},
'43': {'s1':'86', 's2':'299', 's3':'60', 's4':'45', 's5':'47', 's6':'47', 's7':'431', 's8':'44',},
'44': {'s1':'87', 's2':'300', 's3':'61', 's4':'46', 's5':'48', 's6':'48', 's7':'432', 's8':'45',},
'45': {'s1':'88', 's2':'301', 's3':'62', 's4':'47', 's5':'49', 's6':'49', 's7':'433', 's8':'46',},
'46': {'s1':'89', 's2':'302', 's3':'63', 's4':'48', 's5':'50', 's6':'50', 's7':'434', 's8':'47',},
'47': {'s1':'90', 's2':'303', 's3':'64', 's4':'49', 's5':'51', 's6':'51', 's7':'435', 's8':'48',},
'48': {'s1':'91', 's2':'304', 's3':'65', 's4':'50', 's5':'52', 's6':'52', 's7':'436', 's8':'49',},
'49': {'s1':'92', 's2':'305', 's3':'66', 's4':'51', 's5':'53', 's6':'53', 's7':'437', 's8':'50',},
'50': {'s1':'93', 's2':'306', 's3':'67', 's4':'52', 's5':'54', 's6':'54', 's7':'438', 's8':'51',},
'51': {'s1':'94', 's2':'307', 's3':'68', 's4':'53', 's5':'55', 's6':'55', 's7':'439', 's8':'52',},
'52': {'s1':'95', 's2':'308', 's3':'69', 's4':'54', 's5':'56', 's6':'56', 's7':'440', 's8':'53',},
'53': {'s1':'96', 's2':'309', 's3':'70', 's4':'55', 's5':'57', 's6':'57', 's7':'441', 's8':'54',},
'54': {'s1':'97', 's2':'310', 's3':'71', 's4':'56', 's5':'58', 's6':'58', 's7':'442', 's8':'55',},
'55': {'s1':'98', 's2':'311', 's3':'72', 's4':'57', 's5':'59', 's6':'59', 's7':'443', 's8':'56',},
'56': {'s1':'99', 's2':'312', 's3':'73', 's4':'58', 's5':'60', 's6':'60', 's7':'444', 's8':'57',},
'57': {'s1':'100', 's2':'313', 's3':'74', 's4':'59', 's5':'61', 's6':'61', 's7':'445', 's8':'58',},
'58': {'s1':'101', 's2':'314', 's3':'75', 's4':'60', 's5':'62', 's6':'62', 's7':'446', 's8':'59',},
'59': {'s1':'102', 's2':'315', 's3':'76', 's4':'61', 's5':'63', 's6':'63', 's7':'447', 's8':'60',},
'60': {'s1':'103', 's2':'316', 's3':'77', 's4':'62', 's5':'64', 's6':'64', 's7':'448', 's8':'61',},
'61': {'s1':'104', 's2':'317', 's3':'78', 's4':'63', 's5':'65', 's6':'65', 's7':'449', 's8':'62',},
'62': {'s1':'105', 's2':'318', 's3':'79', 's4':'64', 's5':'66', 's6':'66', 's7':'450', 's8':'63',},
'63': {'s1':'106', 's2':'319', 's3':'80', 's4':'65', 's5':'67', 's6':'67', 's7':'451', 's8':'64',},
'64': {'s1':'107', 's2':'320', 's3':'81', 's4':'66', 's5':'68', 's6':'68', 's7':'452', 's8':'65',},
'65': {'s1':'108', 's2':'321', 's3':'82', 's4':'67', 's5':'69', 's6':'69', 's7':'453', 's8':'66',},
'66': {'s1':'109', 's2':'322', 's3':'83', 's4':'68', 's5':'70', 's6':'70', 's7':'454', 's8':'67',},
'67': {'s1':'110', 's2':'323', 's3':'84', 's4':'69', 's5':'71', 's6':'72', 's7':'455', 's8':'68',},
'68': {'s1':'111', 's2':'324', 's3':'85', 's4':'70', 's5':'72', 's6':'73', 's7':'456', 's8':'69',},
'69': {'s1':'112', 's2':'325', 's3':'86', 's4':'71', 's5':'73', 's6':'74', 's7':'457', 's8':'70',},
'70': {'s1':'113', 's2':'326', 's3':'87', 's4':'72', 's5':'74', 's6':'75', 's7':'458', 's8':'71',},
'71': {'s1':'114', 's2':'327', 's3':'88', 's4':'73', 's5':'75', 's6':'76', 's7':'459', 's8':'72',},
'72': {'s1':'115', 's2':'328', 's3':'89', 's4':'74', 's5':'76', 's6':'77', 's7':'460', 's8':'73',},
'73': {'s1':'116', 's2':'329', 's3':'90', 's4':'75', 's5':'77', 's6':'78', 's7':'461', 's8':'74',},
'74': {'s1':'117', 's2':'330', 's3':'91', 's4':'76', 's5':'78', 's6':'79', 's7':'462', 's8':'75',},
'75': {'s1':'118', 's2':'331', 's3':'92', 's4':'77', 's5':'79', 's6':'80', 's7':'463', 's8':'76',},
'76': {'s1':'119', 's2':'332', 's3':'93', 's4':'78', 's5':'80', 's6':'81', 's7':'464', 's8':'77',},
'77': {'s1':'120', 's2':'333', 's3':'94', 's4':'79', 's5':'81', 's6':'82', 's7':'465', 's8':'78',},
'78': {'s1':'121', 's2':'334', 's3':'95', 's4':'80', 's5':'82', 's6':'83', 's7':'466', 's8':'79',},
'79': {'s1':'122', 's2':'335', 's3':'96', 's4':'81', 's5':'83', 's6':'84', 's7':'467', 's8':'80',},
'80': {'s1':'123', 's2':'336', 's3':'97', 's4':'82', 's5':'84', 's6':'85', 's7':'468', 's8':'81',},
'81': {'s1':'124', 's2':'337', 's3':'98', 's4':'83', 's5':'85', 's6':'86', 's7':'469', 's8':'82',},
'82': {'s1':'125', 's2':'338', 's3':'99', 's4':'84', 's5':'86', 's6':'87', 's7':'470', 's8':'83',},
'83': {'s1':'126', 's2':'339', 's3':'100', 's4':'85', 's5':'87', 's6':'88', 's7':'471', 's8':'84',},
'84': {'s1':'127', 's2':'340', 's3':'101', 's4':'86', 's5':'88', 's6':'89', 's7':'472', 's8':'85',},
'85': {'s1':'128', 's2':'341', 's3':'102', 's4':'87', 's5':'89', 's6':'90', 's7':'473', 's8':'86',},
'86': {'s1':'129', 's2':'342', 's3':'103', 's4':'88', 's5':'90', 's6':'91', 's7':'474', 's8':'87',},
'87': {'s1':'130', 's2':'343', 's3':'104', 's4':'89', 's5':'91', 's6':'92', 's7':'475', 's8':'88',},
'88': {'s1':'131', 's2':'344', 's3':'105', 's4':'90', 's5':'92', 's6':'93', 's7':'476', 's8':'89',},
'89': {'s1':'132', 's2':'345', 's3':'106', 's4':'91', 's5':'93', 's6':'94', 's7':'477', 's8':'90',},
'90': {'s1':'133', 's2':'346', 's3':'107', 's4':'92', 's5':'94', 's6':'95', 's7':'478', 's8':'91',},
'91': {'s1':'134', 's2':'347', 's3':'108', 's4':'93', 's5':'95', 's6':'96', 's7':'479', 's8':'92',},
'92': {'s1':'135', 's2':'348', 's3':'109', 's4':'94', 's5':'96', 's6':'97', 's7':'480', 's8':'93',},
'93': {'s1':'136', 's2':'349', 's3':'110', 's4':'95', 's5':'97', 's6':'98', 's7':'481', 's8':'94',},
'94': {'s1':'137', 's2':'350', 's3':'111', 's4':'96', 's5':'98', 's6':'99', 's7':'482', 's8':'95',},
'95': {'s1':'138', 's2':'351', 's3':'112', 's4':'97', 's5':'99', 's6':'100', 's7':'483', 's8':'96',},
'96': {'s1':'139', 's2':'352', 's3':'113', 's4':'98', 's5':'100', 's6':'101', 's7':'484', 's8':'97',},
'97': {'s1':'140', 's2':'353', 's3':'114', 's4':'99', 's5':'101', 's6':'102', 's7':'485', 's8':'98',},
'98': {'s1':'141', 's2':'354', 's3':'115', 's4':'100', 's5':'102', 's6':'103', 's7':'486', 's8':'99',},
'99': {'s1':'142', 's2':'355', 's3':'116', 's4':'101', 's5':'103', 's6':'104', 's7':'487', 's8':'100',},
'100': {'s1':'143', 's2':'356', 's3':'117', 's4':'102', 's5':'104', 's6':'105', 's7':'488', 's8':'101',},
'101': {'s1':'144', 's2':'357', 's3':'118', 's4':'103', 's5':'105', 's6':'106', 's7':'489', 's8':'102',},
'102': {'s1':'145', 's2':'358', 's3':'119', 's4':'104', 's5':'106', 's6':'107', 's7':'490', 's8':'103',},
'103': {'s1':'146', 's2':'359', 's3':'120', 's4':'105', 's5':'107', 's6':'108', 's7':'491', 's8':'104',},
'104': {'s1':'147', 's2':'360', 's3':'121', 's4':'106', 's5':'108', 's6':'109', 's7':'492', 's8':'105',},
'105': {'s1':'148', 's2':'361', 's3':'122', 's4':'107', 's5':'109', 's6':'110', 's7':'493', 's8':'106',},
'106': {'s1':'149', 's2':'362', 's3':'123', 's4':'108', 's5':'110', 's6':'111', 's7':'494', 's8':'107',},
'107': {'s1':'150', 's2':'363', 's3':'124', 's4':'109', 's5':'111', 's6':'112', 's7':'495', 's8':'108',},
'108': {'s1':'151', 's2':'364', 's3':'125', 's4':'110', 's5':'112', 's6':'113', 's7':'496', 's8':'109',},
'109': {'s1':'152', 's2':'365', 's3':'126', 's4':'111', 's5':'113', 's6':'114', 's7':'497', 's8':'110',},
'110': {'s1':'153', 's2':'366', 's3':'127', 's4':'112', 's5':'114', 's6':'115', 's7':'498', 's8':'111',},
'111': {'s1':'154', 's2':'367', 's3':'128', 's4':'113', 's5':'115', 's6':'116', 's7':'499', 's8':'112',},
'112': {'s1':'155', 's2':'368', 's3':'129', 's4':'114', 's5':'116', 's6':'117', 's7':'500', 's8':'113',},
'113': {'s1':'156', 's2':'369', 's3':'130', 's4':'115', 's5':'117', 's6':'118', 's7':'501', 's8':'114',},
'114': {'s1':'157', 's2':'370', 's3':'131', 's4':'116', 's5':'118', 's6':'119', 's7':'502', 's8':'115',}
}
bbc = {'22':'11', 'n':{
'55':'56',
'55':'58',
'55':'59',
'55':'555',
}}
hmuchun = {
'hm5':{
'rep':257,
'rep2':287,
},
'hm6':{
'rep':288,
'rep2':317,
},
'hm7':{
'rep':317,
'rep2':347,
},
'hm8':{
'rep':347,
'rep2':371,
},
'hm9':{
'rep':18,
'rep2':48,
},
'hm10':{
'rep':48,
'rep2':78,
},
'hm11':{
'rep':78,
'rep2':108,
},
'hm12':{
'rep':108,
'rep2':137,
},
}
| true | true |
79007be5a1da74db592812aa109c49a900c86fac | 5,830 | py | Python | arviz/plots/backends/bokeh/elpdplot.py | StanczakDominik/arviz | ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287 | [
"Apache-2.0"
] | 1 | 2020-08-09T00:16:00.000Z | 2020-08-09T00:16:00.000Z | arviz/plots/backends/bokeh/elpdplot.py | StanczakDominik/arviz | ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287 | [
"Apache-2.0"
] | null | null | null | arviz/plots/backends/bokeh/elpdplot.py | StanczakDominik/arviz | ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287 | [
"Apache-2.0"
] | null | null | null | """Bokeh ELPDPlot."""
import warnings
import bokeh.plotting as bkp
from bokeh.models.annotations import Title
from bokeh.models import ColumnDataSource
import bokeh.models.markers as mk
import numpy as np
from . import backend_kwarg_defaults
from .. import show_layout
from ...plot_utils import _scale_fig_size
from ....rcparams import rcParams, _validate_bokeh_marker
def plot_elpd(
ax,
models,
pointwise_data,
numvars,
figsize,
textsize,
plot_kwargs,
markersize,
xlabels,
coord_labels,
xdata,
threshold,
backend_kwargs,
show,
):
"""Bokeh elpd plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
if numvars == 2:
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 1, numvars - 1
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
backend_kwargs.setdefault("width", int(figsize[0] * dpi))
backend_kwargs.setdefault("height", int(figsize[1] * dpi))
ax = bkp.figure(**backend_kwargs)
ydata = pointwise_data[0] - pointwise_data[1]
_plot_atomic_elpd(
ax, xdata, ydata, *models, threshold, coord_labels, xlabels, True, True, plot_kwargs
)
show_layout(ax, show)
else:
max_plots = (
numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
)
vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
if vars_to_plot < numvars:
warnings.warn(
"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
"of resulting ELPD pairwise plots with these variables, generating only a "
"{side}x{side} grid".format(max_plots=max_plots, side=vars_to_plot),
UserWarning,
)
numvars = vars_to_plot
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 2, numvars - 2
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
ax = []
for row in range(numvars - 1):
ax_row = []
for col in range(numvars - 1):
if row == 0 and col == 0:
ax_first = bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
**backend_kwargs
)
ax_row.append(ax_first)
elif row < col:
ax_row.append(None)
else:
ax_row.append(
bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
x_range=ax_first.x_range,
y_range=ax_first.y_range,
**backend_kwargs
)
)
ax.append(ax_row)
ax = np.array(ax)
for i in range(0, numvars - 1):
var1 = pointwise_data[i]
for j in range(0, numvars - 1):
if j < i:
continue
var2 = pointwise_data[j + 1]
ydata = var1 - var2
_plot_atomic_elpd(
ax[j, i],
xdata,
ydata,
models[i],
models[j + 1],
threshold,
coord_labels,
xlabels,
j == numvars - 2,
i == 0,
plot_kwargs,
)
show_layout(ax, show)
return ax
def _plot_atomic_elpd(
ax_,
xdata,
ydata,
model1,
model2,
threshold,
coord_labels,
xlabels,
xlabels_shown,
ylabels_shown,
plot_kwargs,
):
marker = _validate_bokeh_marker(plot_kwargs.get("marker"))
marker_func = getattr(mk, marker)
sizes = np.ones(len(xdata)) * plot_kwargs.get("s")
glyph = marker_func(
x="xdata", y="ydata", size="sizes", line_color=plot_kwargs.get("color", "black")
)
source = ColumnDataSource(dict(xdata=xdata, ydata=ydata, sizes=sizes))
ax_.add_glyph(source, glyph)
if threshold is not None:
diff_abs = np.abs(ydata - ydata.mean())
bool_ary = diff_abs > threshold * ydata.std()
if coord_labels is None:
coord_labels = xdata.astype(str)
outliers = np.argwhere(bool_ary).squeeze()
for outlier in outliers:
label = coord_labels[outlier]
ax_.text(
x=np.asarray(outlier), y=np.asarray(ydata[outlier]), text=label, text_color="black",
)
if ylabels_shown:
ax_.yaxis.axis_label = "ELPD difference"
else:
ax_.yaxis.minor_tick_line_color = None
ax_.yaxis.major_label_text_font_size = "0pt"
if xlabels_shown:
if xlabels:
ax_.xaxis.ticker = np.arange(0, len(coord_labels))
ax_.xaxis.major_label_overrides = {
str(key): str(value)
for key, value in zip(np.arange(0, len(coord_labels)), list(coord_labels))
}
else:
ax_.xaxis.minor_tick_line_color = None
ax_.xaxis.major_label_text_font_size = "0pt"
title = Title()
title.text = "{} - {}".format(model1, model2)
ax_.title = title
| 31.857923 | 100 | 0.521269 | import warnings
import bokeh.plotting as bkp
from bokeh.models.annotations import Title
from bokeh.models import ColumnDataSource
import bokeh.models.markers as mk
import numpy as np
from . import backend_kwarg_defaults
from .. import show_layout
from ...plot_utils import _scale_fig_size
from ....rcparams import rcParams, _validate_bokeh_marker
def plot_elpd(
ax,
models,
pointwise_data,
numvars,
figsize,
textsize,
plot_kwargs,
markersize,
xlabels,
coord_labels,
xdata,
threshold,
backend_kwargs,
show,
):
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
if numvars == 2:
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 1, numvars - 1
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
backend_kwargs.setdefault("width", int(figsize[0] * dpi))
backend_kwargs.setdefault("height", int(figsize[1] * dpi))
ax = bkp.figure(**backend_kwargs)
ydata = pointwise_data[0] - pointwise_data[1]
_plot_atomic_elpd(
ax, xdata, ydata, *models, threshold, coord_labels, xlabels, True, True, plot_kwargs
)
show_layout(ax, show)
else:
max_plots = (
numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
)
vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
if vars_to_plot < numvars:
warnings.warn(
"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
"of resulting ELPD pairwise plots with these variables, generating only a "
"{side}x{side} grid".format(max_plots=max_plots, side=vars_to_plot),
UserWarning,
)
numvars = vars_to_plot
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 2, numvars - 2
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
ax = []
for row in range(numvars - 1):
ax_row = []
for col in range(numvars - 1):
if row == 0 and col == 0:
ax_first = bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
**backend_kwargs
)
ax_row.append(ax_first)
elif row < col:
ax_row.append(None)
else:
ax_row.append(
bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
x_range=ax_first.x_range,
y_range=ax_first.y_range,
**backend_kwargs
)
)
ax.append(ax_row)
ax = np.array(ax)
for i in range(0, numvars - 1):
var1 = pointwise_data[i]
for j in range(0, numvars - 1):
if j < i:
continue
var2 = pointwise_data[j + 1]
ydata = var1 - var2
_plot_atomic_elpd(
ax[j, i],
xdata,
ydata,
models[i],
models[j + 1],
threshold,
coord_labels,
xlabels,
j == numvars - 2,
i == 0,
plot_kwargs,
)
show_layout(ax, show)
return ax
def _plot_atomic_elpd(
ax_,
xdata,
ydata,
model1,
model2,
threshold,
coord_labels,
xlabels,
xlabels_shown,
ylabels_shown,
plot_kwargs,
):
marker = _validate_bokeh_marker(plot_kwargs.get("marker"))
marker_func = getattr(mk, marker)
sizes = np.ones(len(xdata)) * plot_kwargs.get("s")
glyph = marker_func(
x="xdata", y="ydata", size="sizes", line_color=plot_kwargs.get("color", "black")
)
source = ColumnDataSource(dict(xdata=xdata, ydata=ydata, sizes=sizes))
ax_.add_glyph(source, glyph)
if threshold is not None:
diff_abs = np.abs(ydata - ydata.mean())
bool_ary = diff_abs > threshold * ydata.std()
if coord_labels is None:
coord_labels = xdata.astype(str)
outliers = np.argwhere(bool_ary).squeeze()
for outlier in outliers:
label = coord_labels[outlier]
ax_.text(
x=np.asarray(outlier), y=np.asarray(ydata[outlier]), text=label, text_color="black",
)
if ylabels_shown:
ax_.yaxis.axis_label = "ELPD difference"
else:
ax_.yaxis.minor_tick_line_color = None
ax_.yaxis.major_label_text_font_size = "0pt"
if xlabels_shown:
if xlabels:
ax_.xaxis.ticker = np.arange(0, len(coord_labels))
ax_.xaxis.major_label_overrides = {
str(key): str(value)
for key, value in zip(np.arange(0, len(coord_labels)), list(coord_labels))
}
else:
ax_.xaxis.minor_tick_line_color = None
ax_.xaxis.major_label_text_font_size = "0pt"
title = Title()
title.text = "{} - {}".format(model1, model2)
ax_.title = title
| true | true |
79007c29559197cb19fd892b091706f5a462f53b | 1,108 | py | Python | models/__init__.py | tom-bird/binary-gen-models | a9311d27d74e25eb55d1b06295ac8a121b5c1d7b | [
"MIT"
] | null | null | null | models/__init__.py | tom-bird/binary-gen-models | a9311d27d74e25eb55d1b06295ac8a121b5c1d7b | [
"MIT"
] | null | null | null | models/__init__.py | tom-bird/binary-gen-models | a9311d27d74e25eb55d1b06295ac8a121b5c1d7b | [
"MIT"
] | 1 | 2021-12-23T07:19:01.000Z | 2021-12-23T07:19:01.000Z | from models.flowpp_cifar import CifarFlowPP
from models.rvae import RVAE
from models.modules import init_mode
model_registry = {
# MNIST
'rvae_mnist': lambda **kwargs: RVAE(z_size=16, h_size=40, kl_min=0.1,
x_channels=1, **kwargs),
# CIFAR
'tiny_rvae': lambda **kwargs: RVAE(z_size=2, h_size=2, kl_min=0.1, **kwargs),
'rvae': lambda **kwargs: RVAE(z_size=8, h_size=256, kl_min=0.1, **kwargs),
'tiny_flow_pp': lambda **kwargs: CifarFlowPP(hdim=4, blocks=1, dequant_blocks=1, mix_components=1,
attn_version=False, force_float32_cond=False, **kwargs),
'flow_pp': lambda **kwargs: CifarFlowPP(hdim=120, blocks=10, dequant_blocks=2, mix_components=8, attn_version=False,
force_float32_cond=False, dropout=0.2, **kwargs),
'flow_pp_wide': lambda **kwargs: CifarFlowPP(hdim=180, blocks=10, dequant_blocks=2, mix_components=8, attn_version=False,
force_float32_cond=False, dropout=0.2, **kwargs)
}
| 48.173913 | 125 | 0.611011 | from models.flowpp_cifar import CifarFlowPP
from models.rvae import RVAE
from models.modules import init_mode
model_registry = {
'rvae_mnist': lambda **kwargs: RVAE(z_size=16, h_size=40, kl_min=0.1,
x_channels=1, **kwargs),
'tiny_rvae': lambda **kwargs: RVAE(z_size=2, h_size=2, kl_min=0.1, **kwargs),
'rvae': lambda **kwargs: RVAE(z_size=8, h_size=256, kl_min=0.1, **kwargs),
'tiny_flow_pp': lambda **kwargs: CifarFlowPP(hdim=4, blocks=1, dequant_blocks=1, mix_components=1,
attn_version=False, force_float32_cond=False, **kwargs),
'flow_pp': lambda **kwargs: CifarFlowPP(hdim=120, blocks=10, dequant_blocks=2, mix_components=8, attn_version=False,
force_float32_cond=False, dropout=0.2, **kwargs),
'flow_pp_wide': lambda **kwargs: CifarFlowPP(hdim=180, blocks=10, dequant_blocks=2, mix_components=8, attn_version=False,
force_float32_cond=False, dropout=0.2, **kwargs)
}
| true | true |
79007c38395c17a6662c23f9e9a775f69e266f08 | 618 | py | Python | src/azure-cli/azure/cli/__init__.py | KTH/azure-cli | 58aa9e320ea7c5213b4517172eaf71b3f5230fd6 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/__init__.py | KTH/azure-cli | 58aa9e320ea7c5213b4517172eaf71b3f5230fd6 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/__init__.py | KTH/azure-cli | 58aa9e320ea7c5213b4517172eaf71b3f5230fd6 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
'''The Azure Command-line tool.
This tools provides a command-line interface to Azure's management and storage
APIs.
'''
import pkg_resources
pkg_resources.declare_namespace(__name__)
__author__ = "Microsoft Corporation <python@microsoft.com>"
__version__ = "2.0.17+dev"
| 41.2 | 94 | 0.548544 |
import pkg_resources
pkg_resources.declare_namespace(__name__)
__author__ = "Microsoft Corporation <python@microsoft.com>"
__version__ = "2.0.17+dev"
| true | true |
79007d05640bf10fdc335a8c6baaea6d43ec2d3c | 965 | py | Python | spinbot/policy/executor.py | rantav/spinnaker | 98fb0c77db8fc723fd705ae6b663a8cbbd348fdb | [
"Apache-2.0"
] | null | null | null | spinbot/policy/executor.py | rantav/spinnaker | 98fb0c77db8fc723fd705ae6b663a8cbbd348fdb | [
"Apache-2.0"
] | null | null | null | spinbot/policy/executor.py | rantav/spinnaker | 98fb0c77db8fc723fd705ae6b663a8cbbd348fdb | [
"Apache-2.0"
] | 1 | 2018-05-27T01:49:01.000Z | 2018-05-27T01:49:01.000Z | import policy
import traceback
import logging
import monitoring
import itertools
from .policy_registry import GetConfig
def ApplyPolicies(g):
config = GetConfig()
enabled = config.get('enabled', True)
if enabled is not None and not enabled:
return
monitoring_db = monitoring.GetDatabase('spinbot')
logging.info('Processing issues, repos')
for i in itertools.chain(*[g.issues(), g.repos()]):
for p in policy.Policies():
if p.applies(i):
err = None
try:
p.apply(g, i)
except Exception as _err:
logging.warn('Failure applying {} to {}: {}'.format(
p, i, traceback.format_exc()
))
err = _err
monitoring_db.write('issues_handled', { 'value': 1 }, tags={
'policy': p.id,
'error': err
})
| 29.242424 | 76 | 0.516062 | import policy
import traceback
import logging
import monitoring
import itertools
from .policy_registry import GetConfig
def ApplyPolicies(g):
config = GetConfig()
enabled = config.get('enabled', True)
if enabled is not None and not enabled:
return
monitoring_db = monitoring.GetDatabase('spinbot')
logging.info('Processing issues, repos')
for i in itertools.chain(*[g.issues(), g.repos()]):
for p in policy.Policies():
if p.applies(i):
err = None
try:
p.apply(g, i)
except Exception as _err:
logging.warn('Failure applying {} to {}: {}'.format(
p, i, traceback.format_exc()
))
err = _err
monitoring_db.write('issues_handled', { 'value': 1 }, tags={
'policy': p.id,
'error': err
})
| true | true |
79007d4daee3fde52f1097d538b2484e3f5cc41b | 257 | py | Python | nba_stats/manage.py | sparbz/nba-stats | c62437145551a90c6b4f4d2d8cfc26fb24002ab5 | [
"MIT"
] | 1 | 2021-12-13T14:55:42.000Z | 2021-12-13T14:55:42.000Z | nba_stats/manage.py | sparbz/nba-stats | c62437145551a90c6b4f4d2d8cfc26fb24002ab5 | [
"MIT"
] | null | null | null | nba_stats/manage.py | sparbz/nba-stats | c62437145551a90c6b4f4d2d8cfc26fb24002ab5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nba_stats.settings.base")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.363636 | 78 | 0.774319 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nba_stats.settings.base")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true | true |
79007d5b8ec7c37cab25ce2888d47369bb4eb40a | 10,403 | py | Python | tutorial/deprecated/tutorial_a2c_with_infinite_env/a2c.py | Purple-PI/rlstructures | 9b201b083715bbda2f3534b010c84e11dfc0a1c7 | [
"MIT"
] | 281 | 2021-01-13T14:20:23.000Z | 2022-03-23T08:46:56.000Z | tutorial/deprecated/tutorial_a2c_with_infinite_env/a2c.py | Purple-PI/rlstructures | 9b201b083715bbda2f3534b010c84e11dfc0a1c7 | [
"MIT"
] | 2 | 2021-01-22T23:28:34.000Z | 2021-04-29T22:05:42.000Z | tutorial/deprecated/tutorial_a2c_with_infinite_env/a2c.py | Purple-PI/rlstructures | 9b201b083715bbda2f3534b010c84e11dfc0a1c7 | [
"MIT"
] | 13 | 2021-01-15T14:53:32.000Z | 2022-03-22T11:12:54.000Z | #
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from rlstructures.logger import Logger, TFLogger
from rlstructures import DictTensor, TemporalDictTensor
from rlstructures import logging
from rlstructures.tools import weight_init
from rlstructures.batchers import EpisodeBatcher, Batcher
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
from tutorial.tutorial_reinforce.agent import *
class A2C:
def __init__(self, config, create_env, create_train_env, create_agent):
self.config = config
# Creation of the Logger (that saves in tensorboard and CSV)
self.logger = TFLogger(log_dir=self.config["logdir"], hps=self.config)
self._create_env = create_env
self._create_train_env = create_train_env
self._create_agent = create_agent
# Creation of one env instance to get the dimensionnality of observations and number of actions
env = self._create_env(
self.config["n_envs"], seed=0, env_name=self.config["env_name"]
)
self.n_actions = env.action_space.n
self.obs_dim = env.reset()[0]["frame"].size()[1]
del env
def run(self):
# Instantiate the learning model abd the baseline model
self.learning_model = AgentModel(self.obs_dim, self.n_actions, 32)
self.critic_model = BaselineModel(self.obs_dim, 32)
# We create a batcher dedicated to evaluation
model = copy.deepcopy(self.learning_model)
self.evaluation_batcher = EpisodeBatcher(
n_timesteps=self.config["max_episode_steps"],
n_slots=self.config["n_evaluation_episodes"],
create_agent=self._create_agent,
create_env=self._create_env,
env_args={
"n_envs": self.config["n_envs"],
"max_episode_steps": self.config["max_episode_steps"],
"env_name": self.config["env_name"],
},
agent_args={"n_actions": self.n_actions, "model": model},
n_threads=self.config["n_evaluation_threads"],
seeds=[
self.config["env_seed"] + k * 10
for k in range(self.config["n_evaluation_threads"])
],
)
# Creation of the batcher for sampling complete pieces of trajectories (i.e Batcher)
# The batcher will sample n_threads*n_envs trajectories at each call
# To have a fast batcher, we have to configure it with n_timesteps=self.config["max_episode_steps"]
model = copy.deepcopy(self.learning_model)
self.train_batcher = Batcher(
n_timesteps=self.config["a2c_timesteps"],
n_slots=self.config["n_envs"] * self.config["n_threads"],
create_agent=self._create_agent,
create_env=self._create_train_env,
env_args={
"n_envs": self.config["n_envs"],
"max_episode_steps": self.config["max_episode_steps"],
"env_name": self.config["env_name"],
},
agent_args={"n_actions": self.n_actions, "model": model},
n_threads=self.config["n_threads"],
seeds=[
self.config["env_seed"] + k * 10
for k in range(self.config["n_threads"])
],
)
# Creation of the optimizer
optimizer = torch.optim.Adam(
nn.Sequential(self.learning_model, self.critic_model).parameters(),
lr=self.config["lr"],
)
# Training Loop:
_start_time = time.time()
self.iteration = 0
# #We launch the evaluation batcher (in deterministic mode)
n_episodes = self.config["n_evaluation_episodes"]
agent_info = DictTensor(
{"stochastic": torch.tensor([False]).repeat(n_episodes)}
)
self.evaluation_batcher.execute(n_episodes=n_episodes, agent_info=agent_info)
self.evaluation_iteration = self.iteration
# Initialize the training batcher such that agents will start to acqire pieces of episodes
self.train_batcher.update(self.learning_model.state_dict())
n_episodes = self.config["n_envs"] * self.config["n_threads"]
agent_info = DictTensor({"stochastic": torch.tensor([True]).repeat(n_episodes)})
self.train_batcher.reset(agent_info=agent_info)
while time.time() - _start_time < self.config["time_limit"]:
# Call the batcher to get a sample of trajectories
# 2) We get the pieces of episodes. Since the env is an infinite env, we will always receive a new piece of episode
self.train_batcher.execute()
trajectories = self.train_batcher.get(blocking=True)
# 3) Now, we compute the loss
dt = self.get_loss(trajectories)
[self.logger.add_scalar(k, dt[k].item(), self.iteration) for k in dt.keys()]
# Computation of final loss
ld = self.config["critic_coef"] * dt["critic_loss"]
lr = self.config["a2c_coef"] * dt["a2c_loss"]
le = self.config["entropy_coef"] * dt["entropy_loss"]
floss = ld - le - lr
floss = floss / n_episodes * trajectories.n_elems()
optimizer.zero_grad()
floss.backward()
optimizer.step()
# Update the train batcher with the updated model
self.train_batcher.update(self.learning_model.state_dict())
self.iteration += 1
# We check the evaluation batcher
evaluation_trajectories = self.evaluation_batcher.get(blocking=False)
if not evaluation_trajectories is None: # trajectories are available
# Compute the cumulated reward
cumulated_reward = (
(
evaluation_trajectories["_reward"]
* evaluation_trajectories.mask()
)
.sum(1)
.mean()
)
self.logger.add_scalar(
"evaluation_reward",
cumulated_reward.item(),
self.evaluation_iteration,
)
print(
"At iteration %d, reward is %f"
% (self.evaluation_iteration, cumulated_reward.item())
)
# We reexecute the evaluation batcher (with same value of agent_info and same number of episodes)
self.evaluation_batcher.update(self.learning_model.state_dict())
self.evaluation_iteration = self.iteration
self.evaluation_batcher.reexecute()
self.train_batcher.close()
self.evaluation_batcher.get() # To wait for the last trajectories
self.evaluation_batcher.close()
self.logger.update_csv() # To save as a CSV file in logdir
self.logger.close()
    def get_loss(self, trajectories):
        """Compute the A2C losses over a batch of trajectory pieces.

        Args:
            trajectories: a TemporalDictTensor of B trajectory pieces with
                fields "frame", "action", "_frame", "_reward", "_done".

        Returns:
            DictTensor with scalar "critic_loss", "a2c_loss" and
            "entropy_loss" entries (all differentiable).
        """
        # '_reward' is the reward observed at t+1, i.e. obtained after the
        # action taken at time t ("reward" would be the reward at time t).
        reward = trajectories["_reward"]
        # mask[b, t] == 1 iff transition t belongs to trajectory b (0 = padding).
        mask = trajectories.mask()
        # Zero-out reward values that fall outside the actual trajectories.
        reward = reward * mask
        max_length = trajectories.lengths.max().item()
        # Recompute the action probabilities over the whole pieces so the
        # policy loss can be backpropagated through the model.
        action_probabilities = []
        for t in range(max_length):
            proba = self.learning_model(trajectories["frame"][:, t])
            action_probabilities.append(
                proba.unsqueeze(1)
            )  # unsqueeze introduces the temporal (2nd) dimension
        action_probabilities = torch.cat(
            action_probabilities, dim=1
        )  # B x T x n_actions
        # Critic value for t = 0..T.  The extra trailing column appended below
        # (a duplicate of the last computed value 'b') is only a placeholder;
        # it is overwritten with 'last_critic' a few lines further down.
        critic = []
        for t in range(max_length):
            b = self.critic_model(trajectories["frame"][:, t])
            critic.append(b.unsqueeze(1))
        critic = torch.cat(critic + [b.unsqueeze(1)], dim=1).squeeze(
            -1
        )  # B x (T+1)
        # Critic value of the last observation of each trajectory: either the
        # final observation of the piece or the terminal frame of the episode
        # ('_frame' holds the observation at t+1, indexed at lengths-1).
        idx = torch.arange(trajectories.n_elems())
        last_critic = self.critic_model(
            trajectories["_frame"][idx, trajectories.lengths - 1]
        ).squeeze(-1)
        critic[idx, trajectories.lengths] = last_critic
        # One-step TD target; the (1 - done) factor cuts bootstrapping at
        # episode boundaries.  The next-state value is detached so the critic
        # loss only trains critic[:, :-1].
        target = (
            reward
            + self.config["discount_factor"]
            * (1 - trajectories["_done"].float())
            * critic[:, 1:].detach()
        )
        td = critic[:, :-1] - target
        critic_loss = td ** 2
        # Average the squared TD error over the valid steps of each episode...
        critic_loss = (critic_loss * mask).sum(1) / mask.sum(1)
        # ...then over the batch.
        avg_critic_loss = critic_loss.mean()
        # Policy-gradient (A2C) term: -log pi(a|s) * advantage, where the
        # detached TD plays the role of the (negated) advantage.
        action_distribution = torch.distributions.Categorical(action_probabilities)
        log_proba = action_distribution.log_prob(trajectories["action"])
        a2c_loss = -log_proba * td.detach()
        a2c_loss = (a2c_loss * mask).sum(1) / mask.sum(1)
        avg_a2c_loss = a2c_loss.mean()
        # Entropy bonus, masked and averaged the same way as the other terms.
        entropy = action_distribution.entropy()
        entropy = (entropy * mask).sum(1) / mask.sum(1)
        avg_entropy = entropy.mean()
        return DictTensor(
            {
                "critic_loss": avg_critic_loss,
                "a2c_loss": avg_a2c_loss,
                "entropy_loss": avg_entropy,
            }
        )
| 42.461224 | 136 | 0.612323 |
from rlstructures.logger import Logger, TFLogger
from rlstructures import DictTensor, TemporalDictTensor
from rlstructures import logging
from rlstructures.tools import weight_init
from rlstructures.batchers import EpisodeBatcher, Batcher
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
from tutorial.tutorial_reinforce.agent import *
class A2C:
    """Advantage Actor-Critic (A2C) trainer built on rlstructures batchers.

    A train batcher collects fixed-size pieces of episodes with a stochastic
    policy while a separate evaluation batcher periodically measures the
    cumulated reward of the deterministic policy.  Metrics are written to a
    TensorBoard/CSV logger.
    """

    def __init__(self, config, create_env, create_train_env, create_agent):
        self.config = config
        # TFLogger writes TensorBoard summaries and a CSV into config["logdir"].
        self.logger = TFLogger(log_dir=self.config["logdir"], hps=self.config)
        self._create_env = create_env
        self._create_train_env = create_train_env
        self._create_agent = create_agent
        # Build a throwaway environment only to discover the number of actions
        # and the observation dimensionality, then drop it.
        env = self._create_env(
            self.config["n_envs"], seed=0, env_name=self.config["env_name"]
        )
        self.n_actions = env.action_space.n
        self.obs_dim = env.reset()[0]["frame"].size()[1]
        del env

    def run(self):
        """Main loop: collect trajectories, optimize, and evaluate until the time limit."""
        self.learning_model = AgentModel(self.obs_dim, self.n_actions, 32)
        self.critic_model = BaselineModel(self.obs_dim, 32)

        # Evaluation batcher: runs complete episodes with a copy of the policy.
        model = copy.deepcopy(self.learning_model)
        self.evaluation_batcher = EpisodeBatcher(
            n_timesteps=self.config["max_episode_steps"],
            n_slots=self.config["n_evaluation_episodes"],
            create_agent=self._create_agent,
            create_env=self._create_env,
            env_args={
                "n_envs": self.config["n_envs"],
                "max_episode_steps": self.config["max_episode_steps"],
                "env_name": self.config["env_name"],
            },
            agent_args={"n_actions": self.n_actions, "model": model},
            n_threads=self.config["n_evaluation_threads"],
            seeds=[
                self.config["env_seed"] + k * 10
                for k in range(self.config["n_evaluation_threads"])
            ],
        )

        # Train batcher: gathers pieces of config["a2c_timesteps"] transitions.
        model = copy.deepcopy(self.learning_model)
        self.train_batcher = Batcher(
            n_timesteps=self.config["a2c_timesteps"],
            n_slots=self.config["n_envs"] * self.config["n_threads"],
            create_agent=self._create_agent,
            create_env=self._create_train_env,
            env_args={
                "n_envs": self.config["n_envs"],
                "max_episode_steps": self.config["max_episode_steps"],
                "env_name": self.config["env_name"],
            },
            agent_args={"n_actions": self.n_actions, "model": model},
            n_threads=self.config["n_threads"],
            seeds=[
                self.config["env_seed"] + k * 10
                for k in range(self.config["n_threads"])
            ],
        )

        # One optimizer over both the policy and the critic parameters.
        optimizer = torch.optim.Adam(
            nn.Sequential(self.learning_model, self.critic_model).parameters(),
            lr=self.config["lr"],
        )

        _start_time = time.time()
        self.iteration = 0

        # Launch a first (deterministic) evaluation round.
        # NOTE(review): this assignment was corrupted in the source (only a
        # stray "]" remained).  It is reconstructed here from the evaluation
        # batcher above, which owns config["n_evaluation_episodes"] slots.
        # TODO: confirm against the original tutorial code.
        n_episodes = self.config["n_evaluation_episodes"]
        agent_info = DictTensor(
            {"stochastic": torch.tensor([False]).repeat(n_episodes)}
        )
        self.evaluation_batcher.execute(n_episodes=n_episodes, agent_info=agent_info)
        self.evaluation_iteration = self.iteration

        # Start trajectory collection with the stochastic policy.
        self.train_batcher.update(self.learning_model.state_dict())
        n_episodes = self.config["n_envs"] * self.config["n_threads"]
        agent_info = DictTensor({"stochastic": torch.tensor([True]).repeat(n_episodes)})
        self.train_batcher.reset(agent_info=agent_info)

        while time.time() - _start_time < self.config["time_limit"]:
            # Get the next pieces of episodes; the train env is infinite, so a
            # new piece is always available.
            self.train_batcher.execute()
            trajectories = self.train_batcher.get(blocking=True)

            # Compute and log the individual losses.
            dt = self.get_loss(trajectories)
            for k in dt.keys():
                self.logger.add_scalar(k, dt[k].item(), self.iteration)

            # Final loss: the critic term is minimized while the a2c and
            # entropy terms are maximized, hence the subtractions.
            ld = self.config["critic_coef"] * dt["critic_loss"]
            lr = self.config["a2c_coef"] * dt["a2c_loss"]
            le = self.config["entropy_coef"] * dt["entropy_loss"]
            floss = ld - le - lr
            floss = floss / n_episodes * trajectories.n_elems()

            optimizer.zero_grad()
            floss.backward()
            optimizer.step()

            # Propagate the updated weights to the collection workers.
            self.train_batcher.update(self.learning_model.state_dict())
            self.iteration += 1

            # Non-blocking poll of the evaluation batcher.
            evaluation_trajectories = self.evaluation_batcher.get(blocking=False)
            if evaluation_trajectories is not None:  # trajectories are available
                # Masked sum of rewards over time, averaged over episodes.
                cumulated_reward = (
                    (
                        evaluation_trajectories["_reward"]
                        * evaluation_trajectories.mask()
                    )
                    .sum(1)
                    .mean()
                )
                self.logger.add_scalar(
                    "evaluation_reward",
                    cumulated_reward.item(),
                    self.evaluation_iteration,
                )
                print(
                    "At iteration %d, reward is %f"
                    % (self.evaluation_iteration, cumulated_reward.item())
                )
                # Relaunch the evaluation with the freshest weights (same
                # agent_info and same number of episodes).
                self.evaluation_batcher.update(self.learning_model.state_dict())
                self.evaluation_iteration = self.iteration
                self.evaluation_batcher.reexecute()

        self.train_batcher.close()
        self.evaluation_batcher.get()  # wait for the last evaluation round
        self.evaluation_batcher.close()
        self.logger.update_csv()  # save metrics as a CSV file in logdir
        self.logger.close()

    def get_loss(self, trajectories):
        """Compute the A2C losses; returns a DictTensor of scalar losses."""
        # '_reward' is the reward observed at t+1 (after the action at t).
        reward = trajectories["_reward"]
        # mask[b, t] == 1 iff transition t belongs to trajectory b.
        mask = trajectories.mask()
        reward = reward * mask
        max_length = trajectories.lengths.max().item()

        # Policy outputs for every timestep: B x T x n_actions.
        action_probabilities = []
        for t in range(max_length):
            proba = self.learning_model(trajectories["frame"][:, t])
            action_probabilities.append(proba.unsqueeze(1))
        action_probabilities = torch.cat(action_probabilities, dim=1)

        # Critic values for t = 0..T; the trailing column appended below is a
        # placeholder overwritten with the value of each trajectory's final
        # observation => B x (T+1).
        critic = []
        for t in range(max_length):
            b = self.critic_model(trajectories["frame"][:, t])
            critic.append(b.unsqueeze(1))
        critic = torch.cat(critic + [b.unsqueeze(1)], dim=1).squeeze(-1)
        idx = torch.arange(trajectories.n_elems())
        last_critic = self.critic_model(
            trajectories["_frame"][idx, trajectories.lengths - 1]
        ).squeeze(-1)
        critic[idx, trajectories.lengths] = last_critic

        # One-step TD target; bootstrapping is cut where '_done' is set, and
        # the next-state value is detached.
        target = (
            reward
            + self.config["discount_factor"]
            * (1 - trajectories["_done"].float())
            * critic[:, 1:].detach()
        )
        td = critic[:, :-1] - target

        # Mean squared TD error per trajectory, then averaged over the batch.
        critic_loss = td ** 2
        critic_loss = (critic_loss * mask).sum(1) / mask.sum(1)
        avg_critic_loss = critic_loss.mean()

        # Policy-gradient term using the detached TD as (negated) advantage.
        action_distribution = torch.distributions.Categorical(action_probabilities)
        log_proba = action_distribution.log_prob(trajectories["action"])
        a2c_loss = -log_proba * td.detach()
        a2c_loss = (a2c_loss * mask).sum(1) / mask.sum(1)
        avg_a2c_loss = a2c_loss.mean()

        # Entropy bonus encouraging exploration.
        entropy = action_distribution.entropy()
        entropy = (entropy * mask).sum(1) / mask.sum(1)
        avg_entropy = entropy.mean()

        return DictTensor(
            {
                "critic_loss": avg_critic_loss,
                "a2c_loss": avg_a2c_loss,
                "entropy_loss": avg_entropy,
            }
        )
| true | true |
79007e3008a8365164b9152fcc8762c21b1520ff | 7,922 | py | Python | home/blocks.py | edmoss94/iogt | 14bf8d412480c56d7df2cbfed73e7f887a423ca0 | [
"BSD-2-Clause"
] | null | null | null | home/blocks.py | edmoss94/iogt | 14bf8d412480c56d7df2cbfed73e7f887a423ca0 | [
"BSD-2-Clause"
] | null | null | null | home/blocks.py | edmoss94/iogt | 14bf8d412480c56d7df2cbfed73e7f887a423ca0 | [
"BSD-2-Clause"
] | null | null | null | from django.forms.utils import flatatt
from django.utils.html import format_html, format_html_join
from django.utils.translation import gettext as _
from wagtail.core import blocks
from wagtail.core.blocks import PageChooserBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtailmarkdown.utils import render_markdown
from wagtailmedia.blocks import AbstractMediaChooserBlock
class MediaBlock(AbstractMediaChooserBlock):
def render_basic(self, value, context=None):
if not value:
return ''
video_not_supported_text = _("Your browser does not support video playback.")
audio_not_supported_text = _("Your browser does not support audio playback.")
# Translators: Translators: This message appears below embedded video and audio on the site. Many feature phones won't be able to play embedded video/audio, so the site offers an opportunity to download the file. Part of this message (between %(start_link)s and %(end_link)s ) is a clickable download link.
download_video_text = _('If you cannot view the above video, you can'
' instead %(start_link)sdownload it%(end_link)s.') % {
'start_link': '<a href={2} download>',
'end_link': '</a>'
}
# Translators: Translators: This message appears below embedded video and audio on the site. Many feature phones won't be able to play embedded video/audio, so the site offers an opportunity to download the file. Part of this message (between %(start_link)s and %(end_link)s ) is a clickable download link.
download_audio_text = _('If you cannot listen to the above audio, you can'
' instead %(start_link)sdownload it%(end_link)s.') % {
'start_link': '<a href={2} download>',
'end_link': '</a>'
}
if value.type == 'video':
player_code = '''
<div>
<video width="320" height="240" {1} controls>
{0}
''' + video_not_supported_text + '''
</video>
</div>
<p class='article__content--video'>''' + download_video_text + '''</p>
'''
else:
player_code = '''
<div>
<audio controls>
{0}
''' + audio_not_supported_text + '''
</audio>
</div>
<p class='article__content--audio'>''' + download_audio_text + '''</p>
'''
thumbnail = f'poster={value.thumbnail.url}' if value.thumbnail else ''
return format_html(player_code, format_html_join(
'\n', "<source{0}>",
[[flatatt(s)] for s in value.sources]
), thumbnail, value.url)
class SocialMediaLinkBlock(blocks.StructBlock):
    """A social-media profile link: display title, target URL and an icon image."""
    # Field order defines the edit-form order; keep it stable.
    title = blocks.CharBlock(max_length=255)
    link = blocks.URLBlock()
    image = ImageChooserBlock(template='blocks/image.html')
    class Meta:
        icon = 'site'
class SocialMediaShareButtonBlock(blocks.StructBlock):
    """A share button for one platform; 'is_active' toggles whether it is shown."""
    platform = blocks.CharBlock(max_length=255)
    is_active = blocks.BooleanBlock(required=False)
    image = ImageChooserBlock(template='blocks/image.html', required=False)
    class Meta:
        icon = 'site'
class EmbeddedQuestionnaireChooserBlock(blocks.PageChooserBlock):
    """Page chooser used by the embedded questionnaire blocks; subclasses set target_model."""
    class Meta:
        icon = 'form'
class EmbeddedQuestionnaireBlock(blocks.StructBlock):
    """Base for embedded questionnaires; 'direct_display' toggles inline rendering."""
    direct_display = blocks.BooleanBlock(required=False)
class EmbeddedPollBlock(EmbeddedQuestionnaireBlock):
    """Embeds a Poll page; only rendered when the chosen page is live."""

    poll = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Poll')

    def get_context(self, value, parent_context=None):
        context = super().get_context(value, parent_context)
        chosen_page = value.get('poll')
        if chosen_page and chosen_page.live:
            context['questionnaire'] = chosen_page.specific
            context['direct_display'] = value['direct_display']
        return context

    class Meta:
        template = 'questionnaires/tags/questionnaire_wrapper.html'
class EmbeddedSurveyBlock(EmbeddedQuestionnaireBlock):
    """Embeds a Survey page; only rendered when the chosen page is live."""

    survey = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Survey')

    def get_context(self, value, parent_context=None):
        context = super().get_context(value, parent_context)
        chosen_page = value.get('survey')
        if chosen_page and chosen_page.live:
            context['questionnaire'] = chosen_page.specific
            context['direct_display'] = value['direct_display']
        return context

    class Meta:
        template = 'questionnaires/tags/questionnaire_wrapper.html'
class EmbeddedQuizBlock(EmbeddedQuestionnaireBlock):
    """Embeds a Quiz page; only rendered when the chosen page is live."""

    quiz = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Quiz')

    def get_context(self, value, parent_context=None):
        context = super().get_context(value, parent_context)
        chosen_page = value.get('quiz')
        if chosen_page and chosen_page.live:
            context['questionnaire'] = chosen_page.specific
            context['direct_display'] = value['direct_display']
        return context

    class Meta:
        template = 'questionnaires/tags/questionnaire_wrapper.html'
class PageButtonBlock(blocks.StructBlock):
    """A button linking to a page; the label falls back to the page title."""

    page = blocks.PageChooserBlock()
    text = blocks.CharBlock(required=False, max_length=255)

    def get_context(self, value, parent_context=None):
        context = super().get_context(value, parent_context)
        target = value.get('page')
        if target and target.live:
            # Use the explicit label when given, otherwise the page title.
            label = value.get('text') or target.title
            context['button_page'] = target.specific
            context['text'] = label
        return context

    class Meta:
        template = 'blocks/page_button.html'
class ArticleBlock(blocks.StructBlock):
    """Embeds a live Article page, optionally showing its section title."""

    display_section_title = blocks.BooleanBlock(required=False)
    article = PageChooserBlock(target_model='home.Article')

    def get_context(self, value, parent_context=None):
        context = super().get_context(value, parent_context)
        chosen_page = value.get('article')
        if chosen_page and chosen_page.live:
            context['article'] = chosen_page.specific
            context['display_section_title'] = value['display_section_title']
        return context

    class Meta:
        template = 'blocks/article.html'
class NumberedListBlock(blocks.ListBlock):
    """A ListBlock rendered as an ordered (<ol>) HTML list."""

    def render_basic(self, value, context=None):
        # Render each child and wrap it in an <li>; format_html_join escapes
        # as needed and joins the items with newlines.
        rendered_items = (
            (self.child_block.render(item, context=context),)
            for item in value
        )
        items_html = format_html_join('\n', '<li>{0}</li>', rendered_items)
        return format_html("<ol>{0}</ol>", items_html)
class RawHTMLBlock(blocks.RawHTMLBlock):
    """Raw HTML block whose rendered output is additionally run through markdown."""

    def render_basic(self, value, context=None):
        rendered = super().render_basic(value, context)
        return render_markdown(rendered)
class OfflineAppButtonBlock(blocks.StructBlock):
    """Button offering installation of the offline app; the label depends on the device/app state."""
    # Label when installation is possible on the current device.
    smartphone_text = blocks.CharBlock(
        help_text=_('This text appears when it is possible for the user to install the app on their phone.'))
    # Label (button disabled) on feature phones.
    feature_phone_text = blocks.CharBlock(required=False,
                                          help_text=_('This text appears when the user is using a feature phone and thus cannot install the app '
                                                      '(the button will be disabled in this case). [Currently not implemented]'))
    # Label (button disabled) when already browsing via the offline app.
    offline_text = blocks.CharBlock(required=False,
                                    help_text=_('This text appears when the user is navigating the site via the offline app and '
                                                'thus it doesn\'t make sense to install the offline app again '
                                                '(the button will be disabled in this case). [Currently not implemented]'))
    class Meta:
        template = 'blocks/offline_app_button.html'
| 38.086538 | 314 | 0.639611 | from django.forms.utils import flatatt
from django.utils.html import format_html, format_html_join
from django.utils.translation import gettext as _
from wagtail.core import blocks
from wagtail.core.blocks import PageChooserBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtailmarkdown.utils import render_markdown
from wagtailmedia.blocks import AbstractMediaChooserBlock
class MediaBlock(AbstractMediaChooserBlock):
def render_basic(self, value, context=None):
if not value:
return ''
video_not_supported_text = _("Your browser does not support video playback.")
audio_not_supported_text = _("Your browser does not support audio playback.")
download_video_text = _('If you cannot view the above video, you can'
' instead %(start_link)sdownload it%(end_link)s.') % {
'start_link': '<a href={2} download>',
'end_link': '</a>'
}
# Translators: Translators: This message appears below embedded video and audio on the site. Many feature phones won't be able to play embedded video/audio, so the site offers an opportunity to download the file. Part of this message (between %(start_link)s and %(end_link)s ) is a clickable download link.
download_audio_text = _('If you cannot listen to the above audio, you can'
' instead %(start_link)sdownload it%(end_link)s.') % {
'start_link': '<a href={2} download>',
'end_link': '</a>'
}
if value.type == 'video':
player_code = '''
<div>
<video width="320" height="240" {1} controls>
{0}
''' + video_not_supported_text + '''
</video>
</div>
<p class='article__content--video'>''' + download_video_text + '''</p>
'''
else:
player_code = '''
<div>
<audio controls>
{0}
''' + audio_not_supported_text + '''
</audio>
</div>
<p class='article__content--audio'>''' + download_audio_text + '''</p>
'''
thumbnail = f'poster={value.thumbnail.url}' if value.thumbnail else ''
return format_html(player_code, format_html_join(
'\n', "<source{0}>",
[[flatatt(s)] for s in value.sources]
), thumbnail, value.url)
class SocialMediaLinkBlock(blocks.StructBlock):
title = blocks.CharBlock(max_length=255)
link = blocks.URLBlock()
image = ImageChooserBlock(template='blocks/image.html')
class Meta:
icon = 'site'
class SocialMediaShareButtonBlock(blocks.StructBlock):
platform = blocks.CharBlock(max_length=255)
is_active = blocks.BooleanBlock(required=False)
image = ImageChooserBlock(template='blocks/image.html', required=False)
class Meta:
icon = 'site'
class EmbeddedQuestionnaireChooserBlock(blocks.PageChooserBlock):
class Meta:
icon = 'form'
class EmbeddedQuestionnaireBlock(blocks.StructBlock):
direct_display = blocks.BooleanBlock(required=False)
class EmbeddedPollBlock(EmbeddedQuestionnaireBlock):
poll = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Poll')
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
poll = value.get('poll')
if poll and poll.live:
context.update({
'direct_display': value['direct_display'],
'questionnaire': poll.specific,
})
return context
class Meta:
template = 'questionnaires/tags/questionnaire_wrapper.html'
class EmbeddedSurveyBlock(EmbeddedQuestionnaireBlock):
survey = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Survey')
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
survey = value.get('survey')
if survey and survey.live:
context.update({
'direct_display': value['direct_display'],
'questionnaire': survey.specific,
})
return context
class Meta:
template = 'questionnaires/tags/questionnaire_wrapper.html'
class EmbeddedQuizBlock(EmbeddedQuestionnaireBlock):
quiz = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Quiz')
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
quiz = value.get('quiz')
if quiz and quiz.live:
context.update({
'direct_display': value['direct_display'],
'questionnaire': quiz.specific,
})
return context
class Meta:
template = 'questionnaires/tags/questionnaire_wrapper.html'
class PageButtonBlock(blocks.StructBlock):
page = blocks.PageChooserBlock()
text = blocks.CharBlock(required=False, max_length=255)
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
button_page = value.get('page')
if button_page and button_page.live:
context.update({
'button_page': button_page.specific,
'text': value.get('text') or button_page.title
})
return context
class Meta:
template = 'blocks/page_button.html'
class ArticleBlock(blocks.StructBlock):
display_section_title = blocks.BooleanBlock(required=False)
article = PageChooserBlock(target_model='home.Article')
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
article = value.get('article')
if article and article.live:
context.update({
'display_section_title': value['display_section_title'],
'article': article.specific,
})
return context
class Meta:
template = 'blocks/article.html'
class NumberedListBlock(blocks.ListBlock):
def render_basic(self, value, context=None):
children = format_html_join(
'\n', '<li>{0}</li>',
[
(self.child_block.render(child_value, context=context),)
for child_value in value
]
)
return format_html("<ol>{0}</ol>", children)
class RawHTMLBlock(blocks.RawHTMLBlock):
def render_basic(self, value, context=None):
result = super(RawHTMLBlock, self).render_basic(value, context)
return render_markdown(result)
class OfflineAppButtonBlock(blocks.StructBlock):
smartphone_text = blocks.CharBlock(
help_text=_('This text appears when it is possible for the user to install the app on their phone.'))
feature_phone_text = blocks.CharBlock(required=False,
help_text=_('This text appears when the user is using a feature phone and thus cannot install the app '
'(the button will be disabled in this case). [Currently not implemented]'))
offline_text = blocks.CharBlock(required=False,
help_text=_('This text appears when the user is navigating the site via the offline app and '
'thus it doesn\'t make sense to install the offline app again '
'(the button will be disabled in this case). [Currently not implemented]'))
class Meta:
template = 'blocks/offline_app_button.html'
| true | true |
79007e4badd498a950c662525c89abe52545285a | 750 | py | Python | Chapter16/B07333_16.py | monocilindro/Mastering-Geospatial-Analysis-with-Python | 2cee571403aa0d96f6c2eb7400792286a81dc7e9 | [
"MIT"
] | 64 | 2018-05-04T16:54:59.000Z | 2022-03-22T11:26:21.000Z | Chapter16/B07333_16.py | monocilindro/Mastering-Geospatial-Analysis-with-Python | 2cee571403aa0d96f6c2eb7400792286a81dc7e9 | [
"MIT"
] | 1 | 2020-05-31T00:45:28.000Z | 2020-05-31T18:29:07.000Z | Chapter16/B07333_16.py | monocilindro/Mastering-Geospatial-Analysis-with-Python | 2cee571403aa0d96f6c2eb7400792286a81dc7e9 | [
"MIT"
] | 41 | 2018-05-10T21:31:44.000Z | 2022-03-23T11:12:33.000Z | from pywebhdfs.webhdfs import PyWebHdfsClient as h
hdfs=h(host='sandbox.hortonworks.com',port='50070',user_name='raj_ops')
ls=hdfs.list_dir('/')
ls['FileStatuses']['FileStatus'][0]
hdfs.make_dir('/samples',permission=755)
f=open('/home/pcrickard/sample.csv')
d=f.read()
hdfs.create_file('/samples/sample.csv',d)
hdfs.read_file('/samples/sample.csv')
hdfs.get_file_dir_status('/samples/sample.csv')
from pyhive import hive
c=hive.connect('sandbox.hortonworks.com').cursor()
c.execute('CREATE TABLE FromPython (age int, name string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ","')
c.execute("LOAD DATA INPATH '/samples/sample.csv' OVERWRITE INTO TABLE FromPython")
c.execute("SELECT * FROM FromPython")
result=c.fetchall()
| 24.193548 | 108 | 0.736 | from pywebhdfs.webhdfs import PyWebHdfsClient as h
hdfs=h(host='sandbox.hortonworks.com',port='50070',user_name='raj_ops')
ls=hdfs.list_dir('/')
ls['FileStatuses']['FileStatus'][0]
hdfs.make_dir('/samples',permission=755)
f=open('/home/pcrickard/sample.csv')
d=f.read()
hdfs.create_file('/samples/sample.csv',d)
hdfs.read_file('/samples/sample.csv')
hdfs.get_file_dir_status('/samples/sample.csv')
from pyhive import hive
c=hive.connect('sandbox.hortonworks.com').cursor()
c.execute('CREATE TABLE FromPython (age int, name string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ","')
c.execute("LOAD DATA INPATH '/samples/sample.csv' OVERWRITE INTO TABLE FromPython")
c.execute("SELECT * FROM FromPython")
result=c.fetchall()
| true | true |
79007e94ecd758fc62cd5859d388611b64fbad63 | 3,475 | py | Python | compute_cross_validation_accuracy.py | imatge-upc/sentiment-2015-asm | 50cc4793f8dc49fbe6cc9fb489b7bac986ad1769 | [
"MIT"
] | 23 | 2016-03-17T01:23:32.000Z | 2020-10-20T21:26:52.000Z | compute_cross_validation_accuracy.py | imatge-upc/sentiment-2015-asm | 50cc4793f8dc49fbe6cc9fb489b7bac986ad1769 | [
"MIT"
] | null | null | null | compute_cross_validation_accuracy.py | imatge-upc/sentiment-2015-asm | 50cc4793f8dc49fbe6cc9fb489b7bac986ad1769 | [
"MIT"
] | 11 | 2016-03-17T00:29:02.000Z | 2020-03-11T10:17:22.000Z | import os
import sys
import numpy as np
import caffe
import argparse
parser = argparse.ArgumentParser(description='Computes 5-fold cross-validation results over Twitter five-agrees dataset')
parser.add_argument('-ov', '--oversampling', help='Enables (1) or disables (0) oversampling')
args = parser.parse_args()
if args.oversampling == 0:
oversampling = False
elif args.oversampling == 1:
oversampling = True
else:
sys.exit("oversampling must be 0 or 1")
subsets = ['test1', 'test2', 'test3', 'test4', 'test5']
mean_file = 'ilsvrc_2012_mean.npy'
accuracies = []
output_string = ""
for subset in subsets:
# Update paths for this subset
deploy_path = 'sentiment_deploy.prototxt'
caffemodel_path = 'twitter_finetuned_' + subset + '_iter_180.caffemodel'
ground_truth = 'ground_truth/' + subset + '/test.txt'
instanceList = []
correctLabels = 0
incorrectLabels = 0
positiveLabels = 0
negativeLabels = 0
positivePredictions = 0
negativePredictions = 0
gt_file = open(ground_truth, "r")
# Store images in a list
while (True):
line = gt_file.readline()
# Check if we have reached the end
if (len(line) == 0):
break
# Add the line to the list
instanceList.append(line)
# Load network
net = caffe.Classifier(deploy_path,
caffemodel_path,
mean=np.load(mean_file).mean(1).mean(1),
image_dims=(256, 256),
channel_swap=(2, 1, 0),
raw_scale=255)
# Loop through the ground truth file, predict each image's label and store the wrong ones
counter = 0
for instance in instanceList:
values = instance.split()
image_path = values[0]
sentiment = int(values[1])
# Load image
im = caffe.io.load_image(image_path)
# Make a forward pass and get the score
prediction = net.predict([im], oversample=oversampling)
# Check if the prediction was correct or not
if prediction[0].argmax() == sentiment:
correctLabels += 1
else:
incorrectLabels += 1
# Update label counter
if sentiment == 0:
negativeLabels += 1
else:
positiveLabels += 1
# Update prediction counter (negative = 0, positive = 1)
if prediction[0].argmax() == 0:
negativePredictions += 1
else:
positivePredictions += 1
counter += 1
if counter % 40 == 0:
print subset + ', ' + str(counter)
sys.stdout.flush()
gt_file.close()
accuracy = 100. * correctLabels / (correctLabels + incorrectLabels)
accuracies.append(accuracy)
# Print accuracy results
print '------------- ' + subset + ' -------------'
print 'Accuracy = ', str(accuracy)
print '---------------------------------'
output_string += 'Subset: {0}: \n Positive images: {1}\n Negative images: {2}\n Positive predictions: {3}\n Negative predictions: {4}\n'.format(
subset, str(positiveLabels), str(negativeLabels), str(positivePredictions), str(negativePredictions))
print '\nRESULTS:'
for i in range(0, 5):
print subsets[i] + ': ' + str(accuracies[i]) + '%'
print '\nMean accuracy = ' + str(1. * sum(accuracies) / len(accuracies))
print "\n-------------------------------------\n"
print output_string
| 31.590909 | 160 | 0.58964 | import os
import sys
import numpy as np
import caffe
import argparse
parser = argparse.ArgumentParser(description='Computes 5-fold cross-validation results over Twitter five-agrees dataset')
parser.add_argument('-ov', '--oversampling', help='Enables (1) or disables (0) oversampling')
args = parser.parse_args()
if args.oversampling == 0:
oversampling = False
elif args.oversampling == 1:
oversampling = True
else:
sys.exit("oversampling must be 0 or 1")
subsets = ['test1', 'test2', 'test3', 'test4', 'test5']
mean_file = 'ilsvrc_2012_mean.npy'
accuracies = []
output_string = ""
for subset in subsets:
deploy_path = 'sentiment_deploy.prototxt'
caffemodel_path = 'twitter_finetuned_' + subset + '_iter_180.caffemodel'
ground_truth = 'ground_truth/' + subset + '/test.txt'
instanceList = []
correctLabels = 0
incorrectLabels = 0
positiveLabels = 0
negativeLabels = 0
positivePredictions = 0
negativePredictions = 0
gt_file = open(ground_truth, "r")
while (True):
line = gt_file.readline()
if (len(line) == 0):
break
instanceList.append(line)
net = caffe.Classifier(deploy_path,
caffemodel_path,
mean=np.load(mean_file).mean(1).mean(1),
image_dims=(256, 256),
channel_swap=(2, 1, 0),
raw_scale=255)
counter = 0
for instance in instanceList:
values = instance.split()
image_path = values[0]
sentiment = int(values[1])
# Load image
im = caffe.io.load_image(image_path)
# Make a forward pass and get the score
prediction = net.predict([im], oversample=oversampling)
# Check if the prediction was correct or not
if prediction[0].argmax() == sentiment:
correctLabels += 1
else:
incorrectLabels += 1
# Update label counter
if sentiment == 0:
negativeLabels += 1
else:
positiveLabels += 1
# Update prediction counter (negative = 0, positive = 1)
if prediction[0].argmax() == 0:
negativePredictions += 1
else:
positivePredictions += 1
counter += 1
if counter % 40 == 0:
print subset + ', ' + str(counter)
sys.stdout.flush()
gt_file.close()
accuracy = 100. * correctLabels / (correctLabels + incorrectLabels)
accuracies.append(accuracy)
# Print accuracy results
print '------------- ' + subset + ' -------------'
print 'Accuracy = ', str(accuracy)
print '---------------------------------'
output_string += 'Subset: {0}: \n Positive images: {1}\n Negative images: {2}\n Positive predictions: {3}\n Negative predictions: {4}\n'.format(
subset, str(positiveLabels), str(negativeLabels), str(positivePredictions), str(negativePredictions))
print '\nRESULTS:'
for i in range(0, 5):
print subsets[i] + ': ' + str(accuracies[i]) + '%'
print '\nMean accuracy = ' + str(1. * sum(accuracies) / len(accuracies))
print "\n-------------------------------------\n"
print output_string
| false | true |
79007ebec61d88afa730d5d80b21c80553ae66fe | 2,849 | py | Python | examples/undocumented/python/classifier_multiclass_ecoc.py | shiyi001/shogun | 287f02d11d5914ded2d410ab9c6f38712e11ca2b | [
"BSD-3-Clause"
] | 1 | 2019-08-17T21:19:20.000Z | 2019-08-17T21:19:20.000Z | examples/undocumented/python/classifier_multiclass_ecoc.py | shiyi001/shogun | 287f02d11d5914ded2d410ab9c6f38712e11ca2b | [
"BSD-3-Clause"
] | null | null | null | examples/undocumented/python/classifier_multiclass_ecoc.py | shiyi001/shogun | 287f02d11d5914ded2d410ab9c6f38712e11ca2b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import re
import time
from tools.multiclass_shared import prepare_data
# run with toy data
[traindat, label_traindat, testdat, label_testdat] = prepare_data()
# run with opt-digits if available
#[traindat, label_traindat, testdat, label_testdat] = prepare_data(False)
parameter_list = [[traindat,testdat,label_traindat,label_testdat,2.1,1,1e-5]]
def classifier_multiclass_ecoc (fm_train_real=traindat,fm_test_real=testdat,label_train_multiclass=label_traindat,label_test_multiclass=label_testdat,lawidth=2.1,C=1,epsilon=1e-5):
    """Train/evaluate every available shogun ECOC encoder/decoder combination.

    For each (encoder, decoder) pair a LinearMulticlassMachine with a
    LibLinear base classifier is trained and, when test labels are given,
    its multiclass accuracy is computed.  Results were printed originally;
    the prints are kept commented out as in the source.
    """
    import shogun
    from shogun import ECOCStrategy, LinearMulticlassMachine
    from shogun import MulticlassAccuracy
    from shogun import MulticlassLabels
    import shogun as sg

    def nonabstract_class(name):
        # Abstract wrappers raise TypeError when instantiated with no args.
        try:
            getattr(shogun, name)()
        except TypeError:
            return False
        return True

    # Discover all concrete ECOC encoder/decoder classes exposed by shogun.
    encoders = [x for x in dir(shogun)
                if re.match(r'ECOC.+Encoder', x) and nonabstract_class(x)]
    decoders = [x for x in dir(shogun)
                if re.match(r'ECOC.+Decoder', x) and nonabstract_class(x)]

    fea_train = sg.features(fm_train_real)
    fea_test = sg.features(fm_test_real)
    gnd_train = MulticlassLabels(label_train_multiclass)
    if label_test_multiclass is None:
        gnd_test = None
    else:
        gnd_test = MulticlassLabels(label_test_multiclass)

    base_classifier = sg.machine("LibLinear",
                                 liblinear_solver_type="L2R_L2LOSS_SVC",
                                 use_bias=True)

    #print('Testing with %d encoders and %d decoders' % (len(encoders), len(decoders)))
    #print('-' * 70)
    #format_str = '%%15s + %%-10s %%-10%s %%-10%s %%-10%s'
    #print((format_str % ('s', 's', 's')) % ('encoder', 'decoder', 'codelen', 'time', 'accuracy'))

    def run_ecoc(ier, idr):
        # Build one (encoder, decoder) strategy and train a machine with it.
        encoder = getattr(shogun, encoders[ier])()
        decoder = getattr(shogun, decoders[idr])()

        # Data-dependent encoders need labels/features before encoding.
        if hasattr(encoder, 'set_labels'):
            encoder.set_labels(gnd_train)
            encoder.set_features(fea_train)

        strategy = ECOCStrategy(encoder, decoder)
        classifier = LinearMulticlassMachine(strategy, fea_train, base_classifier, gnd_train)
        classifier.train()

        label_pred = classifier.apply(fea_test)
        if gnd_test is not None:
            evaluator = MulticlassAccuracy()
            acc = evaluator.evaluate(label_pred, gnd_test)
        else:
            acc = None

        return (classifier.get_num_machines(), acc)

    for ier in range(len(encoders)):
        for idr in range(len(decoders)):
            # BUGFIX: time.clock() was deprecated and removed in Python 3.8;
            # time.time() measures elapsed wall-clock time on both Python 2
            # and 3.
            t_begin = time.time()
            (codelen, acc) = run_ecoc(ier, idr)
            if acc is None:
                acc_fmt = 's'
                acc = 'N/A'
            else:
                acc_fmt = '.4f'
            t_elapse = time.time() - t_begin
            #print((format_str % ('d', '.3f', acc_fmt)) %
            #    (encoders[ier][4:-7], decoders[idr][4:-7], codelen, t_elapse, acc))
if __name__=='__main__':
print('MulticlassECOC')
classifier_multiclass_ecoc(*parameter_list[0])
| 31.655556 | 180 | 0.703054 |
import re
import time
from tools.multiclass_shared import prepare_data
[traindat, label_traindat, testdat, label_testdat] = prepare_data()
parameter_list = [[traindat,testdat,label_traindat,label_testdat,2.1,1,1e-5]]
def classifier_multiclass_ecoc (fm_train_real=traindat,fm_test_real=testdat,label_train_multiclass=label_traindat,label_test_multiclass=label_testdat,lawidth=2.1,C=1,epsilon=1e-5):
import shogun
from shogun import ECOCStrategy, LinearMulticlassMachine
from shogun import MulticlassAccuracy
from shogun import MulticlassLabels
import shogun as sg
def nonabstract_class(name):
try:
getattr(shogun, name)()
except TypeError:
return False
return True
encoders = [x for x in dir(shogun)
if re.match(r'ECOC.+Encoder', x) and nonabstract_class(x)]
decoders = [x for x in dir(shogun)
if re.match(r'ECOC.+Decoder', x) and nonabstract_class(x)]
fea_train = sg.features(fm_train_real)
fea_test = sg.features(fm_test_real)
gnd_train = MulticlassLabels(label_train_multiclass)
if label_test_multiclass is None:
gnd_test = None
else:
gnd_test = MulticlassLabels(label_test_multiclass)
base_classifier = sg.machine("LibLinear",
liblinear_solver_type="L2R_L2LOSS_SVC",
use_bias=True)
def run_ecoc(ier, idr):
encoder = getattr(shogun, encoders[ier])()
decoder = getattr(shogun, decoders[idr])()
if hasattr(encoder, 'set_labels'):
encoder.set_labels(gnd_train)
encoder.set_features(fea_train)
strategy = ECOCStrategy(encoder, decoder)
classifier = LinearMulticlassMachine(strategy, fea_train, base_classifier, gnd_train)
classifier.train()
label_pred = classifier.apply(fea_test)
if gnd_test is not None:
evaluator = MulticlassAccuracy()
acc = evaluator.evaluate(label_pred, gnd_test)
else:
acc = None
return (classifier.get_num_machines(), acc)
for ier in range(len(encoders)):
for idr in range(len(decoders)):
t_begin = time.clock()
(codelen, acc) = run_ecoc(ier, idr)
if acc is None:
acc_fmt = 's'
acc = 'N/A'
else:
acc_fmt = '.4f'
t_elapse = time.clock() - t_begin
if __name__=='__main__':
print('MulticlassECOC')
classifier_multiclass_ecoc(*parameter_list[0])
| true | true |
79007f2aede10988ac32470b1deeb150a8562e17 | 304 | py | Python | 2016/day5/p1.py | CheyenneWills/adventofcode | 7ab9e57420225df121c1702a144e659c4aa93abb | [
"MIT"
] | null | null | null | 2016/day5/p1.py | CheyenneWills/adventofcode | 7ab9e57420225df121c1702a144e659c4aa93abb | [
"MIT"
] | null | null | null | 2016/day5/p1.py | CheyenneWills/adventofcode | 7ab9e57420225df121c1702a144e659c4aa93abb | [
"MIT"
] | null | null | null | #!/usr/bin/python
import hashlib
import sys
v = sys.argv[1]
index = 0
pw = ''
i = 0
while True:
suffix = str(i)
h = hashlib.md5(v+suffix).hexdigest()
if h.startswith("00000"):
pw += h[5]
print(v+suffix,h,pw)
if len(pw) == 8:
break
i += 1
print(pw)
| 13.818182 | 41 | 0.519737 |
import hashlib
import sys
v = sys.argv[1]
index = 0
pw = ''
i = 0
while True:
suffix = str(i)
h = hashlib.md5(v+suffix).hexdigest()
if h.startswith("00000"):
pw += h[5]
print(v+suffix,h,pw)
if len(pw) == 8:
break
i += 1
print(pw)
| true | true |
79007f52c35861bbe1c7777310341169d517f95e | 1,539 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractMichilunWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractMichilunWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractMichilunWordpressCom.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractMichilunWordpressCom(item):
'''
Parser for 'michilun.wordpress.com'
'''
bad = [
'Recommendations and Reviews',
]
if any([tmp in item['tags'] for tmp in bad]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Side Projects - Scheme of the Official Descendant', 'Scheme of the Official Descendant', 'translated'),
('Song in the Peach Blossoms', 'Song in the Peach Blossoms', 'translated'),
('Onrain (Online - The Novel)', 'Onrain (Online - The Novel)', 'translated'),
('At the End of the Wish', 'At the End of the Wish', 'translated'),
('Bringing Calamity to the Nation', 'Bringing Calamity to the Nation', 'translated'),
('Side Projects - The Flame\'s Daughter', 'The Flame\'s Daughter', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 48.09375 | 145 | 0.499675 | def extractMichilunWordpressCom(item):
bad = [
'Recommendations and Reviews',
]
if any([tmp in item['tags'] for tmp in bad]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Side Projects - Scheme of the Official Descendant', 'Scheme of the Official Descendant', 'translated'),
('Song in the Peach Blossoms', 'Song in the Peach Blossoms', 'translated'),
('Onrain (Online - The Novel)', 'Onrain (Online - The Novel)', 'translated'),
('At the End of the Wish', 'At the End of the Wish', 'translated'),
('Bringing Calamity to the Nation', 'Bringing Calamity to the Nation', 'translated'),
('Side Projects - The Flame\'s Daughter', 'The Flame\'s Daughter', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | true | true |
790080edbe85d5229310c1da80875e5f0b52e45e | 2,381 | py | Python | python/venv/lib/python2.7/site-packages/glanceclient/tests/unit/v2/test_tags.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | python/venv/lib/python2.7/site-packages/glanceclient/tests/unit/v2/test_tags.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | python/venv/lib/python2.7/site-packages/glanceclient/tests/unit/v2/test_tags.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from glanceclient.tests import utils
from glanceclient.v2 import image_tags
IMAGE = '3a4560a1-e585-443e-9b39-553b46ec92d1'
TAG = 'tag01'
data_fixtures = {
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, tag_value=TAG): {
'DELETE': (
{},
None,
),
'PUT': (
{},
{
'image_id': IMAGE,
'tag_value': TAG
}
),
}
}
schema_fixtures = {
'tag': {
'GET': (
{},
{'name': 'image', 'properties': {'image_id': {}, 'tags': {}}}
)
}
}
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
self.api = utils.FakeAPI(data_fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
self.controller = image_tags.Controller(self.api, self.schema_api)
def test_update_image_tag(self):
image_id = IMAGE
tag_value = TAG
self.controller.update(image_id, tag_value)
expect = [
('PUT',
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE,
tag_value=TAG),
{},
None)]
self.assertEqual(expect, self.api.calls)
def test_delete_image_tag(self):
image_id = IMAGE
tag_value = TAG
self.controller.delete(image_id, tag_value)
expect = [
('DELETE',
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE,
tag_value=TAG),
{},
None)]
self.assertEqual(expect, self.api.calls)
| 29.036585 | 79 | 0.563209 |
import testtools
from glanceclient.tests import utils
from glanceclient.v2 import image_tags
IMAGE = '3a4560a1-e585-443e-9b39-553b46ec92d1'
TAG = 'tag01'
data_fixtures = {
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, tag_value=TAG): {
'DELETE': (
{},
None,
),
'PUT': (
{},
{
'image_id': IMAGE,
'tag_value': TAG
}
),
}
}
schema_fixtures = {
'tag': {
'GET': (
{},
{'name': 'image', 'properties': {'image_id': {}, 'tags': {}}}
)
}
}
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
self.api = utils.FakeAPI(data_fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
self.controller = image_tags.Controller(self.api, self.schema_api)
def test_update_image_tag(self):
image_id = IMAGE
tag_value = TAG
self.controller.update(image_id, tag_value)
expect = [
('PUT',
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE,
tag_value=TAG),
{},
None)]
self.assertEqual(expect, self.api.calls)
def test_delete_image_tag(self):
image_id = IMAGE
tag_value = TAG
self.controller.delete(image_id, tag_value)
expect = [
('DELETE',
'/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE,
tag_value=TAG),
{},
None)]
self.assertEqual(expect, self.api.calls)
| true | true |
79008235953bbbe582044bf8ee8caeea5c772543 | 268 | py | Python | reports/urls.py | peachman05/Pwcrew | 6aa340a92ed5833c34f7d3d5c27b132ab413aebb | [
"MIT"
] | null | null | null | reports/urls.py | peachman05/Pwcrew | 6aa340a92ed5833c34f7d3d5c27b132ab413aebb | [
"MIT"
] | 2 | 2020-02-12T00:20:27.000Z | 2020-06-05T18:05:39.000Z | reports/urls.py | peachman05/Pwcrew | 6aa340a92ed5833c34f7d3d5c27b132ab413aebb | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
app_name = 'reports'
urlpatterns = [
# url(r'^graph/', views.graph, name='graph'),
url(r'^graph/', views.statistics, name='graph'),
url(r'^csv_export/', views.csv_export, name='csv_export'),
]
| 19.142857 | 63 | 0.641791 | from django.conf.urls import url
from . import views
app_name = 'reports'
urlpatterns = [
url(r'^graph/', views.statistics, name='graph'),
url(r'^csv_export/', views.csv_export, name='csv_export'),
]
| true | true |
790082a1c02ad39abeb0db3be487b020fa424e09 | 7,292 | py | Python | gym_collision_avoidance/envs/policies/SOCIALFORCEPolicy.py | meghdeepj/Social-Navigation-Simulator | 806d304081bf5ff4fc7a0a58defb050627375865 | [
"MIT"
] | 3 | 2021-12-16T05:39:14.000Z | 2022-02-25T06:07:51.000Z | gym_collision_avoidance/envs/policies/SOCIALFORCEPolicy.py | meghdeepj/Social-Navigation-Simulator | 806d304081bf5ff4fc7a0a58defb050627375865 | [
"MIT"
] | null | null | null | gym_collision_avoidance/envs/policies/SOCIALFORCEPolicy.py | meghdeepj/Social-Navigation-Simulator | 806d304081bf5ff4fc7a0a58defb050627375865 | [
"MIT"
] | 1 | 2021-11-09T18:04:01.000Z | 2021-11-09T18:04:01.000Z | import os
import math
import sys
import torch
import numpy as np
from gym_collision_avoidance.envs.policies.InternalPolicy import InternalPolicy
from gym_collision_avoidance.envs import Config
from gym_collision_avoidance.envs.util import *
from gym_collision_avoidance.envs.policies import socialforce
import copy
import argparse
# Filter list by Boolean list
# Using itertools.compress
from itertools import compress
class SOCIALFORCEPolicy(InternalPolicy):
def __init__(self):
InternalPolicy.__init__(self, str="SOCIALFORCE")
self.dt = Config.DT
self.obs_seq_len = 8
self.is_init = False
def init(self,agents):
self.total_agents_num = [None]*self.n_agents
self.is_init = True
def find_next_action(self, obs, agents, i , full_agent_list = None, active_agent_mask = None):
agent_index = i
#check if elements before index contains non active agents, if yes, remove them, thus calculate the index shift
before_index = np.array(active_agent_mask)[:agent_index]
#see how many non active agents are before index, minus them calculate index shift
agent_index = agent_index - len( before_index[ before_index==False ] )
agents = list(compress(full_agent_list, active_agent_mask))
observation_array = [] #observation array for social force, consist of N row of agents, each row = vector (x, y, v_x, v_y, d_x, d_y, [tau])
if not self.is_init: #Execute one time per init (complete simulation iteration)
self.n_agents = len(agents)
self.init(agents)
#initialize the observation vector because when starts, social force seems to require a starting vel for agents to move
for a in range(self.n_agents):
pos_difference = agents[a].goal_global_frame - agents[a].pos_global_frame
dist_next_waypoint = ( pos_difference / (np.linalg.norm( pos_difference ,ord=1)+0.0000001) ) * ( agents[a].pref_speed )
vel_next_waypoint = dist_next_waypoint
observation_array.append( [ agents[a].pos_global_frame[0], agents[a].pos_global_frame[1], vel_next_waypoint[0], vel_next_waypoint[1], agents[a].goal_global_frame[0], agents[a].goal_global_frame[1] ] )
else:
##added for dynamic num of agents compatibility
self.n_agents = len(agents)
self.init(agents)
for a in range(self.n_agents):
if agents[a].speed_global_frame<= agents[a].pref_speed/3:
pos_difference = agents[a].goal_global_frame - agents[a].pos_global_frame
dist_next_waypoint = ( pos_difference / (np.linalg.norm( pos_difference ,ord=1)+0.0000001) ) * ( agents[a].pref_speed )
vel_next_waypoint = dist_next_waypoint
observation_array.append( [ agents[a].pos_global_frame[0], agents[a].pos_global_frame[1], vel_next_waypoint[0], vel_next_waypoint[1], agents[a].goal_global_frame[0], agents[a].goal_global_frame[1] ] )
else:
observation_array.append( [ agents[a].pos_global_frame[0], agents[a].pos_global_frame[1], agents[a].vel_global_frame[0], agents[a].vel_global_frame[1], agents[a].goal_global_frame[0], agents[a].goal_global_frame[1] ] )
#print("goal")
#print(agents[agent_index].goal_global_frame)
initial_state = np.array( observation_array )
s=None
#s = socialforce.Simulator(initial_state, delta_t=0.1)
s = socialforce.Simulator(initial_state, delta_t=0.1)
states = np.stack([s.step().state.copy() for _ in range(1)]) #step one time only
#print("states")
#print(states)
next_waypoint_x = states[:, agent_index, 0][0]
next_waypoint_y = states[:, agent_index, 1][0]
next_waypoint_vel_x = states[:, agent_index, 2][0]
next_waypoint_vel_y = states[:, agent_index, 3][0]
self.next_waypoint = np.array( [ next_waypoint_x , next_waypoint_y ] )
goal_direction = self.next_waypoint - agents[agent_index].pos_global_frame
self.dist_to_goal = math.sqrt(goal_direction[0]**2 + goal_direction[1]**2)
if self.dist_to_goal > 1e-8:
ref_prll = goal_direction / agents[agent_index].dist_to_goal
else:
ref_prll = goal_direction
ref_orth = np.array([-ref_prll[1], ref_prll[0]]) # rotate by 90 deg
ref_prll_angle_global_frame = np.arctan2(ref_prll[1],
ref_prll[0])
heading_ego_frame = wrap( agents[agent_index].heading_global_frame -
ref_prll_angle_global_frame)
vel_global_frame = np.array( [ next_waypoint_vel_x , next_waypoint_vel_y ] )#( self.next_waypoint - agents[agent_index].pos_global_frame) / agents[agent_index].dt_nominal
speed_global_frame = np.linalg.norm(vel_global_frame)
if speed_global_frame > agents[agent_index].pref_speed: speed_global_frame = agents[agent_index].pref_speed
#But in reality, the format of action is [speed, heading_delta]
action = np.array([speed_global_frame, -heading_ego_frame])
#print("action")
#print(action)
return action
#agents[agent_index].set_state( next_waypoint_x , next_waypoint_y, next_waypoint_vel_x, next_waypoint_vel_y )
#resultant_speed_global_frame = agents[agent_index].speed_global_frame
#resultant_delta_heading_global_frame = agents[agent_index].delta_heading_global_frame
###########################################################POSITION APPROACH##########################################################################
## print("position")
## print(agents[agent_index].pos_global_frame)
## next_waypoint_x = states[:, agent_index, 0][0]
## next_waypoint_y = states[:, agent_index, 1][0]
##
## next_waypoint = np.array( [ next_waypoint_x, next_waypoint_y ] )
## print("next_waypoint")
## print(next_waypoint)
##
##
##
## pos_difference = next_waypoint - agents[agent_index].pos_global_frame
## dist_next_waypoint = ( pos_difference / (np.linalg.norm( pos_difference ,ord=1)+0.0000001) ) * ( agents[agent_index].pref_speed * 0.1)
##
## position_x = agents[agent_index].pos_global_frame[0] + dist_next_waypoint[0]
## position_y = agents[agent_index].pos_global_frame[1] + dist_next_waypoint[1]
## agents[agent_index].set_state( position_x , position_y )
##
## resultant_speed_global_frame = agents[agent_index].speed_global_frame
## resultant_delta_heading_global_frame = agents[agent_index].delta_heading_global_frame
#Although documentation and code comment mentioned that action is consisted with [heading delta, speed]
#But in reality, the format of action is [speed, heading_delta]
###########################################################################################################################################
| 44.193939 | 242 | 0.63124 | import os
import math
import sys
import torch
import numpy as np
from gym_collision_avoidance.envs.policies.InternalPolicy import InternalPolicy
from gym_collision_avoidance.envs import Config
from gym_collision_avoidance.envs.util import *
from gym_collision_avoidance.envs.policies import socialforce
import copy
import argparse
from itertools import compress
class SOCIALFORCEPolicy(InternalPolicy):
def __init__(self):
InternalPolicy.__init__(self, str="SOCIALFORCE")
self.dt = Config.DT
self.obs_seq_len = 8
self.is_init = False
def init(self,agents):
self.total_agents_num = [None]*self.n_agents
self.is_init = True
def find_next_action(self, obs, agents, i , full_agent_list = None, active_agent_mask = None):
agent_index = i
before_index = np.array(active_agent_mask)[:agent_index]
agent_index = agent_index - len( before_index[ before_index==False ] )
agents = list(compress(full_agent_list, active_agent_mask))
observation_array = []
if not self.is_init:
self.n_agents = len(agents)
self.init(agents)
for a in range(self.n_agents):
pos_difference = agents[a].goal_global_frame - agents[a].pos_global_frame
dist_next_waypoint = ( pos_difference / (np.linalg.norm( pos_difference ,ord=1)+0.0000001) ) * ( agents[a].pref_speed )
vel_next_waypoint = dist_next_waypoint
observation_array.append( [ agents[a].pos_global_frame[0], agents[a].pos_global_frame[1], vel_next_waypoint[0], vel_next_waypoint[1], agents[a].goal_global_frame[0], agents[a].goal_global_frame[1] ] )
else:
self.init(agents)
for a in range(self.n_agents):
if agents[a].speed_global_frame<= agents[a].pref_speed/3:
pos_difference = agents[a].goal_global_frame - agents[a].pos_global_frame
dist_next_waypoint = ( pos_difference / (np.linalg.norm( pos_difference ,ord=1)+0.0000001) ) * ( agents[a].pref_speed )
vel_next_waypoint = dist_next_waypoint
observation_array.append( [ agents[a].pos_global_frame[0], agents[a].pos_global_frame[1], vel_next_waypoint[0], vel_next_waypoint[1], agents[a].goal_global_frame[0], agents[a].goal_global_frame[1] ] )
else:
observation_array.append( [ agents[a].pos_global_frame[0], agents[a].pos_global_frame[1], agents[a].vel_global_frame[0], agents[a].vel_global_frame[1], agents[a].goal_global_frame[0], agents[a].goal_global_frame[1] ] )
initial_state = np.array( observation_array )
s=None
s = socialforce.Simulator(initial_state, delta_t=0.1)
states = np.stack([s.step().state.copy() for _ in range(1)])
next_waypoint_x = states[:, agent_index, 0][0]
next_waypoint_y = states[:, agent_index, 1][0]
next_waypoint_vel_x = states[:, agent_index, 2][0]
next_waypoint_vel_y = states[:, agent_index, 3][0]
self.next_waypoint = np.array( [ next_waypoint_x , next_waypoint_y ] )
goal_direction = self.next_waypoint - agents[agent_index].pos_global_frame
self.dist_to_goal = math.sqrt(goal_direction[0]**2 + goal_direction[1]**2)
if self.dist_to_goal > 1e-8:
ref_prll = goal_direction / agents[agent_index].dist_to_goal
else:
ref_prll = goal_direction
ref_orth = np.array([-ref_prll[1], ref_prll[0]])
ref_prll_angle_global_frame = np.arctan2(ref_prll[1],
ref_prll[0])
heading_ego_frame = wrap( agents[agent_index].heading_global_frame -
ref_prll_angle_global_frame)
vel_global_frame = np.array( [ next_waypoint_vel_x , next_waypoint_vel_y ] )
speed_global_frame = np.linalg.norm(vel_global_frame)
if speed_global_frame > agents[agent_index].pref_speed: speed_global_frame = agents[agent_index].pref_speed
action = np.array([speed_global_frame, -heading_ego_frame])
return action
| true | true |
7900838e788ae6b4d57aa7073fd1b705cd756d64 | 3,743 | py | Python | deeptutor/scripts/run.py | ManavR123/cs_285_project | 2a0496345c2a8de06338ae7e44ca3775a9291f4c | [
"MIT"
] | null | null | null | deeptutor/scripts/run.py | ManavR123/cs_285_project | 2a0496345c2a8de06338ae7e44ca3775a9291f4c | [
"MIT"
] | null | null | null | deeptutor/scripts/run.py | ManavR123/cs_285_project | 2a0496345c2a8de06338ae7e44ca3775a9291f4c | [
"MIT"
] | null | null | null | import os
import pickle
import numpy as np
from tqdm import tqdm
from deeptutor.envs.DashEnv import *
from deeptutor.envs.EFCEnv import EFCEnv
from deeptutor.envs.HRLEnv import *
from deeptutor.infrastructure.utils import *
from deeptutor.tutors.LeitnerTutor import LeitnerTutor
from deeptutor.tutors.RandTutor import RandTutor
from deeptutor.tutors.PPOTutor import PPOTutor
from deeptutor.tutors.SACTutor import SACTutor
from deeptutor.tutors.DQNTutor import DQNTutor
from deeptutor.tutors.MLPTRPOTutor import MLPTRPOTutor
from deeptutor.tutors.GRUTRPOTutor import GRUTRPOTutor
from deeptutor.tutors.SuperMnemoTutor import SuperMnemoTutor
from deeptutor.tutors.ThresholdTutor import ThresholdTutor
def load_rewards(tutor_name, data_dir):
filename = os.path.join(data_dir, f"{tutor_name}_reward_logs.pkl")
if not os.path.exists(filename):
return {}
with open(filename, "rb") as f:
return pickle.load(f)["rewards"]
def main():
override = True # override existing data
data_dir = os.path.join(os.getcwd(), "data")
n_steps = 200
n_items = 30
const_delay = 5
discount = 0.99
n_reps = 10
n_eps = 100
env_kwargs = {
"n_items": n_items,
"n_steps": n_steps,
"discount": discount,
"sample_delay": sample_const_delay(const_delay),
}
reward_funcs = [
"likelihood",
"log_likelihood"
]
envs = [
("EFC", EFCEnv),
("HLR", HLREnv),
("DASH", DASHEnv)
]
tutor_builders = [
# ("Random", RandTutor),
# ("Leitner", LeitnerTutor),
# ("SuperMnemo", SuperMnemoTutor),
# ("Threshold", ThresholdTutor),
# ("MLPTRPO", MLPTRPOTutor),
# ("GRUTRPO", GRUTRPOTutor),
# ("PPO", PPOTutor),
("DQN", DQNTutor),
]
rl_tutors = [MLPTRPOTutor, GRUTRPOTutor, PPOTutor, DQNTutor]
reward_logs = {
"n_steps": n_steps,
"n_items": n_items,
"discount": discount,
"const_delay": const_delay,
"n_reps": n_reps,
"n_eps": n_eps,
"reward_funcs": reward_funcs,
}
for i, (tutor_name, build_tutor) in enumerate(tutor_builders):
print(f"Training {tutor_name}")
rewards = load_rewards(tutor_name, data_dir)
for h, (base_env_name, base_env) in enumerate(envs):
for m, reward_func in enumerate(reward_funcs):
env_name = (
base_env_name + "-" + ("L" if reward_func == "likelihood" else "LL")
)
print(f"Environment: {env_name}")
if env_name in rewards.keys() and not override:
print("Skipping\n")
continue
R = np.zeros((n_eps, n_reps))
for j in tqdm(range(n_reps)):
np.random.seed(j)
env = base_env(**env_kwargs, reward_func=reward_func)
if build_tutor in rl_tutors:
rl_env = make_rl_student_env(env)
agent = build_tutor(n_items)
R[:, j] = agent.train(rl_env, n_eps=n_eps, seed=j)
else:
if "Thresh" in tutor_name:
agent = build_tutor(n_items, env=env)
else:
agent = build_tutor(n_items)
R[:, j] = agent.train(env, n_eps=n_eps)
rewards[env_name] = R
reward_logs["rewards"] = rewards
with open(os.path.join(data_dir, f"{tutor_name}_reward_logs.pkl"), "wb") as f:
pickle.dump(reward_logs, f, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| 33.419643 | 94 | 0.583222 | import os
import pickle
import numpy as np
from tqdm import tqdm
from deeptutor.envs.DashEnv import *
from deeptutor.envs.EFCEnv import EFCEnv
from deeptutor.envs.HRLEnv import *
from deeptutor.infrastructure.utils import *
from deeptutor.tutors.LeitnerTutor import LeitnerTutor
from deeptutor.tutors.RandTutor import RandTutor
from deeptutor.tutors.PPOTutor import PPOTutor
from deeptutor.tutors.SACTutor import SACTutor
from deeptutor.tutors.DQNTutor import DQNTutor
from deeptutor.tutors.MLPTRPOTutor import MLPTRPOTutor
from deeptutor.tutors.GRUTRPOTutor import GRUTRPOTutor
from deeptutor.tutors.SuperMnemoTutor import SuperMnemoTutor
from deeptutor.tutors.ThresholdTutor import ThresholdTutor
def load_rewards(tutor_name, data_dir):
filename = os.path.join(data_dir, f"{tutor_name}_reward_logs.pkl")
if not os.path.exists(filename):
return {}
with open(filename, "rb") as f:
return pickle.load(f)["rewards"]
def main():
override = True
data_dir = os.path.join(os.getcwd(), "data")
n_steps = 200
n_items = 30
const_delay = 5
discount = 0.99
n_reps = 10
n_eps = 100
env_kwargs = {
"n_items": n_items,
"n_steps": n_steps,
"discount": discount,
"sample_delay": sample_const_delay(const_delay),
}
reward_funcs = [
"likelihood",
"log_likelihood"
]
envs = [
("EFC", EFCEnv),
("HLR", HLREnv),
("DASH", DASHEnv)
]
tutor_builders = [
("DQN", DQNTutor),
]
rl_tutors = [MLPTRPOTutor, GRUTRPOTutor, PPOTutor, DQNTutor]
reward_logs = {
"n_steps": n_steps,
"n_items": n_items,
"discount": discount,
"const_delay": const_delay,
"n_reps": n_reps,
"n_eps": n_eps,
"reward_funcs": reward_funcs,
}
for i, (tutor_name, build_tutor) in enumerate(tutor_builders):
print(f"Training {tutor_name}")
rewards = load_rewards(tutor_name, data_dir)
for h, (base_env_name, base_env) in enumerate(envs):
for m, reward_func in enumerate(reward_funcs):
env_name = (
base_env_name + "-" + ("L" if reward_func == "likelihood" else "LL")
)
print(f"Environment: {env_name}")
if env_name in rewards.keys() and not override:
print("Skipping\n")
continue
R = np.zeros((n_eps, n_reps))
for j in tqdm(range(n_reps)):
np.random.seed(j)
env = base_env(**env_kwargs, reward_func=reward_func)
if build_tutor in rl_tutors:
rl_env = make_rl_student_env(env)
agent = build_tutor(n_items)
R[:, j] = agent.train(rl_env, n_eps=n_eps, seed=j)
else:
if "Thresh" in tutor_name:
agent = build_tutor(n_items, env=env)
else:
agent = build_tutor(n_items)
R[:, j] = agent.train(env, n_eps=n_eps)
rewards[env_name] = R
reward_logs["rewards"] = rewards
with open(os.path.join(data_dir, f"{tutor_name}_reward_logs.pkl"), "wb") as f:
pickle.dump(reward_logs, f, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| true | true |
790083cffbbe481ab01c97be8c70d2138b48d2bd | 765 | py | Python | UFV---Python/Trabalho Mat. Disc/grafo_4675.py | Vith-MCB/UFV | 9d96fecdc9ffde2563f9f397bcdb39d95aaf7e69 | [
"MIT"
] | 1 | 2022-01-25T16:52:26.000Z | 2022-01-25T16:52:26.000Z | UFV---Python/Trabalho Mat. Disc/grafo_4675.py | Vith-MCB/UFV | 9d96fecdc9ffde2563f9f397bcdb39d95aaf7e69 | [
"MIT"
] | null | null | null | UFV---Python/Trabalho Mat. Disc/grafo_4675.py | Vith-MCB/UFV | 9d96fecdc9ffde2563f9f397bcdb39d95aaf7e69 | [
"MIT"
] | null | null | null | n = int(input())
c = int(input())
lista = input().split()
graph = [[0 for i in range(n)] for j in range(n)]
cont = 0
for i in range(n):
for j in range(n):
graph[i][j] = int(lista[cont])
cont += 1
if i == j:
graph[i][j] = 0
listaMemoria = [c]
contaminados = []
contaminados.append(c)
k = 1
while True:
veLinha = listaMemoria[-1]
check = 0
for i in range(n):
if graph[veLinha][i] == 1:
graph[veLinha][i] = 0
graph[i][veLinha] = 0
listaMemoria.append(i)
contaminados.append(i)
check = 1
k += 1
break
if check == 0:
if listaMemoria[-1] == c:
break
else:
listaMemoria.pop()
print(k) | 23.181818 | 49 | 0.48366 | n = int(input())
c = int(input())
lista = input().split()
graph = [[0 for i in range(n)] for j in range(n)]
cont = 0
for i in range(n):
for j in range(n):
graph[i][j] = int(lista[cont])
cont += 1
if i == j:
graph[i][j] = 0
listaMemoria = [c]
contaminados = []
contaminados.append(c)
k = 1
while True:
veLinha = listaMemoria[-1]
check = 0
for i in range(n):
if graph[veLinha][i] == 1:
graph[veLinha][i] = 0
graph[i][veLinha] = 0
listaMemoria.append(i)
contaminados.append(i)
check = 1
k += 1
break
if check == 0:
if listaMemoria[-1] == c:
break
else:
listaMemoria.pop()
print(k) | true | true |
7900842d1de4c679c0408de823a276a7a34c2f18 | 514 | py | Python | api/migrations/0003_alter_profile_year.py | EricLiclair/testapi | f8a139a0fe4e3ada0256ef62a4603c3e9f682b3e | [
"MIT"
] | 1 | 2021-05-19T18:06:14.000Z | 2021-05-19T18:06:14.000Z | api/migrations/0003_alter_profile_year.py | EricLiclair/testapi | f8a139a0fe4e3ada0256ef62a4603c3e9f682b3e | [
"MIT"
] | null | null | null | api/migrations/0003_alter_profile_year.py | EricLiclair/testapi | f8a139a0fe4e3ada0256ef62a4603c3e9f682b3e | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-05-19 08:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20210519_0849'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='year',
field=models.CharField(blank=True, choices=[('FR', 'Freshman'), ('SO', 'Sophomore'), ('JR', 'Junior'), ('SR', 'Senior')], default='FR', max_length=2, verbose_name='year'),
),
]
| 27.052632 | 183 | 0.589494 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20210519_0849'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='year',
field=models.CharField(blank=True, choices=[('FR', 'Freshman'), ('SO', 'Sophomore'), ('JR', 'Junior'), ('SR', 'Senior')], default='FR', max_length=2, verbose_name='year'),
),
]
| true | true |
790084f9ace02acaaccaabb3d8aea61f0232d7d6 | 6,536 | py | Python | WikidataClaims/wikidata_utils.py | gabrielmaia7/WDV | 13810bd80e2c64956018b5ae508f6eb582deaf3c | [
"CC0-1.0"
] | null | null | null | WikidataClaims/wikidata_utils.py | gabrielmaia7/WDV | 13810bd80e2c64956018b5ae508f6eb582deaf3c | [
"CC0-1.0"
] | null | null | null | WikidataClaims/wikidata_utils.py | gabrielmaia7/WDV | 13810bd80e2c64956018b5ae508f6eb582deaf3c | [
"CC0-1.0"
] | null | null | null | import json
import random
import uuid
import numpy as np
import time
import requests
import traceback
import pdb
import math
import ast
import pandas as pd
import pickle
from qwikidata.linked_data_interface import get_entity_dict_from_api
from qwikidata.sparql import return_sparql_query_results
from urllib3.exceptions import MaxRetryError, ConnectionError
from qwikidata.linked_data_interface import LdiResponseNotOk
import hashlib
class CachedWikidataAPI():
    """Wikidata API/SPARQL client with a persistent pickle-backed cache.

    Entity documents and SPARQL results are memoised in ``entity_cache``
    (a plain dict) and flushed to ``cache_path`` every
    ``save_every_x_queries`` cache updates.
    """

    def __init__(self, cache_path='entity_cache.p', save_every_x_queries=1):
        self.save_every_x_queries = save_every_x_queries
        self.x_queries_passed = 0
        # Preferred languages for labels/descriptions/aliases, best first.
        self.languages = ['en', 'fr', 'es', 'pt', 'pt-br', 'it', 'de']
        self.cache_path = cache_path
        try:
            with open(self.cache_path, 'rb') as f:
                self.entity_cache = pickle.load(f)
        except FileNotFoundError:
            # First run: start from an empty cache.
            self.entity_cache = {}

    def get_unique_id_from_str(self, my_str):
        """Return an MD5 hex digest of *my_str* (used as a cache key)."""
        return hashlib.md5(str.encode(my_str)).hexdigest()

    def save_entity_cache(self, force=False):
        """Flush the cache to disk every ``save_every_x_queries`` calls.

        With ``force=True`` the cache is written on this call regardless of
        how many updates have accumulated.
        """
        if force:
            self.x_queries_passed = self.save_every_x_queries
        self.x_queries_passed = self.x_queries_passed + 1
        if self.x_queries_passed >= self.save_every_x_queries:
            with open(self.cache_path, 'wb') as f:
                pickle.dump(self.entity_cache, f)
            self.x_queries_passed = 0

    def get_entity(self, item_id):
        """Fetch (or serve from cache) the entity dict for *item_id*.

        Retries indefinitely on transient network errors; deleted entities
        are cached and returned as the string ``'deleted'``.
        """
        if item_id in self.entity_cache:
            return self.entity_cache[item_id]
        while True:
            try:
                entity = get_entity_dict_from_api(item_id)
                self.entity_cache[item_id] = entity
                self.save_entity_cache()
                return entity
            except (ConnectionError, MaxRetryError):
                # Transient network problem: back off briefly and retry.
                time.sleep(1)
                continue
            except LdiResponseNotOk:
                # The API reports the entity no longer exists.
                self.entity_cache[item_id] = 'deleted'
                self.save_entity_cache()
                return 'deleted'

    def get_label(self, item, non_language_set=False):
        """Return ``(label, language)`` for an entity id or entity dict.

        Falls back to any available language when ``non_language_set`` is
        true, and to ``('no-label', 'none')`` when no label exists.
        NOTE(review): a dict *item* without 'labels'/'lemmas' raises
        UnboundLocalError — presumably callers always pass one of the two.
        """
        if type(item) == str:
            entity = self.get_entity(item)
            if entity == 'deleted':
                return (entity, 'none')
            labels = entity['labels' if 'labels' in entity else 'lemmas']
        elif type(item) == dict:
            if 'labels' in item:
                labels = item['labels']
            elif 'lemmas' in item:
                labels = item['lemmas']
        for l in self.languages:
            if l in labels:
                return (labels[l]['value'], l)
        if non_language_set:
            all_labels = list(labels.keys())
            if len(all_labels) > 0:
                return (labels[all_labels[0]]['value'], all_labels[0])
        return ('no-label', 'none')

    def get_desc(self, item, non_language_set=False):
        """Return ``(description, language)``; mirrors :meth:`get_label`."""
        if type(item) == str:
            entity = self.get_entity(item)
            if entity == 'deleted':
                return (entity, 'none')
            descriptions = entity['descriptions']
        elif type(item) == dict:
            if 'descriptions' in item:
                descriptions = item['descriptions']
        for l in self.languages:
            if l in descriptions:
                return (descriptions[l]['value'], l)
        if non_language_set:
            all_descriptions = list(descriptions.keys())
            if len(all_descriptions) > 0:
                return (descriptions[all_descriptions[0]]['value'],
                        all_descriptions[0])
        return ('no-desc', 'none')

    def get_alias(self, item, non_language_set=False):
        """Return ``(alias_list, language)`` for an entity id or dict.

        Returns ``('no-alias', 'none')`` when the entity has no aliases.
        """
        if type(item) == str:
            entity = self.get_entity(item)
            if entity == 'deleted':
                return ([entity], 'none')
            aliases = entity['aliases']
        elif type(item) == dict:
            if 'aliases' in item:
                aliases = item['aliases']
        for l in self.languages:
            if l in aliases:
                return ([alias['value'] for alias in aliases[l]], l)
        if non_language_set:
            all_aliases = list(aliases.keys())
            if len(all_aliases) > 0:
                # Bug fix: each per-language entry is a *list* of
                # {'value': ...} dicts, so it must be unpacked with a
                # comprehension.  The previous code indexed the list with
                # ['value'] (a TypeError) and left the correct return
                # unreachable dead code.
                first_lang = all_aliases[0]
                return ([alias['value'] for alias in aliases[first_lang]],
                        first_lang)
        return ('no-alias', 'none')

    def get_datatype(self, item):
        """Return the 'datatype' of a property entity, or 'none'."""
        try:
            if type(item) == str:
                entity = self.get_entity(item)
                if entity == 'deleted':
                    return entity
                datatype = entity['datatype']
            elif type(item) == dict:
                datatype = item['datatype']
            return datatype
        except KeyError:
            return 'none'

    def get_claim_values_of(self, item, property_id):
        """Return the ids of the values of *property_id* claims on *item*.

        Assumes the claims are entity-valued (``datavalue.value.id``) —
        non-item datatypes would raise KeyError/TypeError here.
        """
        if type(item) == str:
            entity = self.get_entity(item)
            if entity == 'deleted':
                return entity
            claims = entity['claims']
        elif type(item) == dict:
            claims = item['claims']
        if property_id in claims:
            instance_of_claims = claims[property_id]
            return [i['mainsnak']['datavalue']['value']['id']
                    for i in instance_of_claims]
        else:
            return []

    def query_sparql_endpoint(self, sparql_query):
        """Run *sparql_query* against the Wikidata endpoint, with caching.

        Retries on throttling (429) and gateway timeout (504); raises on any
        other non-200 response.
        """
        sparql_query_id = self.get_unique_id_from_str(sparql_query)
        if sparql_query_id in self.entity_cache:
            return self.entity_cache[sparql_query_id]
        wikidata_sparql_url = 'https://query.wikidata.org/sparql'
        try:
            while True:
                res = requests.get(
                    wikidata_sparql_url,
                    params={"query": sparql_query, "format": "json"})
                if res.status_code in (429, 504):
                    # Throttled or gateway timeout: wait and retry.
                    time.sleep(1)
                    continue
                elif res.status_code == 200:
                    res = res.json()
                    self.entity_cache[sparql_query_id] = res
                    self.save_entity_cache()
                    return res
                else:
                    print(res.status_code)
                    raise Exception
        except json.JSONDecodeError as e:
            print(res, res.__dict__)
            raise e
| 37.780347 | 109 | 0.54896 | import json
import random
import uuid
import numpy as np
import time
import requests
import traceback
import pdb
import math
import ast
import pandas as pd
import pickle
from qwikidata.linked_data_interface import get_entity_dict_from_api
from qwikidata.sparql import return_sparql_query_results
from urllib3.exceptions import MaxRetryError, ConnectionError
from qwikidata.linked_data_interface import LdiResponseNotOk
import hashlib
class CachedWikidataAPI():
    """Wikidata API/SPARQL client with a persistent pickle-backed cache.

    Entity documents and SPARQL results are memoised in ``entity_cache``
    (a plain dict) and flushed to ``cache_path`` every
    ``save_every_x_queries`` cache updates.
    """

    def __init__(self, cache_path='entity_cache.p', save_every_x_queries=1):
        self.save_every_x_queries = save_every_x_queries
        self.x_queries_passed = 0
        # Preferred languages for labels/descriptions/aliases, best first.
        self.languages = ['en', 'fr', 'es', 'pt', 'pt-br', 'it', 'de']
        self.cache_path = cache_path
        try:
            with open(self.cache_path, 'rb') as f:
                self.entity_cache = pickle.load(f)
        except FileNotFoundError:
            # First run: start from an empty cache.
            self.entity_cache = {}

    def get_unique_id_from_str(self, my_str):
        """Return an MD5 hex digest of *my_str* (used as a cache key)."""
        return hashlib.md5(str.encode(my_str)).hexdigest()

    def save_entity_cache(self, force=False):
        """Flush the cache to disk every ``save_every_x_queries`` calls.

        With ``force=True`` the cache is written on this call regardless of
        how many updates have accumulated.
        """
        if force:
            self.x_queries_passed = self.save_every_x_queries
        self.x_queries_passed = self.x_queries_passed + 1
        if self.x_queries_passed >= self.save_every_x_queries:
            with open(self.cache_path, 'wb') as f:
                pickle.dump(self.entity_cache, f)
            self.x_queries_passed = 0

    def get_entity(self, item_id):
        """Fetch (or serve from cache) the entity dict for *item_id*.

        Retries indefinitely on transient network errors; deleted entities
        are cached and returned as the string ``'deleted'``.
        """
        if item_id in self.entity_cache:
            return self.entity_cache[item_id]
        while True:
            try:
                entity = get_entity_dict_from_api(item_id)
                self.entity_cache[item_id] = entity
                self.save_entity_cache()
                return entity
            except (ConnectionError, MaxRetryError):
                # Transient network problem: back off briefly and retry.
                time.sleep(1)
                continue
            except LdiResponseNotOk:
                # The API reports the entity no longer exists.
                self.entity_cache[item_id] = 'deleted'
                self.save_entity_cache()
                return 'deleted'

    def get_label(self, item, non_language_set=False):
        """Return ``(label, language)`` for an entity id or entity dict.

        Falls back to any available language when ``non_language_set`` is
        true, and to ``('no-label', 'none')`` when no label exists.
        NOTE(review): a dict *item* without 'labels'/'lemmas' raises
        UnboundLocalError — presumably callers always pass one of the two.
        """
        if type(item) == str:
            entity = self.get_entity(item)
            if entity == 'deleted':
                return (entity, 'none')
            labels = entity['labels' if 'labels' in entity else 'lemmas']
        elif type(item) == dict:
            if 'labels' in item:
                labels = item['labels']
            elif 'lemmas' in item:
                labels = item['lemmas']
        for l in self.languages:
            if l in labels:
                return (labels[l]['value'], l)
        if non_language_set:
            all_labels = list(labels.keys())
            if len(all_labels) > 0:
                return (labels[all_labels[0]]['value'], all_labels[0])
        return ('no-label', 'none')

    def get_desc(self, item, non_language_set=False):
        """Return ``(description, language)``; mirrors :meth:`get_label`."""
        if type(item) == str:
            entity = self.get_entity(item)
            if entity == 'deleted':
                return (entity, 'none')
            descriptions = entity['descriptions']
        elif type(item) == dict:
            if 'descriptions' in item:
                descriptions = item['descriptions']
        for l in self.languages:
            if l in descriptions:
                return (descriptions[l]['value'], l)
        if non_language_set:
            all_descriptions = list(descriptions.keys())
            if len(all_descriptions) > 0:
                return (descriptions[all_descriptions[0]]['value'],
                        all_descriptions[0])
        return ('no-desc', 'none')

    def get_alias(self, item, non_language_set=False):
        """Return ``(alias_list, language)`` for an entity id or dict.

        Returns ``('no-alias', 'none')`` when the entity has no aliases.
        """
        if type(item) == str:
            entity = self.get_entity(item)
            if entity == 'deleted':
                return ([entity], 'none')
            aliases = entity['aliases']
        elif type(item) == dict:
            if 'aliases' in item:
                aliases = item['aliases']
        for l in self.languages:
            if l in aliases:
                return ([alias['value'] for alias in aliases[l]], l)
        if non_language_set:
            all_aliases = list(aliases.keys())
            if len(all_aliases) > 0:
                # Bug fix: each per-language entry is a *list* of
                # {'value': ...} dicts, so it must be unpacked with a
                # comprehension.  The previous code indexed the list with
                # ['value'] (a TypeError) and left the correct return
                # unreachable dead code.
                first_lang = all_aliases[0]
                return ([alias['value'] for alias in aliases[first_lang]],
                        first_lang)
        return ('no-alias', 'none')

    def get_datatype(self, item):
        """Return the 'datatype' of a property entity, or 'none'."""
        try:
            if type(item) == str:
                entity = self.get_entity(item)
                if entity == 'deleted':
                    return entity
                datatype = entity['datatype']
            elif type(item) == dict:
                datatype = item['datatype']
            return datatype
        except KeyError:
            return 'none'

    def get_claim_values_of(self, item, property_id):
        """Return the ids of the values of *property_id* claims on *item*.

        Assumes the claims are entity-valued (``datavalue.value.id``) —
        non-item datatypes would raise KeyError/TypeError here.
        """
        if type(item) == str:
            entity = self.get_entity(item)
            if entity == 'deleted':
                return entity
            claims = entity['claims']
        elif type(item) == dict:
            claims = item['claims']
        if property_id in claims:
            instance_of_claims = claims[property_id]
            return [i['mainsnak']['datavalue']['value']['id']
                    for i in instance_of_claims]
        else:
            return []

    def query_sparql_endpoint(self, sparql_query):
        """Run *sparql_query* against the Wikidata endpoint, with caching.

        Retries on throttling (429) and gateway timeout (504); raises on any
        other non-200 response.
        """
        sparql_query_id = self.get_unique_id_from_str(sparql_query)
        if sparql_query_id in self.entity_cache:
            return self.entity_cache[sparql_query_id]
        wikidata_sparql_url = 'https://query.wikidata.org/sparql'
        try:
            while True:
                res = requests.get(
                    wikidata_sparql_url,
                    params={"query": sparql_query, "format": "json"})
                if res.status_code in (429, 504):
                    # Throttled or gateway timeout: wait and retry.
                    time.sleep(1)
                    continue
                elif res.status_code == 200:
                    res = res.json()
                    self.entity_cache[sparql_query_id] = res
                    self.save_entity_cache()
                    return res
                else:
                    print(res.status_code)
                    raise Exception
        except json.JSONDecodeError as e:
            print(res, res.__dict__)
            raise e
| true | true |
790085365ebd23f819887fcda5629c8a482ab07c | 428 | py | Python | python_learn/cli.py | Zhazhanan/python_learn | 8eb797ae4f85abbb31a9dadd570172538b05d68f | [
"MIT"
] | null | null | null | python_learn/cli.py | Zhazhanan/python_learn | 8eb797ae4f85abbb31a9dadd570172538b05d68f | [
"MIT"
] | null | null | null | python_learn/cli.py | Zhazhanan/python_learn | 8eb797ae4f85abbb31a9dadd570172538b05d68f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Console script for python_learn."""
import sys
import click
@click.command()
def main(args=None):
    """Console script for python_learn."""
    # Placeholder entry point; real CLI logic belongs in python_learn.cli.main.
    placeholder = (
        "Replace this message by putting your code into "
        "python_learn.cli.main"
    )
    click.echo(placeholder)
    click.echo("See click documentation at http://click.pocoo.org/")
    return 0


if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| 22.526316 | 68 | 0.649533 |
import sys
import click
@click.command()
def main(args=None):
    # Placeholder entry point; real CLI logic belongs in python_learn.cli.main.
    # (No docstring on purpose: click would surface it as --help text.)
    placeholder = (
        "Replace this message by putting your code into "
        "python_learn.cli.main"
    )
    click.echo(placeholder)
    click.echo("See click documentation at http://click.pocoo.org/")
    return 0


if __name__ == "__main__":
    sys.exit(main())
| true | true |
7900859c019d2d11547ae17267d55ba82a881c23 | 1,251 | py | Python | froide/georegion/admin.py | lanmarc77/froide | bddc8bb27c8a7c2a959003dda724194948bc381a | [
"MIT"
] | null | null | null | froide/georegion/admin.py | lanmarc77/froide | bddc8bb27c8a7c2a959003dda724194948bc381a | [
"MIT"
] | null | null | null | froide/georegion/admin.py | lanmarc77/froide | bddc8bb27c8a7c2a959003dda724194948bc381a | [
"MIT"
] | null | null | null | from django.contrib.gis import admin
from django import forms
from django.utils.translation import gettext_lazy as _
from treebeard.forms import movenodeform_factory
from froide.helper.admin_utils import ForeignKeyFilter
from froide.helper.forms import get_fk_raw_id_widget
from .models import GeoRegion
class GeoRegionAdminForm(movenodeform_factory(GeoRegion)):
    """Treebeard move-node form using a raw-id widget for the target node."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        raw_id_widget = get_fk_raw_id_widget(
            GeoRegion, admin.site, field_name='id'
        )
        # Swap the stock dropdown for an integer field backed by the
        # raw-id lookup widget.
        self.fields['_ref_node_id'] = forms.IntegerField(
            label=_("Relative to"),
            required=False,
            widget=raw_id_widget,
        )

    @classmethod
    def mk_dropdown_tree(cls, model, for_node=None):
        # The dropdown tree is never rendered, so skip building it entirely.
        return []
class GeoRegionMixin(object):
    """Shared admin configuration for GeoRegion model admins."""
    form = GeoRegionAdminForm
    search_fields = ['name', 'region_identifier']
    list_display = ('name', 'kind', 'kind_detail', 'region_identifier')
    list_filter = (
        'kind', 'kind_detail',
        ('part_of', ForeignKeyFilter),
    )
    raw_id_fields = ('part_of',)
    # Tree bookkeeping columns maintained by treebeard; not hand-editable.
    readonly_fields = ('depth', 'numchild', 'path')
class GeoRegionAdmin(GeoRegionMixin, admin.GeoModelAdmin):
    """Geo-enabled admin (map widget) combined with the shared config."""
    pass


admin.site.register(GeoRegion, GeoRegionAdmin)
| 26.617021 | 77 | 0.704237 | from django.contrib.gis import admin
from django import forms
from django.utils.translation import gettext_lazy as _
from treebeard.forms import movenodeform_factory
from froide.helper.admin_utils import ForeignKeyFilter
from froide.helper.forms import get_fk_raw_id_widget
from .models import GeoRegion
class GeoRegionAdminForm(movenodeform_factory(GeoRegion)):
    """Treebeard move-node form using a raw-id widget for the target node."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        raw_id_widget = get_fk_raw_id_widget(
            GeoRegion, admin.site, field_name='id'
        )
        # Swap the stock dropdown for an integer field backed by the
        # raw-id lookup widget.
        self.fields['_ref_node_id'] = forms.IntegerField(
            label=_("Relative to"),
            required=False,
            widget=raw_id_widget,
        )

    @classmethod
    def mk_dropdown_tree(cls, model, for_node=None):
        # The dropdown tree is never rendered, so skip building it entirely.
        return []
class GeoRegionMixin(object):
    """Shared admin configuration for GeoRegion model admins."""
    form = GeoRegionAdminForm
    search_fields = ['name', 'region_identifier']
    list_display = ('name', 'kind', 'kind_detail', 'region_identifier')
    list_filter = (
        'kind', 'kind_detail',
        ('part_of', ForeignKeyFilter),
    )
    raw_id_fields = ('part_of',)
    # Tree bookkeeping columns maintained by treebeard; not hand-editable.
    readonly_fields = ('depth', 'numchild', 'path')
class GeoRegionAdmin(GeoRegionMixin, admin.GeoModelAdmin):
    """Geo-enabled admin (map widget) combined with the shared config."""
    pass


admin.site.register(GeoRegion, GeoRegionAdmin)
| true | true |
790086155fac3be5e7a46c4cd0fb18fbd1b1b996 | 3,702 | py | Python | yardstick/dispatcher/http.py | kkltcjk/kklt | 5388eb439616a442dde496ef77ba6b71169369e0 | [
"Apache-2.0"
] | null | null | null | yardstick/dispatcher/http.py | kkltcjk/kklt | 5388eb439616a442dde496ef77ba6b71169369e0 | [
"Apache-2.0"
] | null | null | null | yardstick/dispatcher/http.py | kkltcjk/kklt | 5388eb439616a442dde496ef77ba6b71169369e0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# yardstick comment: this is a modified copy of
# ceilometer/ceilometer/dispatcher/http.py
from __future__ import absolute_import
import logging
import os
from oslo_serialization import jsonutils
import requests
from oslo_config import cfg
from yardstick.dispatcher.base import Base as DispatchBase
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
http_dispatcher_opts = [
cfg.StrOpt('target',
default='http://127.0.0.1:8000/results',
help='The target where the http request will be sent. '
'If this is not set, no data will be posted. For '
'example: target = http://hostname:1234/path'),
cfg.IntOpt('timeout',
default=5,
help='The max time in seconds to wait for a request to '
'timeout.'),
]
CONF.register_opts(http_dispatcher_opts, group="dispatcher_http")
class HttpDispatcher(DispatchBase):
    """Dispatcher class for posting data into a http target."""

    __dispatcher_type__ = "Http"

    def __init__(self, conf):
        super(HttpDispatcher, self).__init__(conf)
        self.headers = {'Content-type': 'application/json'}
        self.timeout = CONF.dispatcher_http.timeout
        self.target = CONF.dispatcher_http.target
        self.raw_result = []
        # Static run metadata; the buffered test results are attached as
        # "details" in flush_result_data().
        self.result = {
            "project_name": "yardstick",
            "description": "yardstick test cases result",
            "pod_name": os.environ.get('NODE_NAME', 'unknown'),
            "installer": os.environ.get('INSTALLER_TYPE', 'unknown'),
            "version": os.environ.get('YARDSTICK_VERSION', 'unknown')
        }

    def record_result_data(self, data):
        """Buffer one result record until flush_result_data() is called."""
        self.raw_result.append(data)

    def flush_result_data(self):
        """POST all buffered results to the configured HTTP target."""
        if self.target == '':
            # If the target was not set, do not do anything.
            # (Message fixed: the literals previously concatenated to
            # "...no data willbe posted.")
            LOG.error('Dispatcher target was not set, no data will '
                      'be posted.')
            return

        self.result["details"] = self.raw_result

        # Derive the test case name from the first record carrying a
        # scenario configuration.
        case_name = ""
        for v in self.raw_result:
            if isinstance(v, dict) and "scenario_cfg" in v:
                case_name = v["scenario_cfg"]["tc"]
                break
        if case_name == "":
            LOG.error('Test result : %s',
                      jsonutils.dump_as_bytes(self.result))
            LOG.error('The case_name cannot be found, no data will be posted.')
            return

        self.result["case_name"] = case_name

        try:
            LOG.debug('Test result : %s',
                      jsonutils.dump_as_bytes(self.result))
            res = requests.post(self.target,
                                data=jsonutils.dump_as_bytes(self.result),
                                headers=self.headers,
                                timeout=self.timeout)
            # Lazy %-style logging args instead of eager interpolation.
            LOG.debug('Test result posting finished with status code %d.',
                      res.status_code)
        except Exception as err:
            # Broad catch is deliberate: posting results is best-effort and
            # must never crash the test run.
            LOG.exception('Failed to record result data: %s', err)
| 35.257143 | 79 | 0.603998 |
from __future__ import absolute_import
import logging
import os
from oslo_serialization import jsonutils
import requests
from oslo_config import cfg
from yardstick.dispatcher.base import Base as DispatchBase
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
http_dispatcher_opts = [
cfg.StrOpt('target',
default='http://127.0.0.1:8000/results',
help='The target where the http request will be sent. '
'If this is not set, no data will be posted. For '
'example: target = http://hostname:1234/path'),
cfg.IntOpt('timeout',
default=5,
help='The max time in seconds to wait for a request to '
'timeout.'),
]
CONF.register_opts(http_dispatcher_opts, group="dispatcher_http")
class HttpDispatcher(DispatchBase):
    """Dispatcher class for posting data into a http target."""

    __dispatcher_type__ = "Http"

    def __init__(self, conf):
        super(HttpDispatcher, self).__init__(conf)
        self.headers = {'Content-type': 'application/json'}
        self.timeout = CONF.dispatcher_http.timeout
        self.target = CONF.dispatcher_http.target
        self.raw_result = []
        # Static run metadata; the buffered test results are attached as
        # "details" in flush_result_data().
        self.result = {
            "project_name": "yardstick",
            "description": "yardstick test cases result",
            "pod_name": os.environ.get('NODE_NAME', 'unknown'),
            "installer": os.environ.get('INSTALLER_TYPE', 'unknown'),
            "version": os.environ.get('YARDSTICK_VERSION', 'unknown')
        }

    def record_result_data(self, data):
        """Buffer one result record until flush_result_data() is called."""
        self.raw_result.append(data)

    def flush_result_data(self):
        """POST all buffered results to the configured HTTP target."""
        if self.target == '':
            # If the target was not set, do not do anything.
            # (Message fixed: the literals previously concatenated to
            # "...no data willbe posted.")
            LOG.error('Dispatcher target was not set, no data will '
                      'be posted.')
            return

        self.result["details"] = self.raw_result

        # Derive the test case name from the first record carrying a
        # scenario configuration.
        case_name = ""
        for v in self.raw_result:
            if isinstance(v, dict) and "scenario_cfg" in v:
                case_name = v["scenario_cfg"]["tc"]
                break
        if case_name == "":
            LOG.error('Test result : %s',
                      jsonutils.dump_as_bytes(self.result))
            LOG.error('The case_name cannot be found, no data will be posted.')
            return

        self.result["case_name"] = case_name

        try:
            LOG.debug('Test result : %s',
                      jsonutils.dump_as_bytes(self.result))
            res = requests.post(self.target,
                                data=jsonutils.dump_as_bytes(self.result),
                                headers=self.headers,
                                timeout=self.timeout)
            # Lazy %-style logging args instead of eager interpolation.
            LOG.debug('Test result posting finished with status code %d.',
                      res.status_code)
        except Exception as err:
            # Broad catch is deliberate: posting results is best-effort and
            # must never crash the test run.
            LOG.exception('Failed to record result data: %s', err)
| true | true |
7900865b0a88c5c3cbe2f710190bbfd3e535d0ca | 1,159 | py | Python | test.py | linkinpark213/pytorch-lstm-toy | a89eba74a3606dab125d394e63e4a585319227f1 | [
"MIT"
] | 1 | 2019-03-04T12:52:48.000Z | 2019-03-04T12:52:48.000Z | test.py | linkinpark213/pytorch-lstm-toy | a89eba74a3606dab125d394e63e4a585319227f1 | [
"MIT"
] | null | null | null | test.py | linkinpark213/pytorch-lstm-toy | a89eba74a3606dab125d394e63e4a585319227f1 | [
"MIT"
] | null | null | null | import torch
import numpy as np
import torch.utils.data
from net import SurnameLSTM
from data import SurnameDataset
if __name__ == '__main__':
    # Restore the trained model weights.
    net = SurnameLSTM()
    state_dict = torch.load('model.pth')
    net.load_state_dict(state_dict)

    # Evaluate accuracy on a single shuffled validation batch.
    dataset = SurnameDataset(subset='val')
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
    sample = next(iter(data_loader))
    pred = np.argmax(net(sample['values']).detach().numpy(), axis=1)
    gt = np.array(sample['raw_label'])
    accuracy = np.mean(pred == gt)
    print('Accuracy on the validation data: {:.1f} %'.format(accuracy * 100))

    # Interactive prediction: encode the surname as zero-padded ASCII codes
    # to the fixed 12-character input length the network expects.
    print('Please enter a surname to val:')
    input_name = input()
    # Truncate to 12 chars: np.pad raises for a negative pad width on
    # longer names.
    name = input_name.lower()[:12]
    name_ascii = np.array([ord(c) for c in name])
    name_ascii = np.pad(name_ascii, (0, 12 - len(name_ascii)), mode='constant',
                        constant_values=0).astype(np.float32)
    name_ascii = torch.tensor([name_ascii])
    pred = np.argmax(net(name_ascii).detach().numpy(), axis=1)
    print('Mr / Ms. {}, I guess you are {}!'.format(input_name, ['English', 'Chinese', 'Japanese'][pred[0]]))
| 36.21875 | 113 | 0.670406 | import torch
import numpy as np
import torch.utils.data
from net import SurnameLSTM
from data import SurnameDataset
if __name__ == '__main__':
    # Restore the trained model weights.
    net = SurnameLSTM()
    state_dict = torch.load('model.pth')
    net.load_state_dict(state_dict)

    # Evaluate accuracy on a single shuffled validation batch.
    dataset = SurnameDataset(subset='val')
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
    sample = next(iter(data_loader))
    pred = np.argmax(net(sample['values']).detach().numpy(), axis=1)
    gt = np.array(sample['raw_label'])
    accuracy = np.mean(pred == gt)
    print('Accuracy on the validation data: {:.1f} %'.format(accuracy * 100))

    # Interactive prediction: encode the surname as zero-padded ASCII codes
    # to the fixed 12-character input length the network expects.
    print('Please enter a surname to val:')
    input_name = input()
    # Truncate to 12 chars: np.pad raises for a negative pad width on
    # longer names.
    name = input_name.lower()[:12]
    name_ascii = np.array([ord(c) for c in name])
    name_ascii = np.pad(name_ascii, (0, 12 - len(name_ascii)), mode='constant',
                        constant_values=0).astype(np.float32)
    name_ascii = torch.tensor([name_ascii])
    pred = np.argmax(net(name_ascii).detach().numpy(), axis=1)
    print('Mr / Ms. {}, I guess you are {}!'.format(input_name, ['English', 'Chinese', 'Japanese'][pred[0]]))
| true | true |
79008666505b0b62a5773e50f987def871a3d604 | 33 | py | Python | snowflake/__init__.py | ronmorgen1/snowflake | f038179c7188021757085938a0de5aadf6dc5e5b | [
"MIT"
] | null | null | null | snowflake/__init__.py | ronmorgen1/snowflake | f038179c7188021757085938a0de5aadf6dc5e5b | [
"MIT"
] | null | null | null | snowflake/__init__.py | ronmorgen1/snowflake | f038179c7188021757085938a0de5aadf6dc5e5b | [
"MIT"
] | null | null | null | from .snowflake import Snowflake
| 16.5 | 32 | 0.848485 | from .snowflake import Snowflake
| true | true |
79008843893bdfe70a74c1a6220f5194a815a42a | 7,876 | py | Python | djangocms_content_expiry/filters.py | Aiky30/djangocms-content-expiry | da7d348bcdafbf1a9862e4cc69a8363b3305a31a | [
"BSD-3-Clause"
] | null | null | null | djangocms_content_expiry/filters.py | Aiky30/djangocms-content-expiry | da7d348bcdafbf1a9862e4cc69a8363b3305a31a | [
"BSD-3-Clause"
] | null | null | null | djangocms_content_expiry/filters.py | Aiky30/djangocms-content-expiry | da7d348bcdafbf1a9862e4cc69a8363b3305a31a | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from djangocms_versioning.constants import PUBLISHED, VERSION_STATES
from djangocms_versioning.versionables import _cms_extension
from polymorphic.utils import get_base_polymorphic_model
from rangefilter.filters import DateRangeFilter
from .helpers import get_rangefilter_expires_default
class SimpleListMultiselectFilter(admin.SimpleListFilter):
    """List filter whose value is a comma-separated multi-selection."""

    def value_as_list(self):
        """Split the raw filter value into a list (empty when unset)."""
        raw = self.value()
        if not raw:
            return []
        return raw.split(',')

    def _update_query(self, changelist, include=None, exclude=None):
        """Build a query string with *include* added and *exclude* removed."""
        selection = self.value_as_list()
        if include and include not in selection:
            selection.append(include)
        if exclude and exclude in selection:
            selection.remove(exclude)
        if not selection:
            return changelist.get_query_string(remove=[self.parameter_name])
        return changelist.get_query_string(
            {self.parameter_name: ','.join(selection)}
        )
class ContentTypeFilter(SimpleListMultiselectFilter):
    """Multi-select changelist filter over versionable content types."""

    title = _("Content Type")
    parameter_name = "content_type"
    template = 'djangocms_content_expiry/multiselect_filter.html'

    def lookups(self, request, model_admin):
        """Return unique (pk, ContentType) pairs for versionable models."""
        lookup_list = []
        for content_model in _cms_extension().versionables_by_content:
            # Only add references to the inherited concrete model i.e. not referenced polymorphic models
            if hasattr(content_model, "polymorphic_ctype"):
                content_model = get_base_polymorphic_model(content_model)
            # Create an entry
            content_type = ContentType.objects.get_for_model(content_model)
            lookup_list_entry = (content_type.pk, content_type)
            # Only add unique entries
            if lookup_list_entry not in lookup_list:
                lookup_list.append(lookup_list_entry)
        return lookup_list

    def queryset(self, request, queryset):
        """Filter by the comma-separated list of selected content type pks."""
        content_types = self.value()
        if not content_types:
            return queryset
        return queryset.filter(version__content_type__in=content_types.split(','))

    def choices(self, changelist):
        """Yield the 'All' choice plus one toggleable choice per lookup."""
        yield {
            'selected': self.value() is None,
            'query_string': changelist.get_query_string(remove=[self.parameter_name]),
            'display': 'All',
            'initial': True,
        }
        for lookup, title in self.lookup_choices:
            yield {
                'selected': str(lookup) in self.value_as_list(),
                'query_string': changelist.get_query_string({self.parameter_name: lookup}),
                'include_query_string': self._update_query(changelist, include=str(lookup)),
                'exclude_query_string': self._update_query(changelist, exclude=str(lookup)),
                'display': title,
            }
class VersionStateFilter(SimpleListMultiselectFilter):
    """Multi-select filter on version state, defaulting to published only.

    Because a default is applied, the pseudo value ``_all_`` is used to mean
    "no state filtering at all".
    """
    title = _("Version State")
    parameter_name = "state"
    default_filter_value = PUBLISHED
    show_all_param_value = "_all_"
    template = 'djangocms_content_expiry/multiselect_filter.html'

    def _is_default(self, filter_value):
        """True when *filter_value* is the implicit (unset) default state."""
        if self.default_filter_value == filter_value and self.value() is None:
            return True
        return False

    def _get_all_query_string(self, changelist):
        """
        If there's a default value set the all parameter needs to be provided
        however, if a default is not set the all parameter is not required.
        """
        # Default setting in use
        if self.default_filter_value:
            return changelist.get_query_string(
                {self.parameter_name: self.show_all_param_value}
            )
        # Default setting not in use
        return changelist.get_query_string(remove=[self.parameter_name])

    def _is_all_selected(self):
        """True when every state is shown (explicitly or by absence)."""
        state = self.value()
        # Default setting in use
        if self.default_filter_value and state == self.show_all_param_value:
            return True
        # Default setting not in use
        elif not self.default_filter_value and not state:
            return True
        return False

    def _update_query(self, changelist, include=None, exclude=None):
        """Rebuild the query string toggling one state in the selection."""
        selected_list = self.value_as_list()
        # '_all_' is a pseudo value; drop it before editing the selection.
        if self.show_all_param_value in selected_list:
            selected_list.remove(self.show_all_param_value)
        if include and include not in selected_list:
            selected_list.append(include)
        if exclude and exclude in selected_list:
            selected_list.remove(exclude)
        if selected_list:
            compiled_selection = ','.join(selected_list)
            return changelist.get_query_string({self.parameter_name: compiled_selection})
        else:
            return changelist.get_query_string(remove=[self.parameter_name])

    def lookups(self, request, model_admin):
        """All known version states."""
        return VERSION_STATES

    def queryset(self, request, queryset):
        """Apply the state selection, honouring the default and '_all_'."""
        state = self.value()
        # Default setting in use
        if self.default_filter_value:
            if not state:
                return queryset.filter(version__state=self.default_filter_value)
            elif state != "_all_":
                return queryset.filter(version__state__in=state.split(','))
        # Default setting not in use
        elif not self.default_filter_value and state:
            return queryset.filter(version__state__in=state.split(','))
        return queryset

    def choices(self, changelist):
        """Yield the 'All' choice plus one toggleable choice per state."""
        yield {
            "selected": self._is_all_selected(),
            "query_string": self._get_all_query_string(changelist),
            "display": _("All"),
            'initial': True,
        }
        for lookup, title in self.lookup_choices:
            lookup_value = str(lookup)
            yield {
                "selected": str(lookup) in self.value_as_list() or self._is_default(lookup_value),
                "query_string": changelist.get_query_string(
                    {self.parameter_name: lookup}
                ),
                'include_query_string': self._update_query(changelist, include=str(lookup_value)),
                'exclude_query_string': self._update_query(changelist, exclude=str(lookup_value)),
                "display": title,
            }
class AuthorFilter(admin.SimpleListFilter):
    """
    An author filter limited to those users who have added expiration dates
    """
    title = _("Version Author")
    parameter_name = "created_by"

    def lookups(self, request, model_admin):
        """Return (pk, display name) pairs for users who authored versions."""
        from django.utils.encoding import force_text
        User = get_user_model()
        options = []
        qs = model_admin.get_queryset(request)
        authors = qs.values_list('version__created_by', flat=True).distinct()
        users = User.objects.filter(pk__in=authors)
        for user in users:
            options.append(
                (force_text(user.pk), user.get_full_name() or user.get_username())
            )
        return options

    def queryset(self, request, queryset):
        """Restrict the changelist to rows authored by the selected user."""
        # NOTE(review): lookups collect authors via 'version__created_by' but
        # the filter below applies 'created_by' directly — confirm both
        # traverse to the same user relation on this model.
        if self.value():
            return queryset.filter(created_by=self.value()).distinct()
        return queryset
class ContentExpiryDateRangeFilter(DateRangeFilter):
    """Date-range filter that applies a default expiry window when unset."""

    def queryset(self, request, queryset):
        """Apply the user's range, or the configured default range if none."""
        queryset = super().queryset(request, queryset)

        # By default the widget should default to show a default duration and not all content
        # expiry records
        if not any('expires__range' in seed for seed in request.GET):
            default_gte, default_lte = get_rangefilter_expires_default()
            queryset = queryset.filter(expires__range=(default_gte, default_lte))
        return queryset
| 39.38 | 104 | 0.660995 | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from djangocms_versioning.constants import PUBLISHED, VERSION_STATES
from djangocms_versioning.versionables import _cms_extension
from polymorphic.utils import get_base_polymorphic_model
from rangefilter.filters import DateRangeFilter
from .helpers import get_rangefilter_expires_default
class SimpleListMultiselectFilter(admin.SimpleListFilter):
    """List filter whose value is a comma-separated multi-selection."""

    def value_as_list(self):
        """Split the raw filter value into a list (empty when unset)."""
        raw = self.value()
        if not raw:
            return []
        return raw.split(',')

    def _update_query(self, changelist, include=None, exclude=None):
        """Build a query string with *include* added and *exclude* removed."""
        selection = self.value_as_list()
        if include and include not in selection:
            selection.append(include)
        if exclude and exclude in selection:
            selection.remove(exclude)
        if not selection:
            return changelist.get_query_string(remove=[self.parameter_name])
        return changelist.get_query_string(
            {self.parameter_name: ','.join(selection)}
        )
class ContentTypeFilter(SimpleListMultiselectFilter):
    """Multi-select changelist filter over versionable content types."""

    title = _("Content Type")
    parameter_name = "content_type"
    template = 'djangocms_content_expiry/multiselect_filter.html'

    def lookups(self, request, model_admin):
        """Return unique (pk, ContentType) pairs for versionable models."""
        lookup_list = []
        for content_model in _cms_extension().versionables_by_content:
            # Collapse polymorphic children onto their concrete base model.
            if hasattr(content_model, "polymorphic_ctype"):
                content_model = get_base_polymorphic_model(content_model)
            content_type = ContentType.objects.get_for_model(content_model)
            lookup_list_entry = (content_type.pk, content_type)
            # Skip duplicates (several versionables may share a base model).
            if lookup_list_entry not in lookup_list:
                lookup_list.append(lookup_list_entry)
        return lookup_list

    def queryset(self, request, queryset):
        """Filter by the comma-separated list of selected content type pks."""
        content_types = self.value()
        if not content_types:
            return queryset
        return queryset.filter(version__content_type__in=content_types.split(','))

    def choices(self, changelist):
        """Yield the 'All' choice plus one toggleable choice per lookup."""
        yield {
            'selected': self.value() is None,
            'query_string': changelist.get_query_string(remove=[self.parameter_name]),
            'display': 'All',
            'initial': True,
        }
        for lookup, title in self.lookup_choices:
            yield {
                'selected': str(lookup) in self.value_as_list(),
                'query_string': changelist.get_query_string({self.parameter_name: lookup}),
                'include_query_string': self._update_query(changelist, include=str(lookup)),
                'exclude_query_string': self._update_query(changelist, exclude=str(lookup)),
                'display': title,
            }
class VersionStateFilter(SimpleListMultiselectFilter):
    """Multi-select filter on version state, defaulting to published only.

    Because a default is applied, the pseudo value ``_all_`` is used to mean
    "no state filtering at all".
    """
    title = _("Version State")
    parameter_name = "state"
    default_filter_value = PUBLISHED
    show_all_param_value = "_all_"
    template = 'djangocms_content_expiry/multiselect_filter.html'

    def _is_default(self, filter_value):
        """True when *filter_value* is the implicit (unset) default state."""
        if self.default_filter_value == filter_value and self.value() is None:
            return True
        return False

    def _get_all_query_string(self, changelist):
        """Return the query string representing the 'All' choice.

        With a default in place the explicit ``_all_`` parameter is needed;
        without one, simply removing the parameter suffices.
        """
        if self.default_filter_value:
            return changelist.get_query_string(
                {self.parameter_name: self.show_all_param_value}
            )
        return changelist.get_query_string(remove=[self.parameter_name])

    def _is_all_selected(self):
        """True when every state is shown (explicitly or by absence)."""
        state = self.value()
        if self.default_filter_value and state == self.show_all_param_value:
            return True
        elif not self.default_filter_value and not state:
            return True
        return False

    def _update_query(self, changelist, include=None, exclude=None):
        """Rebuild the query string toggling one state in the selection."""
        selected_list = self.value_as_list()
        # '_all_' is a pseudo value; drop it before editing the selection.
        if self.show_all_param_value in selected_list:
            selected_list.remove(self.show_all_param_value)
        if include and include not in selected_list:
            selected_list.append(include)
        if exclude and exclude in selected_list:
            selected_list.remove(exclude)
        if selected_list:
            compiled_selection = ','.join(selected_list)
            return changelist.get_query_string({self.parameter_name: compiled_selection})
        else:
            return changelist.get_query_string(remove=[self.parameter_name])

    def lookups(self, request, model_admin):
        """All known version states."""
        return VERSION_STATES

    def queryset(self, request, queryset):
        """Apply the state selection, honouring the default and '_all_'."""
        state = self.value()
        if self.default_filter_value:
            if not state:
                return queryset.filter(version__state=self.default_filter_value)
            elif state != "_all_":
                return queryset.filter(version__state__in=state.split(','))
        elif not self.default_filter_value and state:
            return queryset.filter(version__state__in=state.split(','))
        return queryset

    def choices(self, changelist):
        """Yield the 'All' choice plus one toggleable choice per state."""
        yield {
            "selected": self._is_all_selected(),
            "query_string": self._get_all_query_string(changelist),
            "display": _("All"),
            'initial': True,
        }
        for lookup, title in self.lookup_choices:
            lookup_value = str(lookup)
            yield {
                "selected": str(lookup) in self.value_as_list() or self._is_default(lookup_value),
                "query_string": changelist.get_query_string(
                    {self.parameter_name: lookup}
                ),
                'include_query_string': self._update_query(changelist, include=str(lookup_value)),
                'exclude_query_string': self._update_query(changelist, exclude=str(lookup_value)),
                "display": title,
            }
class AuthorFilter(admin.SimpleListFilter):
    """Admin filter listing the distinct authors of the related versions."""
    title = _("Version Author")
    parameter_name = "created_by"

    def lookups(self, request, model_admin):
        """Return (pk, display name) choices for every distinct version author."""
        # NOTE(review): force_text was removed in Django 4 (force_str is the
        # modern spelling) -- confirm the targeted Django version.
        from django.utils.encoding import force_text
        User = get_user_model()
        options = []
        qs = model_admin.get_queryset(request)
        authors = qs.values_list('version__created_by', flat=True).distinct()
        users = User.objects.filter(pk__in=authors)
        for user in users:
            options.append(
                (force_text(user.pk), user.get_full_name() or user.get_username())
            )
        return options

    def queryset(self, request, queryset):
        """Restrict the queryset to the selected author, if any."""
        # NOTE(review): lookups() collects pks from 'version__created_by' but
        # the filter below uses the local 'created_by' field -- verify the
        # model really has both, otherwise this should be version__created_by.
        if self.value():
            return queryset.filter(created_by=self.value()).distinct()
        return queryset
class ContentExpiryDateRangeFilter(DateRangeFilter):
    """Expiry date-range filter that falls back to a default window."""

    def queryset(self, request, queryset):
        """Apply the user-supplied range, or the default expiry range if none given."""
        qs = super().queryset(request, queryset)
        range_supplied = any('expires__range' in key for key in request.GET)
        if not range_supplied:
            start, end = get_rangefilter_expires_default()
            qs = qs.filter(expires__range=(start, end))
        return qs
| true | true |
790088f8725b3e8ddf7030d651106d346da97c47 | 2,299 | py | Python | main.py | ronaldosvieira/rl | 01e7ac1a6fabe7a74171ce45e220232fdb23280b | [
"MIT"
] | null | null | null | main.py | ronaldosvieira/rl | 01e7ac1a6fabe7a74171ce45e220232fdb23280b | [
"MIT"
] | null | null | null | main.py | ronaldosvieira/rl | 01e7ac1a6fabe7a74171ce45e220232fdb23280b | [
"MIT"
] | null | null | null | import numpy as np
class Reward:
    """Abstract base for reward generators; subclasses implement get()."""
    pass
class StaticReward(Reward):
    """Reward generator that always returns the same fixed value."""

    def __init__(self, value):
        self.value = value

    def get(self):
        """Return the constant reward.

        Bug fix: the original returned the bare name ``value`` (a NameError
        at call time); the constant is stored on the instance.
        """
        return self.value
class NormalReward(Reward):
    """Reward generator that samples from a Gaussian N(mean, std)."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def get(self):
        """Draw one reward sample from N(mean, std)."""
        return np.random.normal(self.mean, self.std)
class Bandit:
    """A k-armed bandit whose true arm values are drawn from N(0, 1)."""

    def __init__(self, arms):
        """Create *arms* arms, each with a latent mean reward ~ N(0, 1)."""
        self.no_of_arms = arms
        true_values = []
        for _ in range(arms):
            true_values.append(np.random.normal(0, 1))
        self.arms = true_values

    def step(self, arm):
        """Pull *arm* and return a reward ~ N(true value of the arm, 1)."""
        mean = self.arms[arm]
        return np.random.normal(mean, 1)
class MDP:
    """
    Represents a finite Markov Decision Process.
    """

    def __init__(self, S, A, R, p):
        """
        Parameters
        ----------
        S : int
            Number of states.
        A : matrix
            A[s][a] is True iff action ``a`` is permitted in state ``s``.
        R : list
            One reward generator per state (objects exposing ``get()``).
        p : matrix
            Transition kernel: p[s][a][s'] = p(s' | s, a).
        """
        self.S = list(range(S))
        self.A = A
        self.R = R
        self.p = p
        self.no_of_states = S
        self.no_of_actions = len(A[0])

    def step(self, s, a):
        """Take action ``a`` in state ``s``; return (next state, reward).

        The next state is sampled from p[s][a]; the reward comes from the
        generator attached to the state that was reached.
        """
        next_state = np.random.choice(self.no_of_states, p=self.p[s][a])
        reward = self.R[next_state].get()
        return next_state, reward
def epsilon_greedy(no_of_arms, epsilon, Q, N):
    """Pick an arm: explore uniformly with probability ~epsilon, else exploit argmax(Q).

    ``N`` is accepted for signature compatibility with callers; it is not
    consulted here.
    """
    explore = np.random.random() <= epsilon
    if explore:
        return np.random.choice(no_of_arms)
    return np.argmax(Q)
def main():
    """Run an epsilon-greedy agent on a 10-armed bandit modelled as an MDP.

    Averages the per-step reward over many independent runs, in the style of
    the 10-armed testbed from Sutton & Barto.
    """
    no_of_arms = 10
    no_of_steps = 1000
    epsilon = 0.1
    no_of_runs = 2000

    arms = np.random.normal(0, 1, no_of_arms)
    # Bug fix: the original used a single state with p[0][a] = [1], so every
    # reward came from R[0] no matter which arm was pulled (R[1..9] were
    # unreachable).  Model one state per arm with deterministic transitions
    # a -> state a, so R[action] supplies the reward, as the 10-entry R list
    # clearly intends.
    S = no_of_arms
    A = [[True] * no_of_arms for _ in range(S)]
    R = [NormalReward(m, 1) for m in arms]
    p = [[[1 if nxt == a else 0 for nxt in range(S)]
          for a in range(no_of_arms)] for _ in range(S)]
    bandit = MDP(S, A, R, p)

    optimal_action = np.argmax(arms)  # kept for parity with the testbed metric

    np.random.seed(1)
    # Bug fix: `[[0] * n] * runs` aliased ONE inner list across every run, so
    # all runs shared (and clobbered) the same Q/N tables.  Build an
    # independent list per run instead.
    Q = [[0] * no_of_arms for _ in range(no_of_runs)]
    N = [[0] * no_of_arms for _ in range(no_of_runs)]
    mean_rewards = [0] * no_of_steps
    for j in range(no_of_steps):
        for i in range(no_of_runs):
            action = epsilon_greedy(no_of_arms, epsilon, Q[i], N[i])
            _, reward = bandit.step(0, action)
            mean_rewards[j] += reward
            # Incremental sample-average update of the action-value estimate.
            N[i][action] += 1
            Q[i][action] += (1 / N[i][action]) * (reward - Q[i][action])
        mean_rewards[j] /= no_of_runs
if __name__ == '__main__':
main() | 19 | 67 | 0.634624 | import numpy as np
class Reward:
pass
class StaticReward(Reward):
    def __init__(self, value):
        self.value = value

    def get(self):
        # Bug fix: `return value` referenced an undefined name (NameError);
        # the constant lives on the instance.
        return self.value
class NormalReward(Reward):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def get(self):
return np.random.normal(self.mean, self.std)
class Bandit:
def __init__(self, arms):
self.no_of_arms = arms
self.arms = [np.random.normal(0, 1) for _ in range(arms)]
def step(self, arm):
return np.random.normal(self.arms[arm], 1)
class MDP:
def __init__(self, S, A, R, p):
self.S = list(range(S))
self.A, self.R, self.p = A, R, p
self.no_of_states = S
self.no_of_actions = len(A[0])
def step(self, s, a):
s_prime = np.random.choice(self.no_of_states, p = self.p[s][a])
r = self.R[s_prime].get()
return s_prime, r
def epsilon_greedy(no_of_arms, epsilon, Q, N):
if np.random.random() > epsilon:
action = np.argmax(Q)
else:
action = np.random.choice(no_of_arms)
return action
def main():
    """Epsilon-greedy agent on a 10-armed testbed (see the commented copy above)."""
    no_of_arms = 10
    no_of_steps = 1000
    epsilon = 0.1
    no_of_runs = 2000
    arms = np.random.normal(0, 1, no_of_arms)
    # Bug fix: with a single state every reward came from R[0] regardless of
    # the action.  Use one state per arm and deterministic transitions
    # a -> state a so R[action] supplies the reward.
    S = no_of_arms
    A = [[True] * no_of_arms for _ in range(S)]
    R = [NormalReward(m, 1) for m in arms]
    p = [[[1 if nxt == a else 0 for nxt in range(S)]
          for a in range(no_of_arms)] for _ in range(S)]
    bandit = MDP(S, A, R, p)
    optimal_action = np.argmax(arms)
    np.random.seed(1)
    # Bug fix: `[[0] * n] * runs` aliased one inner list across all runs.
    Q = [[0] * no_of_arms for _ in range(no_of_runs)]
    N = [[0] * no_of_arms for _ in range(no_of_runs)]
    mean_rewards = [0] * no_of_steps
    for j in range(no_of_steps):
        for i in range(no_of_runs):
            action = epsilon_greedy(no_of_arms, epsilon, Q[i], N[i])
            _, reward = bandit.step(0, action)
            mean_rewards[j] += reward
            N[i][action] += 1
            Q[i][action] += (1 / N[i][action]) * (reward - Q[i][action])
        mean_rewards[j] /= no_of_runs
if __name__ == '__main__':
main() | true | true |
7900898f9f8008a4e46806f3f1252a15b99d1937 | 1,362 | py | Python | Scripts/simulation/objects/gardening/gardening_commands.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/objects/gardening/gardening_commands.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/objects/gardening/gardening_commands.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\gardening\gardening_commands.py
# Compiled at: 2017-11-18 00:09:10
# Size of source mod 2**32: 1465 bytes
from objects.components import types
from objects.components.types import GARDENING_COMPONENT
from objects.gardening.gardening_component_fruit import GardeningFruitComponent
import services, sims4.commands
@sims4.commands.Command('gardening.cleanup_gardening_objects')
def cleanup_gardening_objects(_connection=None):
    """Destroy parentless fruit/flower objects and report via the command console."""
    object_manager = services.object_manager()
    for obj in object_manager.get_all_objects_with_component_gen(GARDENING_COMPONENT):
        component = obj.get_component(types.GARDENING_COMPONENT)
        if not isinstance(component, GardeningFruitComponent):
            continue
        if obj.parent is not None:
            continue
        # The decompiled short-circuit chain emitted the message only when the
        # orphan is neither in an inventory nor on the active lot; preserve
        # that exactly, then destroy the orphan either way.
        if not (obj.is_in_inventory() or obj.is_on_active_lot()):
            sims4.commands.output('Destroyed object {} on open street was found without a parent at position {}, parent_type {}.'.format(obj, obj.position, obj.parent_type), _connection)
        obj.destroy(source=obj, cause='Fruit/Flower with no parent on open street')
    sims4.commands.output('Gardening cleanup complete', _connection)
return True | 59.217391 | 237 | 0.769457 |
from objects.components import types
from objects.components.types import GARDENING_COMPONENT
from objects.gardening.gardening_component_fruit import GardeningFruitComponent
import services, sims4.commands
@sims4.commands.Command('gardening.cleanup_gardening_objects')
def cleanup_gardening_objects(_connection=None):
for obj in services.object_manager().get_all_objects_with_component_gen(GARDENING_COMPONENT):
gardening_component = obj.get_component(types.GARDENING_COMPONENT)
if not isinstance(gardening_component, GardeningFruitComponent):
continue
if obj.parent is None:
obj.is_in_inventory() or obj.is_on_active_lot() or sims4.commands.output('Destroyed object {} on open street was found without a parent at position {}, parent_type {}.'.format(obj, obj.position, obj.parent_type), _connection)
obj.destroy(source=obj, cause='Fruit/Flower with no parent on open street')
sims4.commands.output('Gardening cleanup complete', _connection)
return True | true | true |
790089f17aaa7dc2c04e2eee9c2f24433b4d5faa | 4,326 | py | Python | test/sampleData/micropython/MCP4725.py | polfeliu/cyanobyte | 94fb8b8ea6c226e7e5f2a42d39356c75693d4093 | [
"Apache-2.0"
] | 70 | 2019-03-15T03:38:00.000Z | 2022-03-16T20:31:10.000Z | test/sampleData/micropython/MCP4725.py | polfeliu/cyanobyte | 94fb8b8ea6c226e7e5f2a42d39356c75693d4093 | [
"Apache-2.0"
] | 193 | 2019-03-15T21:33:40.000Z | 2021-06-04T22:19:02.000Z | test/sampleData/micropython/MCP4725.py | polfeliu/cyanobyte | 94fb8b8ea6c226e7e5f2a42d39356c75693d4093 | [
"Apache-2.0"
] | 40 | 2019-03-15T21:41:51.000Z | 2021-06-18T14:56:33.000Z | # Copyright (C) 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for MCP4725 v0.1.0.
# Generated from peripherals/MCP4725.yaml using Cyanobyte Codegen v0.1.0
"""
Class for MCP4725
"""
from machine import I2C
DIGITALOUT_GND = 0 # Ground
DIGITALOUT_VCC = 4095 # Vcc (full power)
def _swap_endian(val, length):
"""
Swap the endianness of a number
"""
if length <= 8:
return val
if length <= 16:
return (val & 0xFF00) >> 8 | (val & 0xFF) << 8
if length <= 32:
return ((val & 0xFF000000) >> 24 |
(val & 0x00FF0000) >> 8 |
(val & 0x0000FF00) << 8 |
(val & 0x000000FF) << 24)
raise Exception('Cannot swap endianness for length ' + length)
class MCP4725:
    """
    Microchip 4725 Digital-to-Analog Converter
    """
    device_address = 98
    REGISTER_EEPROM = 96
    REGISTER_VOUT = 64

    def __init__(self, i2c):
        # Initialize connection to peripheral
        self.i2c = i2c

    def get_eeprom(self):
        """
        If EEPROM is set, the saved voltage output will
        be loaded from power-on.
        """
        # NOTE(review): only 1 byte is read for a 12-bit register -- this is
        # what the codegen emitted; confirm against the MCP4725 datasheet.
        byte_list = self.i2c.readfrom_mem(
            self.device_address,
            self.REGISTER_EEPROM,
            1,
            addrsize=12
        )
        val = 0
        val = val << 8 | byte_list[0]
        val = _swap_endian(val, 12)
        return val

    def set_eeprom(self, data):
        """
        If EEPROM is set, the saved voltage output will
        be loaded from power-on.
        """
        data = _swap_endian(data, 12)
        # Bug fix: `buffer = []` followed by `buffer[0] = ...` raised
        # IndexError; use a writable one-byte buffer instead.
        buffer = bytearray(1)
        buffer[0] = (data >> 0) & 0xFF
        self.i2c.writeto_mem(
            self.device_address,
            self.REGISTER_EEPROM,
            buffer,
            addrsize=12
        )

    def get_vout(self):
        """
        VOut = (Vcc * value) / 4096
        The output is a range between 0 and Vcc with
        steps of Vcc/4096.
        In a 3.3v system, each step is 800 microvolts.
        """
        byte_list = self.i2c.readfrom_mem(
            self.device_address,
            self.REGISTER_VOUT,
            1,
            addrsize=12
        )
        val = 0
        val = val << 8 | byte_list[0]
        val = _swap_endian(val, 12)
        return val

    def set_vout(self, data):
        """
        VOut = (Vcc * value) / 4096
        The output is a range between 0 and Vcc with
        steps of Vcc/4096.
        In a 3.3v system, each step is 800 microvolts.
        """
        data = _swap_endian(data, 12)
        # Bug fix: `buffer = []` followed by `buffer[0] = ...` raised
        # IndexError; use a writable one-byte buffer instead.
        buffer = bytearray(1)
        buffer[0] = (data >> 0) & 0xFF
        self.i2c.writeto_mem(
            self.device_address,
            self.REGISTER_VOUT,
            buffer,
            addrsize=12
        )

    def get_digitalout(self):
        """
        Only allows you to send fully on or off
        """
        # Read register data
        # '#/registers/EEPROM' > 'EEPROM'
        val = self.get_eeprom()
        # Mask register value down to the 13 low bits.
        val = val & 0b0001111111111111
        return val

    def set_digitalout(self, data):
        """
        Only allows you to send fully on or off
        """
        # Read current register data and OR the new bits in.
        # '#/registers/EEPROM' > 'EEPROM'
        register_data = self.get_eeprom()
        register_data = register_data | data
        self.set_eeprom(register_data)

    def getvout_asvoltage(self, vcc):
        """
        get vout
        """
        # NOTE(review): this reads the EEPROM register, not VOUT -- a codegen
        # artifact kept as-is; confirm intent before changing.
        voltage = None  # Variable declaration
        value = self.get_eeprom()
        voltage = value / 4096 * vcc
        return voltage

    def setvout_asvoltage(self, output, vcc):
        """
        set vout
        """
        # Bug fix: the scaled value must be integral before it reaches the
        # bitwise byte swap in set_eeprom (a float raised TypeError there).
        output = int(output / vcc * 4096)
        self.set_eeprom(output)
| 25.597633 | 74 | 0.561258 |
from machine import I2C
DIGITALOUT_GND = 0
DIGITALOUT_VCC = 4095
def _swap_endian(val, length):
if length <= 8:
return val
if length <= 16:
return (val & 0xFF00) >> 8 | (val & 0xFF) << 8
if length <= 32:
return ((val & 0xFF000000) >> 24 |
(val & 0x00FF0000) >> 8 |
(val & 0x0000FF00) << 8 |
(val & 0x000000FF) << 24)
raise Exception('Cannot swap endianness for length ' + length)
class MCP4725:
    device_address = 98
    REGISTER_EEPROM = 96
    REGISTER_VOUT = 64

    def __init__(self, i2c):
        self.i2c = i2c

    def get_eeprom(self):
        byte_list = self.i2c.readfrom_mem(
            self.device_address,
            self.REGISTER_EEPROM,
            1,
            addrsize=12
        )
        val = 0
        val = val << 8 | byte_list[0]
        val = _swap_endian(val, 12)
        return val

    def set_eeprom(self, data):
        data = _swap_endian(data, 12)
        # Bug fix: `buffer[0]` on an empty list raised IndexError.
        buffer = bytearray(1)
        buffer[0] = (data >> 0) & 0xFF
        self.i2c.writeto_mem(
            self.device_address,
            self.REGISTER_EEPROM,
            buffer,
            addrsize=12
        )

    def get_vout(self):
        byte_list = self.i2c.readfrom_mem(
            self.device_address,
            self.REGISTER_VOUT,
            1,
            addrsize=12
        )
        val = 0
        val = val << 8 | byte_list[0]
        val = _swap_endian(val, 12)
        return val

    def set_vout(self, data):
        data = _swap_endian(data, 12)
        # Bug fix: `buffer[0]` on an empty list raised IndexError.
        buffer = bytearray(1)
        buffer[0] = (data >> 0) & 0xFF
        self.i2c.writeto_mem(
            self.device_address,
            self.REGISTER_VOUT,
            buffer,
            addrsize=12
        )

    def get_digitalout(self):
        val = self.get_eeprom()
        val = val & 0b0001111111111111
        return val

    def set_digitalout(self, data):
        register_data = self.get_eeprom()
        register_data = register_data | data
        self.set_eeprom(register_data)

    def getvout_asvoltage(self, vcc):
        voltage = None
        value = self.get_eeprom()
        voltage = value / 4096 * vcc
        return voltage

    def setvout_asvoltage(self, output, vcc):
        # Bug fix: keep the value integral for the bitwise byte swap downstream.
        output = int(output / vcc * 4096)
        self.set_eeprom(output)
| true | true |
79008a2efedb45c51d08c60502ccbaf1876ca9f7 | 920 | py | Python | Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/11_select_multiple_sheets.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | null | null | null | Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/11_select_multiple_sheets.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | null | null | null | Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/11_select_multiple_sheets.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | 1 | 2021-03-10T09:40:05.000Z | 2021-03-10T09:40:05.000Z | # Load both the 2016 and 2017 sheets by name
all_survey_data = pd.read_excel("fcc_survey.xlsx", sheet_name = ['2016', '2017'])
# View the data type of all_survey_data
print(type(all_survey_data))
'''
<script.py> output:
<class 'collections.OrderedDict'>
'''
# Load all sheets in the Excel file
all_survey_data = pd.read_excel("fcc_survey.xlsx", sheet_name = [0, '2017'])
# View the sheet names in all_survey_data
print(all_survey_data.keys())
'''
<script.py> output:
odict_keys([0, '2017'])
'''
# Load all sheets in the Excel file
all_survey_data = pd.read_excel("fcc_survey.xlsx",
sheet_name = None)
# View the sheet names in all_survey_data
print(all_survey_data.keys())
'''
<script.py> output:
odict_keys(['2016', '2017'])
'''
# Notice that if you load a sheet by its index position, the resulting data frame's name is also the index number, not the sheet name. | 24.210526 | 134 | 0.691304 |
all_survey_data = pd.read_excel("fcc_survey.xlsx", sheet_name = ['2016', '2017'])
print(type(all_survey_data))
all_survey_data = pd.read_excel("fcc_survey.xlsx", sheet_name = [0, '2017'])
print(all_survey_data.keys())
all_survey_data = pd.read_excel("fcc_survey.xlsx",
sheet_name = None)
print(all_survey_data.keys())
| true | true |
79008c449364184a0d0e8188613d9ef7d1068736 | 304 | py | Python | Jogo_pong/telas/Telas.py | LivioAlvarenga/Tutoriais_Kivy_KivyMD | b6225578e764eaf0312afafbb2f76dc06f92342d | [
"MIT"
] | null | null | null | Jogo_pong/telas/Telas.py | LivioAlvarenga/Tutoriais_Kivy_KivyMD | b6225578e764eaf0312afafbb2f76dc06f92342d | [
"MIT"
] | null | null | null | Jogo_pong/telas/Telas.py | LivioAlvarenga/Tutoriais_Kivy_KivyMD | b6225578e764eaf0312afafbb2f76dc06f92342d | [
"MIT"
] | null | null | null | from kivy.uix.screenmanager import Screen
# Declares the game's menu screen
class TelaMenu(Screen):
pass
# Declares the Pong gameplay screen
class TelaJogo(Screen):
pass
# Declares the winner screen for player 1
class TelaVencedor1(Screen):
pass
# Declares the winner screen for player 2
class TelaVencedor2(Screen):
pass
| 13.818182 | 41 | 0.723684 | from kivy.uix.screenmanager import Screen
class TelaMenu(Screen):
pass
class TelaJogo(Screen):
pass
class TelaVencedor1(Screen):
pass
class TelaVencedor2(Screen):
pass
| true | true |
79008de7f081da56651a4cbca42a20e74befdd80 | 405 | py | Python | cryptocurrency/asgi.py | deepanshu-jain1999/cryptocurrencytracking | 1feb8f14e7615406b0658138d23314188f8f0e8b | [
"Apache-2.0"
] | null | null | null | cryptocurrency/asgi.py | deepanshu-jain1999/cryptocurrencytracking | 1feb8f14e7615406b0658138d23314188f8f0e8b | [
"Apache-2.0"
] | null | null | null | cryptocurrency/asgi.py | deepanshu-jain1999/cryptocurrencytracking | 1feb8f14e7615406b0658138d23314188f8f0e8b | [
"Apache-2.0"
] | null | null | null | """
ASGI config for cryptocurrency project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cryptocurrency.settings')

# Module-level ASGI callable exposed for ASGI servers.
application = get_asgi_application()
| 23.823529 | 78 | 0.792593 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cryptocurrency.settings')
application = get_asgi_application()
| true | true |
79008def7b93a1c135e4d7e9a69307b976e509be | 738 | py | Python | task04_a.py | mboehn/aoc2017 | 1bf5302c6e566e8454d3e567cfac38945c8fe955 | [
"MIT"
] | null | null | null | task04_a.py | mboehn/aoc2017 | 1bf5302c6e566e8454d3e567cfac38945c8fe955 | [
"MIT"
] | null | null | null | task04_a.py | mboehn/aoc2017 | 1bf5302c6e566e8454d3e567cfac38945c8fe955 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import math
import csv
import itertools
from pprint import pprint
import func
INPUTFILE = './task04.input'


def main():
    """Count passphrase lines containing no repeated word (AoC 2017, day 4 part A)."""
    accept = 0
    with open(INPUTFILE, mode='r') as csvfile:
        reader = csv.reader(csvfile, delimiter=" ")
        lines = list(reader)
    for line in lines:
        # A passphrase is valid when every word is unique; a set collapses
        # duplicates, so comparing sizes is an O(n) uniqueness test (the
        # original rescanned the whole line per word, O(n^2)).
        if len(set(line)) == len(line):
            accept = accept + 1
    print("file {} has {} lines".format(INPUTFILE, len(lines)))
    print("we accept {} of them".format(accept))
if __name__ == '__main__':
main()
| 20.5 | 67 | 0.533875 |
import math
import csv
import itertools
from pprint import pprint
import func
INPUTFILE = './task04.input'
def main():
accept = 0
with open(INPUTFILE, mode='r') as csvfile:
reader = csv.reader(csvfile, delimiter=" ")
lines = list(reader)
for line in lines:
reject_line = False
for word in line:
if line.count(word) > 1:
reject_line = True
break
if not reject_line:
accept = accept + 1
print("file {} has {} lines".format(INPUTFILE, len(lines)))
print("we accept {} of them".format(accept))
if __name__ == '__main__':
main()
| true | true |
79008e11a6fb20f17dc3953d8d2d9eae4c92b402 | 1,216 | py | Python | manager/setup.py | dadiboyena/data-science-toolbox | 7ebeb4525eb394e33f60711181d48a42d14dfd15 | [
"BSD-2-Clause"
] | 3 | 2016-05-27T15:28:38.000Z | 2019-12-21T20:24:31.000Z | manager/setup.py | dadiboyena/data-science-toolbox | 7ebeb4525eb394e33f60711181d48a42d14dfd15 | [
"BSD-2-Clause"
] | null | null | null | manager/setup.py | dadiboyena/data-science-toolbox | 7ebeb4525eb394e33f60711181d48a42d14dfd15 | [
"BSD-2-Clause"
] | 4 | 2017-02-25T12:20:26.000Z | 2018-08-01T06:26:43.000Z | from setuptools import setup
# Read the long description up front with a context manager so the file
# handle is closed promptly (the original passed open(...).read() inline,
# leaking the handle).
with open('README.txt') as readme_file:
    long_description_text = readme_file.read()

setup(
    name='dst',
    version='0.1.5',
    author='Jeroen Janssens',
    author_email='jeroen@jeroenjanssens.com',
    packages=['dst'],
    url='http://datasciencetoolbox.org',
    license='BSD',
    description='Data Science Toolbox -- Start doing data science in minutes.',
    long_description=long_description_text,
    install_requires=[
        "ansible >= 1.5",
    ],
    entry_points={
        'console_scripts': ['dst = dst.dst:main']
    },
    classifiers=[  # https://pypi.python.org/pypi?:action=list_classifiers
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX :: Linux',
        'Topic :: Scientific/Engineering',
        'Topic :: Utilities',
        'Topic :: System :: Software Distribution',
        'Topic :: System :: Systems Administration',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Unix Shell',
    ],
)
| 34.742857 | 79 | 0.60773 | from setuptools import setup
setup(
name='dst',
version='0.1.5',
author='Jeroen Janssens',
author_email='jeroen@jeroenjanssens.com',
packages=['dst'],
url='http://datasciencetoolbox.org',
license='BSD',
description='Data Science Toolbox -- Start doing data science in minutes.',
long_description=open('README.txt').read(),
install_requires=[
"ansible >= 1.5",
],
entry_points={
'console_scripts': ['dst = dst.dst:main']
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering',
'Topic :: Utilities',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'Programming Language :: Python :: 2.7',
'Programming Language :: Unix Shell',
],
)
| true | true |
7900914a31a095a3108d7d97b8d979b136b28f38 | 3,611 | py | Python | setup.py | MilanPecov/drf-yasg | b99306f71c6a5779b62189df7d9c1f5ea1c794ef | [
"BSD-3-Clause"
] | 1 | 2021-05-09T01:28:42.000Z | 2021-05-09T01:28:42.000Z | setup.py | MilanPecov/drf-yasg | b99306f71c6a5779b62189df7d9c1f5ea1c794ef | [
"BSD-3-Clause"
] | null | null | null | setup.py | MilanPecov/drf-yasg | b99306f71c6a5779b62189df7d9c1f5ea1c794ef | [
"BSD-3-Clause"
] | 1 | 2021-07-16T09:12:23.000Z | 2021-07-16T09:12:23.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import io
import os
import sys
from setuptools import find_packages, setup
def read_req(req_file):
    """Read a file from the requirements/ directory, dropping blanks and comments."""
    path = os.path.join('requirements', req_file)
    with open(path) as req:
        stripped = (line.strip() for line in req)
        return [line for line in stripped if line and not line.startswith('#')]
with io.open('README.rst', encoding='utf-8') as readme:
description = readme.read()
requirements = read_req('base.txt')
requirements_validation = read_req('validation.txt')
def find_versions_from_readme(prefix):
    """Extract the comma-separated version list following *prefix* in the README."""
    for raw_line in description.splitlines():
        stripped = raw_line.strip()
        if not stripped.startswith(prefix):
            continue
        found = [v.strip() for v in stripped[len(prefix):].split(',')]
        if found:
            return found
    raise RuntimeError("failed to find supported versions list for '{}'".format(prefix))
python_versions = find_versions_from_readme("- **Python**: ")
django_versions = find_versions_from_readme("- **Django**: ")
python_requires = ">=" + python_versions[0]
python_classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3',
] + ['Programming Language :: Python :: {}'.format(v) for v in python_versions]
django_classifiers = [
'Framework :: Django',
] + ['Framework :: Django :: {}'.format(v) for v in django_versions]
def drf_yasg_setup(**kwargs):
    """Invoke setuptools.setup with the package metadata.

    ``**kwargs`` lets the caller inject the version source (``use_scm_version``
    or an explicit ``version``) -- see the try/except at module bottom.
    """
    setup(
        name='drf-yasg',
        packages=find_packages('src'),
        package_dir={'': 'src'},
        include_package_data=True,
        install_requires=requirements,
        extras_require={
            'validation': requirements_validation,
        },
        license='BSD License',
        description='Automated generation of real Swagger/OpenAPI 2.0 schemas from Django Rest Framework code.',
        long_description=description,
        long_description_content_type='text/x-rst',
        url='https://github.com/axnsan12/drf-yasg',
        author='Cristi V.',
        author_email='cristi@cvjd.me',
        keywords='drf django django-rest-framework schema swagger openapi codegen swagger-codegen '
                 'documentation drf-yasg django-rest-swagger drf-openapi',
        python_requires=python_requires,
        classifiers=[
            'Intended Audience :: Developers',
            'License :: OSI Approved :: BSD License',
            'Development Status :: 5 - Production/Stable',
            'Operating System :: OS Independent',
            'Environment :: Web Environment',
            'Topic :: Documentation',
            'Topic :: Software Development :: Code Generators',
        ] + python_classifiers + django_classifiers,
        **kwargs
    )
# Prefer a real SCM-derived version; fall back to a timestamped dummy version
# when setuptools-scm is unavailable (e.g. building from a tarball).
try:
    # noinspection PyUnresolvedReferences
    import setuptools_scm  # noqa: F401

    drf_yasg_setup(use_scm_version=True)
except (ImportError, LookupError) as e:
    if os.getenv('CI', 'false') == 'true' or os.getenv('TRAVIS', 'false') == 'true':
        # don't silently fail on travis - we don't want to accidentally push a dummy version to PyPI
        raise

    err_msg = str(e)
    if 'setuptools-scm' in err_msg or 'setuptools_scm' in err_msg:
        import time
        import traceback

        # Epoch "1!" + millisecond timestamp keeps the dummy version unique
        # and clearly distinguishable from a real release.
        timestamp_ms = int(time.time() * 1000)
        timestamp_str = hex(timestamp_ms)[2:].zfill(16)
        dummy_version = '1!0.0.0.dev0+noscm.' + timestamp_str

        drf_yasg_setup(version=dummy_version)
        traceback.print_exc(file=sys.stderr)
        print("failed to detect version, package was built with dummy version " + dummy_version, file=sys.stderr)
    else:
        # Unrelated import/lookup failure -- surface it.
        raise
| 33.747664 | 113 | 0.649405 |
from __future__ import print_function
import io
import os
import sys
from setuptools import find_packages, setup
def read_req(req_file):
with open(os.path.join('requirements', req_file)) as req:
return [line.strip() for line in req.readlines() if line.strip() and not line.strip().startswith('#')]
with io.open('README.rst', encoding='utf-8') as readme:
description = readme.read()
requirements = read_req('base.txt')
requirements_validation = read_req('validation.txt')
def find_versions_from_readme(prefix):
for line in description.splitlines():
line = line.strip()
if line.startswith(prefix):
versions = [v.strip() for v in line[len(prefix):].split(',')]
if versions:
return versions
raise RuntimeError("failed to find supported versions list for '{}'".format(prefix))
python_versions = find_versions_from_readme("- **Python**: ")
django_versions = find_versions_from_readme("- **Django**: ")
python_requires = ">=" + python_versions[0]
python_classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3',
] + ['Programming Language :: Python :: {}'.format(v) for v in python_versions]
django_classifiers = [
'Framework :: Django',
] + ['Framework :: Django :: {}'.format(v) for v in django_versions]
def drf_yasg_setup(**kwargs):
setup(
name='drf-yasg',
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
install_requires=requirements,
extras_require={
'validation': requirements_validation,
},
license='BSD License',
description='Automated generation of real Swagger/OpenAPI 2.0 schemas from Django Rest Framework code.',
long_description=description,
long_description_content_type='text/x-rst',
url='https://github.com/axnsan12/drf-yasg',
author='Cristi V.',
author_email='cristi@cvjd.me',
keywords='drf django django-rest-framework schema swagger openapi codegen swagger-codegen '
'documentation drf-yasg django-rest-swagger drf-openapi',
python_requires=python_requires,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Environment :: Web Environment',
'Topic :: Documentation',
'Topic :: Software Development :: Code Generators',
] + python_classifiers + django_classifiers,
**kwargs
)
try:
import setuptools_scm
drf_yasg_setup(use_scm_version=True)
except (ImportError, LookupError) as e:
if os.getenv('CI', 'false') == 'true' or os.getenv('TRAVIS', 'false') == 'true':
raise
err_msg = str(e)
if 'setuptools-scm' in err_msg or 'setuptools_scm' in err_msg:
import time
import traceback
timestamp_ms = int(time.time() * 1000)
timestamp_str = hex(timestamp_ms)[2:].zfill(16)
dummy_version = '1!0.0.0.dev0+noscm.' + timestamp_str
drf_yasg_setup(version=dummy_version)
traceback.print_exc(file=sys.stderr)
print("failed to detect version, package was built with dummy version " + dummy_version, file=sys.stderr)
else:
raise
| true | true |
79009201b0e4eff7fec0864ca373b523090333ed | 3,107 | py | Python | benchmark/startCirq2615.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2615.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2615.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=39
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the generated 4-qubit benchmark circuit and measure all qubits.

    The gate list (and its ``# number=`` markers) is emitted by the QDiff
    generator; repeated/self-cancelling gates are part of the generated
    variant and are kept verbatim.  ``n`` is unused here -- the circuit size
    is fixed by ``input_qubit``.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=9
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.H.on(input_qubit[0])) # number=5
    c.append(cirq.Y.on(input_qubit[2])) # number=18
    c.append(cirq.Z.on(input_qubit[3])) # number=28
    c.append(cirq.H.on(input_qubit[1])) # number=6
    c.append(cirq.H.on(input_qubit[2])) # number=7
    c.append(cirq.H.on(input_qubit[3])) # number=8
    c.append(cirq.H.on(input_qubit[3])) # number=20
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=21
    c.append(cirq.H.on(input_qubit[3])) # number=22
    c.append(cirq.X.on(input_qubit[3])) # number=13
    c.append(cirq.H.on(input_qubit[3])) # number=23
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=24
    c.append(cirq.H.on(input_qubit[3])) # number=25
    c.append(cirq.H.on(input_qubit[0])) # number=33
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=34
    c.append(cirq.H.on(input_qubit[0])) # number=35
    c.append(cirq.H.on(input_qubit[1])) # number=19
    c.append(cirq.H.on(input_qubit[0])) # number=15
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=16
    c.append(cirq.H.on(input_qubit[0])) # number=17
    c.append(cirq.Y.on(input_qubit[1])) # number=26
    c.append(cirq.Y.on(input_qubit[1])) # number=27
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=29
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=30
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=36
    c.append(cirq.X.on(input_qubit[0])) # number=37
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=38
    c.append(cirq.X.on(input_qubit[0])) # number=32
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Concatenate an iterable of bit-like values into a '0'/'1' string."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]

    # Build the benchmark circuit and compile it to the Sycamore gate set.
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    # Sample the circuit and histogram the measured bitstrings.
    circuit_sample_count = 2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)

    # Use a context manager so the output file is closed even when one
    # of the prints raises (the original leaked the handle on errors).
    with open("../data/startCirq2615.csv", "w+") as writefile:
        print(format(frequencies), file=writefile)
        print("results end", file=writefile)
        print(len(circuit), file=writefile)  # len() over circuit.__len__()
        print(circuit, file=writefile)
writefile.close() | 36.127907 | 77 | 0.679112 |
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
from cirq.contrib.svg import SVGCircuit
def make_circuit(n: int, input_qubit):
c = cirq.Circuit()
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[2]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.Y.on(input_qubit[2]))
c.append(cirq.Z.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[2]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.X.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.Y.on(input_qubit[1]))
c.append(cirq.Y.on(input_qubit[1]))
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))
c.append(cirq.X.on(input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))
c.append(cirq.X.on(input_qubit[0]))
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2615.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | true | true |
790092ce89e8abaddfefcac60002134d71e88e4f | 402 | py | Python | setup.py | berglen/qualprep | 1d1265da1300d301a4f62b6431eefe3d77621115 | [
"BSD-2-Clause"
] | null | null | null | setup.py | berglen/qualprep | 1d1265da1300d301a4f62b6431eefe3d77621115 | [
"BSD-2-Clause"
] | null | null | null | setup.py | berglen/qualprep | 1d1265da1300d301a4f62b6431eefe3d77621115 | [
"BSD-2-Clause"
] | null | null | null | from setuptools import find_packages, setup
# Packaging metadata for the qualprep distribution.
setup(
    name='qualprep',
    # Ship only the qualprep package itself; tests stay out of the dist.
    packages=find_packages(include=['qualprep']),
    version='0.1.1',
    description='Python library to prepare data',
    author='Lena Berger',
    license='MIT',
    # Runtime dependencies.
    install_requires=['pandas', 'numpy', 'tqdm'],
    # pytest-runner wires `python setup.py test` to pytest.
    setup_requires=['pytest-runner'],
    tests_require=['pytest==4.4.1'],
    test_suite='tests',
) | 28.714286 | 50 | 0.641791 | from setuptools import find_packages, setup
setup(
name='qualprep',
packages=find_packages(include=['qualprep']),
version='0.1.1',
description='Python library to prepare data',
author='Lena Berger',
license='MIT',
install_requires=['pandas', 'numpy', 'tqdm'],
setup_requires=['pytest-runner'],
tests_require=['pytest==4.4.1'],
test_suite='tests',
) | true | true |
790093ef012f9ba21519a7748f67e9b88ea1a8bc | 887 | py | Python | static_test/main.py | DefenderOfSockets/Calibration_IMU_MPU6050 | 46cb142ae4449fcf0f5441ea083713eba8bca884 | [
"MIT"
] | null | null | null | static_test/main.py | DefenderOfSockets/Calibration_IMU_MPU6050 | 46cb142ae4449fcf0f5441ea083713eba8bca884 | [
"MIT"
] | null | null | null | static_test/main.py | DefenderOfSockets/Calibration_IMU_MPU6050 | 46cb142ae4449fcf0f5441ea083713eba8bca884 | [
"MIT"
] | null | null | null | from data_processing_calibration import DataProcessingCalibration
if __name__ == "__main__":
    # Set up the calibration pipeline for the static-test recording.
    processor = DataProcessingCalibration()
    print("Initialize is successful.")
    # Read the recorded IMU samples from disk.
    raw_data = processor.openFile('C://static_test.csv')
    print("Data was got.")
    # Filter the samples and convert them into Euler-angle orientations.
    orientation = processor.processFile(raw_data)
    print("Data was converted.")
    # Allan deviation per axis (roll / pitch / yaw) at the sensor rate.
    (tau_roll, ad_roll,
     tau_pitch, ad_pitch,
     tau_yaw, ad_yaw) = processor.deviationAllan(orientation, rate=31)
    print("Using method of Allan Variation was successful.")
    # Render the diagnostic plots.
    processor.plotDataFromFile(orientation, tau_roll, ad_roll,
                               tau_pitch, ad_pitch, tau_yaw, ad_yaw)
    print("Plots creating was successful.")
| 36.958333 | 113 | 0.722661 | from data_processing_calibration import DataProcessingCalibration
if __name__ == "__main__":
dp_ST = DataProcessingCalibration()
print("Initialize is successful.")
data_from_sensor = dp_ST.openFile('C://static_test.csv')
print("Data was got.")
data_orientation_ST = dp_ST.processFile(data_from_sensor)
print("Data was converted.")
tau_roll, ad_roll, tau_pitch, ad_pitch, tau_yaw, ad_yaw = dp_ST.deviationAllan(data_orientation_ST, rate=31)
print("Using method of Allan Variation was successful.")
dp_ST.plotDataFromFile(data_orientation_ST, tau_roll, ad_roll, tau_pitch, ad_pitch, tau_yaw, ad_yaw)
print("Plots creating was successful.")
| true | true |
790093fca60b14b07965e0139d66480b62804977 | 4,588 | py | Python | nova/console/vmrc.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | 7 | 2017-06-19T19:37:00.000Z | 2019-06-16T02:06:14.000Z | nova/console/vmrc.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | null | null | null | nova/console/vmrc.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | 6 | 2015-06-20T16:07:28.000Z | 2020-08-19T14:57:59.000Z | # Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VMRC console drivers."""
import base64
from oslo.config import cfg
from nova import exception
from nova.openstack.common import jsonutils
from nova.virt.vmwareapi import vim_util
# Configuration options controlling how VMRC consoles are exposed;
# registered on the global oslo.config object below.
vmrc_opts = [
    cfg.IntOpt('console_vmrc_port',
               default=443,
               help="Port for VMware VMRC connections"),
    cfg.IntOpt('console_vmrc_error_retries',
               default=10,
               help="Number of retries for retrieving VMRC information"),
    ]

CONF = cfg.CONF
CONF.register_opts(vmrc_opts)
class VMRCConsole(object):
    """VMRC console driver that hands out reusable ESX credentials."""

    def __init__(self):
        super(VMRCConsole, self).__init__()

    @property
    def console_type(self):
        return 'vmrc+credentials'

    def get_port(self, context):
        """Return the configured port for VMRC console connections."""
        return CONF.console_vmrc_port

    def setup_console(self, context, console):
        """No per-console setup is required for this driver."""
        pass

    def teardown_console(self, context, console):
        """No per-console teardown is required for this driver."""
        pass

    def init_host(self):
        """No host-level initialization is required."""
        pass

    def fix_pool_password(self, password):
        """Encode the pool password (currently stored as-is)."""
        # TODO(sateesh): Encrypt pool password
        return password

    def generate_password(self, vim_session, pool, instance_name):
        """Return VMRC connection credentials for *instance_name*.

        The result is base64-encoded JSON carrying the VM's datastore
        path and the pool's ESX username and password, i.e. the
        '<VM PATH>:<ESX Username>@<ESX Password>' credential triple.
        Raises InstanceNotFound when no VM matches *instance_name*.
        """
        username = pool['username']
        password = pool['password']
        vms = vim_session._call_method(
            vim_util, 'get_objects',
            'VirtualMachine', ['name', 'config.files.vmPathName'])
        vm_ref = None
        vm_ds_path_name = None
        for vm in vms:
            # Collapse the property set into a name -> value mapping.
            props = dict((prop.name, prop.val) for prop in vm.propSet)
            if props.get('name') == instance_name:
                vm_ref = vm.obj
                vm_ds_path_name = props.get('config.files.vmPathName')
                break
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        payload = jsonutils.dumps({'vm_id': vm_ds_path_name,
                                   'username': username,
                                   'password': password})
        return base64.b64encode(payload)

    def is_otp(self):
        """Credentials issued by this driver are reusable, not one-time."""
        return False
class VMRCSessionConsole(VMRCConsole):
    """VMRC console driver that issues one-time VMRC session tickets."""

    def __init__(self):
        super(VMRCSessionConsole, self).__init__()

    @property
    def console_type(self):
        return 'vmrc+session'

    def generate_password(self, vim_session, pool, instance_name):
        """Return a one-time VMRC session for *instance_name*.

        The result is base64-encoded JSON pairing the VM's managed
        object id with a freshly acquired clone ticket, i.e. the
        '<VM MOID>:<VMRC Ticket>' pair.  Raises InstanceNotFound when
        no VM matches *instance_name*.
        """
        vms = vim_session._call_method(vim_util, 'get_objects',
                                       'VirtualMachine', ['name'])
        # When several VMs share the name, the last one listed wins.
        matches = [vm.obj for vm in vms
                   if vm.propSet[0].val == instance_name]
        if not matches:
            raise exception.InstanceNotFound(instance_id=instance_name)
        vm_ref = matches[-1]
        ticket = vim_session._call_method(
            vim_session._get_vim(),
            'AcquireCloneTicket',
            vim_session._get_vim().get_service_content().sessionManager)
        payload = jsonutils.dumps({'vm_id': str(vm_ref.value),
                                   'username': ticket,
                                   'password': ticket})
        return base64.b64encode(payload)

    def is_otp(self):
        """Session tickets are single-use."""
        return True
| 32.309859 | 79 | 0.607672 |
import base64
from oslo.config import cfg
from nova import exception
from nova.openstack.common import jsonutils
from nova.virt.vmwareapi import vim_util
vmrc_opts = [
cfg.IntOpt('console_vmrc_port',
default=443,
help="Port for VMware VMRC connections"),
cfg.IntOpt('console_vmrc_error_retries',
default=10,
help="Number of retries for retrieving VMRC information"),
]
CONF = cfg.CONF
CONF.register_opts(vmrc_opts)
class VMRCConsole(object):
def __init__(self):
super(VMRCConsole, self).__init__()
@property
def console_type(self):
return 'vmrc+credentials'
def get_port(self, context):
return CONF.console_vmrc_port
def setup_console(self, context, console):
pass
def teardown_console(self, context, console):
pass
def init_host(self):
pass
def fix_pool_password(self, password):
return password
def generate_password(self, vim_session, pool, instance_name):
username, password = pool['username'], pool['password']
vms = vim_session._call_method(vim_util, 'get_objects',
'VirtualMachine', ['name', 'config.files.vmPathName'])
vm_ds_path_name = None
vm_ref = None
for vm in vms:
vm_name = None
ds_path_name = None
for prop in vm.propSet:
if prop.name == 'name':
vm_name = prop.val
elif prop.name == 'config.files.vmPathName':
ds_path_name = prop.val
if vm_name == instance_name:
vm_ref = vm.obj
vm_ds_path_name = ds_path_name
break
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
json_data = jsonutils.dumps({'vm_id': vm_ds_path_name,
'username': username,
'password': password})
return base64.b64encode(json_data)
def is_otp(self):
return False
class VMRCSessionConsole(VMRCConsole):
def __init__(self):
super(VMRCSessionConsole, self).__init__()
@property
def console_type(self):
return 'vmrc+session'
def generate_password(self, vim_session, pool, instance_name):
vms = vim_session._call_method(vim_util, 'get_objects',
'VirtualMachine', ['name'])
vm_ref = None
for vm in vms:
if vm.propSet[0].val == instance_name:
vm_ref = vm.obj
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
virtual_machine_ticket = vim_session._call_method(
vim_session._get_vim(),
'AcquireCloneTicket',
vim_session._get_vim().get_service_content().sessionManager)
json_data = jsonutils.dumps({'vm_id': str(vm_ref.value),
'username': virtual_machine_ticket,
'password': virtual_machine_ticket})
return base64.b64encode(json_data)
def is_otp(self):
return True
| true | true |
7900954e9b27e49fd025c56ca1ab18a6e8667b43 | 14,230 | py | Python | train_pose_euler_crop.py | msieb1/LTCN | c9432891327774edf8193e885cc4f10f53fcaa60 | [
"MIT"
] | 1 | 2020-08-21T03:47:33.000Z | 2020-08-21T03:47:33.000Z | train_pose_euler_crop.py | msieb1/LTCN | c9432891327774edf8193e885cc4f10f53fcaa60 | [
"MIT"
] | null | null | null | train_pose_euler_crop.py | msieb1/LTCN | c9432891327774edf8193e885cc4f10f53fcaa60 | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('Agg')
import os
from os.path import join
import argparse
import torch
import numpy as np
import pickle
import sys
import datetime
sys.path.append('./utils')
from torch import optim
from torch import nn
from torch import multiprocessing
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader, ConcatDataset
from utils.builders import SingleViewDepthTripletBuilder, MultiViewDepthTripletBuilder, MultiViewTripletBuilder, SingleViewTripletBuilder
from utils.builder_utils import distance, Logger, ensure_folder, collate_fn, time_stamped
from utils.vocabulary import Vocabulary
from ipdb import set_trace
from sklearn.preprocessing import OneHotEncoder
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchvision import transforms
import torchvision.utils as vutils
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
from shutil import copy2
import importlib
from pyquaternion import Quaternion
from models.pose_predictor_euler_crop import define_model
from utils.plot_utils import plot_mean
from utils.rot_utils_old import create_rot_from_vector, rotationMatrixToEulerAngles, \
isRotationMatrix, eulerAnglesToRotationMatrix, \
norm_sincos, sincos2rotm
from utils.network_utils import loss_rotation, loss_euler_reparametrize, loss_axisangle, batch_size, apply,\
loss_quat, loss_quat_single, euler_XYZ_to_reparam, loss_quat_huber
from utils.plot_utils import plot_mean
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]= "1,2,3"

# Input resolution fed to the network (Inception-style 299x299).
IMAGE_SIZE = (299, 299)
# Number of camera views per sequence.
NUM_VIEWS = 1
# Frames sampled per built dataset sequence.
SAMPLE_SIZE = 40
# Number of sequences used to build the validation set.
VAL_SEQS =5
# Sequences concatenated into each training epoch's dataset.
TRAIN_SEQS_PER_EPOCH = 80
# Loss used for the reparametrized Euler-angle regression.
LOSS_FN = loss_euler_reparametrize

# Root folder holding experiment videos and trained models.
EXP_ROOT_DIR = '/media/hdd/msieb/data/tcn_data/experiments'
sys.path.append(EXP_ROOT_DIR)
class Trainer(object):
    """Orchestrates training and validation of the pose-prediction model.

    Owns the model, optimizer, LR scheduler, TensorBoard writer, and a
    daemon process that keeps a one-slot queue filled with freshly built
    training datasets (see build_set).
    """
    def __init__(self, use_cuda, load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args, multi_gpu=True):
        """Wire up model, data pipelines, logging and the optimizer.

        Args:
            use_cuda: move model/tensors to GPU when True.
            load_model: optional weight-file name (relative to
                ``model_folder``) to restore before training.
            model_folder: output root for logs and weight files.
            train_directory: root of the training video sequences.
            validation_directory: root of the validation video sequences.
            builder: dataset-builder class (one of utils.builders).
            loss_fn: callable ``(model, frames, targets) -> (loss, pred)``.
            args: parsed command-line namespace.
            multi_gpu: wrap the model in DataParallel over all GPUs.
        """
        self.use_cuda = use_cuda
        self.load_model = load_model
        self.model_folder = model_folder
        self.validation_directory = validation_directory
        self.train_directory = train_directory
        self.args = args
        self.builder = builder
        self.loss_fn = loss_fn
        self.logdir = join(model_folder, 'logs')
        self.writer = SummaryWriter(self.logdir)
        self.logger = Logger(self.args.log_file)
        self.itr = 0
        # Create Model
        self.model = self.create_model()
        if multi_gpu:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
        # Build validation set (built once, fixed for the whole run)
        validation_builder = builder(self.args.n_views, validation_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)
        validation_set = [validation_builder.build_set() for i in range(VAL_SEQS)]
        validation_set = ConcatDataset(validation_set)
        self.len_validation_set = len(validation_set)
        del validation_builder
        self.validation_loader = DataLoader(
            validation_set,
            batch_size=8,
            shuffle=False,
            pin_memory=self.use_cuda,
        )
        self.validation_calls = 0
        # Build Training Set: a daemon process keeps the size-1 queue
        # topped up with new datasets while the main process trains.
        self.triplet_builder = builder(self.args.n_views, \
            train_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)
        self.training_queue = multiprocessing.Queue(1)
        dataset_builder_process = multiprocessing.Process(target=self.build_set, args=(self.training_queue, self.triplet_builder, self.logger), daemon=True)
        dataset_builder_process.start()
        # Get Logger
        # Model specific setup
        # self.optimizer = optim.SGD(self.model.parameters(), lr=self.args.lr_start, momentum=0.9)
        # NOTE(review): lr is hard-coded to 0.001 here while --lr-start
        # is parsed from the CLI but unused - confirm which is intended.
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08)
        # This will diminish the learning rate at the milestones ///// 0.1, 0.01, 0.001 if not using automized scheduler
        self.learning_rate_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')
        # self.criterion = nn.CrossEntropyLoss()
    def train(self):
        """Run the full training loop over ``args.epochs`` epochs.

        Each epoch pulls a pre-built dataset from the queue, optimizes the
        model on it, logs scalars/images/embeddings to TensorBoard,
        validates, and periodically checkpoints.  Loss/accuracy curves are
        plotted after the final epoch.
        """
        trn_losses_ = []
        val_losses_= []
        val_acc_ = []
        trn_acc_ = []
        for epoch in range(self.args.start_epoch, self.args.start_epoch + self.args.epochs):
            print("=" * 20)
            self.logger.info("Starting epoch: {0} ".format(epoch))
            # Blocks until the builder process has a dataset ready.
            dataset = self.training_queue.get()
            data_loader = DataLoader(
                dataset=dataset,
                batch_size=self.args.minibatch_size, # batch_size(epoch, self.args.max_minibatch_size),
                shuffle=True,
                pin_memory=self.use_cuda,
            )
            train_embedding_features_buffer = []
            train_images_buffer = []
            train_labels = []
            correct = 0
            for _ in range(0, 1):
                losses = []
                for minibatch in data_loader:
                    # NOTE(review): frames/targets are only bound on the
                    # CUDA path; a CPU run would hit a NameError below -
                    # confirm CUDA is assumed.
                    if self.use_cuda:
                        anchor_frames = minibatch[0].cuda()
                        #anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix
                        anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix
                    # frames = Variable(minibatch)
                    loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)
                    losses.append(loss.data.cpu().numpy())
                    # NOTE(review): the L2 norm is taken over the whole
                    # batch, so this adds a single 0/1 per minibatch
                    # rather than a per-sample count - confirm intended.
                    correct += (torch.norm(a_pred - anchor_quats, 2) < 1).data.cpu().numpy().sum() # print(gradcheck(loss_fn, (tcn, minibatch,)))
                    self.optimizer.zero_grad()
                    loss.backward()
                    self.optimizer.step()
                    # Add embeddings
                    train_labels.append(anchor_quats)
                    train_embedding_features_buffer.append(anchor_quats)
                    train_images_buffer.append(anchor_frames)
            print("logging to {}".format(self.logdir))
            self.writer.add_scalar('data/train_loss', np.mean(losses), self.itr)
            self.writer.add_scalar('data/train_correct', correct / len(data_loader), self.itr)
            self.itr += 1
            trn_losses_.append(np.mean(losses))
            self.logger.info('train loss: ', np.mean(losses))
            self.logger.info("Training score correct {correct}/{total}".format(
                correct=correct,
                total=len(data_loader)
            ))
            trn_acc_.append(correct)
            # Log sample frames from the last minibatch of the epoch.
            self.writer.add_image('frame_1', minibatch[0][0], self.itr)
            # self.writer.add_image('pose1', str(minibatch[1][0].data.detach().cpu().numpy()), self.itr)
            self.writer.add_image('frame_2', minibatch[0][1], self.itr)
            # self.writer.add_image('pose_2', str(minibatch[1][1].data.detach().cpu().numpy()), self.itr)
            self.writer.add_image('frame_3', minibatch[0][2], self.itr)
            # self.writer.add_image('pose_3', str(minibatch[1][2].data.detach().cpu().numpy()), self.itr)
            self.writer.add_image('frame_4', minibatch[0][3], self.itr)
            # self.writer.add_image('pose_4', str(minibatch[1][3].data.detach().cpu().numpy()), self.itr)
            # Get embeddings (first 30 minibatches only, to bound memory)
            features = torch.cat(train_embedding_features_buffer[:30]).squeeze_()
            labels = torch.cat(train_labels[:30]).squeeze_()
            # features = train_embedding_features_buffer.view(train_embedding_features_buffer.shape[0]*train_embedding_features_buffer.shape[1], -1)
            # label = torch.Tensor(np.asarray(label_buffer))
            images = torch.cat(train_images_buffer[:30]).squeeze_()#/255.0, [0, 3, 1, 2]
            self.writer.add_embedding(features, metadata=labels, label_img=images, global_step=epoch)
            # Validate every epoch; the plateau scheduler follows the
            # validation loss.
            if epoch % 1 == 0:
                loss, correct = self.validate()
            self.learning_rate_scheduler.step(loss)
            val_losses_.append(loss)
            val_acc_.append(correct)
            if epoch % self.args.save_every == 0 and epoch != 0:
                self.logger.info('Saving model.')
                self.save_model(self.model, self.model_filename(self.args.model_name, epoch), join(self.model_folder, 'weight_files'))
                print("logging to {}".format(self.logdir))
        plot_mean(trn_losses_, self.model_folder, 'train_loss')
        plot_mean(val_losses_, self.model_folder, 'validation_loss')
        plot_mean(trn_acc_, self.model_folder, 'train_acc')
        plot_mean(val_acc_, self.model_folder, 'validation_accuracy')
        # plot_mean(val_acc_no_margin_, self.model_folder, 'validation_accuracy_no_margin')
    def validate(self):
        """Evaluate on the validation set; return (mean loss, correct).

        Scalars are logged to TensorBoard under a separate step counter
        (``validation_calls``) from the training iteration counter.
        """
        # Run model on validation data and log results
        correct = 0
        losses = []
        for minibatch in self.validation_loader:
            # NOTE(review): as in train(), inputs are only bound when
            # use_cuda is True - confirm CUDA is assumed.
            if self.use_cuda:
                anchor_frames = minibatch[0].cuda()
                #anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix
                anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix
            loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)
            losses.append(loss.data.cpu().numpy())
            # Batch counts as correct when the batch-wide L2 error < 0.1.
            correct += (torch.norm(a_pred - anchor_quats, 2) < 0.1).data.cpu().numpy().sum()
        self.writer.add_scalar('data/valid_loss', np.mean(losses), self.validation_calls)
        self.writer.add_scalar('data/validation_correct', correct / self.len_validation_set, self.validation_calls)
        self.validation_calls += 1
        loss = np.mean(losses)
        self.logger.info("Validation score correct {correct}/{total}".format(
            correct=correct,
            total=self.len_validation_set
        ))
        self.logger.info('val loss: ',loss)
        return loss, correct
    def model_filename(self, model_name, epoch):
        """Return the checkpoint file name for a model name and epoch."""
        return "{model_name}-epoch-{epoch}.pk".format(model_name=model_name, epoch=epoch)
    def save_model(self, model, filename, model_folder):
        """Write the model's state_dict to ``model_folder/filename``."""
        ensure_folder(model_folder)
        model_path = os.path.join(model_folder, filename)
        torch.save(model.state_dict(), model_path)
    def build_set(self, queue, triplet_builder, log):
        """Dataset-producer loop run in the daemon process.

        Builds TRAIN_SEQS_PER_EPOCH sequence datasets, concatenates them,
        and blocks on the size-1 queue until train() consumes the result.
        """
        while 1:
            datasets = []
            for i in range(TRAIN_SEQS_PER_EPOCH):
                dataset = triplet_builder.build_set()
                datasets.append(dataset)
            dataset = ConcatDataset(datasets)
            # log.info('Created {0} triplets'.format(len(dataset)))
            queue.put(dataset)
    def create_model(self):
        """Instantiate the pose model, optionally restoring checkpoint weights."""
        model = define_model(pretrained=True)
        # model = PosNet()
        if self.load_model:
            model_path = os.path.join(
                self.model_folder,
                self.load_model
            )
            # map_location allows us to load models trained on cuda to cpu.
            model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
        if self.use_cuda:
            model = model.cuda()
        return model
    def batch_size(self, epoch, max_size):
        """Batch size doubling every 100 epochs, clamped to [2, max_size].

        Currently unused by train(), which takes the CLI minibatch size.
        """
        exponent = epoch // 100
        return min(max(2 ** (exponent), 2), max_size)
def main(args):
    """Set up run directories and data builders, then start training.

    Expects ``args`` to provide ``exp_name``, ``run_name``, ``builder``
    and ``load_model`` (see the argparse block at the bottom of the file).
    """
    # module = importlib.import_module(args.exp_name + '.config')
    # conf = getattr(module, 'Config_Isaac_Server')()
    # EXP_DIR = conf.EXP_DIR
    # MODEL_FOLDER = conf.MODEL_FOLDER

    # GPU Configuration: train on CUDA whenever a device is available.
    use_cuda = torch.cuda.is_available()

    # Time-stamped output folder for this run's logs and weights.
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    model_folder = join(EXP_ROOT_DIR, args.exp_name, 'trained_models', args.run_name, time_stamped())
    os.makedirs(model_folder, exist_ok=True)

    # Resolve the dataset-builder class by name and pick the loss.
    builder = getattr(importlib.import_module('utils.builders'), args.builder)
    loss_fn = LOSS_FN

    # Define train and validation directories.
    train_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/train/')
    validation_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/valid/')

    # Snapshot the executed script/config next to the run for
    # reproducibility.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    os.makedirs(join(script_dir, 'experiments'), exist_ok=True)
    # NOTE(review): the copied filename 'train_tcn_no_captions.py' does
    # not match this script's name - confirm which file should be
    # snapshotted.
    copy2(join(script_dir, 'train_tcn_no_captions.py'), model_folder)
    copy2(join(os.path.dirname(script_dir), 'gps-lfd', 'config.py'), model_folder)

    # Build the trainer and run the training loop.
    trainer = Trainer(use_cuda, args.load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args)
    trainer.train()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Training schedule.
    parser.add_argument('--start-epoch', type=int, default=0)
    parser.add_argument('--epochs', type=int, default=1000)
    parser.add_argument('--save-every', type=int, default=10)
    # Optional checkpoint (relative to the model folder) to resume from.
    parser.add_argument('--load-model', type=str, required=False)
    parser.add_argument('--minibatch-size', type=int, default=8)
    parser.add_argument('--model-name', type=str, default='tcn')
    parser.add_argument('--log-file', type=str, default='./out.log')
    # NOTE(review): --lr-start is parsed but Trainer hard-codes the Adam
    # learning rate to 0.001 - confirm which is intended.
    parser.add_argument('--lr-start', type=float, default=0.001)
    parser.add_argument('--n-views', type=int, default=NUM_VIEWS)
    parser.add_argument('--alpha', type=float, default=0.01, help='weighing factor of language loss to triplet loss')

    # Model parameters

    # Path parameters
    parser.add_argument('--exp-name', type=str, required=True)
    parser.add_argument('--run-name', type=str, required=True)
    parser.add_argument('--builder', type=str, required=True)

    args = parser.parse_args()
    print(args)
    main(args)
| 43.650307 | 169 | 0.646381 | import matplotlib
matplotlib.use('Agg')
import os
from os.path import join
import argparse
import torch
import numpy as np
import pickle
import sys
import datetime
sys.path.append('./utils')
from torch import optim
from torch import nn
from torch import multiprocessing
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader, ConcatDataset
from utils.builders import SingleViewDepthTripletBuilder, MultiViewDepthTripletBuilder, MultiViewTripletBuilder, SingleViewTripletBuilder
from utils.builder_utils import distance, Logger, ensure_folder, collate_fn, time_stamped
from utils.vocabulary import Vocabulary
from ipdb import set_trace
from sklearn.preprocessing import OneHotEncoder
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchvision import transforms
import torchvision.utils as vutils
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
from shutil import copy2
import importlib
from pyquaternion import Quaternion
from models.pose_predictor_euler_crop import define_model
from utils.plot_utils import plot_mean
from utils.rot_utils_old import create_rot_from_vector, rotationMatrixToEulerAngles, \
isRotationMatrix, eulerAnglesToRotationMatrix, \
norm_sincos, sincos2rotm
from utils.network_utils import loss_rotation, loss_euler_reparametrize, loss_axisangle, batch_size, apply,\
loss_quat, loss_quat_single, euler_XYZ_to_reparam, loss_quat_huber
from utils.plot_utils import plot_mean
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" environ["CUDA_VISIBLE_DEVICES"]= "1,2,3"
IMAGE_SIZE = (299, 299)
NUM_VIEWS = 1
SAMPLE_SIZE = 40
VAL_SEQS =5
TRAIN_SEQS_PER_EPOCH = 80
LOSS_FN = loss_euler_reparametrize
EXP_ROOT_DIR = '/media/hdd/msieb/data/tcn_data/experiments'
sys.path.append(EXP_ROOT_DIR)
class Trainer(object):
def __init__(self, use_cuda, load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args, multi_gpu=True):
self.use_cuda = use_cuda
self.load_model = load_model
self.model_folder = model_folder
self.validation_directory = validation_directory
self.train_directory = train_directory
self.args = args
self.builder = builder
self.loss_fn = loss_fn
self.logdir = join(model_folder, 'logs')
self.writer = SummaryWriter(self.logdir)
self.logger = Logger(self.args.log_file)
self.itr = 0
self.model = self.create_model()
if multi_gpu:
self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
validation_builder = builder(self.args.n_views, validation_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)
validation_set = [validation_builder.build_set() for i in range(VAL_SEQS)]
validation_set = ConcatDataset(validation_set)
self.len_validation_set = len(validation_set)
del validation_builder
self.validation_loader = DataLoader(
validation_set,
batch_size=8,
shuffle=False,
pin_memory=self.use_cuda,
)
self.validation_calls = 0
self.triplet_builder = builder(self.args.n_views, \
train_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)
self.training_queue = multiprocessing.Queue(1)
dataset_builder_process = multiprocessing.Process(target=self.build_set, args=(self.training_queue, self.triplet_builder, self.logger), daemon=True)
dataset_builder_process.start()
self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08)
self.learning_rate_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')
def train(self):
trn_losses_ = []
val_losses_= []
val_acc_ = []
trn_acc_ = []
for epoch in range(self.args.start_epoch, self.args.start_epoch + self.args.epochs):
print("=" * 20)
self.logger.info("Starting epoch: {0} ".format(epoch))
dataset = self.training_queue.get()
data_loader = DataLoader(
dataset=dataset,
batch_size=self.args.minibatch_size,
shuffle=True,
pin_memory=self.use_cuda,
)
train_embedding_features_buffer = []
train_images_buffer = []
train_labels = []
correct = 0
for _ in range(0, 1):
losses = []
for minibatch in data_loader:
if self.use_cuda:
anchor_frames = minibatch[0].cuda()
or_quats = minibatch[1].cuda()
loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)
losses.append(loss.data.cpu().numpy())
correct += (torch.norm(a_pred - anchor_quats, 2) < 1).data.cpu().numpy().sum()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
train_labels.append(anchor_quats)
train_embedding_features_buffer.append(anchor_quats)
train_images_buffer.append(anchor_frames)
print("logging to {}".format(self.logdir))
self.writer.add_scalar('data/train_loss', np.mean(losses), self.itr)
self.writer.add_scalar('data/train_correct', correct / len(data_loader), self.itr)
self.itr += 1
trn_losses_.append(np.mean(losses))
self.logger.info('train loss: ', np.mean(losses))
self.logger.info("Training score correct {correct}/{total}".format(
correct=correct,
total=len(data_loader)
))
trn_acc_.append(correct)
self.writer.add_image('frame_1', minibatch[0][0], self.itr)
self.writer.add_image('frame_2', minibatch[0][1], self.itr)
self.writer.add_image('frame_3', minibatch[0][2], self.itr)
self.writer.add_image('frame_4', minibatch[0][3], self.itr)
features = torch.cat(train_embedding_features_buffer[:30]).squeeze_()
labels = torch.cat(train_labels[:30]).squeeze_()
images = torch.cat(train_images_buffer[:30]).squeeze_()
self.writer.add_embedding(features, metadata=labels, label_img=images, global_step=epoch)
if epoch % 1 == 0:
loss, correct = self.validate()
self.learning_rate_scheduler.step(loss)
val_losses_.append(loss)
val_acc_.append(correct)
if epoch % self.args.save_every == 0 and epoch != 0:
self.logger.info('Saving model.')
self.save_model(self.model, self.model_filename(self.args.model_name, epoch), join(self.model_folder, 'weight_files'))
print("logging to {}".format(self.logdir))
plot_mean(trn_losses_, self.model_folder, 'train_loss')
plot_mean(val_losses_, self.model_folder, 'validation_loss')
plot_mean(trn_acc_, self.model_folder, 'train_acc')
plot_mean(val_acc_, self.model_folder, 'validation_accuracy')
def validate(self):
correct = 0
losses = []
for minibatch in self.validation_loader:
if self.use_cuda:
anchor_frames = minibatch[0].cuda()
= minibatch[1].cuda()
loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)
losses.append(loss.data.cpu().numpy())
correct += (torch.norm(a_pred - anchor_quats, 2) < 0.1).data.cpu().numpy().sum()
self.writer.add_scalar('data/valid_loss', np.mean(losses), self.validation_calls)
self.writer.add_scalar('data/validation_correct', correct / self.len_validation_set, self.validation_calls)
self.validation_calls += 1
loss = np.mean(losses)
self.logger.info("Validation score correct {correct}/{total}".format(
correct=correct,
total=self.len_validation_set
))
self.logger.info('val loss: ',loss)
return loss, correct
def model_filename(self, model_name, epoch):
return "{model_name}-epoch-{epoch}.pk".format(model_name=model_name, epoch=epoch)
def save_model(self, model, filename, model_folder):
    """Serialize the model's state dict to ``model_folder/filename``."""
    ensure_folder(model_folder)
    destination = os.path.join(model_folder, filename)
    torch.save(model.state_dict(), destination)
def build_set(self, queue, triplet_builder, log):
    """Producer loop: endlessly build ConcatDatasets and push them onto `queue`.

    Each dataset concatenates TRAIN_SEQS_PER_EPOCH sets produced by
    `triplet_builder`. Intended to run in a background worker; never returns.
    """
    while True:
        built = [triplet_builder.build_set() for _ in range(TRAIN_SEQS_PER_EPOCH)]
        queue.put(ConcatDataset(built))
def create_model(self):
    """Instantiate the network, optionally restoring saved weights.

    Loads ``self.load_model`` (if set) from ``self.model_folder`` and moves
    the model to the GPU when CUDA is enabled.
    """
    net = define_model(pretrained=True)
    if self.load_model:
        checkpoint_path = os.path.join(self.model_folder, self.load_model)
        # map_location keeps tensors on CPU regardless of where they were saved
        net.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))
    return net.cuda() if self.use_cuda else net
def batch_size(self, epoch, max_size):
    """Batch-size schedule: double every 100 epochs, clamped to [2, max_size]."""
    size = 2 ** (epoch // 100)
    if size < 2:
        size = 2
    return size if size <= max_size else max_size
def main(args):
# Entry point: set up folders, resolve the triplet builder, and train.
# NOTE(review): `device` is computed but only `use_cuda` is passed on — confirm intent.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_cuda = torch.cuda.is_available()
# Each run writes into a fresh time-stamped folder under the experiment root.
model_folder = join(EXP_ROOT_DIR, args.exp_name, 'trained_models', args.run_name, time_stamped())
if not os.path.exists(model_folder):
os.makedirs(model_folder)
# Resolve the triplet-builder class by name from utils.builders.
builder = getattr(importlib.import_module('utils.builders'), args.builder)
loss_fn = LOSS_FN
train_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/train/')
validation_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/valid/')
# Snapshot this script and the config next to the run for reproducibility.
if not os.path.exists('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments'):
os.makedirs('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments')
copy2('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/train_tcn_no_captions.py', model_folder)
copy2('/'.join(os.path.realpath(__file__).split('/')[:-2]) + '/gps-lfd' + '/config.py', model_folder)
trainer = Trainer(use_cuda, args.load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args)
trainer.train()
if __name__ == '__main__':
# Command-line interface for the trainer.
parser = argparse.ArgumentParser()
parser.add_argument('--start-epoch', type=int, default=0)
parser.add_argument('--epochs', type=int, default=1000)
# Checkpoint frequency, in epochs.
parser.add_argument('--save-every', type=int, default=10)
parser.add_argument('--load-model', type=str, required=False)
parser.add_argument('--minibatch-size', type=int, default=8)
parser.add_argument('--model-name', type=str, default='tcn')
parser.add_argument('--log-file', type=str, default='./out.log')
parser.add_argument('--lr-start', type=float, default=0.001)
parser.add_argument('--n-views', type=int, default=NUM_VIEWS)
parser.add_argument('--alpha', type=float, default=0.01, help='weighing factor of language loss to triplet loss')
parser.add_argument('--exp-name', type=str, required=True)
parser.add_argument('--run-name', type=str, required=True)
parser.add_argument('--builder', type=str, required=True)
args = parser.parse_args()
print(args)
main(args)
| true | true |
7900956706440cfd1fba67f66ccbf492b57d7cac | 9,991 | py | Python | corpus/text_cleaner.py | senisioi/Romanian-Transformers | 45f4c4513fd0a5a81f4a20a5d63b4b9cd1d10b43 | [
"MIT"
] | null | null | null | corpus/text_cleaner.py | senisioi/Romanian-Transformers | 45f4c4513fd0a5a81f4a20a5d63b4b9cd1d10b43 | [
"MIT"
] | null | null | null | corpus/text_cleaner.py | senisioi/Romanian-Transformers | 45f4c4513fd0a5a81f4a20a5d63b4b9cd1d10b43 | [
"MIT"
] | null | null | null | import re, multiprocessing
from tqdm import tqdm
import numpy as np
class Cleaner():
    """Filters and normalizes raw Romanian corpus lines.

    ``process`` rejects lines that are too short, too numeric, too non-ascii,
    contain forbidden characters, or look like ASCII tables, then applies a
    battery of regex/string normalizations to the survivors.
    """

    def __init__(self, num_threads=1):  # right now, it's single threaded
        # cap the worker count at half the available cores
        self.num_threads = min(num_threads, int(multiprocessing.cpu_count()/2))

        # join words split by a hyphen + space, e.g.
        # "S- ar putea să fie necesar să- l recitiţi."
        self.r1 = re.compile(r"([\w]+-)[\s]([\w]+)", re.IGNORECASE)

        # drop the space after a slash, e.g. "{LL/ AAAA}", "Humalog Mix50 100 U/ ml"
        self.r2 = re.compile(r"([\w]+/)\s([\w]+)", re.IGNORECASE)

        # all unicode dashes (incl. bullet \u2022) collapse to a plain '-';
        # see https://www.fileformat.info/info/unicode/category/Pd/list.htm
        self.r3 = re.compile(r"([■\u2022\u007E\u00AD\u058A\u05BE\u1400\u1806\u2010\u2011\u2012\u2013\u2014\u2015\u2053\u207B\u208B\u2212\u2E17\u2E3A\u2E3B\u301C\u3030\u30A0\uFE31\uFE32\uFE63\uFF0D]+)", re.UNICODE)

        # remove spaces after the comma inside numbers: "1, 4%" -> "1,4%"
        self.r4 = re.compile(r"([\d]+,)\s([\d]+)", re.IGNORECASE)

        # soft hyphens \u00AD
        self.r5 = re.compile(r"[\u00AD]")

        # URLs and stray markup fragments
        self.r6 = re.compile(r'(?:www|http)\S+|<\S+|\w+\/*>')

        # email addresses
        self.r7 = re.compile(r'([^@]+@[^@]+\.[^@]+)')

        # table separator runs
        self.r8 = re.compile(r'[\─\─]+')
        self.r9 = re.compile(r'[\-\-]+')

        # runs of multiple spaces
        self.space = re.compile(' +')

        # characters that empirically mark garbage sentences
        self.forbidden_chars = "ºþÈ™ÓÑÄÈîƒ"

    def process(self, lines, percent_max_numeric=0.7, percent_max_non_ascii=0.40, min_line_length=20, verbose=False, disable_pbar=True):
        """Clean an iterable of lines.

        Args:
            lines: iterable of raw text lines.
            percent_max_numeric: reject when digits/letters >= this (and >6 digits).
            percent_max_non_ascii: reject when ascii/letters < this (len > 15).
            min_line_length: reject lines shorter than this, before and after cleaning.
            verbose: print each rejected line with its rejection score.
            disable_pbar: hide the tqdm progress bar.

        Returns:
            (output, stats): kept lines (each newline-terminated) and a dict of
            per-reason [line, char] rejection counters plus total lengths.
        """
        skipped_because_min_length = np.array([0,0], dtype=np.uint64)
        skipped_alpha_count = np.array([0,0], dtype=np.uint64)
        skipped_because_max_numeric = np.array([0,0], dtype=np.uint64)
        skipped_because_max_non_ascii = np.array([0,0], dtype=np.uint64)
        skipped_because_forbidden_chars = np.array([0,0], dtype=np.uint64)
        total_original_length = 0
        total_clean_length = 0
        output = []

        for line in tqdm(lines, disable = disable_pbar):
            line = line.strip()

            # get stats about line
            length = len(line)
            total_original_length += length

            if length < min_line_length:
                skipped_because_min_length += np.array([1,length], dtype=np.uint64)
                continue

            line = bytes(line, 'utf-8').decode('utf-8', 'ignore')  # strip not utf-8 chars

            digit_count = 0
            alpha_count = 0
            ascii_count = 0
            forbidden_char = False
            for char in line:
                if char in self.forbidden_chars:
                    forbidden_char = True
                    break
                if char.isnumeric():
                    digit_count += 1
                if char.isalpha():
                    alpha_count += 1
                if char.isascii():
                    ascii_count += 1

            # reject if forbidden char
            if forbidden_char:
                skipped_because_forbidden_chars += np.array([1,length], dtype=np.uint64)
                continue

            # reject if number of letters is too small
            if alpha_count == 0 or alpha_count / length < 0.5:
                skipped_alpha_count += np.array([1,length], dtype=np.uint64)
                if verbose:
                    print("Skipping alpha={:.3f}: [{}]".format(alpha_count / length, line))
                continue

            # reject if too many numbers
            if digit_count / alpha_count >= percent_max_numeric and digit_count > 6:
                skipped_because_max_numeric += np.array([1,length], dtype=np.uint64)
                if verbose:
                    print("Skipping digit={:.3f}: [{}]".format(digit_count / alpha_count, line))
                continue

            # reject if too many non-ascii
            if ascii_count / alpha_count < percent_max_non_ascii and length > 15:
                skipped_because_max_non_ascii += np.array([1,length], dtype=np.uint64)
                if verbose:
                    # fixed: this printed digit_count/alpha_count (copy-paste from
                    # the numeric branch); the rejection is based on the ascii ratio
                    print("Skipping ascii={:.3f}: [{}]".format(ascii_count / alpha_count, line))
                continue

            # skip lines that appear to be ascii tables │
            if (line.strip()[0] == '|' and line.count('|') > 2) or (line.strip()[0] == '│' and line.count('│') > 2):
                skipped_because_forbidden_chars += np.array([1,length], dtype=np.uint64)
                if verbose:
                    print("Skipping table line: [{}]".format(line))
                continue

            # clean line
            line = self.r1.sub(r"\1\2", line)
            line = self.r2.sub(r"\1\2", line)
            line = self.r3.sub("-", line)
            line = self.r4.sub(r"\1\2", line)
            line = self.r5.sub("", line)
            line = self.r6.sub("", line)
            line = self.r7.sub("", line)

            # separators
            line = self.r8.sub("", line)
            line = self.r9.sub("", line)

            line = line.replace("( ă)", "(ă)")
            # cedilla diacritics -> comma-below diacritics (Romanian convention)
            line = line.replace("ţ", "ț")
            line = line.replace("ş", "ș")
            line = line.replace("Ţ", "Ț")
            line = line.replace("Ş", "Ș")
            # repair a common mojibake of 'â'
            line = line.replace("â", "â")

            line = self.space.sub(' ', line).strip()

            # check that after processing the line is not too short
            if len(line) < min_line_length:
                skipped_because_min_length += np.array([1,length], dtype=np.uint64)
                continue

            total_clean_length += len(line)
            output.append(line+"\n")

        # pack stats
        stats = {}
        stats["skipped_because_min_length"] = skipped_because_min_length
        stats["skipped_alpha_count"] = skipped_alpha_count
        stats["skipped_because_max_numeric"] = skipped_because_max_numeric
        stats["skipped_because_max_non_ascii"] = skipped_because_max_non_ascii
        stats["skipped_because_forbidden_chars"] = skipped_because_forbidden_chars
        stats["total_original_length"] = total_original_length
        stats["total_clean_length"] = total_clean_length
        return output, stats

    def add_stats(self, a, b):
        """
        Add two stats dict that are returned by the process function.
        This is used for multiple files
        :param a: stats dict
        :param b: stats dict
        :return: stats dict
        """
        stats = {}
        stats["skipped_because_min_length"] = a["skipped_because_min_length"] + b["skipped_because_min_length"]
        stats["skipped_alpha_count"] = a["skipped_alpha_count"] + b["skipped_alpha_count"]
        stats["skipped_because_max_numeric"] = a["skipped_because_max_numeric"] + b["skipped_because_max_numeric"]
        stats["skipped_because_max_non_ascii"] = a["skipped_because_max_non_ascii"] + b["skipped_because_max_non_ascii"]
        stats["skipped_because_forbidden_chars"] = a["skipped_because_forbidden_chars"] + b["skipped_because_forbidden_chars"]
        stats["total_original_length"] = a["total_original_length"] + b["total_original_length"]
        stats["total_clean_length"] = a["total_clean_length"] + b["total_clean_length"]
        return stats

    def print_stats(self, stats):
        """Pretty-print a stats dict produced by process()/add_stats()."""
        print("\nCleaning statistics:")
        print("Total original length (chars) = {}".format(stats["total_original_length"]))
        print("Total length after cleaning (chars) = {}".format(stats["total_clean_length"]))
        print("Percent data kept = {:.3f} %".format(100.*stats["total_clean_length"]/stats["total_original_length"]))
        print("Skipped because line length was below minimum (lines/chars): {} ".format(stats["skipped_because_min_length"]))
        print("Skipped because line had forbidden characters (lines/chars): {} ".format(stats["skipped_because_forbidden_chars"]))
        print("Skipped because alpha count was below minimum (lines/chars): {} ".format(stats["skipped_alpha_count"]))
        print("Skipped because digit count was above maximum (lines/chars): {} ".format(stats["skipped_because_max_numeric"]))
        print("Skipped because too many non-ascii characters (lines/chars): {} ".format(stats["skipped_because_max_non_ascii"]))
# Sample drug-leaflet lines used by the commented-out smoke test below.
text = [" - ~~~~~Păstraţi acest prospect. S- ar putea să fie necesar să- l recitiţi.",
"- Dacă aveţi orice întrebări suplimentare, adresaţi- vă medicului dumneavoastră sau farmacistului.\n",
"{LL/ AAAA}\n",
"MANUALUL UTILIZATORULUI\n",
"Vezi textul manualului mai jos.\n",
"303 Informaţii detaliate privind acest medicament sunt disponibile pe website- ul Agenţiei Europene a Medicamentului (EMEA): http: // www. emea. europa. eu /.\n",
"304 PROSPECT: \n",
"INFORMAŢII PENTRU UTILIZATOR",
"Humalog Mix50 100 U/ ml • • • ~~~~",
"Τηλ: +30 210 629 4600 España Lilly S. A.",
"Tel: + 34- 91 663 50 00 France Lilly France S. A. S.",
"Tél: +33 - (0) 1 55 49 34 34 Ireland Eli Lilly and Company (Ireland) Limited Tel: + 353 - (0) 1 661 4377 Ísland Icepharma hf.",
"Sími + 354 540 8000 Italia Eli Lilly Italia S. p. A.",
"Tel: + 39 - 055 42571 Κύπρος Phadisco Ltd Τηλ: +357 22 715000 ",
"Luxembourg/ Luxemburg Eli Lilly Benelux S. A.",
"Tél/ Tel: + 32 - (0) 2 548 84 84 Magyarország Lilly Hungária Kft.",
"Tel: + 36 1 328 5100 Malta Charles de Giorgio Ltd.",
"Κύπρος Βαρνάβας Χατζηπαναγής Λτδ 7 Ανδροκλέους CY- 1060 Λευκωσία Tηλ"]
#tt = []
#for i in range(100000):
# tt.extend(text)
#print(len(tt))
"""
c = Cleaner(1)
lines, s1 = c.process(text)
lines, s2 = c.process(text)
stats = c.add_stats(s1, s2)
c.print_stats(s1)
c.print_stats(s2)
c.print_stats(stats)
print("DONE")
"""
| 40.946721 | 212 | 0.586828 | import re, multiprocessing
from tqdm import tqdm
import numpy as np
class Cleaner():
def __init__(self, num_threads=1):
self.num_threads = min(num_threads, int(multiprocessing.cpu_count()/2))
self.r1 = re.compile(r"([\w]+-)[\s]([\w]+)", re.IGNORECASE)
self.r2 = re.compile(r"([\w]+/)\s([\w]+)", re.IGNORECASE)
self.r3 = re.compile(r"([■\u2022\u007E\u00AD\u058A\u05BE\u1400\u1806\u2010\u2011\u2012\u2013\u2014\u2015\u2053\u207B\u208B\u2212\u2E17\u2E3A\u2E3B\u301C\u3030\u30A0\uFE31\uFE32\uFE63\uFF0D]+)", re.UNICODE)
self.r4 = re.compile(r"([\d]+,)\s([\d]+)", re.IGNORECASE)
self.r5 = re.compile(r"[\u00AD]")
self.r6 = re.compile(r'(?:www|http)\S+|<\S+|\w+\/*>')
self.r7 = re.compile(r'([^@]+@[^@]+\.[^@]+)')
self.r8 = re.compile(r'[\─\─]+')
self.r9 = re.compile(r'[\-\-]+')
self.space = re.compile(' +')
self.forbidden_chars = "ºþÈ™ÓÑÄÈîƒ"
def process(self, lines, percent_max_numeric=0.7, percent_max_non_ascii=0.40, min_line_length=20, verbose=False, disable_pbar=True):
skipped_because_min_length = np.array([0,0], dtype=np.uint64)
skipped_alpha_count = np.array([0,0], dtype=np.uint64)
skipped_because_max_numeric = np.array([0,0], dtype=np.uint64)
skipped_because_max_non_ascii = np.array([0,0], dtype=np.uint64)
skipped_because_forbidden_chars = np.array([0,0], dtype=np.uint64)
total_original_length = 0
total_clean_length = 0
output = []
for line in tqdm(lines, disable = disable_pbar):
line = line.strip()
# get stats about line
length = len(line)
total_original_length += length
if length < min_line_length:
skipped_because_min_length += np.array([1,length], dtype=np.uint64)
continue
line = bytes(line, 'utf-8').decode('utf-8', 'ignore') # strip not utf-8 chars
digit_count = 0
alpha_count = 0
ascii_count = 0
forbidden_char = False
for char in line:
if char in self.forbidden_chars:
forbidden_char = True
break
if char.isnumeric():
digit_count+=1
if char.isalpha():
alpha_count+=1
if char.isascii():
ascii_count+=1
# reject if forbidden char
if forbidden_char:
skipped_because_forbidden_chars += np.array([1,length], dtype=np.uint64)
continue
# reject if number of letters is too small
if alpha_count == 0 or alpha_count / length < 0.5:
skipped_alpha_count += np.array([1,length], dtype=np.uint64)
if verbose:
print("Skipping alpha={:.3f}: [{}]".format(alpha_count / length, line))
continue
# reject if too many numbers
if digit_count / alpha_count >= percent_max_numeric and digit_count > 6:
skipped_because_max_numeric += np.array([1,length], dtype=np.uint64)
if verbose:
print("Skipping digit={:.3f}: [{}]".format(digit_count / alpha_count, line))
continue
# reject if too many non-ascii
if ascii_count / alpha_count < percent_max_non_ascii and length > 15:
skipped_because_max_non_ascii += np.array([1,length], dtype=np.uint64)
if verbose:
print("Skipping ascii={:.3f}: [{}]".format(digit_count / alpha_count, line))
continue
#skip lines that appear to be ascii tables │
if (line.strip()[0] == '|' and line.count('|') > 2) or (line.strip()[0] == '│' and line.count('│') > 2):
skipped_because_forbidden_chars += np.array([1,length], dtype=np.uint64)
if verbose:
print("Skipping table line: [{}]".format(line))
continue
# clean line
#print("\nbef: {}".format(line))
line = self.r1.sub(r"\1\2", line)
line = self.r2.sub(r"\1\2", line)
line = self.r3.sub("-", line)
line = self.r4.sub(r"\1\2", line)
line = self.r5.sub("", line)
line = self.r6.sub("", line)
line = self.r7.sub("", line)
# separators
line = self.r8.sub("", line)
line = self.r9.sub("", line)
line = line.replace("( ă)", "(ă)")
line = line.replace("ţ", "ț")
line = line.replace("ş", "ș")
line = line.replace("Ţ", "Ț")
line = line.replace("Ş", "Ș")
line = line.replace("â", "â")
#print("aft: {}".format(line))
line = self.space.sub(' ', line).strip()
# check that after processing the line is not too short
if len(line) < min_line_length:
skipped_because_min_length += np.array([1,length], dtype=np.uint64)
continue
total_clean_length += len(line)
output.append(line+"\n")
# pack stats
stats = {}
stats["skipped_because_min_length"] = skipped_because_min_length
stats["skipped_alpha_count"] = skipped_alpha_count
stats["skipped_because_max_numeric"] = skipped_because_max_numeric
stats["skipped_because_max_non_ascii"] = skipped_because_max_non_ascii
stats["skipped_because_forbidden_chars"] = skipped_because_forbidden_chars
stats["total_original_length"] = total_original_length
stats["total_clean_length"] = total_clean_length
return output, stats
def add_stats(self, a, b):
stats = {}
stats["skipped_because_min_length"] = a["skipped_because_min_length"] + b["skipped_because_min_length"]
stats["skipped_alpha_count"] = a["skipped_alpha_count"] + b["skipped_alpha_count"]
stats["skipped_because_max_numeric"] = a["skipped_because_max_numeric"] + b["skipped_because_max_numeric"]
stats["skipped_because_max_non_ascii"] = a["skipped_because_max_non_ascii"] + b["skipped_because_max_non_ascii"]
stats["skipped_because_forbidden_chars"] = a["skipped_because_forbidden_chars"] + b["skipped_because_forbidden_chars"]
stats["total_original_length"] = a["total_original_length"] + b["total_original_length"]
stats["total_clean_length"] = a["total_clean_length"] + b["total_clean_length"]
return stats
def print_stats(self, stats):
print("\nCleaning statistics:")
print("Total original length (chars) = {}".format(stats["total_original_length"]))
print("Total length after cleaning (chars) = {}".format(stats["total_clean_length"]))
print("Percent data kept = {:.3f} %".format(100.*stats["total_clean_length"]/stats["total_original_length"]))
print("Skipped because line length was below minimum (lines/chars): {} ".format(stats["skipped_because_min_length"]))
print("Skipped because line had forbidden characters (lines/chars): {} ".format(stats["skipped_because_forbidden_chars"]))
print("Skipped because alpha count was below minimum (lines/chars): {} ".format(stats["skipped_alpha_count"]))
print("Skipped because digit count was above maximum (lines/chars): {} ".format(stats["skipped_because_max_numeric"]))
print("Skipped because too many non-ascii characters (lines/chars): {} ".format(stats["skipped_because_max_non_ascii"]))
text = [" - ~~~~~Păstraţi acest prospect. S- ar putea să fie necesar să- l recitiţi.",
"- Dacă aveţi orice întrebări suplimentare, adresaţi- vă medicului dumneavoastră sau farmacistului.\n",
"{LL/ AAAA}\n",
"MANUALUL UTILIZATORULUI\n",
"Vezi textul manualului mai jos.\n",
"303 Informaţii detaliate privind acest medicament sunt disponibile pe website- ul Agenţiei Europene a Medicamentului (EMEA): http: // www. emea. europa. eu /.\n",
"304 PROSPECT: \n",
"INFORMAŢII PENTRU UTILIZATOR",
"Humalog Mix50 100 U/ ml • • • ~~~~",
"Τηλ: +30 210 629 4600 España Lilly S. A.",
"Tel: + 34- 91 663 50 00 France Lilly France S. A. S.",
"Tél: +33 - (0) 1 55 49 34 34 Ireland Eli Lilly and Company (Ireland) Limited Tel: + 353 - (0) 1 661 4377 Ísland Icepharma hf.",
"Sími + 354 540 8000 Italia Eli Lilly Italia S. p. A.",
"Tel: + 39 - 055 42571 Κύπρος Phadisco Ltd Τηλ: +357 22 715000 ",
"Luxembourg/ Luxemburg Eli Lilly Benelux S. A.",
"Tél/ Tel: + 32 - (0) 2 548 84 84 Magyarország Lilly Hungária Kft.",
"Tel: + 36 1 328 5100 Malta Charles de Giorgio Ltd.",
"Κύπρος Βαρνάβας Χατζηπαναγής Λτδ 7 Ανδροκλέους CY- 1060 Λευκωσία Tηλ"]
#tt = []
#for i in range(100000):
# tt.extend(text)
#print(len(tt))
| true | true |
7900956c420de46505e2863cdb1f71f64a9b7a69 | 364 | py | Python | config.py | weigun/StressTest | 10d747556e1b7ac95100b3e4d4b511ac6de45b4f | [
"MIT"
] | null | null | null | config.py | weigun/StressTest | 10d747556e1b7ac95100b3e4d4b511ac6de45b4f | [
"MIT"
] | null | null | null | config.py | weigun/StressTest | 10d747556e1b7ac95100b3e4d4b511ac6de45b4f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
TIME_OUT = 60
EXCEPT_FILE = ['test.py','login.py','mix.py']
class Api(object):
login = "/api/users/login"
user_info="/api/users/info"
signin = "/api/users/sign/signIn"
map = "/api/RedEnvelope/updateUserMap"
find_redbag = "/api/RedEnvelope/findReds"
get_redbag = "/api/redUser/getRed"
test= "/api/sys/testJson" | 28 | 45 | 0.64011 |
TIME_OUT = 60
EXCEPT_FILE = ['test.py','login.py','mix.py']
class Api(object):
login = "/api/users/login"
user_info="/api/users/info"
signin = "/api/users/sign/signIn"
map = "/api/RedEnvelope/updateUserMap"
find_redbag = "/api/RedEnvelope/findReds"
get_redbag = "/api/redUser/getRed"
test= "/api/sys/testJson" | true | true |
79009598e7fa08dde69be3808af45c94fd029af7 | 13,112 | py | Python | test/simulation_tests.py | gsmcwhirter/simulations | 73d8349125d50394ecf126af672c35c703e07aeb | [
"MIT"
] | null | null | null | test/simulation_tests.py | gsmcwhirter/simulations | 73d8349125d50394ecf126af672c35c703e07aeb | [
"MIT"
] | null | null | null | test/simulation_tests.py | gsmcwhirter/simulations | 73d8349125d50394ecf126af672c35c703e07aeb | [
"MIT"
] | 3 | 2016-08-24T15:40:25.000Z | 2019-12-14T17:26:57.000Z | import simulations.simulation as simulation
import simulations.simulation_runner as simrunner
import cPickle
import os
import random
import re
import string
import subprocess
import sys
from simulations.utils.optionparser import OptionParser
from nose.tools import assert_equal
from nose.tools import assert_raises
def filename_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of `size` characters drawn from `chars`."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
class Sim(simulation.Simulation):
# Minimal concrete Simulation: its run result is the constant string "runs".
def _run(self):
return "runs"
class Sim2(simulation.Simulation):
# Like Sim, but also writes "runs" to its output stream.
def _run(self):
# NOTE: Python 2 print-redirection syntax; this module targets Python 2.
print >> self.out, "runs"
return "runs"
class Batch(simrunner.SimulationRunner):
# SimulationRunner wired with a required --test flag. Handlers are
# @staticmethod because the runner's event system passes the runner
# instance explicitly as the first argument.
def _add_listeners(self):
self.on('oparser set up', self._set_options)
self.on('options parsed', self._check_options)
self.on('options parsed', self._set_data)
self.on('done', self._when_done)
@staticmethod
def _set_options(self):
# Register the --test flag on the runner's option parser.
self.oparser.add_option("-t", "--test", action="store_true", dest="test", default=False, help="Testing")
@staticmethod
def _check_options(self):
# Abort option parsing unless --test was passed.
if not self.options.test:
self.oparser.error("Test flag not passed")
@staticmethod
def _set_data(self):
# Mirror the flag into the runner's shared data dict.
self.data['test'] = self.options.test
@staticmethod
def _when_done(self):
return "test"
class TestSimulation:
# Unit tests for simulation.Simulation: construction, output-file
# handling, running, and the module-level run_simulation delegate.
def setUp(self):
self.sim = Sim(1, 2, None)
def tearDown(self):
self.sim = None
def test_simulation_init(self):
# Constructor stores data/num and leaves output unopened.
assert self.sim is not None, "Sim is not set up"
assert_equal(self.sim.data, 1)
assert_equal(self.sim.num, 2)
assert self.sim.outfile is None, "_outfile is not None"
assert self.sim.out is None
assert_equal(self.sim.out_opened, False)
def test_simulation_set_outfile(self):
# Setting the output file only opens the fd while the sim is running.
self.sim.set_output_file("/tmp/test")
assert_equal(self.sim.outfile, "/tmp/test")
assert self.sim.out is None, "Sim.out is set up"
self.sim.is_running = True
self.sim.set_output_file("/tmp/test")
assert self.sim.out is not None, "Sim.out is not set up"
simulation._close_out_fd(self.sim)
assert self.sim.out is None, "Sim.out was not closed"
assert_equal(self.sim.out_opened, False)
simulation._open_out_fd(self.sim)
assert self.sim.out is not None, "Sim.out was not opened"
assert_equal(self.sim.out_opened, True)
self.sim.set_output_file("/tmp/test2")
simulation._open_out_fd(self.sim)
assert self.sim.out is not None, "Sim.out was not opened"
assert_equal(self.sim.out_opened, True)
def test_simulation_run(self):
# run() returns the _run() result and leaves the output closed.
assert_equal(self.sim.out_opened, False)
self.sim.set_output_file(False)
result = self.sim.run()
assert_equal(self.sim.result, "runs")
assert_equal(result, "runs")
assert_equal(self.sim.out_opened, False)
assert simulation.Simulation._run(self.sim) is None
def test_delegation_method(self):
# run_simulation([cls, *args]) constructs and runs the simulation.
self.sim.set_output_file(None)
assert_equal(simrunner.run_simulation([Sim, 1, 2, None]), "runs")
class TestSimulationBatch:
# Integration tests for the Batch runner: option parsing, duplication
# runs, per-duplication output files, and the pickled stats file.
def setUp(self):
# Fresh temp directory per test so file assertions don't collide.
self.dir = "/tmp/" + filename_generator(8)
self.batch = Batch(Sim2)
def tearDown(self):
# Remove only the *.testout artifacts this suite creates, then the dir.
self.batch = None
if os.path.isdir(self.dir):
files = os.listdir(self.dir)
for f in files:
if f == "." or f == "..": continue
if f[-8:] == ".testout":
os.remove(self.dir + os.sep + f)
os.rmdir(self.dir)
def test_batch_init(self):
assert self.batch is not None, "Batch is not set up"
assert isinstance(self.batch.oparser, OptionParser), "Option parser is not initialized"
assert self.batch.options is None, "Options is initialized"
assert self.batch.args is None, "Args is initialized"
assert_equal(self.batch.data, {})
assert_equal(self.batch._task_dup_num, False)
# identifier is a 6-char uppercase/digit token (same alphabet as
# filename_generator's default).
assert_equal(len(self.batch.identifier), 6)
assert re.match('[{0}{1}]{{6}}'.format(string.ascii_uppercase, string.digits), self.batch.identifier)
def test_handler_options(self):
# Custom error/exit handlers are forwarded to the option parser.
sim2 = Batch(Sim2, option_error_handler=2, option_exit_handler=3)
assert_equal(sim2.oparser._errorhandler, 2)
assert_equal(sim2.oparser._exithandler, 3)
def test_batch_option_setup(self):
# All built-in runner options plus the Batch-added --test must exist.
assert self.batch.oparser.has_option("-D"), "No -D option"
assert self.batch.oparser.has_option("--nofiledump"), "No --nofiledump option"
assert self.batch.oparser.has_option("-F"), "No -F option"
assert self.batch.oparser.has_option("--filename"), "No --filename option"
assert self.batch.oparser.has_option("-N"), "No -N option"
assert self.batch.oparser.has_option("--duplications"), "No --duplications option"
assert self.batch.oparser.has_option("-O"), "No -O option"
assert self.batch.oparser.has_option("--output"), "No --output option"
assert self.batch.oparser.has_option("-P"), "No -P option"
assert self.batch.oparser.has_option("--poolsize"), "No --poolsize option"
assert self.batch.oparser.has_option("-Q"), "No -Q option"
assert self.batch.oparser.has_option("--quiet"), "No --quiet option"
assert self.batch.oparser.has_option("-S"), "No -S option"
assert self.batch.oparser.has_option("--statsfile"), "No --statsfile option"
assert self.batch.oparser.has_option("-t"), "No -t option"
assert self.batch.oparser.has_option("--test"), "No --test option"
def test_batch_go(self):
# Default pool, file dump on: expect 4 dup files plus the stats file.
args = ["-F", "iter_{0}.testout", "-N", "4", "-O", self.dir, "-S", "results.testout", "--test"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 4)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "iter_{0}.testout")
assert_equal(self.batch.options.file_dump, True)
assert_equal(self.batch.options.stats_file, "results.testout")
## pp stuff
#assert_equal(self.batch.options.pool_size, 'autodetect')
assert self.batch.options.pool_size is None, "Pool size is not None"
assert_equal(self.batch.options.quiet, False)
assert_equal(self.batch.data['test'], True)
for i in range(4):
assert os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
for i in range(4):
with open(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1), "r") as dup_file:
assert_equal(dup_file.read(), "runs\n")
# Stats file layout: pickled options, blank line, then one pickled
# result per duplication, each followed by a blank line.
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(4):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_batch_go2(self):
# Pool of 2, quiet, no file dump: only the stats file is written.
args = ["-N", "6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "--test", "-D"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 6)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "duplication_{0}")
assert_equal(self.batch.options.file_dump, False)
assert_equal(self.batch.options.stats_file, "results.testout")
assert_equal(self.batch.options.pool_size, 2)
assert_equal(self.batch.options.quiet, True)
assert_equal(self.batch.data['test'], True)
for i in range(6):
assert not os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(6):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_batch_go3(self):
# Pool of 1 (serial path), no file dump.
args = ["-N", "6", "-P", "1", "-O", self.dir, "-S", "results.testout", "--test", "-D"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 6)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "duplication_{0}")
assert_equal(self.batch.options.file_dump, False)
assert_equal(self.batch.options.stats_file, "results.testout")
assert_equal(self.batch.options.pool_size, 1)
assert_equal(self.batch.options.quiet, False)
assert_equal(self.batch.data['test'], True)
for i in range(6):
assert not os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(6):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_option_failure(self):
# Negative duplication count and missing option values must exit.
args = ["-N", "-6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "-D", "--test"]
assert_raises(SystemExit, self.batch.go, option_args=args)
assert_raises(SystemExit, self.batch.go, option_values=None)
def test_option_failure2(self):
# Omitting the required --test flag must exit (see Batch._check_options).
args = ["-N", "6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "-D"]
assert_raises(SystemExit, self.batch.go, option_args=args)
def test_option_failure3(self):
# Negative pool size must exit.
args = ["-N", "6", "-P", "-1", "-O", self.dir, "-S", "results.testout", "-Q", "-D", "--test"]
assert_raises(SystemExit, self.batch.go, option_args=args)
## pp stuff
#class TestClustering:
#
# def setUp(self):
# self.secret = filename_generator(6)
# self.server = subprocess.Popen(["ppserver.py", "-s", self.secret])
# self.batch = Batch(Sim2)
# self.dir = "/tmp/" + filename_generator(8)
#
# def tearDown(self):
# self.batch = None
# self.server.terminate()
# if os.path.isdir(self.dir):
# files = os.listdir(self.dir)
# for f in files:
# if f == "." or f == "..": continue
# if f[-8:] == ".testout":
# os.remove(self.dir + os.sep + f)
# os.rmdir(self.dir)
#
# def test_batch_cluster_go(self):
# args = ["-F", "iter_{0}.testout", "-N", "4", "-P", "2", "-O", self.dir, "-S", "results.testout", "--test", "--cluster=127.0.0.1", "--clustersecret="+self.secret]
# assert self.batch.go(option_args=args) is None
# assert_equal(self.batch.options.test, True)
# assert_equal(self.batch.options.dup, 4)
# assert_equal(self.batch.options.output_dir, self.dir)
# assert_equal(self.batch.options.output_file, "iter_{0}.testout")
# assert_equal(self.batch.options.file_dump, True)
# assert_equal(self.batch.options.stats_file, "results.testout")
# assert_equal(self.batch.options.pool_size, 2)
# assert_equal(self.batch.options.quiet, False)
# assert_equal(self.batch.options.cluster_string, '127.0.0.1')
# assert_equal(self.batch.options.cluster_secret, self.secret)
#
# assert_equal(self.batch.data['test'], True)
#
# for i in range(4):
# assert os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
# assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
#
# for i in range(4):
# with open(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1), "r") as dup_file:
# assert_equal(dup_file.read(), "runs\n")
#
# with open(self.dir + os.sep + 'results.testout', "r") as results_file:
# should_be = ''
# should_be += cPickle.dumps(self.batch.options) + "\n"
# should_be += "\n"
# for _ in range(4):
# should_be += cPickle.dumps("runs") + "\n"
# should_be += "\n"
# assert_equal(results_file.read(), should_be)
#
| 41.625397 | 171 | 0.616306 | import simulations.simulation as simulation
import simulations.simulation_runner as simrunner
import cPickle
import os
import random
import re
import string
import subprocess
import sys
from simulations.utils.optionparser import OptionParser
from nose.tools import assert_equal
from nose.tools import assert_raises
def filename_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
class Sim(simulation.Simulation):
def _run(self):
return "runs"
class Sim2(simulation.Simulation):
def _run(self):
print >> self.out, "runs"
return "runs"
class Batch(simrunner.SimulationRunner):
def _add_listeners(self):
self.on('oparser set up', self._set_options)
self.on('options parsed', self._check_options)
self.on('options parsed', self._set_data)
self.on('done', self._when_done)
@staticmethod
def _set_options(self):
self.oparser.add_option("-t", "--test", action="store_true", dest="test", default=False, help="Testing")
@staticmethod
def _check_options(self):
if not self.options.test:
self.oparser.error("Test flag not passed")
@staticmethod
def _set_data(self):
self.data['test'] = self.options.test
@staticmethod
def _when_done(self):
return "test"
class TestSimulation:
def setUp(self):
self.sim = Sim(1, 2, None)
def tearDown(self):
self.sim = None
def test_simulation_init(self):
assert self.sim is not None, "Sim is not set up"
assert_equal(self.sim.data, 1)
assert_equal(self.sim.num, 2)
assert self.sim.outfile is None, "_outfile is not None"
assert self.sim.out is None
assert_equal(self.sim.out_opened, False)
def test_simulation_set_outfile(self):
self.sim.set_output_file("/tmp/test")
assert_equal(self.sim.outfile, "/tmp/test")
assert self.sim.out is None, "Sim.out is set up"
self.sim.is_running = True
self.sim.set_output_file("/tmp/test")
assert self.sim.out is not None, "Sim.out is not set up"
simulation._close_out_fd(self.sim)
assert self.sim.out is None, "Sim.out was not closed"
assert_equal(self.sim.out_opened, False)
simulation._open_out_fd(self.sim)
assert self.sim.out is not None, "Sim.out was not opened"
assert_equal(self.sim.out_opened, True)
self.sim.set_output_file("/tmp/test2")
simulation._open_out_fd(self.sim)
assert self.sim.out is not None, "Sim.out was not opened"
assert_equal(self.sim.out_opened, True)
def test_simulation_run(self):
assert_equal(self.sim.out_opened, False)
self.sim.set_output_file(False)
result = self.sim.run()
assert_equal(self.sim.result, "runs")
assert_equal(result, "runs")
assert_equal(self.sim.out_opened, False)
assert simulation.Simulation._run(self.sim) is None
def test_delegation_method(self):
self.sim.set_output_file(None)
assert_equal(simrunner.run_simulation([Sim, 1, 2, None]), "runs")
class TestSimulationBatch:
def setUp(self):
self.dir = "/tmp/" + filename_generator(8)
self.batch = Batch(Sim2)
def tearDown(self):
self.batch = None
if os.path.isdir(self.dir):
files = os.listdir(self.dir)
for f in files:
if f == "." or f == "..": continue
if f[-8:] == ".testout":
os.remove(self.dir + os.sep + f)
os.rmdir(self.dir)
def test_batch_init(self):
assert self.batch is not None, "Batch is not set up"
assert isinstance(self.batch.oparser, OptionParser), "Option parser is not initialized"
assert self.batch.options is None, "Options is initialized"
assert self.batch.args is None, "Args is initialized"
assert_equal(self.batch.data, {})
assert_equal(self.batch._task_dup_num, False)
assert_equal(len(self.batch.identifier), 6)
assert re.match('[{0}{1}]{{6}}'.format(string.ascii_uppercase, string.digits), self.batch.identifier)
def test_handler_options(self):
sim2 = Batch(Sim2, option_error_handler=2, option_exit_handler=3)
assert_equal(sim2.oparser._errorhandler, 2)
assert_equal(sim2.oparser._exithandler, 3)
def test_batch_option_setup(self):
assert self.batch.oparser.has_option("-D"), "No -D option"
assert self.batch.oparser.has_option("--nofiledump"), "No --nofiledump option"
assert self.batch.oparser.has_option("-F"), "No -F option"
assert self.batch.oparser.has_option("--filename"), "No --filename option"
assert self.batch.oparser.has_option("-N"), "No -N option"
assert self.batch.oparser.has_option("--duplications"), "No --duplications option"
assert self.batch.oparser.has_option("-O"), "No -O option"
assert self.batch.oparser.has_option("--output"), "No --output option"
assert self.batch.oparser.has_option("-P"), "No -P option"
assert self.batch.oparser.has_option("--poolsize"), "No --poolsize option"
assert self.batch.oparser.has_option("-Q"), "No -Q option"
assert self.batch.oparser.has_option("--quiet"), "No --quiet option"
assert self.batch.oparser.has_option("-S"), "No -S option"
assert self.batch.oparser.has_option("--statsfile"), "No --statsfile option"
assert self.batch.oparser.has_option("-t"), "No -t option"
assert self.batch.oparser.has_option("--test"), "No --test option"
def test_batch_go(self):
args = ["-F", "iter_{0}.testout", "-N", "4", "-O", self.dir, "-S", "results.testout", "--test"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 4)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "iter_{0}.testout")
assert_equal(self.batch.options.file_dump, True)
assert_equal(self.batch.options.stats_file, "results.testout")
assert self.batch.options.pool_size is None, "Pool size is not None"
assert_equal(self.batch.options.quiet, False)
assert_equal(self.batch.data['test'], True)
for i in range(4):
assert os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
for i in range(4):
with open(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1), "r") as dup_file:
assert_equal(dup_file.read(), "runs\n")
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(4):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_batch_go2(self):
args = ["-N", "6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "--test", "-D"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 6)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "duplication_{0}")
assert_equal(self.batch.options.file_dump, False)
assert_equal(self.batch.options.stats_file, "results.testout")
assert_equal(self.batch.options.pool_size, 2)
assert_equal(self.batch.options.quiet, True)
assert_equal(self.batch.data['test'], True)
for i in range(6):
assert not os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(6):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_batch_go3(self):
args = ["-N", "6", "-P", "1", "-O", self.dir, "-S", "results.testout", "--test", "-D"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 6)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "duplication_{0}")
assert_equal(self.batch.options.file_dump, False)
assert_equal(self.batch.options.stats_file, "results.testout")
assert_equal(self.batch.options.pool_size, 1)
assert_equal(self.batch.options.quiet, False)
assert_equal(self.batch.data['test'], True)
for i in range(6):
assert not os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(6):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_option_failure(self):
args = ["-N", "-6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "-D", "--test"]
assert_raises(SystemExit, self.batch.go, option_args=args)
assert_raises(SystemExit, self.batch.go, option_values=None)
def test_option_failure2(self):
args = ["-N", "6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "-D"]
assert_raises(SystemExit, self.batch.go, option_args=args)
def test_option_failure3(self):
args = ["-N", "6", "-P", "-1", "-O", self.dir, "-S", "results.testout", "-Q", "-D", "--test"]
assert_raises(SystemExit, self.batch.go, option_args=args)
| true | true |
7900959cc131e0703613025abcfc45610938cee5 | 886 | py | Python | dc_plc/dc_plc/report/dc_product_procmap_stats/dc_product_procmap_stats.py | igrekus/dc_plc | 76fbb6b1c98ff9d0de46f7979b76cd775834be79 | [
"MIT"
] | 3 | 2020-09-06T11:34:42.000Z | 2022-03-12T04:52:58.000Z | dc_plc/dc_plc/report/dc_product_procmap_stats/dc_product_procmap_stats.py | igrekus/dc_plc | 76fbb6b1c98ff9d0de46f7979b76cd775834be79 | [
"MIT"
] | null | null | null | dc_plc/dc_plc/report/dc_product_procmap_stats/dc_product_procmap_stats.py | igrekus/dc_plc | 76fbb6b1c98ff9d0de46f7979b76cd775834be79 | [
"MIT"
] | 5 | 2020-06-18T07:47:14.000Z | 2022-01-13T06:33:46.000Z | # Copyright (c) 2013, igrekus and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from dc_plc.custom.utils import add_completeness, add_query_relevance
from dc_plc.controllers.stats_query import get_procmap_stats
def execute(filters=None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns():
return [
"ID:Link/DC_PLC_Product_Summary",
_("Relevance"),
_("Progress"),
_("RnD Title"),
_("Function"),
_("External number"),
_("Process map"),
_("Internal number")
]
def get_data(filters):
res = get_procmap_stats(filters)
has_perms = 'DC_PLC_Process_Map_Specialist' in frappe.get_roles(frappe.session.user)
res = [add_completeness(row, [4]) for row in res]
res = [add_query_relevance(row, has_perms) for row in res]
return res
| 21.609756 | 85 | 0.749436 |
from __future__ import unicode_literals
import frappe
from frappe import _
from dc_plc.custom.utils import add_completeness, add_query_relevance
from dc_plc.controllers.stats_query import get_procmap_stats
def execute(filters=None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns():
return [
"ID:Link/DC_PLC_Product_Summary",
_("Relevance"),
_("Progress"),
_("RnD Title"),
_("Function"),
_("External number"),
_("Process map"),
_("Internal number")
]
def get_data(filters):
res = get_procmap_stats(filters)
has_perms = 'DC_PLC_Process_Map_Specialist' in frappe.get_roles(frappe.session.user)
res = [add_completeness(row, [4]) for row in res]
res = [add_query_relevance(row, has_perms) for row in res]
return res
| true | true |
790095f4d4fec55b0efdf9ce1bda4f380bf62a12 | 3,229 | py | Python | examples/semantic_indexing/biencoder_base_model.py | Elvisambition/PaddleNLP | 4563c2d531850b92a10379e8c2639e3820762809 | [
"Apache-2.0"
] | null | null | null | examples/semantic_indexing/biencoder_base_model.py | Elvisambition/PaddleNLP | 4563c2d531850b92a10379e8c2639e3820762809 | [
"Apache-2.0"
] | null | null | null | examples/semantic_indexing/biencoder_base_model.py | Elvisambition/PaddleNLP | 4563c2d531850b92a10379e8c2639e3820762809 | [
"Apache-2.0"
] | null | null | null | import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class BiEncoder(nn.Layer):
def __init__(self,question_encoder,context_encoder,dropout,output_emb_size = 768,state=None):
super(BiEncoder, self).__init__()
self.state = state
if self.state == None:
self.question_encoder = question_encoder
self.context_encoder = context_encoder
elif self.state == "FORQUESTION":
self.question_encoder = question_encoder
elif self.state == "FORCONTEXT":
self.context_encoder = context_encoder
self.dropout = nn.Dropout(dropout if dropout is not None else 0.1)
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=0.02))
self.emb_reduce_linear = paddle.nn.Linear(
768, output_emb_size, weight_attr=weight_attr)
def get_question_pooled_embedding(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
_, cls_embedding = self.question_encoder(input_ids, token_type_ids, position_ids,attention_mask)
"""cls_embedding = self.emb_reduce_linear(cls_embedding)
cls_embedding = self.dropout(cls_embedding)
cls_embedding = F.normalize(cls_embedding, p=2, axis=-1)"""
return cls_embedding
def get_context_pooled_embedding(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
_, cls_embedding = self.context_encoder(input_ids, token_type_ids, position_ids,attention_mask)
"""cls_embedding = self.emb_reduce_linear(cls_embedding)
cls_embedding = self.dropout(cls_embedding)
cls_embedding = F.normalize(cls_embedding, p=2, axis=-1)"""
return cls_embedding
def forward(self,
question_id,
question_segments,
question_attn_mask,
context_ids,
context_segments,
context_attn_mask,
):
question_pooled_out = self.get_question_pooled_embedding(question_id,question_segments,question_attn_mask)
context_pooled_out = self.get_context_pooled_embedding(context_ids,context_segments,context_attn_mask)
return question_pooled_out,context_pooled_out
class BiEncoderNllLoss(object):
def calc(self,
q_vectors,
ctx_vectors,
positive_idx_per_question,
loss_scale=None):
scorces = paddle.matmul(q_vectors,paddle.transpose(ctx_vectors,[0,1]))
if len(q_vectors.size()) > 1:
q_num = q_vectors.size(0)
scores = scorces.view(q_num, -1)
softmax_scorces = F.log_softmax(scores,axis=1)
loss = F.nll_loss(softmax_scorces,paddle.to_tensor(positive_idx_per_question))
max_score = paddle.max(softmax_scorces,axis=1)
correct_predictions_count = (None)
if loss_scale:
loss.mul_(loss_scale)
return loss,correct_predictions_count | 37.988235 | 114 | 0.633013 | import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class BiEncoder(nn.Layer):
def __init__(self,question_encoder,context_encoder,dropout,output_emb_size = 768,state=None):
super(BiEncoder, self).__init__()
self.state = state
if self.state == None:
self.question_encoder = question_encoder
self.context_encoder = context_encoder
elif self.state == "FORQUESTION":
self.question_encoder = question_encoder
elif self.state == "FORCONTEXT":
self.context_encoder = context_encoder
self.dropout = nn.Dropout(dropout if dropout is not None else 0.1)
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=0.02))
self.emb_reduce_linear = paddle.nn.Linear(
768, output_emb_size, weight_attr=weight_attr)
def get_question_pooled_embedding(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
_, cls_embedding = self.question_encoder(input_ids, token_type_ids, position_ids,attention_mask)
return cls_embedding
def get_context_pooled_embedding(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
_, cls_embedding = self.context_encoder(input_ids, token_type_ids, position_ids,attention_mask)
return cls_embedding
def forward(self,
question_id,
question_segments,
question_attn_mask,
context_ids,
context_segments,
context_attn_mask,
):
question_pooled_out = self.get_question_pooled_embedding(question_id,question_segments,question_attn_mask)
context_pooled_out = self.get_context_pooled_embedding(context_ids,context_segments,context_attn_mask)
return question_pooled_out,context_pooled_out
class BiEncoderNllLoss(object):
def calc(self,
q_vectors,
ctx_vectors,
positive_idx_per_question,
loss_scale=None):
scorces = paddle.matmul(q_vectors,paddle.transpose(ctx_vectors,[0,1]))
if len(q_vectors.size()) > 1:
q_num = q_vectors.size(0)
scores = scorces.view(q_num, -1)
softmax_scorces = F.log_softmax(scores,axis=1)
loss = F.nll_loss(softmax_scorces,paddle.to_tensor(positive_idx_per_question))
max_score = paddle.max(softmax_scorces,axis=1)
correct_predictions_count = (None)
if loss_scale:
loss.mul_(loss_scale)
return loss,correct_predictions_count | true | true |
790097688658088fee2608d89e4b51b201dfef2b | 421 | py | Python | src/tools.py | r3w0p/memeoff | 69ef35bbd3949cfeb22b220f297f842ae9f48027 | [
"MIT"
] | 1 | 2021-03-23T19:53:09.000Z | 2021-03-23T19:53:09.000Z | src/tools.py | r3w0p/memeoff | 69ef35bbd3949cfeb22b220f297f842ae9f48027 | [
"MIT"
] | 22 | 2021-03-16T22:30:39.000Z | 2022-02-07T18:43:18.000Z | src/tools.py | r3w0p/memeoff | 69ef35bbd3949cfeb22b220f297f842ae9f48027 | [
"MIT"
] | null | null | null | from pathlib import Path
def dir_touch(path_file) -> None:
Path(path_file).mkdir(parents=True, exist_ok=True)
def file_touch(path_file) -> None:
p = Path(path_file)
p.parents[0].mkdir(parents=True, exist_ok=True)
p.touch()
def index_or_default(lst, val, default=-1):
return lst.index(val) if val in lst else default
def print_info(logger, message):
print(message)
logger.info(message)
| 20.047619 | 54 | 0.703088 | from pathlib import Path
def dir_touch(path_file) -> None:
Path(path_file).mkdir(parents=True, exist_ok=True)
def file_touch(path_file) -> None:
p = Path(path_file)
p.parents[0].mkdir(parents=True, exist_ok=True)
p.touch()
def index_or_default(lst, val, default=-1):
return lst.index(val) if val in lst else default
def print_info(logger, message):
print(message)
logger.info(message)
| true | true |
79009926d5e3cc7ab1e1e230bb129c2f66059adb | 248 | py | Python | pretix_attestation_plugin/urls.py | ayanginet/pretix-attestation-placeholder-plugin | e3c476f21963fd697b0a2be343f4a90dae6e1618 | [
"MIT"
] | 1 | 2021-09-07T07:55:09.000Z | 2021-09-07T07:55:09.000Z | pretix_attestation_plugin/urls.py | ayanginet/pretix-attestation-placeholder-plugin | e3c476f21963fd697b0a2be343f4a90dae6e1618 | [
"MIT"
] | null | null | null | pretix_attestation_plugin/urls.py | ayanginet/pretix-attestation-placeholder-plugin | e3c476f21963fd697b0a2be343f4a90dae6e1618 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/key-file-upload/$",
views.KeyFileUploadView.as_view(),
name="key_file_upload",
),
]
| 19.076923 | 82 | 0.58871 | from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/key-file-upload/$",
views.KeyFileUploadView.as_view(),
name="key_file_upload",
),
]
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.