hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70d5dd451f1625af076af63c6aefa0f1d2c9f5a3 | 2,004 | py | Python | micro/core/logger.py | humu1us/notifier | 1228c27dfe8449fe4cde3860b50eebd84866f204 | [
"MIT"
] | 12 | 2017-12-18T07:09:25.000Z | 2021-03-17T15:22:16.000Z | micro/core/logger.py | humu1us/micro | 1228c27dfe8449fe4cde3860b50eebd84866f204 | [
"MIT"
] | 5 | 2017-12-27T01:02:55.000Z | 2019-01-28T04:42:07.000Z | micro/core/logger.py | humu1us/micro | 1228c27dfe8449fe4cde3860b50eebd84866f204 | [
"MIT"
] | null | null | null |
import os
import logging
from .params import Params
from ..core.utils import set_folder
LOG_FORMAT = "[%(asctime)s] %(levelname)s: " + \
"%(message)s - %(filename)s, %(funcName)s, %(lineno)s"
LOG_DEBUG_FORMAT = "[%(asctime)s] [%(process)d] %(levelname)s: " + \
"%(message)s - %(name)s, %(filename)s, %(funcName)s, %(lineno)s"
class Logger():
def __init__(self):
Params()
self.__name = Params.log_file_name()
self.__path = Params.log_folder_path()
self.__level = logging.getLevelName(Params.log_level())
self.__micro_log = Params.namespace()
self.__celery_log = "celery"
self.__gunicorn_log = "gunicorn.error"
set_folder(self.__path)
self.__micro = logging.getLogger(self.__micro_log)
self.__micro.setLevel(self.__level)
self.__celery = logging.getLogger(self.__celery_log)
self.__celery.setLevel(self.__level)
self.__gunicorn = logging.getLogger(self.__gunicorn_log)
self.__gunicorn.setLevel(self.__level)
datefmt = "%Y-%m-%d %H:%M:%S"
self.__formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=datefmt)
if self.__level == logging.DEBUG:
self.__formatter = logging.Formatter(fmt=LOG_DEBUG_FORMAT,
datefmt=datefmt)
if not self.__micro.handlers:
self.__set_file_handler()
def __set_file_handler(self):
handler = logging.FileHandler(os.path.join(self.__path, self.__name))
handler.setLevel(self.__level)
handler.setFormatter(self.__formatter)
self.__micro.addHandler(handler)
self.__celery.addHandler(handler)
self.__gunicorn.addHandler(handler)
def debug(self, message):
self.__micro.debug(message)
def info(self, message):
self.__micro.info(message)
def warning(self, message):
self.__micro.warning(message)
def error(self, message):
self.__micro.error(message)
| 33.966102 | 77 | 0.641218 | 233 | 2,004 | 5.090129 | 0.23176 | 0.075885 | 0.057336 | 0.067454 | 0.102867 | 0.102867 | 0.043845 | 0 | 0 | 0 | 0 | 0 | 0.230539 | 2,004 | 58 | 78 | 34.551724 | 0.769131 | 0 | 0 | 0 | 0 | 0.021739 | 0.111277 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.086957 | 0 | 0.23913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
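A minimal usage sketch for the `Logger` wrapper above, assuming `Params` resolves a valid log folder, file name, level, and namespace from its configuration (the calls below are illustrative, not taken from the repository):

```python
from micro.core.logger import Logger

log = Logger()                   # wires micro, celery, and gunicorn into one file handler
log.info("service started")
log.error("could not reach broker")
```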
70d7fd667e6414634516ca198a9c285725f53cec | 670 | py | Python | tests/python/gaia-ui-tests/gaiatest/tests/functional/notes/test_notes_new_note.py | NickProgramm/gaia | 975a35c0f5010df341e96d6c5ec60217f5347412 | [
"Apache-2.0"
] | 3 | 2016-08-17T08:52:51.000Z | 2020-03-29T04:56:45.000Z | tests/python/gaia-ui-tests/gaiatest/tests/functional/notes/test_notes_new_note.py | NickProgramm/gaia | 975a35c0f5010df341e96d6c5ec60217f5347412 | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/tests/functional/notes/test_notes_new_note.py | NickProgramm/gaia | 975a35c0f5010df341e96d6c5ec60217f5347412 | [
"Apache-2.0"
] | 1 | 2021-11-18T21:21:19.000Z | 2021-11-18T21:21:19.000Z |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.notes.app import Notes, NotesMainMenu
class TestNotes (GaiaTestCase):
def test_notes_launch(self):
# This test creates a note and verifies that by checking the text
note_text = 'I am a note!'
self.notes = Notes(self.marionette)
self.notes.launch()
main_menu = self.notes.write_and_save_note(note_text)
self.assertEqual(main_menu.first_note_title, note_text)
| 30.454545 | 73 | 0.716418 | 105 | 670 | 4.457143 | 0.609524 | 0.051282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007533 | 0.207463 | 670 | 21 | 74 | 31.904762 | 0.873823 | 0.38209 | 0 | 0 | 0 | 0 | 0.02934 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
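The test above leans on the page-object pattern: the app wrapper performs the UI actions and hands back the next page object, so the test body holds only assertions. A hypothetical, self-contained sketch of that pattern (the class and method names are illustrative, not the real gaiatest API):

```python
class MainMenu:
    def __init__(self, first_note_title):
        self.first_note_title = first_note_title

class NotesApp:
    def __init__(self, marionette):
        self.marionette = marionette

    def write_and_save_note(self, text):
        # a real implementation would type into the note field and tap save
        return MainMenu(first_note_title=text)
```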
70d9b357ab2a34174d7642838d646308c6a0a95b | 2,730 | py | Python | solvcon/parcel/linear/case.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 16 | 2015-12-09T02:54:42.000Z | 2021-04-20T11:26:39.000Z | solvcon/parcel/linear/case.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 95 | 2015-12-09T00:49:40.000Z | 2022-02-14T13:34:55.000Z | solvcon/parcel/linear/case.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 13 | 2015-05-08T04:16:42.000Z | 2021-01-15T09:28:06.000Z |
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2012, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The control interface.
"""
from solvcon import case
from solvcon import domain
from . import solver as lsolver
class LinearCase(case.MeshCase):
"""
Basic case with linear CESE method.
"""
defdict = {
'execution.verified_norm': -1.0,
'solver.solvertype': lsolver.LinearSolver,
'solver.domaintype': domain.Domain,
'solver.alpha': 0,
'solver.sigma0': 3.0,
'solver.taylor': 1.0,
'solver.cnbfac': 1.0,
'solver.sftfac': 1.0,
'solver.taumin': None,
'solver.tauscale': None,
}
def make_solver_keywords(self):
kw = super(LinearCase, self).make_solver_keywords()
# time.
kw['time'] = self.execution.time
kw['time_increment'] = self.execution.time_increment
# c-tau scheme parameters.
kw['alpha'] = int(self.solver.alpha)
for key in ('sigma0', 'taylor', 'cnbfac', 'sftfac',
'taumin', 'tauscale',):
val = self.solver.get(key)
            if val is not None:
                kw[key] = float(val)
return kw
# vim: set ff=unix fenc=utf8 ft=python ai et sw=4 ts=4 tw=79:
| 37.39726 | 79 | 0.692674 | 368 | 2,730 | 5.119565 | 0.527174 | 0.022293 | 0.016985 | 0.024416 | 0.097665 | 0.072187 | 0.072187 | 0.072187 | 0.072187 | 0.072187 | 0 | 0.010768 | 0.217582 | 2,730 | 72 | 80 | 37.916667 | 0.871255 | 0.613553 | 0 | 0 | 0 | 0 | 0.20979 | 0.022977 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.115385 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
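The `defdict` above is how SOLVCON cases declare their tunable options; `make_solver_keywords` then forwards only the c-tau parameters that were actually set. An illustrative, self-contained sketch of that filtering step:

```python
# mimic the loop in make_solver_keywords(): None-valued options are skipped
solver_opts = {'sigma0': 3.0, 'taylor': 1.0, 'cnbfac': 1.0,
               'sftfac': 1.0, 'taumin': None, 'tauscale': None}
kw = {key: float(val) for key, val in solver_opts.items() if val is not None}
assert kw == {'sigma0': 3.0, 'taylor': 1.0, 'cnbfac': 1.0, 'sftfac': 1.0}
```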
70db38e7075982ee4a3090ddeb4e8d6bda2eae0b | 9,168 | py | Python | tools/src/test/python/dlpx/virtualization/_internal/test_file_util.py | muralinimmagadda/virtualization-sdk | 242754fb0817d7a28fceed9d9fd4c626041dd952 | [
"Apache-2.0"
] | null | null | null | tools/src/test/python/dlpx/virtualization/_internal/test_file_util.py | muralinimmagadda/virtualization-sdk | 242754fb0817d7a28fceed9d9fd4c626041dd952 | [
"Apache-2.0"
] | 4 | 2019-10-15T17:59:13.000Z | 2020-01-28T21:11:01.000Z | tools/src/test/python/dlpx/virtualization/_internal/test_file_util.py | muralinimmagadda/virtualization-sdk | 242754fb0817d7a28fceed9d9fd4c626041dd952 | [
"Apache-2.0"
] | 3 | 2019-10-14T18:33:30.000Z | 2019-10-23T17:08:08.000Z |
#
# Copyright (c) 2019, 2020 by Delphix. All rights reserved.
#
import os
from dlpx.virtualization._internal import exceptions, file_util
import mock
import pytest
class TestFileUtil:
@staticmethod
def test_delete_paths(plugin_config_file, schema_file, src_dir):
file_util.delete_paths(plugin_config_file, schema_file, src_dir)
assert not os.path.exists(plugin_config_file)
assert not os.path.exists(schema_file)
assert not os.path.exists(src_dir)
@staticmethod
def test_delete_paths_none_values(plugin_config_file):
file_util.delete_paths(plugin_config_file, None)
assert not os.path.exists(plugin_config_file)
@staticmethod
def test_get_src_dir_path_relative(tmp_path):
plugin_root = tmp_path / 'plugin'
src_dir = plugin_root / 'src'
plugin_root.mkdir()
src_dir.mkdir()
cwd = os.getcwd()
try:
os.chdir(str(tmp_path))
actual = file_util.get_src_dir_path(os.path.join('plugin', 'plugin_config.yml'),
'src')
finally:
os.chdir(cwd)
assert actual == str(src_dir)
@staticmethod
def test_get_src_dir_path_is_abs_fail():
expected_message = "The path '{}' should be a relative path, but is " \
"not.".format('/absolute/src')
with pytest.raises(exceptions.UserError) as err_info:
file_util.get_src_dir_path('/absolute/config', '/absolute/src')
message = err_info.value.message
assert expected_message in message
@staticmethod
def test_get_src_dir_path_exists_fail():
expected_path = os.path.join(os.getcwd(), 'fake', 'nonexistent', 'dir')
expected_message = "The path '{}' does not exist.".format(
expected_path)
with pytest.raises(exceptions.UserError) as err_info:
file_util.get_src_dir_path('fake/plugin_config', 'nonexistent/dir')
message = err_info.value.message
assert expected_message in message
@staticmethod
@mock.patch('os.path.isabs', return_value=False)
@mock.patch('os.path.exists', return_value=True)
def test_get_src_dir_path_is_dir_fail(mock_existing_path,
mock_relative_path):
expected_path = os.path.join(os.getcwd(), 'fake', 'not', 'dir')
expected_message = "The path '{}' should be a {} but is not.".format(
expected_path, 'directory')
with pytest.raises(exceptions.UserError) as err_info:
file_util.get_src_dir_path('fake/plugin_config', 'not/dir')
message = err_info.value.message
assert expected_message in message
@staticmethod
@mock.patch('os.path.isdir', return_value=True)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.isabs', return_value=False)
@pytest.mark.parametrize(
'plugin_config_file_path, src_dir_path',
[('plugin/file_name', '.'),
('/mongo/file_name', '/src'), ('/plugin/mongo/file_name', '/plugin'),
('/plugin/file_name', '/plugin/src/../..')])
def test_get_src_dir_path_fail(mock_relative_path, mock_existing_path,
mock_directory_path,
plugin_config_file_path, src_dir_path):
expected_plugin_root_dir = os.path.join(os.getcwd(), os.path.dirname(plugin_config_file_path))
expected_plugin_root_dir = file_util.standardize_path(
expected_plugin_root_dir)
expected_src_dir = file_util.standardize_path(
os.path.join(expected_plugin_root_dir, src_dir_path))
expected_src_dir = os.path.join(expected_plugin_root_dir,
expected_src_dir)
expected_message = "The src directory {} is not a subdirectory of " \
"the plugin root at {}"\
.format(expected_src_dir,
os.path.dirname(expected_plugin_root_dir))
with pytest.raises(exceptions.UserError) as err_info:
file_util.get_src_dir_path(plugin_config_file_path, src_dir_path)
message = err_info.value.message
assert expected_message in message
@staticmethod
@mock.patch('os.path.isdir', return_value=True)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.isabs', return_value=False)
@pytest.mark.parametrize(
'plugin_config_file_path, src_dir_path',
[(os.path.join(os.path.dirname(os.getcwd()),
'plugin/filename'), '../plugin/src'),
(os.path.join(os.path.dirname(os.getcwd()),
'plugin/filename'), './plugin/src'),
(os.path.join(os.path.dirname(os.getcwd()),
'/UPPERCASE/file_name'), '/UPPERCASE/src'),
(os.path.join(os.path.dirname(os.getcwd()),
'/mongo/file_name'), '/mongo/src/main/python'),
(os.path.join(os.path.dirname(os.getcwd()),
r'windows\path\some_file'), r'windows\path')])
def test_get_src_dir_path_success(mock_relative_path, mock_existing_path,
mock_directory_path,
plugin_config_file_path, src_dir_path):
file_util.get_src_dir_path(plugin_config_file_path, src_dir_path)
@staticmethod
def test_make_dir_success(tmpdir):
testdir = os.path.join(tmpdir.strpath, 'test_dir')
file_util.make_dir(testdir, True)
assert os.path.exists(testdir)
assert os.path.isdir(testdir)
@staticmethod
def test_make_dir_fail():
testdir = '/dir/that/does/not/exist/test_dir'
with pytest.raises(exceptions.UserError) as err_info:
file_util.make_dir(testdir, True)
message = err_info.value.message
assert message == ("Unable to create new directory"
" '/dir/that/does/not/exist/test_dir'"
"\nError code: 2."
" Error message: No such file or directory")
@staticmethod
def test_make_dir_force_fail(tmpdir):
with pytest.raises(exceptions.UserError) as err_info:
file_util.make_dir(tmpdir.strpath, False)
message = err_info.value.message
assert "Error code: 17. Error message: File exists" in message
@staticmethod
def test_clean_copy_no_tgt_dir(tmp_path):
#
# Before: After:
# src/ src/
# hello.txt hello.txt
# tgt/
# hello.txt
#
src = tmp_path / 'src'
src.mkdir()
f = src / 'hello.txt'
f.write_text(u'hello')
tgt = tmp_path / 'tgt'
file_util.clean_copy(src.as_posix(), tgt.as_posix())
expected_file = tgt / 'hello.txt'
assert expected_file.exists()
assert expected_file.read_text() == 'hello'
@staticmethod
def test_clean_copy_removes_tgt_dir(tmp_path):
#
# Before: After:
# src/ src/
# hello.txt hello.txt
# tgt/ tgt/
# remove.txt hello.txt
#
src = tmp_path / 'src'
src.mkdir()
src_file = src / 'hello.txt'
src_file.write_text(u'hello')
tgt = tmp_path / 'tgt'
tgt.mkdir()
tgt_file = tgt / 'remove.txt'
tgt_file.touch()
file_util.clean_copy(src.as_posix(), tgt.as_posix())
expected_file = tgt / 'hello.txt'
assert expected_file.exists()
assert expected_file.read_text() == 'hello'
assert not tgt_file.exists()
@staticmethod
def test_clean_copy_nested_tgt_dir(tmp_path):
#
# Before: After:
# src/ src/
# child/ child/
# hello.txt hello.txt
# tgt_parent/ tgt_parent/
# tgt/
# child/
# hello.txt
#
src = tmp_path / 'src'
src.mkdir()
child = src / 'child'
child.mkdir()
src_file = child / 'hello.txt'
src_file.write_text(u'hello')
tgt_parent = tmp_path / 'tgt_parent'
tgt_parent.mkdir()
tgt = tgt_parent / 'tgt'
file_util.clean_copy(src.as_posix(), tgt.as_posix())
expected_file = tgt / 'child' / 'hello.txt'
assert expected_file.exists()
assert expected_file.read_text() == 'hello'
@staticmethod
def test_tmpdir():
with file_util.tmpdir() as d:
assert os.path.exists(d)
assert not os.path.exists(d)
@staticmethod
def test_tmpdir_with_raised_exception():
try:
with file_util.tmpdir() as d:
assert os.path.exists(d)
raise RuntimeError('test')
except RuntimeError as e:
assert str(e) == 'test'
assert not os.path.exists(d)
| 37.117409 | 102 | 0.584969 | 1,101 | 9,168 | 4.588556 | 0.131698 | 0.043943 | 0.037609 | 0.030879 | 0.714964 | 0.628068 | 0.576207 | 0.502375 | 0.426762 | 0.389945 | 0 | 0.001734 | 0.308028 | 9,168 | 246 | 103 | 37.268293 | 0.794609 | 0.062609 | 0 | 0.478022 | 0 | 0 | 0.134135 | 0.021364 | 0 | 0 | 0 | 0 | 0.137363 | 1 | 0.087912 | false | 0 | 0.021978 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
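`test_tmpdir` and `test_tmpdir_with_raised_exception` pin down the contract of `file_util.tmpdir`: the directory exists inside the `with` block and is gone afterwards, even when an exception escapes. A minimal sketch that would satisfy those tests (not the actual dlpx implementation):

```python
import contextlib
import shutil
import tempfile

@contextlib.contextmanager
def tmpdir():
    d = tempfile.mkdtemp()
    try:
        yield d           # caller works inside the temporary directory
    finally:
        shutil.rmtree(d)  # cleanup runs on normal exit and on exceptions
```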
70db5cf98b11421c37c874ef45672595cd635be6 | 14,476 | py | Python | tests/python/relay/test_external_codegen.py | Bo-Yuan-Huang/3la-tvm-exact | 2199cac47c695cd89055ac536d293e4e8eddc04f | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tests/python/relay/test_external_codegen.py | Bo-Yuan-Huang/3la-tvm-exact | 2199cac47c695cd89055ac536d293e4e8eddc04f | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tests/python/relay/test_external_codegen.py | Bo-Yuan-Huang/3la-tvm-exact | 2199cac47c695cd89055ac536d293e4e8eddc04f | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for graph partitioning."""
import os
import sys
import json
import math
import numpy as np
import tvm
from tvm import te
import tvm.relay.testing
import tvm.relay.transform
from tvm import relay
from tvm.relay import transform
from tvm import runtime
from tvm.contrib import utils
import vta
import vta.testing
def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", ctx=tvm.cpu(), use_graph_rt=True):
if sys.platform == "win32":
print("Skip test on Windows for now")
return
def update_lib(lib):
vta_hw_path = os.environ['VTA_HW_PATH']
tvm_home = os.environ['TVM_HOME']
test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
source_dir = os.path.join(test_dir, "..", "..", "..")
vta_config = json.load(open('/' + os.path.join(*(vta_hw_path.split(os.path.sep) + ['config', 'vta_config.json']))))
vta_config['LOG_BLOCK_IN'] = vta_config['LOG_BLOCK']
vta_config['LOG_BLOCK_OUT'] = vta_config['LOG_BLOCK']
vta_config['LOG_OUT_WIDTH'] = vta_config['LOG_INP_WIDTH']
vta_config['LOG_OUT_BUFF_SIZE'] = vta_config['LOG_ACC_BUFF_SIZE'] + vta_config['LOG_OUT_WIDTH'] - vta_config['LOG_ACC_WIDTH']
kwargs = {}
kwargs["options"] = ["-O2", "-std=c++14",
f"-L{tvm_home}/build",
"-lvta_fsim",
f'-I{tvm_home}/src/runtime/contrib',
f"-I{tvm_home}/3rdparty/vta-hw/include"] \
+ [f'-D{"VTA_" + x}={y}' for (x, y) in filter(lambda pi: 'LOG' in pi[0], vta_config.items())]
kwargs["options"].append(f'-DVTA_LOG_BLOCK_IN={vta_config["LOG_BLOCK"]}')
kwargs["options"].append(f'-DVTA_LOG_BLOCK_OUT={vta_config["LOG_BLOCK"]}')
tmp_path = utils.tempdir()
lib_name = "lib.so"
lib_path = tmp_path.relpath(lib_name)
lib.export_library(lib_path, fcompile=False, **kwargs)
lib = tvm.runtime.load_module(lib_path)
return lib
def check_vm_result():
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
exe = relay.vm.compile(mod, target=target)
code, lib = exe.save()
lib = update_lib(lib)
exe = runtime.vm.Executable.load_exec(code, lib)
vm = runtime.vm.VirtualMachine(exe, ctx)
out = vm.run(**map_inputs)
tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol)
def check_graph_runtime_result():
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
json, lib, _ = relay.build(mod, target=target)
lib = update_lib(lib)
rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx)
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.run()
out = tvm.nd.empty(out_shape, ctx=ctx)
out = rt_mod.get_output(0, out)
tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol)
check_vm_result()
if use_graph_rt:
check_graph_runtime_result()
def set_external_func_attr(func, compiler, ext_symbol):
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compiler)
func = func.with_attr("global_symbol", ext_symbol)
return func
def test_multi_node_subgraph():
x = relay.var("x", shape=(10, 10))
w0 = relay.var("w0", shape=(10, 10))
w1 = relay.var("w1", shape=(10, 10))
w2 = relay.var("w2", shape=(10, 10))
w3 = relay.var("w3", shape=(10, 10))
w4 = relay.var("w4", shape=(10, 10))
w5 = relay.var("w5", shape=(10, 10))
w6 = relay.var("w6", shape=(10, 10))
w7 = relay.var("w7", shape=(10, 10))
# subgraph0
x0 = relay.var("x0", shape=(10, 10))
w00 = relay.var("w00", shape=(10, 10))
w01 = relay.var("w01", shape=(10, 10))
w02 = relay.var("w02", shape=(10, 10))
z00 = relay.add(x0, w00)
p00 = relay.subtract(z00, w01)
q00 = relay.multiply(p00, w02)
subgraph0 = relay.Function([x0, w00, w01, w02], q00)
subgraph0 = set_external_func_attr(subgraph0, "ccompiler", "ccompiler_0")
call0 = relay.Call(subgraph0, [x, w0, w1, w2])
# subgraph1
x1 = relay.var("x1", shape=(10, 10))
w10 = relay.var("w10", shape=(10, 10))
w11 = relay.var("w11", shape=(10, 10))
w12 = relay.var("w12", shape=(10, 10))
z10 = relay.add(x1, w10)
p10 = relay.subtract(z10, w11)
q10 = relay.multiply(p10, w12)
subgraph1 = relay.Function([x1, w10, w11, w12], q10)
subgraph1 = set_external_func_attr(subgraph1, "ccompiler", "ccompiler_1")
call1 = relay.Call(subgraph1, [x, w3, w4, w5])
# Other parts on TVM
z2 = relay.add(x, w6)
q2 = relay.subtract(z2, w7)
r = relay.concatenate((call0, call1, q2), axis=0)
f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
mod = tvm.IRModule()
mod["main"] = f
mod = relay.transform.InferType()(mod)
x_data = np.random.rand(10, 10).astype("float32")
w_data = []
for _ in range(8):
w_data.append(np.random.rand(10, 10).astype("float32"))
map_inputs = {"w{}".format(i): w_data[i] for i in range(8)}
map_inputs["x"] = x_data
check_result(
mod,
map_inputs,
(30, 10),
np.concatenate(
(
((x_data + w_data[0]) - w_data[1]) * w_data[2],
((x_data + w_data[3]) - w_data[4]) * w_data[5],
x_data + w_data[6] - w_data[7],
),
axis=0,
),
)
def test_extern_gcc_single_op():
x = relay.var("x", shape=(8, 8))
y = relay.var("y", shape=(8, 8))
x0 = relay.var("x0", shape=(8, 8))
y0 = relay.var("y0", shape=(8, 8))
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x, y])
mod = tvm.IRModule.from_expr(call)
x_data = np.random.rand(8, 8).astype("float32")
y_data = np.random.rand(8, 8).astype("float32")
check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
def test_extern_gcc_single_op_int():
x = relay.var("x", shape=(8, 8), dtype="int32")
y = relay.var("y", shape=(8, 8), dtype="int32")
x0 = relay.var("x0", shape=(8, 8), dtype="int32")
y0 = relay.var("y0", shape=(8, 8), dtype="int32")
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x, y])
mod = tvm.IRModule.from_expr(call)
x_data = np.random.rand(8, 8).astype("int32")
y_data = np.random.rand(8, 8).astype("int32")
check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
def test_extern_gcc():
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(2, 2))
# subgraph for mul
x0 = relay.var("x0", shape=(2, 2))
y0 = relay.var("y0", shape=(2, 2))
mul = x0 * y0
mul = relay.Function([x0, y0], mul)
mul = set_external_func_attr(mul, "ccompiler", "ccompiler_2")
call_mul = relay.Call(mul, [y, y])
# subgraph for add
x1 = relay.var("x1", shape=(2, 2))
y1 = relay.var("y1", shape=(2, 2))
add = x1 + y1
add = relay.Function([x1, y1], add)
add = set_external_func_attr(add, "ccompiler", "ccompiler_1")
call_add = relay.Call(add, [x, x])
# subgraph for sub
x2 = relay.var("x2", shape=(2, 2))
y2 = relay.var("y2", shape=(2, 2))
sub = x2 - y2
sub = relay.Function([x2, y2], sub)
sub = set_external_func_attr(sub, "ccompiler", "ccompiler_0")
call_sub = relay.Call(sub, [call_mul, call_add])
mod = tvm.IRModule.from_expr(call_sub)
x_data = np.random.rand(2, 2).astype("float32")
y_data = np.random.rand(2, 2).astype("float32")
check_result(mod, {"x": x_data, "y": y_data}, (2, 2), (y_data * y_data) - (x_data + x_data))
def test_extern_gcc_consts():
@tvm._ffi.register_func("relay.ext.ccompiler.constant_updater")
def constant_updater(expr, symbol):
"""A dummy constant updater just to test that a custom one works."""
return {"ccompiler_0_p0": tvm.nd.array(y0_data)}
x = relay.var("x", shape=(8, 8))
y0_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
x0 = relay.var("x0", shape=(8, 8))
y0_const = relay.const(y0_data, "float32")
z = x0 + y0_const
f = relay.Function([x0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x])
mod = tvm.IRModule.from_expr(call)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
compiler = relay.backend.vm.VMCompiler()
compiler.lower(mod, "llvm")
compiler.codegen()
params = compiler.get_params()
assert len(params) == 1
assert "ccompiler_0_p0" in params.keys()
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
_, _, params = relay.build(mod, target="llvm")
assert len(params) == 1
assert "ccompiler_0_p0" in params.keys()
tvm._ffi.registry.remove_global_func("relay.ext.ccompiler.constant_updater")
def test_extern_dnnl():
if not tvm.get_global_func("relay.ext.dnnl", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
weight2 = relay.var("weight1", shape=(w1shape), dtype=dtype)
depthwise_conv2d_1 = relay.nn.conv2d(
data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
depthwise_conv2d_2 = relay.nn.conv2d(
depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
)
out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
f = relay.Function([data1, weight1, weight2], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data0, weight0, weight0])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
ref_ex = relay.create_executor("graph", mod=ref_mod, ctx=tvm.cpu())
ref_res = ref_ex.evaluate()(i_data, w_data, w_data)
check_result(
mod, {"data0": i_data, "weight0": w_data}, (1, 32, 14, 14), ref_res.asnumpy(), tol=1e-5
)
def test_extern_dnnl_const():
if not tvm.get_global_func("relay.ext.dnnl", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.const(w_data, dtype=dtype)
weight2 = relay.const(w_data, dtype=dtype)
depthwise_conv2d_1 = relay.nn.conv2d(
data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
depthwise_conv2d_2 = relay.nn.conv2d(
depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
)
out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
f = relay.Function([data1], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data0])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
ref_ex = relay.create_executor("graph", mod=ref_mod, ctx=tvm.cpu())
ref_res = ref_ex.evaluate()(i_data)
check_result(mod, {"data0": i_data}, (1, 32, 14, 14), ref_res.asnumpy(), tol=1e-5)
def test_extern_vta():
if not tvm.get_global_func("relay.ext.vta_matmul", True):
print('VTA ILA codegen not supported')
vta.testing.simulator.dump_mode(True)
dtype = 'float32'
ishape = (16, 16)
wshape = (16, 16)
data = relay.var('data', shape=(ishape), dtype=dtype)
weight = relay.var('weight', shape=(wshape), dtype=dtype)
data_1 = relay.log(data)
o1 = relay.multiply(data_1, relay.const(np.random.uniform(1, 1, ishape)))
out = relay.nn.dense(o1, weight) # relay.Call(dense_func, [o1])
f = relay.Function([data, weight], out)
inputs = relay.var('input', shape=ishape, dtype=dtype)
weights = relay.var('w', shape=wshape, dtype=dtype)
call = relay.Call(f, [inputs, weights])
mod = tvm.IRModule()
mod['main'] = f
mod = relay.transform.InferType()(mod)
mod = tvm.IRModule.from_expr(call)
seq = tvm.transform.Sequential([transform.AnnotateTarget('vta_matmul'),
transform.PartitionGraph()])
mod = seq(mod)
in_data = np.array([math.e] * ishape[0] * ishape[1]).reshape(ishape).astype(dtype)
w_data = (np.arange(wshape[0] * wshape[1]) % 10).reshape(wshape).astype(dtype)
check_result(mod, {
'input' : in_data,
'w': w_data
}, (16, 16), np.matmul(np.array([1] * 16 * 16).reshape(ishape).astype(dtype),
np.transpose(w_data)).astype(dtype), use_graph_rt=False)
if __name__ == "__main__":
test_multi_node_subgraph()
test_extern_gcc_single_op()
test_extern_gcc_single_op_int()
test_extern_gcc()
test_extern_gcc_consts()
test_extern_dnnl()
test_extern_dnnl_const()
test_extern_vta()
| 36.280702 | 133 | 0.623446 | 2,154 | 14,476 | 4.016713 | 0.165274 | 0.042534 | 0.017684 | 0.024156 | 0.441054 | 0.40742 | 0.382339 | 0.318077 | 0.28976 | 0.28976 | 0 | 0.050203 | 0.215667 | 14,476 | 398 | 134 | 36.371859 | 0.71182 | 0.067007 | 0 | 0.263158 | 0 | 0 | 0.097825 | 0.016997 | 0 | 0 | 0 | 0 | 0.019737 | 1 | 0.046053 | false | 0.013158 | 0.049342 | 0 | 0.115132 | 0.013158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
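Every test above follows the same offloading recipe: build a `relay.Function` for the region to hand off, tag it via `set_external_func_attr` with the external compiler's name and a unique global symbol, and call it from the main module. A stripped-down sketch of just that step, reusing the helper defined in this file:

```python
import tvm
from tvm import relay

x = relay.var("x", shape=(4, 4))
y = relay.var("y", shape=(4, 4))
add = relay.Function([x, y], x + y)
add = set_external_func_attr(add, "ccompiler", "ccompiler_0")  # offload to the C codegen

a = relay.var("a", shape=(4, 4))
b = relay.var("b", shape=(4, 4))
mod = tvm.IRModule.from_expr(relay.Call(add, [a, b]))
```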
70dc6327e5965a776259bf7ef4e5165e26e23f32 | 3,621 | py | Python | tasks/OptimalMountainCar.py | Kognitive/QLearningComparison | a15180267a88e32fac7d944940caf79c5bdec536 | [
"MIT"
] | 1 | 2018-02-03T00:15:05.000Z | 2018-02-03T00:15:05.000Z | tasks/OptimalMountainCar.py | Kognitive/QLearningComparison | a15180267a88e32fac7d944940caf79c5bdec536 | [
"MIT"
] | null | null | null | tasks/OptimalMountainCar.py | Kognitive/QLearningComparison | a15180267a88e32fac7d944940caf79c5bdec536 | [
"MIT"
] | null | null | null |
import numpy as np
import math
import matplotlib.pyplot as plt
# do the discretization
discretizationp = 1500
discretizationv = 1000
# min and max values
min_position = -1.2
max_position = 0.6
max_speed = 0.07
goal_position = 0.5
# get intervals low and high
low = np.array([min_position, -max_speed])
high = np.array([max_position, max_speed])
# determine the step width
pos_step = (max_position - min_position) / (discretizationp - 1)
vel_step = (2 * max_speed) / (discretizationv - 1)
print("pos_step is {}".format(pos_step))
print("vel_step is {}".format(vel_step))
# build the transition matrix
trans = np.zeros([discretizationp, discretizationv, 3, 3])
rews = np.ones([discretizationp, discretizationv, 3, 1]) * -1
def step(state, action):
position, velocity = state
velocity += (action - 1) * 0.001 + math.cos(3 * position) * (-0.0025)
velocity = np.clip(velocity, -max_speed, max_speed)
position += velocity
position = np.clip(position, min_position, max_position)
if (position == min_position and velocity < 0): velocity = 0
done = bool(position >= goal_position)
reward = -1.0
state = (position, velocity)
return np.array(state), reward, done, {}
print("Filling Transition")
p_ticks = np.arange(min_position, max_position + pos_step / 100, pos_step)
v_ticks = np.arange(-max_speed, max_speed + vel_step / 100, vel_step)
for pi in range(len(p_ticks)):
print(pi)
for vi in range(len(v_ticks)):
p = p_ticks[pi]
v = v_ticks[vi]
for a in range(3):
next, reward, done, _ = step((p, v), a)
pf = int((next[0] - min_position - pos_step / 100) / pos_step)
ps = int((next[1] + max_speed - vel_step / 100) / vel_step)
trans[pi, vi, a, :] = np.array([pf, ps, done])
print("Filled Transition")
# init q function
q_shape = (discretizationp, discretizationv, 3)
q_function = -np.zeros(q_shape)
next_q_function = -np.ones(q_shape) * 100
discount = 0.99
# repeat until converged
while np.max(np.abs(q_function - next_q_function)) >= 0.00001:
print(np.max(np.abs(q_function - next_q_function)))
# create next bootstrapped q function
q_function = next_q_function
bootstrapped_q_function = np.empty(q_shape)
# iterate over all fields
for pi in range(len(p_ticks)):
for vi in range(len(v_ticks)):
p = p_ticks[pi]
v = v_ticks[vi]
for a in range(3):
next = trans[pi, vi, a]
next_q = q_function[int(next[0]), int(next[1]), :]
bootstrapped_q_function[pi, vi, a] = rews[pi, vi, a] + discount * (np.max(next_q) if not next[2] else 0)
    # adopt the Bellman backup computed above; rews was already added inside
    # the loop, so adding it again here would double-count the step reward
    next_q_function = bootstrapped_q_function
box = [min_position, max_position, - max_speed, max_speed]
min_position = -1.2
max_position = 0.6
max_speed = 0.07
fig = plt.figure(1)
fig.set_size_inches(6.2, 4.2)
ax1 = plt.axes([0.1, 0.1, 0.8, 0.8])
vf2 = ax1.imshow(np.transpose(np.max(next_q_function, axis=2)), interpolation='nearest', extent=box, aspect='auto')
plt.colorbar(vf2, ax=ax1)
plt.show()
# plot a different plot
fig = plt.figure(2)
fig.set_size_inches(6.2, 4.2)
plt.clf()
act_cmap = plt.cm.get_cmap('plasma', 3)
# print both plots
ba = plt.imshow(np.transpose(np.argmax(next_q_function, axis=2)), interpolation='nearest', cmap=act_cmap, vmin=-0.5, vmax=2.5,
extent=box, aspect='auto')
plt.xlabel("x")
plt.ylabel("v")
ba_cbar = plt.colorbar(ba, ticks=[0, 1, 2])
ba_cbar.set_ticklabels(['Left', 'Nothing', 'Right'])
plt.show() | 31.215517 | 126 | 0.659762 | 570 | 3,621 | 4.022807 | 0.257895 | 0.07065 | 0.039686 | 0.020933 | 0.249455 | 0.220672 | 0.198866 | 0.1256 | 0.108155 | 0.080244 | 0 | 0.038939 | 0.198564 | 3,621 | 116 | 127 | 31.215517 | 0.751206 | 0.080088 | 0 | 0.25 | 0 | 0 | 0.032841 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0125 | false | 0 | 0.0375 | 0 | 0.0625 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70dc8be357aa481f9e8352c1e780e0659fcfc114 | 5,034 | py | Python | src/selection/select_data.py | renan-cunha/KDD-Enade-Computing | 0af6cfa98767e7ae763987a52ee171ab2b4cde51 | [
"MIT"
] | 2 | 2021-07-16T00:39:20.000Z | 2021-07-23T14:50:11.000Z | src/selection/select_data.py | renan-cunha/KDD-Enade-Computing | 0af6cfa98767e7ae763987a52ee171ab2b4cde51 | [
"MIT"
] | 15 | 2021-03-23T23:56:18.000Z | 2021-07-15T23:05:04.000Z | src/selection/select_data.py | renan-cunha/KDD-Enade-Computing | 0af6cfa98767e7ae763987a52ee171ab2b4cde51 | [
"MIT"
] | 3 | 2021-08-09T23:57:13.000Z | 2022-01-22T03:31:12.000Z |
import pandas as pd
from typing import Tuple
import os
import sys
parent = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
sys.path.append(parent)
from src.get_data import get_raw_data
from src import config
from tqdm import tqdm
import subprocess
SELECTED_DATA_DIR = os.path.join(config.DATA_DIR, "selected_data")
COMPUTER_SCIENCE_CODE_2017_2014_2011 = 4004
COMPUTER_SCIENCE_CODE_2008 = 4001
COMPUTER_CODE_2005 = 40
def read_csv(year: int, path: str = SELECTED_DATA_DIR) -> pd.DataFrame:
return pd.read_csv(get_selected_enade_csv_file_path(year, path),
dtype=config.DTYPES)
def get_selected_enade_csv_file_path(year: int,
path: str = SELECTED_DATA_DIR) -> str:
return os.path.join(path, f"microdados_ciencia_computacao_{year}.csv")
def filter_computer_science(df: pd.DataFrame, year: int) -> pd.DataFrame:
if year in [2017, 2014, 2011]:
return filter_computer_science_2017_2014_2011(df)
elif year == 2008:
return filter_2008(df)
elif year == 2005:
return filter_2005(df)
else:
raise ValueError(f"Use a year of {config.YEARS}, not {year}")
def filter_2008(df: pd.DataFrame) -> pd.DataFrame:
df_senior = filter_senior_students(df)
return filter_computer_science_2008(df_senior)
def filter_2005(df: pd.DataFrame) -> pd.DataFrame:
df_senior = filter_senior_students(df)
return filter_computer_science_2005(df_senior)
def filter_computer_science_2017_2014_2011(df: pd.DataFrame) -> pd.DataFrame:
return df.loc[df["CO_GRUPO"] == COMPUTER_SCIENCE_CODE_2017_2014_2011]
def filter_computer_science_2008(df: pd.DataFrame) -> pd.DataFrame:
return df.loc[df["co_subarea"] == COMPUTER_SCIENCE_CODE_2008]
def has_same_value(series: pd.Series) -> bool:
"""Returns true if the series has the same value in each row,
returns false otherwise."""
series_unique = series.unique()
length_series = len(series_unique)
if length_series > 1:
return False
elif length_series == 0:
raise ValueError("The series is empty")
else:
return True
def get_computer_science_answer_key_2005(ufpa_cc_score_specific: pd.Series) -> Tuple[str, str]:
"""Returns the regex used to match the 'vt_ace_oce' with computer science
courses. It gets that by using the 'vt_ace_oce' of the known UFPA computer
science course"""
length_scores = ufpa_cc_score_specific.str.len()
if not has_same_value(length_scores):
raise ValueError("The series should have values with the same length")
def get_num_starting_dots(series: pd.Series, mode: str) -> int:
length_value = len(series.iloc[0])
num_dots_result = 0
for num_dots in range(1, length_value):
dot_string = "." * num_dots
if mode == "start":
equal_to_dot = series.str.startswith(dot_string)
elif mode == "end":
equal_to_dot = series.str.endswith(dot_string)
else:
raise ValueError(f"Use 'start' or 'end' as mode not {mode}")
if equal_to_dot.all():
num_dots_result = num_dots
else:
break
return num_dots_result
num_starting_dots = get_num_starting_dots(ufpa_cc_score_specific, 'start')
num_ending_dots = get_num_starting_dots(ufpa_cc_score_specific, 'end')
return '.' * num_starting_dots, '.' * num_ending_dots
def select_ufpa_computer_science_2005(df: pd.DataFrame) -> pd.DataFrame:
return df.loc[df["co_curso"] == config.UFPA_CODE_COURSE]
def filter_specific_score_2005(df: pd.DataFrame) -> pd.Series:
return df["vt_ace_oce"]
def filter_computer_science_2005(df: pd.DataFrame) -> pd.DataFrame:
ufpa_comp_sci = select_ufpa_computer_science_2005(df)
ufpa_comp_sci_specific_score = filter_specific_score_2005(ufpa_comp_sci)
computer_science_dot_match = get_computer_science_answer_key_2005(ufpa_comp_sci_specific_score)
starting_dots, ending_dots = computer_science_dot_match
computer_df = df.loc[df["co_grupo"] == COMPUTER_CODE_2005]
starting_dot_index = computer_df["vt_ace_oce"].str.startswith(starting_dots)
ending_dot_index = computer_df["vt_ace_oce"].str.endswith(ending_dots)
return computer_df.loc[starting_dot_index & ending_dot_index]
def filter_senior_students(df: pd.DataFrame) -> pd.DataFrame:
return df.loc[df["in_grad"] == 0]
def main(raw_data_path: str = get_raw_data.RAW_ENADE_DATA_DIR,
selected_data_path: str = SELECTED_DATA_DIR):
subprocess.run(["mkdir", "-p", selected_data_path])
get_data = get_raw_data.GetData(raw_data_path=raw_data_path)
for year in tqdm(config.YEARS):
df_year = get_data.read_csv(year)
df_computer_science_year = filter_computer_science(df_year, year)
file_path = get_selected_enade_csv_file_path(year, selected_data_path)
df_computer_science_year.to_csv(file_path, index=False)
if __name__ == "__main__":
main()
| 34.958333 | 99 | 0.714541 | 737 | 5,034 | 4.510176 | 0.1981 | 0.099278 | 0.035199 | 0.036101 | 0.346871 | 0.284296 | 0.241576 | 0.155836 | 0.119735 | 0.083935 | 0 | 0.03832 | 0.191299 | 5,034 | 143 | 100 | 35.202797 | 0.778187 | 0.04847 | 0 | 0.061856 | 0 | 0 | 0.06524 | 0.008391 | 0 | 0 | 0 | 0 | 0 | 1 | 0.154639 | false | 0 | 0.082474 | 0.072165 | 0.412371 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
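A toy illustration of the 2005 answer-key heuristic implemented above: every `vt_ace_oce` string from the known UFPA computer science course shares the same leading and trailing runs of dots, and those runs become the match pattern. With the hypothetical series below, all values start with two dots and end with one, so the function returns `('..', '.')`:

```python
toy = pd.Series(["..10110.", "..01001.", "..11010."])
assert get_computer_science_answer_key_2005(toy) == ("..", ".")
```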
70dc8fcbafe8b99e597014de3177d52d5906fc56 | 1,695 | py | Python | dataset.py | RheinhardHye/MSN-Point-Cloud-Completion | c5f23a91d343f6a76d07f04a589d0294d456a293 | [
"Apache-2.0"
] | 1 | 2021-01-07T06:38:33.000Z | 2021-01-07T06:38:33.000Z | dataset.py | RheinhardHye/MSN-Point-Cloud-Completion | c5f23a91d343f6a76d07f04a589d0294d456a293 | [
"Apache-2.0"
] | null | null | null | dataset.py | RheinhardHye/MSN-Point-Cloud-Completion | c5f23a91d343f6a76d07f04a589d0294d456a293 | [
"Apache-2.0"
] | null | null | null |
import open3d as o3d
import torch
import numpy as np
import torch.utils.data as data
import torchvision.transforms as transforms
import os
import random
#from utils import *
def resample_pcd(pcd, n):
"""Drop or duplicate points so that pcd has exactly n points"""
idx = np.random.permutation(pcd.shape[0])
if idx.shape[0] < n:
idx = np.concatenate([idx, np.random.randint(pcd.shape[0], size = n - pcd.shape[0])])
return pcd[idx[:n]]
class ShapeNet(data.Dataset):
def __init__(self, train = True, npoints = 8192):
if train:
self.list_path = './data/train.list'
else:
self.list_path = './data/val.list'
self.npoints = npoints
self.train = train
with open(os.path.join(self.list_path)) as file:
self.model_list = [line.strip().replace('/', '_') for line in file]
random.shuffle(self.model_list)
        self.len = len(self.model_list) * 50  # 50 partial scans per model
def __getitem__(self, index):
model_id = self.model_list[index // 50]
scan_id = index % 50
def read_pcd(filename):
pcd = o3d.io.read_point_cloud(filename)
return torch.from_numpy(np.array(pcd.points)).float()
if self.train:
partial = read_pcd(os.path.join("./data/train/", model_id + '_%d_denoised.pcd' % scan_id))
else:
partial = read_pcd(os.path.join("./data/val/", model_id + '_%d_denoised.pcd' % scan_id))
complete = read_pcd(os.path.join("./data/complete/", '%s.pcd' % model_id))
return model_id, resample_pcd(partial, 5000), resample_pcd(complete, self.npoints)
def __len__(self):
        return self.len
| 37.666667 | 102 | 0.620059 | 238 | 1,695 | 4.239496 | 0.327731 | 0.034688 | 0.039643 | 0.038652 | 0.125867 | 0.125867 | 0.105055 | 0 | 0 | 0 | 0 | 0.016458 | 0.247198 | 1,695 | 45 | 103 | 37.666667 | 0.774295 | 0.045428 | 0 | 0.052632 | 0 | 0 | 0.069436 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.184211 | 0.026316 | 0.447368 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
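A minimal usage sketch for the `ShapeNet` dataset above, assuming `./data/train.list` and the referenced `.pcd` files exist: each item yields a model id, a 5000-point partial cloud, and an `npoints`-point complete cloud, and the class plugs straight into a `DataLoader`:

```python
dataset = ShapeNet(train=True, npoints=8192)
loader = data.DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)

model_id, partial, complete = dataset[0]
print(partial.shape, complete.shape)  # torch.Size([5000, 3]) torch.Size([8192, 3])
```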
70dfa6e3c3dd1436c7ea7dcee1b6e9a6596b389f | 16,031 | py | Python | libcity/model/trajectory_loc_prediction/CARA.py | moghadas76/test_bigcity | 607b9602c5b1113b23e1830455e174b0901d7558 | [
"Apache-2.0"
] | 221 | 2021-09-06T03:33:31.000Z | 2022-03-28T05:36:49.000Z | libcity/model/trajectory_loc_prediction/CARA.py | moghadas76/test_bigcity | 607b9602c5b1113b23e1830455e174b0901d7558 | [
"Apache-2.0"
] | 43 | 2021-09-19T16:12:28.000Z | 2022-03-31T16:29:03.000Z | libcity/model/trajectory_loc_prediction/CARA.py | moghadas76/test_bigcity | 607b9602c5b1113b23e1830455e174b0901d7558 | [
"Apache-2.0"
] | 64 | 2021-09-06T07:56:10.000Z | 2022-03-25T08:48:35.000Z |
# coding: utf-8
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from libcity.model.abstract_model import AbstractModel
from math import sin, cos, sqrt, atan2, radians
import numpy as np
def identity_loss(y_true, y_pred):
return torch.mean(y_pred - 0 * y_true)
class CARA1(nn.Module):
    def hard_sigmoid(self, x):
        # hard sigmoid: clamp(x / 6 + 0.5, 0, 1), built from two negated
        # thresholds; re-wrapping x in torch.tensor would detach the gradient
        x = x / 6 + 0.5
        x = F.threshold(-x, -1, -1)
        x = F.threshold(-x, 0, 0)
        return x
def __init__(self, output_dim, input_dim,
init='glorot_uniform', inner_init='orthogonal',
**kwargs):
super(CARA1, self).__init__()
self.output_dim = output_dim
self.init = init
self.inner_init = inner_init
self.activation = self.hard_sigmoid
self.inner_activation = nn.Tanh()
self.build(input_dim)
def add_weight(self, shape, initializer):
ts = torch.zeros(shape)
if initializer == 'glorot_uniform':
ts = nn.init.xavier_normal_(ts)
elif initializer == 'orthogonal':
ts = nn.init.orthogonal_(ts)
return nn.Parameter(ts)
def build(self, input_shape):
# self.input_spec = [InputSpec(shape=input_shape)]
self.input_dim = input_shape
self.W_z = self.add_weight((self.input_dim, self.output_dim),
initializer=self.init)
self.U_z = self.add_weight((self.output_dim, self.output_dim),
initializer=self.init)
self.b_z = self.add_weight((self.output_dim,),
initializer='zero')
self.W_r = self.add_weight((self.input_dim, self.output_dim),
initializer=self.init)
self.U_r = self.add_weight((self.output_dim, self.output_dim),
initializer=self.init)
self.b_r = self.add_weight((self.output_dim,),
initializer='zero')
self.W_h = self.add_weight((self.input_dim, self.output_dim),
initializer=self.init)
self.U_h = self.add_weight((self.output_dim, self.output_dim),
initializer=self.init)
self.b_h = self.add_weight((self.output_dim,),
initializer='zero')
self.A_h = self.add_weight((self.output_dim, self.output_dim),
initializer=self.init)
self.A_u = self.add_weight((self.output_dim, self.output_dim),
initializer=self.init)
self.b_a_h = self.add_weight((self.output_dim,),
initializer='zero')
self.b_a_u = self.add_weight((self.output_dim,),
initializer='zero')
self.W_t = self.add_weight((self.input_dim, self.output_dim),
initializer=self.init)
self.U_t = self.add_weight((1, self.output_dim),
initializer=self.init)
self.b_t = self.add_weight((self.output_dim,),
initializer='zero')
self.W_g = self.add_weight((self.input_dim, self.output_dim),
initializer=self.init)
self.U_g = self.add_weight((1, self.output_dim),
initializer=self.init)
self.b_g = self.add_weight((self.output_dim,),
initializer='zero')
def preprocess_input(self, x):
return x
def forward(self, x):
"""
X : batch * timeLen * dims(有拓展)
"""
tlen = x.shape[1]
output = torch.zeros((x.shape[0], self.output_dim))
for i in range(tlen):
output = self.step(x[:, i, :], output)
return output
def step(self, x, states):
"""
用于多批次同一时间
states为上一次多批次统一时间数据
"""
h_tm1 = states
# phi_t
u = x[:, self.output_dim: 2 * self.output_dim]
# delta_t
t = x[:, 2 * self.output_dim: (2 * self.output_dim) + 1]
# delta_g
g = x[:, (2 * self.output_dim) + 1:]
# phi_v
x = x[:, :self.output_dim]
t = self.inner_activation(torch.matmul(t, self.U_t))
g = self.inner_activation(torch.matmul(g, self.U_g))
# Time-based gate
t1 = self.inner_activation(torch.matmul(x, self.W_t) + t + self.b_t)
# Geo-based gate
g1 = self.inner_activation(torch.matmul(x, self.W_g) + g + self.b_g)
# Contextual Attention Gate
a = self.inner_activation(
torch.matmul(h_tm1, self.A_h) + torch.matmul(u, self.A_u) + self.b_a_h + self.b_a_u)
x_z = torch.matmul(x, self.W_z) + self.b_z
x_r = torch.matmul(x, self.W_r) + self.b_r
x_h = torch.matmul(x, self.W_h) + self.b_h
u_z_ = torch.matmul((1 - a) * u, self.W_z) + self.b_z
u_r_ = torch.matmul((1 - a) * u, self.W_r) + self.b_r
u_h_ = torch.matmul((1 - a) * u, self.W_h) + self.b_h
u_z = torch.matmul(a * u, self.W_z) + self.b_z
u_r = torch.matmul(a * u, self.W_r) + self.b_r
u_h = torch.matmul(a * u, self.W_h) + self.b_h
# update gate
z = self.inner_activation(x_z + torch.matmul(h_tm1, self.U_z) + u_z)
# reset gate
r = self.inner_activation(x_r + torch.matmul(h_tm1, self.U_r) + u_r)
# hidden state
hh = self.activation(x_h + torch.matmul(r * t1 * g1 * h_tm1, self.U_h) + u_h)
h = z * h_tm1 + (1 - z) * hh
h = (1 + u_z_ + u_r_ + u_h_) * h
return h
def bpr_triplet_loss(x):
positive_item_latent, negative_item_latent = x
reg = 0
loss = 1 - torch.log(torch.sigmoid(
torch.sum(positive_item_latent, dim=-1, keepdim=True) -
torch.sum(negative_item_latent, dim=-1, keepdim=True))) - reg
return loss
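# Illustrative check of the pairwise objective above (hypothetical inputs):
#   pos = torch.full((1, 4), 0.5)   # positive item scores, sum = 2.0
#   neg = torch.zeros((1, 4))       # negative item scores, sum = 0.0
#   bpr_triplet_loss([pos, neg])    # 1 - log(sigmoid(2.0 - 0.0)) ~= 1.13
# The loss shrinks as the positive item is scored further above the negative.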
class Recommender(nn.Module):
def __init__(self, num_users, num_items, num_times, latent_dim, maxvenue=5):
super(Recommender, self).__init__()
self.maxVenue = maxvenue
self.latent_dim = latent_dim
# num * maxVenue * dim
self.U_Embedding = nn.Embedding(num_users, latent_dim)
self.V_Embedding = nn.Embedding(num_items, latent_dim)
self.T_Embedding = nn.Embedding(num_times, latent_dim)
torch.nn.init.uniform_(self.U_Embedding.weight)
torch.nn.init.uniform_(self.V_Embedding.weight)
torch.nn.init.uniform_(self.T_Embedding.weight)
self.rnn = nn.Sequential(
CARA1(latent_dim, latent_dim, input_shape=(self.maxVenue, (self.latent_dim * 2) + 2,), unroll=True))
# latent_dim * 2 + 2 = v_embedding + t_embedding + time_gap + distance
def forward(self, x):
# INPUT = [self.user_input, self.time_input, self.gap_time_input, self.pos_distance_input,
# self.neg_distance_input, self.checkins_input,
# self.neg_checkins_input]
# pass
# User latent factor
user_input = torch.tensor(x[0])
time_input = torch.tensor(x[1])
gap_time_input = torch.tensor(x[2], dtype=torch.float32)
pos_distance_input = torch.tensor(x[3], dtype=torch.float32)
neg_distance_input = torch.tensor(x[4], dtype=torch.float32)
checkins_input = torch.tensor(x[5])
neg_checkins_input = torch.tensor(x[6])
self.u_latent = self.U_Embedding(user_input)
self.t_latent = self.T_Embedding(time_input)
h, w = gap_time_input.shape
gap_time_input = gap_time_input.view(h, w, 1)
rnn_input = torch.cat([self.V_Embedding(checkins_input), self.T_Embedding(time_input), gap_time_input], -1)
neg_rnn_input = torch.cat([self.V_Embedding(neg_checkins_input), self.T_Embedding(time_input), gap_time_input],
-1)
h, w = pos_distance_input.shape
pos_distance_input = pos_distance_input.view(h, w, 1)
h, w = neg_distance_input.shape
neg_distance_input = neg_distance_input.view(h, w, 1)
rnn_input = torch.cat([rnn_input, pos_distance_input], -1)
neg_rnn_input = torch.cat([neg_rnn_input, neg_distance_input], -1)
self.checkins_emb = self.rnn(rnn_input)
self.neg_checkins_emb = self.rnn(neg_rnn_input)
pred = (self.checkins_emb * self.u_latent).sum(dim=1)
neg_pred = (self.neg_checkins_emb * self.u_latent).sum(dim=1)
return bpr_triplet_loss([pred, neg_pred])
def rank(self, uid, hist_venues, hist_times, hist_time_gap, hist_distances):
# hist_venues = hist_venues + [candidate_venue]
# hist_times = hist_times + [time]
# hist_time_gap = hist_time_gap + [time_gap]
# hist_distances = hist_distances + [distance]
# u_latent = self.U_Embedding(torch.tensor(uid))
# v_latent = self.V_Embedding(torch.tensor(hist_venues))
# t_latent = self.T_Embedding(torch.tensor(hist_times))
u_latent = self.U_Embedding.weight[uid]
v_latent = self.V_Embedding.weight[hist_venues.reshape(-1)].view(hist_venues.shape[0], hist_venues.shape[1], -1)
t_latent = self.T_Embedding.weight[hist_times.reshape(-1)].view(hist_times.shape[0], hist_times.shape[1], -1)
h, w = hist_time_gap.shape
hist_time_gap = hist_time_gap.reshape(h, w, 1)
h, w = hist_distances.shape
hist_distances = hist_distances.reshape(h, w, 1)
rnn_input = torch.cat([t_latent, torch.tensor(hist_time_gap, dtype=torch.float32)], dim=-1)
rnn_input = torch.cat([rnn_input, torch.tensor(hist_distances, dtype=torch.float32)], dim=-1)
rnn_input = torch.cat([v_latent, rnn_input], dim=-1)
dynamic_latent = self.rnn(rnn_input)
scores = torch.mul(dynamic_latent, u_latent).sum(1)
# scores = np.dot(dynamic_latent, u_latent)
return scores
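# Hypothetical ranking sketch: to score candidate venues for one user, tile the
# visit history once per candidate, append each candidate as the final step
# (as CARA.predict does below), and take the argmax over the returned scores:
#   scores = model.rank(uid, hist_venues, hist_times, hist_time_gap, hist_distances)
#   best_candidate = scores.argmax()
# where every hist_* array has shape (num_candidates, sequence_length).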
class CARA(AbstractModel):
"""rnn model with long-term history attention"""
def __init__(self, config, data_feature):
super(CARA, self).__init__(config, data_feature)
self.loc_size = data_feature['loc_size']
self.tim_size = data_feature['tim_size']
self.uid_size = data_feature['uid_size']
self.poi_profile = data_feature['poi_profile']
self.id2locid = data_feature['id2locid']
self.id2loc = []
for i in range(self.loc_size - 1):
self.id2loc.append(self.id2locid[str(i)])
self.id2loc.append(self.loc_size)
self.id2loc = np.array(self.id2loc)
self.coor = self.poi_profile['coordinates'].apply(eval)
self.rec = Recommender(self.uid_size, self.loc_size, self.tim_size, 10)
def get_time_interval(self, x):
y = x[:, :-1]
y = np.concatenate([x[:, 0, None], y], axis=1)
return x - y
def get_time_interval2(self, x):
y = x[:-1]
y = np.concatenate([x[0, None], y], axis=0)
return x - y
    def get_pos_distance(self, x):
        x = np.array(x.tolist())
        y = np.concatenate([x[:, 0, None, :], x[:, :-1, :]], axis=1)
        r = 6373.0
        rx = np.radians(x)
        ry = np.radians(y)
        # haversine needs the coordinate differences in radians as well
        # (cf. get_distance below, which converts before subtracting)
        d = rx - ry
        a = np.sin(d[:, :, 0] / 2) ** 2 + np.cos(rx[:, :, 0]) * np.cos(ry[:, :, 0]) * np.sin(d[:, :, 1] / 2) ** 2
        c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
        return r * c
    def get_pos_distance2(self, x):
        x = np.array(x.tolist())
        y = np.concatenate([x[0, None, :], x[:-1, :]], axis=0)
        r = 6373.0
        rx = np.radians(x)
        ry = np.radians(y)
        # haversine needs the coordinate differences in radians as well
        d = rx - ry
        a = np.sin(d[:, 0] / 2) ** 2 + np.cos(rx[:, 0]) * np.cos(ry[:, 0]) * np.sin(d[:, 1] / 2) ** 2
        c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
        return r * c
def get_distance(self, lat1, lng1, lat2, lng2):
r = 6373.0
lat1 = radians(lat1)
lon1 = radians(lng1)
lat2 = radians(lat2)
lon2 = radians(lng2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = int(r * c)
return distance
def get_neg_checkins(self, vis, x, y):
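        # Negative sampling: replace the last check-in of every sequence with
        # a random venue the user has not visited, and recompute the distance
        # for that final step only.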
len1, len2 = x.shape
x_res = []
x_res_distance = y[:].copy()
for i in range(len1):
visits = x[i]
j = np.random.randint(self.loc_size - 1)
while j in vis[i]:
j = np.random.randint(self.loc_size - 1)
tmp = visits[:].copy()
tmp[-1] = j
x_res.append(tmp)
j1 = self.coor[self.id2loc[visits[-1]]]
j = self.coor[self.id2loc[j]]
x_res_distance[i, -1] = self.get_distance(j1[0], j1[1], j[0], j[1])
return x_res, x_res_distance
def forward(self, batch):
hloc = np.array(batch['current_loc'])[:, :5]
target = np.array(batch['target'])
h = target.shape
target = target.reshape((*h, 1))
hloc = np.concatenate([hloc, target], axis=1)
hloc1 = self.id2loc[hloc]
tloc = np.array(batch['current_tim'])[:, :5]
target_tim = np.array(batch['target_tim'])
h = target_tim.shape
target_tim = target_tim.reshape((*h, 1))
tloc = np.concatenate([tloc, target_tim], axis=1)
x_users = batch['uid']
t_interval = self.get_time_interval(tloc)
titude = self.coor[hloc1.reshape(-1)].to_numpy().reshape(hloc.shape)
pos_distance = self.get_pos_distance(titude)
x_neg_checkins, x_neg_distance = self.get_neg_checkins(np.array(batch['current_loc']), hloc, pos_distance)
x = [torch.tensor(x_users), torch.tensor(tloc), torch.tensor(t_interval),
torch.tensor(pos_distance), torch.tensor(x_neg_distance), torch.tensor(hloc),
torch.tensor(x_neg_checkins)]
return self.rec(x)
def predict(self, batch):
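        # Rank the true next venue against 100 randomly drawn negatives
        # (101 candidates per test case); candidate 0 is the ground truth.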
hloc = np.array(batch['current_loc'])[:, :5]
tloc = np.array(batch['current_tim'])[:, :5]
x_users = batch['uid']
my_true = batch['target']
my_true_tim = batch['target_tim']
output = []
for id, mloc in enumerate(hloc):
hlocs = []
tlocs = []
users = []
t_intervals = []
distances = []
target = my_true[id]
target_tim = my_true_tim[id]
mu = x_users[id]
mh, mt = hloc[id], tloc[id].copy()
mt = np.append(mt, target_tim)
mi = self.get_time_interval2(mt)
for i in range(101):
mh = hloc[id].copy()
if i == 0:
mh = np.append(mh, target)
tmh = self.id2loc[mh]
mtt = self.coor[tmh.reshape(-1)].to_numpy().reshape(tmh.shape)
md = self.get_pos_distance2(mtt)
else:
j = target
while j == target:
j = np.random.randint(0, self.loc_size - 1)
mh = np.append(mh, j)
tmh = self.id2loc[mh]
mtt = self.coor[tmh.reshape(-1)].to_numpy().reshape(tmh.shape)
md = self.get_pos_distance2(mtt)
hlocs.append(mh)
tlocs.append(mt)
users.append(mu)
t_intervals.append(mi)
distances.append(md)
output.append(self.rec.rank(np.array(users), np.array(hlocs), np.array(tlocs), np.array(t_intervals),
np.array(distances)).cpu().detach().numpy())
return torch.tensor(output)
def calculate_loss(self, batch):
return torch.mean(self.forward(batch))
| 39.582716 | 120 | 0.561849 | 2,207 | 16,031 | 3.854101 | 0.113276 | 0.035975 | 0.050435 | 0.053609 | 0.410651 | 0.327181 | 0.299671 | 0.264872 | 0.242182 | 0.211615 | 0 | 0.019452 | 0.307342 | 16,031 | 404 | 121 | 39.680693 | 0.746578 | 0.064562 | 0 | 0.177632 | 0 | 0 | 0.014957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072368 | false | 0 | 0.026316 | 0.009868 | 0.167763 | 0.003289 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70e1b6c0fbcdf4ec99159d157a0331db3d7297ec | 603 | py | Python | warm_up/2/string_bits.py | zerohk/codingbat_py | 0a83fcd5f0e2edb5b66402bbd29e08be95abaf18 | [
"Apache-2.0"
] | null | null | null | warm_up/2/string_bits.py | zerohk/codingbat_py | 0a83fcd5f0e2edb5b66402bbd29e08be95abaf18 | [
"Apache-2.0"
] | null | null | null | warm_up/2/string_bits.py | zerohk/codingbat_py | 0a83fcd5f0e2edb5b66402bbd29e08be95abaf18 | [
"Apache-2.0"
] | null | null | null | '''
Given a string, return a new string made of every other char starting with the
first, so "Hello" yields "Hlo".
string_bits('Hello') → 'Hlo'
string_bits('Hi') → 'H'
string_bits('Heeololeo') → 'Hello'
'''
def string_bits(str):
    str1 = ''
    for i in range(0, len(str), 2):
        str1 = str1 + str[i]
    return str1
'''
Solution:
def string_bits(str):
result = ""
# Many ways to do this. This uses the standard loop of i on every char,
# and inside the loop skips the odd index values.
for i in range(len(str)):
if i % 2 == 0:
result = result + str[i]
return result
'''
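# An equivalent slicing alternative, for reference: return str[::2]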
| 21.535714 | 78 | 0.626866 | 102 | 603 | 3.686275 | 0.5 | 0.132979 | 0.069149 | 0.085106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019523 | 0.235489 | 603 | 27 | 79 | 22.333333 | 0.789588 | 0.330017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70e1f3fe568bf868d773589846c59a04e7c3129d | 22,893 | py | Python | savReaderWriter/util/savViewer.py | eawag-rdm/savReaderWriter | e766a38e20c09eb565ccfbe9064a7c557cc66baa | [
"MIT"
] | 12 | 2015-10-07T10:56:24.000Z | 2021-08-16T10:07:19.000Z | savReaderWriter/util/savViewer.py | eawag-rdm/savReaderWriter | e766a38e20c09eb565ccfbe9064a7c557cc66baa | [
"MIT"
] | 2 | 2019-01-01T13:36:43.000Z | 2019-02-28T13:57:24.000Z | savReaderWriter/util/savViewer.py | eawag-rdm/savReaderWriter | e766a38e20c09eb565ccfbe9064a7c557cc66baa | [
"MIT"
] | 3 | 2016-09-19T12:10:34.000Z | 2021-05-16T17:52:30.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import abc
import sys
import os
import mmap
import re
import locale
import threading
import time
from collections import namedtuple as ntuple
from random import randint
from ctypes import c_int, c_long
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# TODO: status bar sometimes does not update (small csv, then big csv)
"""
savViewer - SPSS Data file viewer
Currently supported are:
-SPSS Data file (.sav, .zsav) -- depends on savReaderWriter
-Character separated values (.csv, .tab) -- depends on icu (and csv)
-Microsoft Excel (*.xls, *.xlsx) -- depends on xlrd
Commandline use: python savViewer.py somefile.sav
GUI use: python savViewer.py
Suitable for use with Python 2.7 and 3.3
"""
__author__ = "Albert-Jan Roskam"
__email__ = "@".join(["fomcl", "yahoo." + "com"])
__version__ = "1.0.5"
__date__ = "2014-01-14"
# Python 3
py3k = sys.version_info.major >= 3
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
try:
xrange
except NameError:
xrange = range
try:
unicode
except NameError:
unicode = str
# ensure locale.getlocale won't return (None, None)
locale.setlocale(locale.LC_ALL, "")
class ExtIterBase(object):
"""
Abstract base class that should make it easier to add concrete
*Iter (e.g. SavIter, CsvIter) classes
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __getitem__(self, key):
raise NotImplementedError("need __getitem__ method")
@abc.abstractmethod
def close(self):
"""Does some cleanup, if needed"""
raise NotImplementedError("need close method")
@abc.abstractproperty
def fileEncoding(self):
"""Returns the encoding of a dataset"""
return NotImplementedError("need fileEncoding property")
@abc.abstractproperty
def shape(self):
"""Returns the shape of a dataset as a namedtuple with nrows, ncols"""
return NotImplementedError("need shape property")
@abc.abstractproperty
def varNames(self):
"""Returns the variable names of a dataset as a list"""
return NotImplementedError("need varNames property")
class CsvIter(ExtIterBase):
def __init__(self, csvFileName):
global csv, codecs, icu
import csv
import codecs
try:
import icu # sudo apt-get install libicu && pip install pyicu
self.icuOk = True
except ImportError:
self.icuOk = False
self.csvFileName = csvFileName
self.fileSize = os.path.getsize(self.csvFileName)
self.sampleSize = 2048 if self.fileSize > 2048 else self.fileSize
self.fileEncoding_ = self._get_encoding(self.csvFileName)
self.csvfile = codecs.open(self.csvFileName, "r+", self.fileEncoding)
self.data = self.mapfile(self.csvfile)
self.dialect = self._get_dialect(self.csvFileName, self.fileEncoding)
self.varNames_ = self._get_header(self.csvFileName,
self.fileEncoding,
self.dialect)
self.Shape = ntuple("Shape", ["nrows", "ncols"])
self.lookup = list()
self.lookup_done = False
self.thread = threading.Thread(target=self._get_row_lookup,
args=(open(self.csvFileName, "rb"),),
name="lookup maker thread")
self.thread.start()
def __getitem__(self, key):
"""return an item from a memory-mapped csv file"""
try:
start = self.lookup[key]
end = self.lookup[key + 1]
except IndexError:
end = self.fileSize
if self.thread.is_alive():
print("One moment please, lookup not yet ready enough")
elif abs(key) >= len(self.lookup):
raise IndexError("index out of range")
finally:
if self.lookup_done:
self.thread.join()
if py3k:
# 3.x csv requires unicode
line = self.data[start:end].strip().decode(self.fileEncoding_)
return next(csv.reader(line, dialect=self.dialect))
else:
# 2.x csv lacks unicode support
line = self.data[start:end].strip()
row = next(csv.reader([line], dialect=self.dialect))
return [cell.decode(self.fileEncoding_) for cell in row]
def mapfile(self, fileObj):
size = os.path.getsize(fileObj.name)
return mmap.mmap(fileObj.fileno(), size)
def _get_row_lookup(self, fObj):
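        # Build a list of byte offsets, one entry per line, so __getitem__
        # can slice the memory map for random access while this background
        # thread is still scanning the file.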
record_start = 0
for line in fObj:
if not line:
break
self.lookup.append(record_start)
# len(), because fObj.tell() --> won't work with threading
record_start += len(line)
self.lookup_done = True
def close(self):
self.csvfile.close()
@property
def fileEncoding(self):
return self.fileEncoding_
def _get_encoding(self, csvFileName):
if not self.icuOk:
return locale.getpreferredencoding()
with open(csvFileName) as csvfile:
sample = csvfile.read(self.sampleSize)
cd = icu.CharsetDetector()
cd.setText(sample)
encoding = cd.detect().getName()
return encoding
def _get_dialect(self, csvFileName, encoding):
try:
csvfile = codecs.open(csvFileName, encoding=encoding)
sample = csvfile.read(self.sampleSize).encode(encoding)
dialect = csv.Sniffer().sniff(sample, delimiters=";,\t")
except csv.Error:
print("NOTE. Can't guess csv dialect. Assuming excel dialect")
dialect = csv.excel
finally:
csvfile.close()
return dialect
@property
def varNames(self):
return self.varNames_
def _get_header(self, csvFileName, encoding, dialect):
try:
csvfile = codecs.open(csvFileName, encoding=encoding)
sample = csvfile.read(self.sampleSize).encode(encoding)
has_header = csv.Sniffer().has_header(sample)
except csv.Error:
has_header = False
finally:
csvfile.seek(0)
data = csv.reader(csvfile, dialect)
varNames = next(data)
if not has_header:
varNames = ["col_%04d" % i for i in range(len(varNames))]
csvfile.close()
return varNames
@property
def shape(self):
nrows = 25 if not self.lookup else len(self.lookup)
ncols = len(self.varNames)
return self.Shape(nrows, ncols)
# source: https://docs.python.org/2/library/csv.html (bottom of page)
def unicode_csv_reader(self, unicode_csv_data, dialect, encoding, **kwargs):
csv_reader = csv.reader(self.utf_8_encoder(unicode_csv_data, encoding),
dialect=dialect, **kwargs)
for row in csv_reader:
yield [unicode(cell, encoding) for cell in row]
def utf_8_encoder(self, unicode_csv_data, encoding):
for line in unicode_csv_data:
yield line.encode(encoding)
TabIter = CsvIter
class XlsIter(ExtIterBase):
def __init__(self, xlsFileName):
global xlrd
import xlrd
self.xlsFileName = xlsFileName
self.file = open(self.xlsFileName, "rb")
self.xlsfile = xlrd.open_workbook(file_contents=mmap.mmap(
self.file.fileno(), 0,
access=mmap.ACCESS_READ))
self.shape_ = self._get_shape(self.xlsfile)
self.varNames_ = self._get_header(self.shape.ncols)
self.lookup = self._get_row_lookup()
@property
def fileEncoding(self):
return "utf-8"
@property
def shape(self):
return self.shape_
@property
def varNames(self):
return self.varNames_
def _get_row_lookup(self):
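        # Map a global row index onto (sheet number, row within that sheet)
        # so that all sheets can be browsed as one continuous table.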
lookup, global_row = {}, 0
for sheetno, sheet in enumerate(self.xlsfile.sheets()):
for local_row in xrange(sheet.nrows):
lookup[global_row] = (sheetno, local_row)
global_row += 1
return lookup
def __getitem__(self, key):
try:
sheetno, local_row = self.lookup[key]
except KeyError:
raise IndexError("index out of range")
sheet_name = [self.xlsfile.sheet_names()[sheetno]]
record = self.xlsfile.sheets()[sheetno].row_values(local_row)
return sheet_name + record
def close(self):
self.file.close()
def _get_shape(self, xlsfile):
nrows, ncols = [], []
for sheet in self.xlsfile.sheets():
nrows.append(sheet.nrows)
for row in xrange(sheet.nrows):
ncols.append(sheet.ncols + 1) # + 1 for the sheetname itself
nrows, ncols = sum(nrows), max(ncols)
return ntuple("Shape", ["nrows", "ncols"])(nrows, ncols)
def _get_header(self, ncols):
return ["col_%04d" % i for i in range(ncols)]
XlsxIter = XlsIter
class SavIter(ExtIterBase):
def __init__(self, savFileName):
global SavReader
from savReaderWriter import SavReader
self.savFileName = savFileName
self.records = self.data(self.savFileName)
self.init_seekNextCase()
self.formatValues = self.records.formatValues
def __getitem__(self, key):
# much faster than SavReader.__getitem__
self.seekNextCase(self.fh, key)
return self.formatValues(self.records.record)
def init_seekNextCase(self):
self.spssio = self.records.spssio
self.fh = self.records.fh
self.seekNextCase = self.spssio.spssSeekNextCase
self.seekNextCase.argtypes = [c_int, c_long]
def close(self):
return self.records.close()
@property
def fileEncoding(self):
return self.records.fileEncoding
@property
def shape(self):
return self.records.shape
@property
def varNames(self):
decode = lambda x: unicode(x, self.fileEncoding)
try:
return list(map(decode, self.records.varNames))
except TypeError:
return self.records.varNames # ioUtf8=True
def data(self, savFileName):
kwargs = dict(savFileName=savFileName, ioUtf8=True, recodeSysmisTo=float("nan"))
data = SavReader(**kwargs)
if not data.isCompatibleEncoding():
del kwargs["ioUtf8"]
encoding = data.fileEncoding.replace("_", "-")
encoding = re.sub(r"cp(\d+)", r"\1", encoding)
locale_ = locale.getlocale()[0] + "." + encoding
kwargs["ioLocale"] = locale_
data.close()
try:
data = SavReader(**kwargs)
except ValueError:
msg = ("Locale not found --> Linux: sudo localedef -f "
"%s -i %s /usr/lib/locale/%s")
msg = msg % (encoding.upper(), locale_.split(".")[0], locale_)
raise ValueError(msg)
return data
ZsavIter = SavIter
##################
class Menu(QMainWindow):
def __init__(self, app, filename=None):
super(Menu, self).__init__()
self.app = app
screen = QDesktopWidget().screenGeometry()
self.resize(screen.width(), screen.height())
self.setWindowTitle("Welcome to Data File Viewer!")
self.main_widget = QWidget(self)
self.main_layout = QVBoxLayout(self.main_widget)
self.setCentralWidget(self.main_widget)
self.statusBar()
self.create_spinbox_group()
self.table = Table(savFileName=None)
self.main_layout.addWidget(self.table)
self.createActions()
self.create_menu_bar()
self.main_widget.setLayout(self.main_layout)
self.show()
self.set_filename(filename)
self.start_thread()
self.update_screen()
def create_menu_bar(self):
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(self.openFile)
fileMenu.addAction(self.exitAct)
aboutMenu = menubar.addAction(self.aboutAct)
def set_filename(self, filename):
if filename: # commandline use
self.table.savFileName = os.path.abspath(os.path.expanduser(filename))
else:
self.showDialog()
self.read_file()
def read_file(self):
if self.table.savFileName:
self.table.records = self.table.data(self.table.savFileName)
nrows, ncols = self.table.records.shape
self.spin_box.setRange(-nrows - 1, nrows - 1)
self.table.create_vert_scrollbar(nrows) # redraw
self.table.create_table(self.table.block_size, self.table.records.varNames)
self.table.update_grid()
nrows, ncols = "{:,}".format(nrows), "{:,}".format(ncols)
title = "%s (%s rows, %s columns)"
self.title = title % (self.table.savFileName, nrows, ncols)
self.setWindowTitle(self.title)
def showDialog(self):
if hasattr(self.table, "records"):
self.table.records.close()
# in case of a previously opened file: start in same directory
if self.table.savFileName:
directory = os.path.dirname(self.table.savFileName)
else:
directory = os.path.expanduser("~")
selectedFilter = ("SPSS Data files (*.sav *.zsav);;"
"Character-Separated Values files (*.csv *.tab);;"
"Excel files (*.xls *.xlsx);;"
"All Files (*)")
args = ('Open file', directory, selectedFilter)
self.table.savFileName = QFileDialog.getOpenFileName(self, *args)
if not py3k:
fs_encoding = sys.getfilesystemencoding() # windows okay? mbcs??
self.table.savFileName = unicode(self.table.savFileName, fs_encoding)
self.read_file()
def create_spinbox_group(self):
group = QGroupBox()
group.setTitle("Retrieve record")
self.main_layout.addWidget(group)
hbox = QHBoxLayout()
hbox.setSpacing(0)
# initialize spinbox with some defaults
self.spin_box = QSpinBox(self)
self.spin_box.setRange(-100, 100)
self.spin_box.setSingleStep(1)
self.spin_box.setPrefix("# ")
self.spin_box.setSpecialValueText("invalid")
hbox.addWidget(self.spin_box)
go_button = QPushButton('Go to record', self)
tooltip = "Enter an integer (0 = first record, -1 = last record)"
go_button.setToolTip(tooltip)
hbox.addWidget(go_button)
# add a checkbox -for fun
cb = QCheckBox('Managerize data', self)
def do_cb():
self.table.do_managerize = cb.isChecked()
QObject.connect(cb, SIGNAL("stateChanged(int)"), do_cb)
hbox.addWidget(cb)
group.setLayout(hbox)
hbox.addStretch(1)
def retrieve():
if hasattr(self.table, "records"):
value = self.spin_box.value()
value = self.table.records.shape.nrows + value if value < 0 else value
self.table.populate_table(self.table.records, value)
self.table.vert_scroll.setValue(value) # synchronize scroll & spinbox
QObject.connect(go_button, SIGNAL("clicked()"), retrieve)
def createActions(self):
self.openFile = QAction(QIcon('open.png'), 'Open', self)
self.openFile.setShortcut('Ctrl+O')
self.openFile.setStatusTip('Open new File')
self.connect(self.openFile, SIGNAL("triggered()"), self.showDialog)
self.exitAct = QAction(self.tr("&Exit"), self)
self.exitAct.setShortcut(self.tr("Ctrl+Q"))
self.exitAct.setStatusTip(self.tr("Exit the application"))
self.connect(self.exitAct, SIGNAL("triggered()"), self, SLOT("close()"))
self.aboutAct = QAction(self.tr("&About"), self)
self.aboutAct.setStatusTip(self.tr("Show the application's About box"))
self.connect(self.aboutAct, SIGNAL("triggered()"), self.about)
def about(self):
title = "SPSS Data File Viewer\n\n%s\n(%s)\nversion %s\n%s"
title = title % (__author__, __email__, __version__, __date__)
QMessageBox.about(self, self.tr("About"), self.tr(title))
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message',
"Are you sure you want to quit?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def start_thread(self):
self.thread = QThread()
self.connect(self.thread , SIGNAL('update(QString)') , self.update_screen)
self.thread.start()
def update_screen(self):
previous_nrows = -1
if not hasattr(self.table, "records"):
return
while True:
nrows, ncols = self.table.records.shape
title = "{} ({:,} rows, {:,} columns)"
title = title.format(self.table.savFileName, nrows, ncols)
self.setWindowTitle(title)
self.table.vert_scroll.setRange(0, nrows - 1)
self.spin_box.setRange(-nrows, nrows)
self.app.processEvents()
time.sleep(0.05)
if previous_nrows >= nrows:
break
previous_nrows = nrows
class MyScrollBar(QScrollBar):
"""vertical scroll bar of grid"""
def mouseReleaseEvent(self, event):
self.emit(SIGNAL("clicked()"))
class Table(QDialog):
"""
Read a supported data file blockwise, with <block_size> records at a time,
display in a table grid
"""
def __init__(self, savFileName, block_size=25, parent=None):
super(Table, self).__init__(parent)
self.savFileName = savFileName
self.block_size = block_size
self.layout = QGridLayout()
self.setLayout(self.layout)
self.create_table(self.block_size)
self.create_vert_scrollbar()
self.update_grid()
self.do_managerize = False # :-)
def data(self, fileName):
extension = os.path.splitext(fileName)[1]
classname = extension[1:].title() + "Iter"
try:
return globals()[classname](fileName)
except KeyError:
raise TypeError("Unknown filetype: %r" % extension)
def create_vert_scrollbar(self, upper=100):
self.vert_scroll = MyScrollBar(Qt.Vertical, self.table)
self.vert_scroll.setSingleStep(1)
self.vert_scroll.setFocusPolicy(Qt.StrongFocus)
self.vert_scroll.setValue(0)
self.vert_scroll.setRange(0, upper - 1)
self.layout.addWidget(self.vert_scroll, 0, 1)
QObject.connect(self.vert_scroll, SIGNAL("clicked()"), self.update_grid)
def create_table(self, block_size, colnames=None):
if colnames:
self.table = QTableWidget(block_size, len(colnames))
self.table.setHorizontalHeaderLabels(colnames)
else: # initialize empty table
self.table = QTableWidget(block_size, block_size)
self.table.setHorizontalHeaderLabels([""] * block_size)
self.table.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.table.setAlternatingRowColors(True)
self.layout.addWidget(self.table, 0, 0)
self.table.resizeRowsToContents()
def update_grid(self):
if not self.savFileName:
return # initialize empty table
slider_value = self.vert_scroll.value()
percentage = slider_value / self.records.shape.nrows * 100
tooltip = "record # %d (%2.1f %%)" % (slider_value, percentage)
self.vert_scroll.setToolTip(tooltip)
self.populate_table(self.records, slider_value)
def populate_table(self, data, start_row=None):
dim = data.shape
encoding = data.fileEncoding
varNames = data.varNames
# get block numbers
start_row = self.vert_scroll.value() if start_row is None else start_row
end_row = start_row + self.block_size
end_row = dim.nrows if end_row > dim.nrows else end_row
block = range(start_row, end_row)
fake_block = range(self.block_size)
# redraw a smaller grid, if needed
if len(block) != self.block_size:
self.create_table(len(block), varNames)
else:
self.create_table(self.block_size, varNames)
# set row/column labels
if py3k:
self.table.setVerticalHeaderLabels(list(map(str, block)))
else:
self.table.setVerticalHeaderLabels(QStringList(map(str, block)))
self.table.setHorizontalHeaderLabels(varNames)
# fill the grid with values. The very last block is annoying
for row, fake_row in izip_longest(block, fake_block):
row_exists = row is not None
if row_exists:
record = data[row]
for col in range(dim.ncols):
if row_exists:
try:
value = self._convert(record[col], encoding)
except IndexError:
value = "" # could be needed for multisheet xls files
except TypeError:
break
#value = "<???>"
table_item = QTableWidgetItem(value)
if value == u"nan":
table_item.setTextColor(QColor("red"))
table_item.setBackgroundColor(QColor("yellow"))
elif value == u"":
table_item.setBackgroundColor(QColor("gray"))
if self.do_managerize:
table_item.setBackgroundColor(QColor(self.managerize()))
self.table.setItem(fake_row, col, table_item)
#self.table.setItem(fake_row, col, QTableWidgetItem(value))
if not row_exists:
break
def _convert(self, value, encoding):
try:
return unicode(value, encoding)
except TypeError:
return unicode(value)
def managerize(self):
rgb = (randint(0, 255), randint(0, 255), randint(0, 255))
return '#%02x%02x%02x' % rgb
if __name__ == '__main__':
app = QApplication(sys.argv)
filename = None if len(sys.argv) == 1 else sys.argv[1]
menu = Menu(app, filename)
sys.exit(app.exec_())
| 34.529412 | 88 | 0.60014 | 2,558 | 22,893 | 5.241986 | 0.213839 | 0.030204 | 0.014915 | 0.005071 | 0.120964 | 0.075695 | 0.042061 | 0.020732 | 0.020732 | 0.013722 | 0 | 0.007292 | 0.293103 | 22,893 | 662 | 89 | 34.581571 | 0.821294 | 0.061984 | 0 | 0.207171 | 0 | 0.001992 | 0.056895 | 0.001287 | 0.001992 | 0 | 0 | 0.001511 | 0 | 1 | 0.113546 | false | 0 | 0.045817 | 0.017928 | 0.241036 | 0.005976 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70e637641e86d5c5173d20db6e71c572df61d856 | 2,651 | py | Python | src/parsing/interactions_parser.py | mkRPGDev/mkRPG | 154e5d264dc1cc5fba78980da430e9d7ca0ccc22 | [
"Beerware"
] | 2 | 2016-10-06T10:09:10.000Z | 2016-10-07T14:16:19.000Z | src/parsing/interactions_parser.py | mkRPGDev/mkRPG | 154e5d264dc1cc5fba78980da430e9d7ca0ccc22 | [
"Beerware"
] | 17 | 2016-12-01T10:10:23.000Z | 2017-01-12T16:41:51.000Z | src/parsing/interactions_parser.py | mkRPGDev/mkRPG | 154e5d264dc1cc5fba78980da430e9d7ca0ccc22 | [
"Beerware"
] | null | null | null | """
This file handles parsing of interactions files, that is to say files that
describe the reactions of the game to the keyboard, and the events associated to
these interactions.
"""
# -*- coding: utf-8 -*-
import sys
from collections import OrderedDict
import parsing.parsing_utils as parsing_utils
import parsing.actions_parser as actions_parser
def interaction_parser(interaction_tag):
"""Parses one interaction tag. An interaction tag is very simple, it
    contains a curses keycode, a target and an event.
"""
interaction = OrderedDict()
_key = interaction_tag.find('key')
if _key is None:
parsing_utils.fail_not_found('key')
if _key.get('val') is None:
print("Tag %s found, but value not present" % 'key')
interaction.update({"key": parsing_utils.format_type(_key.get('val'))})
for tag in ['target', 'event']:
val = interaction_tag.find(tag)
if val is None:
parsing_utils.fail_not_found(tag)
if val.get("val") is None:
print("Tag %s found, but value not present" % tag)
sys.exit(1)
interaction.update({tag : val.get("val")})
return interaction
def interactions_parser(interaction_xml):
"""This function parses a whole file, and returns the dictionnary of all
actions described in the file.
"""
interactions = parsing_utils.try_open_and_parse(interaction_xml)
interactions_list = []
for interaction in interactions.findall('Interaction'):
interactions_list.append(interaction_parser(interaction))
return interactions_list
def interactions_files_parser(*interactions_files):
"""This function parses a list of files, in order to find all interactions
described in these files. It provides some safety, since it checks that
every keycode is used at most once.
"""
return parsing_utils.parse_multiple_files(interactions_parser,
*interactions_files)
def get_all_actions(interactions):
"""Returns a list with all action names defined in ```interaction```.
It is useful in order to check that no action called in an Action
tagged file is never called by an interaction.
No sanity check is done here, since interactions should be
    well-formed when they arrive here.
"""
return parsing_utils.collect_data('event', interactions)
def check_actions(interactions, actions):
"""Checks if all actions called by an interaction exist."""
interaction_names = get_all_actions(interactions)
action_names = actions_parser.get_all_names(actions)
return interaction_names <= action_names
| 36.315068 | 80 | 0.706903 | 354 | 2,651 | 5.144068 | 0.330508 | 0.052718 | 0.014827 | 0.019769 | 0.081274 | 0.081274 | 0.081274 | 0.048325 | 0.048325 | 0.048325 | 0 | 0.000957 | 0.211618 | 2,651 | 72 | 81 | 36.819444 | 0.870335 | 0.348171 | 0 | 0 | 0 | 0 | 0.074254 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0.111111 | 0 | 0.388889 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70e7e39111d0157dda74c8f4263d957f3c0f7757 | 309 | py | Python | docs/version.py | inteplus/mtdoc | a496c5bab6b6db6c740045f2bba5c8af5882088e | [
"MIT"
] | null | null | null | docs/version.py | inteplus/mtdoc | a496c5bab6b6db6c740045f2bba5c8af5882088e | [
"MIT"
] | null | null | null | docs/version.py | inteplus/mtdoc | a496c5bab6b6db6c740045f2bba5c8af5882088e | [
"MIT"
] | null | null | null | VERSION_YEAR = 2021
VERSION_MONTH = int('09')
VERSION_DAY = int('10')
VERSION_HOUR = int('13')
VERSION_MINUTE = int('29')
MAJOR_VERSION = 2021
MINOR_VERSION = '90910'
PATCH_VERSION = '91329'
version_date = '2021/09/10 13:29'
version = '{}.{}.{}'.format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
| 28.090909 | 73 | 0.695793 | 43 | 309 | 4.72093 | 0.44186 | 0.118227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.143396 | 0.142395 | 309 | 10 | 74 | 30.9 | 0.622642 | 0 | 0 | 0 | 0 | 0 | 0.140468 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70e9e01625f8e46f366a94fcc8effabde7c79e90 | 13,489 | py | Python | application/tests.py | dhosterman/hebrew_order_david | c86a83c9e3e1e22dd0427c7c03525f2503fff574 | [
"MIT"
] | null | null | null | application/tests.py | dhosterman/hebrew_order_david | c86a83c9e3e1e22dd0427c7c03525f2503fff574 | [
"MIT"
] | null | null | null | application/tests.py | dhosterman/hebrew_order_david | c86a83c9e3e1e22dd0427c7c03525f2503fff574 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.core import mail
from accounts.models import User
from application.forms import UserForm
from .validators import is_valid_tn
from .notify import on_new_user, on_updated_user
# Create your tests here.
class TestTNValidation(TestCase):
def test_valid_non_blank_tn(self):
valid_tn = '123-456-7890'
is_valid = is_valid_tn(valid_tn)
self.assertEqual(is_valid, True)
def test_non_valid_non_blank_tn(self):
invalid_tn = '1234-345-1234'
is_valid = is_valid_tn(invalid_tn)
self.assertEqual(is_valid, False)
def test_valid_blank_tn(self):
valid_tn = ''
is_valid = is_valid_tn(valid_tn, blank=True)
self.assertEqual(is_valid, True)
def test_non_valid_blank_tn(self):
invalid_tn = ''
is_valid = is_valid_tn(invalid_tn)
self.assertEqual(is_valid, False)
class ApplicationViewsTests(TestCase):
def setUp(self):
self.c = Client()
self.user = User.objects.create_user(
email='test@test.com',
first_name='first',
last_name='last',
password='test'
)
# all of this data must pass all validations in post view
self.valid_contact_data = {
'home_address': '123 Main St',
'home_city': 'Dallas',
'home_state': 'TX',
'home_zip': '12345',
'postal_same_as_home': 'on',
'postal_address': '',
'postal_city': '',
'postal_state': '',
'postal_zip': '',
'home_phone': '123-456-7890',
'work_phone': '123-456-7890',
'mobile_phone': '123-456-7890'
}
self.valid_personal_data = {
'date_of_birth_month': '1',
'date_of_birth_day': '1',
'date_of_birth_year': '1977',
'city_of_birth': 'Dallas',
'country_of_birth': 'USA',
'married': 'on',
'children': 'on'
}
self.valid_wife_data = {
'name': 'Martha Smith',
'hebrew_name': 'Hebrew Smith',
'date_of_birth_month': '1',
'date_of_birth_day': '1',
'date_of_birth_year': '1977',
'date_of_marriage_month': '1',
'date_of_marriage_day': '1',
'date_of_marriage_year': '1980',
'email': 'wife@email.com',
'country_of_marriage': 'USA',
'city_of_marriage': 'Chicago',
'mobile_phone': '123-456-7890',
}
self.valid_occupation_data = {
'occupation-occupation': 'Carpenter',
'occupation-business_name': 'Carpenters, Inc.',
'occupation-address': '123 Main St',
'occupation-city': 'Dallas',
'occupation-state': 'TX',
'occupation-zip': '12345',
'occupation-phone': '123-456-7890'
}
self.valid_hod_data = {
'synagogue_or_temple': 'Synagogue',
'sponsor': 'Tom Thumb',
'sponsor_phone': '123-456-7890',
'previous_member_of_hod': '',
'previous_lodges': '',
'skills_or_hobbies': '',
'other_organizations': ''
}
self.valid_user_data = {
'email': 'valid@valid.com',
'first_name': 'First',
'last_name': 'Last'
}
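        # Django formsets expect these management-form fields even when no
        # child/committee forms are submitted.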
self.valid_formset_management = {
'children-TOTAL_FORMS': 0,
'children-INITIAL_FORMS': 0,
'children-MIN_NUM_FORMS': 0,
'children-MAX_NUM_FORMS': 0,
'committees-TOTAL_FORMS': 0,
'committees-INITIAL_FORMS': 0,
'committees-MIN_NUM_FORMS': 0,
'committees-MAX_NUM_FORMS': 0
}
def tearDown(self):
self.c.logout()
def test_new_redirects_to_show_if_user_active(self):
self.c.login(email=self.user.email, password='test')
url = reverse('application.views.new')
expected_url = reverse('application.views.show')
response = self.c.get(url)
self.assertRedirects(response, expected_url)
def test_new_shows_registration_form_if_user_not_active(self):
url = reverse('application.views.new')
response = self.c.get(url)
self.assertTemplateUsed(response, 'new.html')
def test_user_must_be_logged_in_to_view_show(self):
url = reverse('application.views.show')
response = self.c.get(url)
expected_url = reverse('accounts.views.login_view')
expected_url += '?next=/application/show/'
self.assertRedirects(response, expected_url)
self.c.login(email=self.user.email, password='test')
response = self.c.get(url)
self.assertEqual(response.status_code, 200)
def test_user_must_be_logged_in_to_view_update(self):
url = reverse('application.views.update')
response = self.c.post(url, self.valid_formset_management)
expected_url = reverse('accounts.views.login_view')
expected_url += '?next=/application/update/'
self.assertRedirects(response, expected_url)
self.c.login(email=self.user.email, password='test')
response = self.c.post(url, self.valid_formset_management)
expected_url = reverse('application.views.thank_you')
self.assertRedirects(response, expected_url)
# a complete application requires that an applicant enters information
    # for the User model as well as for ContactDetails, PersonalDetails,
# and OtherDetails. If any of these models do not have the required
# information, the form should not submit
def test_post_saves_if_all_submissions_are_valid(self):
valid_post_data = {}
valid_post_data.update(self.valid_user_data)
valid_post_data.update(self.valid_contact_data)
valid_post_data.update(self.valid_personal_data)
valid_post_data.update(self.valid_wife_data)
valid_post_data.update(self.valid_occupation_data)
valid_post_data.update(self.valid_hod_data)
valid_post_data.update(self.valid_formset_management)
url = reverse('application.views.post')
expected_url = reverse('application.views.thank_you')
response = self.c.post(url, valid_post_data)
self.assertRedirects(response, expected_url)
def test_post_fails_if_contact_invalid(self):
invalid_post_data = {}
invalid_post_data.update(self.valid_user_data)
invalid_post_data.update(self.valid_contact_data)
invalid_post_data.update(self.valid_personal_data)
invalid_post_data.update(self.valid_wife_data)
invalid_post_data.update(self.valid_occupation_data)
invalid_post_data.update(self.valid_hod_data)
invalid_post_data.update(self.valid_formset_management)
invalid_post_data['home_address'] = ''
url = reverse('application.views.post')
expected_url = reverse('application.views.error')
response = self.c.post(url, invalid_post_data)
self.assertRedirects(response, expected_url)
def test_post_fails_if_personal_invalid(self):
invalid_post_data = {}
invalid_post_data.update(self.valid_user_data)
invalid_post_data.update(self.valid_contact_data)
invalid_post_data.update(self.valid_personal_data)
invalid_post_data.update(self.valid_wife_data)
invalid_post_data.update(self.valid_occupation_data)
invalid_post_data.update(self.valid_hod_data)
invalid_post_data.update(self.valid_formset_management)
invalid_post_data['date_of_birth_year'] = ''
url = reverse('application.views.post')
expected_url = reverse('application.views.error')
response = self.c.post(url, invalid_post_data)
self.assertRedirects(response, expected_url)
def test_post_fails_if_occupation_invalid(self):
invalid_post_data = {}
invalid_post_data.update(self.valid_user_data)
invalid_post_data.update(self.valid_contact_data)
invalid_post_data.update(self.valid_personal_data)
invalid_post_data.update(self.valid_wife_data)
invalid_post_data.update(self.valid_occupation_data)
invalid_post_data.update(self.valid_hod_data)
invalid_post_data.update(self.valid_formset_management)
invalid_post_data['occupation-occupation'] = ''
url = reverse('application.views.post')
expected_url = reverse('application.views.error')
response = self.c.post(url, invalid_post_data)
self.assertRedirects(response, expected_url)
def test_post_fails_if_hod_invalid(self):
invalid_post_data = {}
invalid_post_data.update(self.valid_user_data)
invalid_post_data.update(self.valid_contact_data)
invalid_post_data.update(self.valid_personal_data)
invalid_post_data.update(self.valid_wife_data)
invalid_post_data.update(self.valid_occupation_data)
invalid_post_data.update(self.valid_hod_data)
invalid_post_data.update(self.valid_formset_management)
invalid_post_data['sponsor'] = ''
url = reverse('application.views.post')
expected_url = reverse('application.views.error')
response = self.c.post(url, invalid_post_data)
self.assertRedirects(response, expected_url)
def test_post_fails_if_user_invalid(self):
invalid_post_data = {}
invalid_post_data.update(self.valid_user_data)
invalid_post_data.update(self.valid_contact_data)
invalid_post_data.update(self.valid_personal_data)
invalid_post_data.update(self.valid_wife_data)
invalid_post_data.update(self.valid_occupation_data)
invalid_post_data.update(self.valid_hod_data)
invalid_post_data.update(self.valid_formset_management)
invalid_post_data['first_name'] = ''
url = reverse('application.views.post')
expected_url = reverse('application.views.error')
response = self.c.post(url, invalid_post_data)
self.assertRedirects(response, expected_url)
def test_user_must_be_logged_in_and_staff_to_export_excel(self):
staff_user = User.objects.create_superuser(
email='staff@user.com',
first_name='Staff',
last_name='User',
password='test',
)
url = reverse('application.views.export_as_excel')
expected_url = reverse('accounts.views.login_view')
expected_url += '?next=/application/export_excel/'
self.c.login(email=self.user.email, password='test')
response = self.c.get(url)
self.assertRedirects(response, expected_url)
self.c.login(email=staff_user.email, password='test')
response = self.c.get(url)
self.assertEqual(response.status_code, 200)
class NotifyTests(TestCase):
def setUp(self):
        self.user = User.objects.create_superuser(
            first_name='Steve',
            last_name='Smith',
            email='ssmith@fakeemail.com',
            password='password'
        )
self.user.hebrew_name = 'Aaron'
self.user.save()
        User.objects.create_superuser(
            first_name='Natasha',
            last_name='Romanov',
            email='natasha@avengers.com',
            password='password'
        )
def tearDown(self):
pass
def test_on_new_user_subject_is_correct(self):
expected = 'New Application'
on_new_user(self.user)
result = mail.outbox[0].subject
self.assertIn(expected, result)
def test_on_new_user_sends_proper_number(self):
expected = 2
on_new_user(self.user)
result = len(mail.outbox)
self.assertEqual(expected, result)
def test_on_new_user_body_contains_applicant_name(self):
expected = 'Applicant: Steve Smith'
on_new_user(self.user)
result = str(mail.outbox[0].message())
self.assertIn(expected, result)
def test_on_new_user_body_contains_applicant_email(self):
expected = 'Email: ssmith@fakeemail.com'
on_new_user(self.user)
result = str(mail.outbox[0].message())
self.assertIn(expected, result)
def test_on_new_user_body_contains_appliant_hebrew_name(self):
expected = 'Hebrew Name: Aaron'
on_new_user(self.user)
result = str(mail.outbox[0].message())
self.assertIn(expected, result)
def test_on_update_user_subject_is_correct(self):
expected = 'Updated Application'
on_updated_user(self.user, [])
result = mail.outbox[0].subject
self.assertIn(expected, result)
def test_on_update_user_body_contains_applicant_name(self):
expected = 'Applicant: Steve Smith'
on_updated_user(self.user, [])
result = str(mail.outbox[0].message())
self.assertIn(expected, result)
def test_on_update_body_contains_changed_field_names(self):
post = {
'first_name': 'Steve',
'last_name': 'Smith',
'email': 'ssmith@fakeemail.com',
'hebrew_name': 'David'
}
form = UserForm(post, instance=self.user)
form.is_valid()
expected = '* Hebrew name: changed from Aaron to David'
on_updated_user(self.user, [form])
result = str(mail.outbox[0].message())
self.assertIn(expected, result)
| 39.098551 | 74 | 0.649122 | 1,633 | 13,489 | 5.028781 | 0.132272 | 0.057477 | 0.09133 | 0.09206 | 0.701047 | 0.657331 | 0.623234 | 0.562591 | 0.562591 | 0.536288 | 0 | 0.013452 | 0.245014 | 13,489 | 344 | 75 | 39.212209 | 0.79291 | 0.023797 | 0 | 0.431438 | 0 | 0 | 0.168832 | 0.067776 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.090301 | false | 0.033445 | 0.023411 | 0 | 0.123746 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70ec381c6c2e9e6fb122d063a7166dbc75a700f8 | 463 | py | Python | tests/helpers.py | LesterFreamon/disaster_response_pipeline | 388cdb2fe74da8dc8c4cea14298ef3fd36348dc6 | [
"MIT"
] | null | null | null | tests/helpers.py | LesterFreamon/disaster_response_pipeline | 388cdb2fe74da8dc8c4cea14298ef3fd36348dc6 | [
"MIT"
] | null | null | null | tests/helpers.py | LesterFreamon/disaster_response_pipeline | 388cdb2fe74da8dc8c4cea14298ef3fd36348dc6 | [
"MIT"
] | null | null | null | import pandas as pd
from pandas.testing import assert_frame_equal
def sort_and_assert_frame_equal(df_1: pd.DataFrame, df_2: pd.DataFrame) -> None:
"""Sort the data frames by columns and index and then compare"""
df_1 = df_1.reindex(sorted(df_1.columns), axis=1)
df_1 = df_1.sort_values(df_1.columns.tolist())
df_2 = df_2.reindex(sorted(df_1.columns), axis=1)
df_2 = df_2.sort_values(df_1.columns.tolist())
assert_frame_equal(df_1, df_2)
| 38.583333 | 80 | 0.736501 | 86 | 463 | 3.662791 | 0.348837 | 0.095238 | 0.126984 | 0.114286 | 0.47619 | 0.355556 | 0.190476 | 0.190476 | 0 | 0 | 0 | 0.04557 | 0.146868 | 463 | 11 | 81 | 42.090909 | 0.751899 | 0.12527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.375 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70ec52b6f164ace0ac1a346189390e7e209dfb04 | 6,266 | py | Python | brainstorm/structure/construction.py | PyCN/brainstorm | 8f1fc886faf268b25085fa5c95bf106b1805d766 | [
"MIT"
] | 1,473 | 2015-10-25T19:12:45.000Z | 2022-03-13T01:00:51.000Z | brainstorm/structure/construction.py | PyCN/brainstorm | 8f1fc886faf268b25085fa5c95bf106b1805d766 | [
"MIT"
] | 50 | 2015-10-25T19:14:17.000Z | 2018-10-03T07:48:25.000Z | brainstorm/structure/construction.py | PyCN/brainstorm | 8f1fc886faf268b25085fa5c95bf106b1805d766 | [
"MIT"
] | 209 | 2015-10-25T20:22:06.000Z | 2021-07-23T00:00:39.000Z | #!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import six
from brainstorm.utils import NetworkValidationError, is_valid_layer_name
class UniquelyNamed(object):
"""
An object that maintains a scope of names and ensures that its name is
unique within that scope by appending an appropriate index.
If there are no collisions then its name is the same as the given basename.
If there are multiple objects with the same name in the scope, then
its name is the basename + _index where index is a number given according
to the order in which the objects where created.
"""
global_counter = 0
def __init__(self, basename):
self._basename = basename
self.scope = {basename: [self]}
self.creation_id = UniquelyNamed.global_counter
UniquelyNamed.global_counter += 1
def merge_scopes(self, other):
new_scope = self.scope
for name, scoped_names in other.scope.items():
if name not in self.scope:
new_scope[name] = []
new_scope[name] = sorted(set(self.scope[name] + scoped_names),
key=lambda x: x.creation_id)
for n in new_scope:
for sn in new_scope[n]:
sn.scope = new_scope
@property
def name(self):
if len(self.scope[self._basename]) == 1:
return self._basename
i = 1
for un in self.scope[self._basename]:
name = "{}_{}".format(self._basename, i)
# see if this derived name is already taken
# increase the index if need be
while name in self.scope and len(self.scope[name]) == 1:
i += 1
name = "{}_{}".format(self._basename, i)
if un is self:
return name
i += 1
class LayerDetails(UniquelyNamed):
"""
Contains all details about a layer at construction time.
This information is later used to generate an architecture, from which the
actual layers are instantiated and combined into a network.
"""
def __init__(self, layer_type, name=None, **kwargs):
if not is_valid_layer_name(layer_type):
raise NetworkValidationError(
"Invalid layer_type: '{}'".format(layer_type))
if not (name is None or is_valid_layer_name(name)):
raise NetworkValidationError(
"Invalid name for layer: '{}'".format(name))
super(LayerDetails, self).__init__(name or layer_type)
self.layer_type = layer_type
"""The type this layer should have when later being instantiated."""
self.incoming = []
"""A list of all incoming connections, including input/output names.
Each entry of the list has the form:
(incoming_layer, output_name, input_name)
and the type:
tuple[LayerDetails, str, str]
"""
self.outgoing = []
"""A list of all outgoing connections, including input/output names.
Each entry of the list has the form:
(output_name, input_name, outgoing_layer)
and the type:
tuple[str, str, LayerDetails]
"""
self.layer_kwargs = kwargs
"""Dictionary of additional parameters for this layer"""
self._traversing = False
def collect_connected_layers(self):
"""Return a set of all layers that are somehow connected to this"""
connectom = set()
new_layers = {self}
while new_layers:
very_new_layers = set()
for l in new_layers:
very_new_layers |= {o[2] for o in l.outgoing}
very_new_layers |= {i[0] for i in l.incoming}
connectom |= new_layers
new_layers = very_new_layers - connectom
return connectom
def __repr__(self):
return "<Layer: {}>".format(self.name)
class ConstructionWrapper(object):
"""
Class to realize the python interface for setting up architectures.
Internally it keeps a LayerDetails object, which is updated with
connections.
It also implements the shift operation (>>) for wiring up layers, and the
subtraction operation (-) for specifying named inputs or outputs.
"""
@classmethod
def create(cls, layer_type, name=None, **kwargs):
if isinstance(layer_type, six.string_types):
layer_type_name = layer_type
else:
layer_type_name = layer_type.__name__
if not layer_type_name.endswith('LayerImpl'):
raise NetworkValidationError("{} should end with 'LayerImpl'"
.format(layer_type_name))
layer_type_name = layer_type_name[:-9]
details = LayerDetails(layer_type_name, name=name, **kwargs)
return ConstructionWrapper(details)
def __init__(self, layer_details, input_name='default',
output_name='default'):
self.layer = layer_details
self.input_name = input_name
self.output_name = output_name
def __rshift__(self, other):
if not isinstance(other, ConstructionWrapper):
return NotImplemented
self.layer.outgoing.append((self.output_name, other.input_name,
other.layer))
other.layer.incoming.append((self.layer, self.output_name,
other.input_name))
self.layer.merge_scopes(other.layer)
return other
def __sub__(self, other):
if not isinstance(other, six.string_types):
return NotImplemented
return ConstructionWrapper(self.layer, output_name=other,
input_name=self.input_name)
def __rsub__(self, other):
if not isinstance(other, six.string_types):
return NotImplemented
return ConstructionWrapper(self.layer, input_name=other,
output_name=self.output_name)
def __repr__(self):
return "<Layer: '{}' - {} - '{}'>".format(self.input_name,
self.layer.name,
self.output_name)
| 36.011494 | 79 | 0.607405 | 743 | 6,266 | 4.920592 | 0.265141 | 0.044311 | 0.035558 | 0.019694 | 0.214442 | 0.169037 | 0.10558 | 0.088074 | 0.088074 | 0.088074 | 0 | 0.002547 | 0.310884 | 6,266 | 173 | 80 | 36.219653 | 0.844141 | 0.167092 | 0 | 0.128713 | 0 | 0 | 0.03337 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.118812 | false | 0 | 0.029703 | 0.019802 | 0.306931 | 0.009901 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70f0e0fe5503d0801f88a83895d280619dc2b960 | 1,333 | py | Python | predict.py | TonyMTH/Digit-Recognizer | 0bbdc244c86bc6d4b07bbfb49f2b52abbed2155e | [
"MIT"
] | null | null | null | predict.py | TonyMTH/Digit-Recognizer | 0bbdc244c86bc6d4b07bbfb49f2b52abbed2155e | [
"MIT"
] | null | null | null | predict.py | TonyMTH/Digit-Recognizer | 0bbdc244c86bc6d4b07bbfb49f2b52abbed2155e | [
"MIT"
] | null | null | null | # Import libraries
import torch
from PIL import Image
from matplotlib import pyplot as plt
from numpy import mean
from process_user_input import Process, TransformAll
class Predict:
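    """Load a saved PyTorch model and classify each digit segmented from a
    user-supplied image; ``predict`` returns the mean confidence (as a
    percentage), the predicted digit string and the intermediate images."""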
def __init__(self, model_path, img_pth):
self.path = model_path
self.im = img_pth
def predict(self, original_image, kernel_size, min_thresh, n, dim):
# Load Model
model = torch.load(self.path)
# Load and transform Data
pro = Process(self.im, original_image, kernel_size, min_thresh, n, dim)
tra = TransformAll(self.im, original_image, kernel_size, min_thresh, n, dim)
images = tra.split_numbers()
tensor_images = tra.distribute_collect_images()
img = pro.resize(True)
probs = []
preds = []
raw_im = []
for im, rim in tensor_images:
# Predict
model.eval()
im = im.view(im.shape[0], -1)
output = model(im)
ps = torch.exp(output)
_, top_class = ps.topk(1, dim=1)
pred = top_class[0][0].item()
prob = torch.softmax(output, dim=1)[0][pred].item()
probs.append(prob)
preds.append(pred)
raw_im.append(rim)
return int(mean(probs) * 100), ' '.join([str(i) for i in preds]), images, raw_im
| 28.361702 | 88 | 0.590398 | 176 | 1,333 | 4.301136 | 0.420455 | 0.023778 | 0.075297 | 0.091149 | 0.15852 | 0.15852 | 0.15852 | 0.15852 | 0.110964 | 0.110964 | 0 | 0.011841 | 0.303076 | 1,333 | 46 | 89 | 28.978261 | 0.803014 | 0.044261 | 0 | 0 | 0 | 0 | 0.000788 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.16129 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70f430c5772303d1b2abc0c04662d32dc344f913 | 4,666 | py | Python | src/main/docker/kibana/restore.py | lfntac/clamp | 3e118724141917299ad3e2f535544500b5c459b0 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | src/main/docker/kibana/restore.py | lfntac/clamp | 3e118724141917299ad3e2f535544500b5c459b0 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | src/main/docker/kibana/restore.py | lfntac/clamp | 3e118724141917299ad3e2f535544500b5c459b0 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
###
# ============LICENSE_START=======================================================
# ONAP CLAMP
# ================================================================================
# Copyright (C) 2018 AT&T Intellectual Property. All rights
# reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END============================================
# ===================================================================
#
###
import json
import logging
import os
import sys
import requests
if sys.version_info < (3,):
# for HTTPStatus.OK only
import httplib as HTTPStatus
else:
from http import HTTPStatus
OBJECT_TYPES = ['index-pattern', 'config', 'search', 'visualization', 'dashboard']
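# OBJECT_TYPES is ordered so that dependencies are restored first
# (index patterns before the visualizations and dashboards built on them).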
def parse_args(args):
""" Parse arguments given to this script"""
import argparse
parser = argparse.ArgumentParser(
description=('Restores the kibana configuration.'))
parser.add_argument('-v', '--verbose', dest='log_level', action='store_const',
const=logging.DEBUG, default=logging.INFO,
help='Use verbose logging')
parser.add_argument('-C', '--configuration_path',
default='./default',
help=('Path of the configuration to be restored.'
'Should contain at least one folder named %s or %s' %
(','.join(OBJECT_TYPES[:-1]), OBJECT_TYPES[-1])
)
)
parser.add_argument('-H', '--kibana-host', default='http://localhost:5601',
help='Kibana endpoint.')
parser.add_argument('-f', '--force', action='store_const',
const=True, default=False,
help='Overwrite configuration if needed.')
return parser.parse_args(args)
def get_logger(args):
"""Creates the logger based on the provided arguments"""
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(args.log_level)
return logger
def main():
''' Main script function'''
args = parse_args(sys.argv[1:])
logger = get_logger(args)
base_config_path = args.configuration_path
    # iterate in OBJECT_TYPES order so that dependencies are restored first
for obj_type in OBJECT_TYPES:
obj_dir = os.path.sep.join((base_config_path, obj_type))
if not os.path.exists(obj_dir):
logger.info('No %s to restore, skipping.', obj_type)
continue
for obj_filename in os.listdir(obj_dir):
with open(os.path.sep.join((obj_dir, obj_filename))) as obj_file:
payload = obj_file.read()
obj = json.loads(payload)
obj_id = obj['id']
for key in ('id', 'version', 'type', 'updated_at'):
try:
del obj[key]
except KeyError:
logger.info("Could not find key %s in %s[%s]", key, obj_type, obj_id)
logger.info('Restoring %s id:%s (overwrite:%s)', obj_type, obj_id, args.force)
url = "%s/api/saved_objects/%s/%s" % (args.kibana_host.rstrip("/"), obj_type, obj_id)
params = {'overwrite': True} if args.force else {}
post_object_req = requests.post(url,
headers={'content-type': 'application/json',
'kbn-xsrf': 'True'},
params=params,
data=json.dumps(obj))
if post_object_req.status_code == HTTPStatus.OK:
logger.info('%s id:%s restored.', obj_type, obj_id)
else:
                logger.warning(('Something bad happened while restoring %s id:%s.'
                                ' Received status code: %s'),
                               obj_type, obj_id, post_object_req.status_code)
logger.warning('Body: %s', post_object_req.content)
if __name__ == "__main__":
main()
| 40.224138 | 97 | 0.52979 | 510 | 4,666 | 4.703922 | 0.417647 | 0.023343 | 0.020842 | 0.02501 | 0.030013 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004838 | 0.291256 | 4,666 | 115 | 98 | 40.573913 | 0.720593 | 0.256108 | 0 | 0.028169 | 0 | 0 | 0.192117 | 0.007591 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042254 | false | 0 | 0.112676 | 0 | 0.183099 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70f673918fbdca74579f739699016b688ed90c3c | 2,376 | py | Python | graph/adjacency/graph.py | sumitkumar25/Algorithms_Py | 4af74c3e3f868997a2cdfbd145b74420a5d238ca | [
"MIT"
] | null | null | null | graph/adjacency/graph.py | sumitkumar25/Algorithms_Py | 4af74c3e3f868997a2cdfbd145b74420a5d238ca | [
"MIT"
] | null | null | null | graph/adjacency/graph.py | sumitkumar25/Algorithms_Py | 4af74c3e3f868997a2cdfbd145b74420a5d238ca | [
"MIT"
] | null | null | null | from graph.graphConstruct import GraphConstruct
from ds_queue.dsqueue import DsQueue
class Graph:
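    """Adjacency-matrix graph with traversal bookkeeping (color,
    predecessor, distance and discovery/finish times)."""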
def __init__(self):
self.construct = GraphConstruct()
self.color = {}
self.predecessor = {}
self.distance = {}
self.startTime = {}
self.endTime = {}
self.time = 0
    def setGraphMatrix(self, rows, columns, edges, isWeighted=False, directed=False):
        self.adjMat = self.construct.getAdjacency(rows, columns, None)
        # a 1 at index [i][j] represents an edge i --> j
        # for a weighted graph, store a weight object instead of 1/0
        for E in edges:
            self.adjMat[int(list(E)[0])][int(list(E)[1])] = 1
            if not directed:
                # an undirected graph also gets the symmetric edge j --> i
                self.adjMat[int(list(E)[1])][int(list(E)[0])] = 1
    def dfs_matrix(self, s=0):
        # NOTE: DsQueue is consumed FIFO here, so nodes are actually visited
        # in BFS order; use a LIFO stack instead for a true depth-first walk.
        if self.adjMat:
            self.setTraversalAttrs()
            print('\n DFS of adj Matrix \n')
q = DsQueue()
q.enqueue(s)
# self.color[s] = 'grey'
# self.distance[s] = 0
# self.startTime[s] = 0
while not q.isEmpty():
curr = q.dequeue()
if self.color[curr] == 'white':
self.time += 1
self.startTime[curr] = self.time
self.color[curr] = 'grey'
print('greyed ', curr)
for i in range(len(self.adjMat[curr])):
if self.adjMat[curr][i]:
self.predecessor[i] = curr
self.distance[i] = self.distance[curr]+1
q.enqueue(i)
self.color[curr] = 'black'
self.time += 1
self.endTime[curr] = self.time
# helpers
# not optimised for nodes
def setTraversalAttrs(self):
if self.adjMat:
for i in range(len(self.adjMat)):
self.color[i] = 'white'
self.predecessor[i] = None
self.distance[i] = 0
self.startTime[i] = 0
self.endTime[i] = 0
    def printGraph_adjMat(self):
for i in range(len(self.adjMat)):
print(*self.adjMat[i])
def printEdgesFromVertx_adjMat(self, s):
for j in range(len(self.adjMat[s])):
if self.adjMat[s][j]:
print(s, '--->', j)
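
# Minimal usage sketch, kept as comments (it assumes GraphConstruct.getAdjacency
# returns a rows x columns matrix of zeros and that DsQueue is a FIFO queue;
# the edge strings are illustrative):
#
#   g = Graph()
#   g.setGraphMatrix(4, 4, ['01', '02', '13', '23'])
#   g.printGraph_adjMat()
#   g.dfs_matrix(0)
#   g.printEdgesFromVertx_adjMat(0)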
| 33.942857 | 85 | 0.490741 | 272 | 2,376 | 4.257353 | 0.268382 | 0.103627 | 0.027634 | 0.048359 | 0.110535 | 0.062176 | 0.062176 | 0 | 0 | 0 | 0 | 0.013032 | 0.386364 | 2,376 | 69 | 86 | 34.434783 | 0.781207 | 0.080808 | 0 | 0.113208 | 0 | 0 | 0.024357 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0 | 0.037736 | 0 | 0.169811 | 0.113208 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70f9a09d12b7a58109c3cbad3a916c219206c741 | 517 | py | Python | EcoFin/math/utils.py | LucaCamerani/EcoFin-library | ad8d628e0d447d1b5e8d3b16610d382e7df086e1 | [
"BSD-4-Clause"
] | 9 | 2020-09-13T11:53:19.000Z | 2022-03-31T16:30:56.000Z | EcoFin/math/utils.py | LucaCamerani/EcoFin-library | ad8d628e0d447d1b5e8d3b16610d382e7df086e1 | [
"BSD-4-Clause"
] | null | null | null | EcoFin/math/utils.py | LucaCamerani/EcoFin-library | ad8d628e0d447d1b5e8d3b16610d382e7df086e1 | [
"BSD-4-Clause"
] | 2 | 2021-03-05T13:45:46.000Z | 2021-07-19T20:38:27.000Z | """
equity.py
Created by Luca Camerani at 31/08/2020, University of Milano-Bicocca.
(l.camerani@campus.unimib.it)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
import numpy as np
def findNearest(array, value):
    if sum(np.isnan(array)) == len(array):
        return None
    else:
        array = np.asarray(array)
        # nanargmin skips any remaining NaNs; plain argmin would return
        # the index of the first NaN in a mixed array
        idx = np.nanargmin(np.abs(array - value))
        return array[idx]
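
# Illustrative behaviour (not part of the library):
#   findNearest([1.0, 4.0, np.nan], 3.2)  -> 4.0
#   findNearest([np.nan, np.nan], 1.0)    -> None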
| 23.5 | 89 | 0.678917 | 76 | 517 | 4.618421 | 0.763158 | 0.074074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019277 | 0.197292 | 517 | 21 | 90 | 24.619048 | 0.826506 | 0.531915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70fa22ba2c0da02145058f3796086ae60b56c582 | 706 | py | Python | detect.py | jskrzypek/flask-video-streaming | 3c1fdff06f794f80e5d259f15bb6bbf4992e12c2 | [
"MIT"
] | null | null | null | detect.py | jskrzypek/flask-video-streaming | 3c1fdff06f794f80e5d259f15bb6bbf4992e12c2 | [
"MIT"
] | null | null | null | detect.py | jskrzypek/flask-video-streaming | 3c1fdff06f794f80e5d259f15bb6bbf4992e12c2 | [
"MIT"
] | null | null | null | #import numpy as np
import cv2
import os
print("Path at terminal when executing this file")
print(os.getcwd())
# Load our cascade classifier from banana_classifier.xml
car_cascade = cv2.CascadeClassifier(r'classifier/banana_classifier.xml')
image = cv2.imread('images/image1.jpg')
# Crop so that only the roads remain, eliminating the distraction.
#image = image[120:,:-20]
# Use the cascade classifier to detect bananas; the parameters may have
# to be tuned for fewer false positives.
cars = car_cascade.detectMultiScale(image, 1.1, 2)
for (x, y, w, h) in cars:
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
print('Processing', 1, ' : bananas detected : ', len(cars))
cv2.imwrite('images/'+ 'processed.jpg', image)
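
# If false positives remain, raising the scale factor and the minimum
# neighbour count usually helps; the values below are only a starting point,
# not tuned for this classifier:
#   cars = car_cascade.detectMultiScale(image, scaleFactor=1.3, minNeighbors=5)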
| 27.153846 | 72 | 0.725212 | 112 | 706 | 4.544643 | 0.633929 | 0.066798 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036007 | 0.134561 | 706 | 25 | 73 | 28.24 | 0.797054 | 0.351275 | 0 | 0 | 0 | 0 | 0.314856 | 0.070953 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70fb0db0bb527d0e7f0c61e7486ebafc3851e661 | 2,420 | py | Python | tests/func/test_state.py | zb0th/dvc | 5fdbc1882f73162419d5b84ed47a33e9e321f151 | [
"Apache-2.0"
] | null | null | null | tests/func/test_state.py | zb0th/dvc | 5fdbc1882f73162419d5b84ed47a33e9e321f151 | [
"Apache-2.0"
] | null | null | null | tests/func/test_state.py | zb0th/dvc | 5fdbc1882f73162419d5b84ed47a33e9e321f151 | [
"Apache-2.0"
] | null | null | null | import os
import mock
from dvc.path.local import PathLOCAL
from dvc.utils.compat import str
from dvc.state import State
from dvc.utils import file_md5
from dvc.main import main
from dvc.utils.fs import get_inode
from tests.basic_env import TestDvc
class TestState(TestDvc):
def test_update(self):
path = os.path.join(self.dvc.root_dir, self.FOO)
path_info = PathLOCAL(path=path)
md5 = file_md5(path)[0]
state = State(self.dvc, self.dvc.config.config)
with state:
state.save(path_info, md5)
entry_md5 = state.get(path_info)
self.assertEqual(entry_md5, md5)
os.unlink(path)
with open(path, "a") as fd:
fd.write("1")
entry_md5 = state.get(path_info)
self.assertTrue(entry_md5 is None)
md5 = file_md5(path)[0]
state.save(path_info, md5)
entry_md5 = state.get(path_info)
self.assertEqual(entry_md5, md5)
class TestStateOverflow(TestDvc):
def test(self):
# NOTE: trying to add more entries than state can handle,
# to see if it will clean up and vacuum successfully
ret = main(["config", "state.row_limit", "10"])
self.assertEqual(ret, 0)
dname = "dir"
os.mkdir(dname)
for i in range(20):
with open(os.path.join(dname, str(i)), "w+") as fobj:
fobj.write(str(i))
ret = main(["add", "dir"])
self.assertEqual(ret, 0)
class TestGetStateRecordForInode(TestDvc):
@staticmethod
def mock_get_inode(special_path, special_value):
def get_inode_mocked(path):
if path == special_path:
return special_value
else:
return get_inode(path)
return get_inode_mocked
@mock.patch("dvc.state.get_inode", autospec=True)
def test_transforms_inode(self, get_inode_mock):
state = State(self.dvc, self.dvc.config.config)
inode = state.MAX_INT + 2
self.assertNotEqual(inode, state._to_sqlite(inode))
path = os.path.join(self.dvc.root_dir, self.FOO)
md5 = file_md5(path)[0]
get_inode_mock.side_effect = self.mock_get_inode(path, inode)
with state:
state.save(PathLOCAL(path=path), md5)
ret = state.get_state_record_for_inode(inode)
self.assertIsNotNone(ret)
| 28.809524 | 69 | 0.61405 | 327 | 2,420 | 4.391437 | 0.308869 | 0.050139 | 0.02507 | 0.029248 | 0.247214 | 0.236769 | 0.215877 | 0.196379 | 0.14624 | 0.14624 | 0 | 0.016734 | 0.283884 | 2,420 | 83 | 70 | 29.156627 | 0.811887 | 0.043802 | 0 | 0.3 | 0 | 0 | 0.023799 | 0 | 0 | 0 | 0 | 0 | 0.116667 | 1 | 0.083333 | false | 0 | 0.15 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70fbcf54b139dc159c48738902fa8742a6e212ae | 3,412 | py | Python | app/models/__init__.py | OhBonsai/flask-boilerplate | 51c165e19ec47cf3aeee5c20ed12093a87131af7 | [
"Apache-2.0"
] | 2 | 2019-01-21T05:44:48.000Z | 2021-06-02T20:18:39.000Z | app/models/__init__.py | OhBonsai/flask-boilerplate | 51c165e19ec47cf3aeee5c20ed12093a87131af7 | [
"Apache-2.0"
] | null | null | null | app/models/__init__.py | OhBonsai/flask-boilerplate | 51c165e19ec47cf3aeee5c20ed12093a87131af7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Created by OhBonsai at 2018/3/7
"""This package handles setting up and providing the database connection."""
from flask_login import current_user
from flask_sqlalchemy import BaseQuery, SQLAlchemy, Model
from flask_restful import reqparse
from sqlalchemy import (
Column,
DateTime,
func,
Integer
)
from sqlalchemy.ext.declarative import declared_attr
from app.api.errors import NoPermission
class AclBaseQuery(BaseQuery):
"""query_cls = AclBaseQuery. So All query in orm will execute get_with_acl"""
def get_with_acl(self, model_id):
""" Get a instance with permission
:param model_id: the integer pk id of model
:return: model instance
"""
obj = self.get(model_id)
if not obj:
raise LookupError
try:
if obj.get_status.status == 'deleted':
raise LookupError
except AttributeError:
# It doesn't matter when model hadn't status field
pass
if obj.is_public:
return obj
if not obj.has_permission(user=current_user, permission='read'):
raise NoPermission
return obj
class Pager(object):
"""Util for paginate"""
parser = reqparse.RequestParser()\
.add_argument('page', type=int, default=1, store_missing=True)\
.add_argument('size', type=int, default=10, store_missing=True)
def __init__(self, page, count, size):
self.count = count
self.size = size
self.page = page
self.offset = (page - 1) * size
@classmethod
def paginate(cls, query):
params = cls.parser.parse_args()
count = query.count()
page = params.get('page')
size = params.get('size')
pager = cls(page, count, size)
return query.limit(size).offset(pager.offset), pager
@property
def args(self):
return {
'total': self.count,
'size': self.size,
'page': self.page,
'offset': self.offset
}
class BaseModel(Model):
"""Base class of models, It adds common models which are `id`, `created_at`, `updated_at`
And provide common method `get_or_create`
"""
@declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()
id = Column(Integer, primary_key=True)
created_at = Column(DateTime(), default=func.now())
updated_at = Column(DateTime(), default=func.now(), onupdate=func.now())
@classmethod
def get_or_create(cls, **kwargs):
"""Get or create a database object.
:param kwargs: model field:value dict
:return: a model instance
"""
instance = cls.query.filter_by(**kwargs).first()
if not instance:
instance = cls(**kwargs)
db.session.add(instance)
db.session.commit()
return instance
    @classmethod
    def get(cls, id):
        # Model.query is a query property, not a callable
        return cls.query.get(id)
    @classmethod
    def exists(cls, **kw):
        return cls.query.filter_by(**kw).first() is not None
def apply_kwargs(self, kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
return self
db = SQLAlchemy(model_class=BaseModel,
query_class=AclBaseQuery,
session_options=dict(expire_on_commit=False))
from .user import User, Group
from .blog import Post
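
# Illustrative use of the helpers above, kept as comments (it assumes an
# application/request context and that `User` has a `name` column; the
# values are for demonstration only):
#
#   user = User.get_or_create(name='bonsai')    # fetches or inserts
#   query, pager = Pager.paginate(Post.query)   # sliced query + page meta
#   print(pager.args)                           # {'total': ..., 'size': ..., ...}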
| 27.079365 | 93 | 0.614889 | 416 | 3,412 | 4.920673 | 0.377404 | 0.027357 | 0.016121 | 0.022472 | 0.029311 | 0.029311 | 0 | 0 | 0 | 0 | 0 | 0.004493 | 0.282532 | 3,412 | 125 | 94 | 27.296 | 0.831699 | 0.171161 | 0 | 0.098765 | 0 | 0 | 0.016819 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0.012346 | 0.098765 | 0.049383 | 0.407407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb018fd4a881782e1f3860da6f9cd15e6e97323d | 607 | py | Python | datastructures/trees/kth_smallest_element_in_bst.py | sikakente/educative-io-python | be6e6c3534bf76e6f77addce16d1ab0c40e3e48d | [
"MIT"
] | 1 | 2021-12-28T21:19:53.000Z | 2021-12-28T21:19:53.000Z | datastructures/trees/kth_smallest_element_in_bst.py | sikakente/educative-io-python | be6e6c3534bf76e6f77addce16d1ab0c40e3e48d | [
"MIT"
] | 72 | 2022-02-01T18:18:47.000Z | 2022-03-13T12:31:26.000Z | datastructures/trees/kth_smallest_element_in_bst.py | sikakente/educative-io-python | be6e6c3534bf76e6f77addce16d1ab0c40e3e48d | [
"MIT"
] | null | null | null | """
Problem Statement
----------------
Given the root of a binary search tree, and an integer k, return the kth smallest value (1-indexed) of all the values of the nodes in the tree.
Input
-----
tree node
Output
-------
the kth smallest value
"""
def kth_smallest_element(root, k):
def inorder(node, accumulator):
if node:
inorder(node.left, accumulator)
accumulator.append(node.value)
inorder(node.right, accumulator)
return accumulator
return inorder(root, [])[k - 1]
if __name__ == '__main__':
import doctest
doctest.testmod()
| 18.96875 | 143 | 0.630972 | 77 | 607 | 4.844156 | 0.519481 | 0.088472 | 0.075067 | 0.101877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004348 | 0.242175 | 607 | 31 | 144 | 19.580645 | 0.806522 | 0.397035 | 0 | 0 | 0 | 0 | 0.022346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb02554472835836c33f41fca99bf7eb82a8aedb | 13,685 | py | Python | 5-Use-Spark/Jupyter-Notebooks/aws_databricks_notebooks/8-SparkML.py | jayshilj/learning-hadoop-and-spark | c1327f9573f1bff830ed7c2489b86aff5fdf8abf | [
"Apache-2.0"
] | 121 | 2020-02-16T17:18:10.000Z | 2022-03-29T07:28:05.000Z | 5-Use-Spark/Jupyter-Notebooks/aws_databricks_notebooks/8-SparkML.py | jayshilj/learning-hadoop-and-spark | c1327f9573f1bff830ed7c2489b86aff5fdf8abf | [
"Apache-2.0"
] | 1 | 2020-06-17T23:37:10.000Z | 2020-06-17T23:37:10.000Z | 5-Use-Spark/Jupyter-Notebooks/aws_databricks_notebooks/8-SparkML.py | jayshilj/learning-hadoop-and-spark | c1327f9573f1bff830ed7c2489b86aff5fdf8abf | [
"Apache-2.0"
] | 99 | 2020-03-21T08:55:41.000Z | 2022-03-12T02:42:21.000Z | # Databricks notebook source
# MAGIC %md ## Apache Spark MLLib
# MAGIC Let's return to the farmer's market dataset and use Spark to explore the hypothesis:
# MAGIC * The number of farmer's markets in a given zip code can be predicted from the income and taxes paid in a given area.
# MAGIC
# MAGIC There are several steps to this process:
# MAGIC 1. **Part One - Load and prepare the data**
# MAGIC * Verify and/or load table data
# MAGIC * Prepare the data by aggregating, grouping and counting table data values
# MAGIC * Join data from the prepared tables
# MAGIC * Convert 'null' values in the joined data to '0'
# MAGIC 2. **Part Two - Use the Spark ML Library**
# MAGIC * Create and display a vector with the the features you'd like to explore in a scatterplot
# MAGIC * Split the dataset into testing and training sets, cache both and call an action to load the cache
# MAGIC * Create a linear regression model and fit the model with your training data
# MAGIC * Use your model by calling predict on it
# MAGIC * Evaluate and update your model
# MAGIC * Train and use the most optimal model
# COMMAND ----------
# MAGIC %md ### Part One - Load and Prepare the data
# MAGIC * Load the table `cleaned_taxes` into a dataframe (created in previous exercise)
# COMMAND ----------
cleanedTaxes = sqlContext.sql("SELECT * FROM cleaned_taxes")
cleanedTaxes.show()
# COMMAND ----------
# MAGIC %md NOTE: If the table did NOT load, then run the next couple of cells to re-load the data.
# COMMAND ----------
# taxes2013 = spark.read\
# .option("header", "true")\
# .csv("dbfs:/databricks-datasets/data.gov/irs_zip_code_data/data-001/2013_soi_zipcode_agi.csv")
# taxes2013.createOrReplaceTempView("taxes2013")
# COMMAND ----------
# %sql
# DROP TABLE IF EXISTS cleaned_taxes;
# CREATE TABLE cleaned_taxes AS
# SELECT
# state,
# int(zipcode / 10) as zipcode,
# int(mars1) as single_returns,
# int(mars2) as joint_returns,
# int(numdep) as numdep,
# double(A02650) as total_income_amount,
# double(A00300) as taxable_interest_amount,
# double(a01000) as net_capital_gains,
# double(a00900) as biz_net_income
# FROM taxes2013
# COMMAND ----------
# MAGIC %md * Load the market dataset to a permanent table named `markets`
# COMMAND ----------
markets = spark.read\
.option("header", "true")\
.csv("dbfs:/databricks-datasets/data.gov/farmers_markets_geographic_data/data-001/market_data.csv")
# COMMAND ----------
# MAGIC %md
# MAGIC * Use `sum` to aggregate all the columns in the `cleanedTaxes` dataset -- NOTE: Some data will be nonsense (i.e. summing zipcode) but other data could become useful features (i.e. summing AGI in the zipcode).
# MAGIC * Group the `cleanedTaxes` dataframe by zipcode, then `sum` to aggregate across all columns.
# MAGIC * Save the resulting dataframe in `summedTaxes`
# MAGIC * `show` the `summedTaxes` dataframe
# COMMAND ----------
summedTaxes = cleanedTaxes\
.groupBy("zipcode")\
.sum()
summedTaxes.show()
# COMMAND ----------
# MAGIC %md Group the market data into buckets and count the number of farmer's markets in each bucket.
# MAGIC
# MAGIC * Use `selectExpr` to transform the market data into labels that identify which zip group they belong to (we used `int(zip/10)` to group the tax data) and call the new value `zipcode`. `selectExpr` is short for "Select Expression" and supports operations similar to SQL statements.
# MAGIC * Group by the `zipcode` you just created, then `count` the groups.
# MAGIC * Use another `selectExpr` to transform the data, you only need to keep the `count` and the `zipcode as zip`.
# MAGIC * Store the results in a new dataset called `cleanedMarkets`.
# MAGIC * `show` `cleanedMarkets`
# COMMAND ----------
cleanedMarkets = markets\
.selectExpr("*", "int(zip / 10) as zipcode")\
.groupBy("zipcode")\
.count()\
.selectExpr("double(count) as count", "zipcode as zip")
cleanedMarkets.show()
# COMMAND ----------
# MAGIC %md Join the two cleaned datasets into one dataset for analysis.
# MAGIC
# MAGIC * Outer join `cleanedMarkets` to `summedTaxes` using `zip` and `zipcode` as the join variable.
# MAGIC * Name the resulting dataset `joined`.
# COMMAND ----------
joined = cleanedMarkets\
.join(summedTaxes, cleanedMarkets["zip"] == summedTaxes["zipcode"], "outer")
# COMMAND ----------
# MAGIC %md * `display` the `joined` data - do you see the 'null' values?
# COMMAND ----------
display(joined)
# COMMAND ----------
# MAGIC %md MLLib doesn't allow null values. These values came up as `null` in the join because there were no farmer's markets in that zip code "basket". It makes sense to replace the `null` values with zeros.
# MAGIC * Use the `na` prefix to `fill` the empty cells with `0`.
# MAGIC * Name the resulting dataset `prepped` and `display` it.
# COMMAND ----------
prepped = joined.na.fill(0)
display(prepped)
# COMMAND ----------
# MAGIC %md ### Part Two - Use MLLib with Spark
# MAGIC * Put all the features into a single vector.
# MAGIC * Create an array to list the names of all the **non-feature** columns: `zip`, `zipcode`, `count`, call it `nonFeatureCols`.
# MAGIC * Create a list of names called `featureCols` which excludes the columns in `nonFeatureCols`.
# MAGIC * `print` the `featureCols`.
# COMMAND ----------
nonFeatureCols = {'zip', 'zipcode', 'count'}
featureCols = [column for column in prepped.columns if column not in nonFeatureCols]
print(featureCols)
# COMMAND ----------
# MAGIC %md * Use the `VectorAssembler` from `pyspark.ml.feature` to add a `features` vector to the `prepped` dataset.
# MAGIC * Call the new dataset `finalPrep`, then `display` only the `zipcode` and `features` from `finalPrep`.
# COMMAND ----------
from pyspark.ml.feature import VectorAssembler
assembler = VectorAssembler(
inputCols=[column for column in featureCols],
outputCol='features')
finalPrep = assembler.transform(prepped)
display(finalPrep.select('zipcode', 'features'))
# COMMAND ----------
# MAGIC %md * Display the feature columns graphed out against each other as a scatter plot (hint: exclude `zip`, `zipcode` and `features` using `drop`)
# COMMAND ----------
display(finalPrep.drop("zip").drop("zipcode").drop("features"))
# COMMAND ----------
# MAGIC %md * Split the `finalPrep` data set into training and testing subsets. The sets should be randomly selected, 70 percent of the samples should go into the `training` set, and 30 percent should go into the `test` set.
# MAGIC * Cache `training` and `test`.
# MAGIC * Perform an action such as `count` to populate the cache.
# COMMAND ----------
(training, test) = finalPrep.randomSplit((0.7, 0.3))
training.cache()
test.cache()
print(training.count())
print(test.count())
# COMMAND ----------
# MAGIC %md Spark MLLib supports both `regressors` and `classifiers`; in this example you will use linear regression. Once you create the `regressor` you will train it, and it will return a `Model`. The `Model` will be the object you use to make predictions.
# MAGIC
# MAGIC * Create an instance of the `LinearRegression` algorithm called `lrModel`:
# MAGIC * Set the label column to "count"
# MAGIC * Set the features column to "features"
# MAGIC * Set the "ElasticNetParam" to 0.5 (this controlls the mix of l1 and l2 regularization--we'll just use an equal amount of each)
# MAGIC * Print the results of calling `explainParams` on `lrModel`. This will show you all the possible parameters, and whether or not you have customized them.
# COMMAND ----------
from pyspark.ml.regression import LinearRegression
lrModel = LinearRegression()\
.setLabelCol("count")\
.setFeaturesCol("features")\
.setElasticNetParam(0.5)
print("Printing out the model Parameters:")
print("-"*20)
print(lrModel.explainParams())
print("-"*20)
# COMMAND ----------
# MAGIC %md
# MAGIC * Use the `fit` method on `lrModel` to provide the `training` dataset for fitting.
# MAGIC * Store the results in `lrFitted`.
# COMMAND ----------
lrFitted = lrModel.fit(training)
# COMMAND ----------
# MAGIC %md
# MAGIC * Make a prediction by using the `transform` method on `lrFitted`, passing it the `test` dataset.
# MAGIC * Store the results in `holdout`.
# MAGIC * `transform` adds a new column called "prediction" to the data we passed into it.
# MAGIC * Display the `prediction` and `count` from `holdout`
# COMMAND ----------
holdout = lrFitted.transform(test)
display(holdout.select("prediction", "count"))
# COMMAND ----------
# MAGIC %md The `transform` method shows us how many farmer's markets the `lrFitted` model predicts there will be in each zip code based on the features we provided. The raw predictions are not rounded at all.
# MAGIC
# MAGIC * Use a `selectExpr` to relabel `prediction` as `raw_prediction`.
# MAGIC * `round` the `prediction` and call it `prediction` inside the expression
# MAGIC * Select `count` for comparison purposes.
# MAGIC * Create a column called `equal` that will let us know if the model predicted correctly.
# MAGIC
# COMMAND ----------
holdout = holdout.selectExpr(\
"prediction as raw_prediction", \
"double(round(prediction)) as prediction", \
"count", \
"""CASE double(round(prediction)) = count
WHEN true then 1
ELSE 0
END as equal""")
display(holdout)
# COMMAND ----------
# MAGIC %md * Use another `selectExpr` to `display` the proportion of predictions that were exactly correct.
# COMMAND ----------
display(holdout.selectExpr("sum(equal)/sum(1)"))
# COMMAND ----------
# MAGIC %md * Use `RegressionMetrics` to get more insight into the model performance. NOTE: Regression metrics requires input formatted as tuples of `double`s where the first item is the `prediction` and the second item is the observation (in this case the observation is `count`). Once you have `map`ped these values from `holdout` you can directly pass them to the `RegressionMetrics` constructor.
# COMMAND ----------
from pyspark.mllib.evaluation import RegressionMetrics
mapped = holdout.select("prediction", "count").rdd.map(lambda x: (float(x[0]), float(x[1])))
rm = RegressionMetrics(mapped)
print ("MSE: ", rm.meanSquaredError)
print ("MAE: ", rm.meanAbsoluteError)
print ("RMSE Squared: ", rm.rootMeanSquaredError)
print ("R Squared: ", rm.r2)
print ("Explained Variance: ", rm.explainedVariance)
# COMMAND ----------
# MAGIC %md Because these results still aren't very good, let's train several models using a pipeline rather than training a single one.
# MAGIC
# MAGIC * Use a `RandomForestRegressor` algorithm. This algorithm has several `hyperparameters` that we can tune, rather than tune them individually, we will use a `ParamGridBuilder` to search the "hyperparameter space" for us. This can take some time on small clusters, so be patient.
# MAGIC
# MAGIC * Use the `Pipeline` to feed the algorithm into a `CrossValidator` to help prevent "overfitting".
# MAGIC * Use the `CrossValidator` uses a `RegressionEvaluator` to test the model results against a metric (default is RMSE).
# MAGIC
# MAGIC * NOTE: In production, using an AWS EC2 compute-optimized instance speeds this up -- 3 min (c3.4xlarge) vs 10 min (r3.xlarge)
# COMMAND ----------
from pyspark.ml.regression import RandomForestRegressor
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import RegressionEvaluator
rfModel = RandomForestRegressor()\
.setLabelCol("count")\
.setFeaturesCol("features")
paramGrid = ParamGridBuilder()\
.addGrid(rfModel.maxDepth, [5, 10])\
.addGrid(rfModel.numTrees, [20, 60])\
.build()
steps = [rfModel]
pipeline = Pipeline().setStages(steps)
cv = CrossValidator()\
.setEstimator(pipeline)\
.setEstimatorParamMaps(paramGrid)\
.setEvaluator(RegressionEvaluator().setLabelCol("count"))
pipelineFitted = cv.fit(training)
# COMMAND ----------
# MAGIC %md * Access the best model on the `pipelineFitted` object by accessing the first stage of the `bestModel` attribute.
# COMMAND ----------
print("The Best Parameters:\n--------------------")
print(pipelineFitted.bestModel.stages[0])
# COMMAND ----------
# MAGIC %md * Use the `bestModel` to `transform` the `test` dataset.
# MAGIC * Use a `selectExpr` to show the raw prediction, rounded prediction, count, and whether or not the prediction exactly matched (hint: this is the same `selectExpr` you used on the previous model results).
# MAGIC * Store the results in `holdout2`, then display.
# COMMAND ----------
holdout2 = pipelineFitted.bestModel\
.transform(test)\
.selectExpr("prediction as raw_prediction", \
"double(round(prediction)) as prediction", \
"count", \
"""CASE double(round(prediction)) = count
WHEN true then 1
ELSE 0
END as equal""")
display(holdout2)
# COMMAND ----------
# MAGIC %md * Show the `RegressionMetrics` for the new model results.
# COMMAND ----------
from pyspark.mllib.evaluation import RegressionMetrics
mapped2 = holdout2.select("prediction", "count").rdd.map(lambda x: (float(x[0]), float(x[1])))
rm2 = RegressionMetrics(mapped2)
print ("MSE: ", rm2.meanSquaredError)
print ("MAE: ", rm2.meanAbsoluteError)
print ("RMSE Squared: ", rm2.rootMeanSquaredError)
print ("R Squared: ", rm2.r2)
print ("Explained Variance: ", rm2.explainedVariance)
# COMMAND ----------
# MAGIC %md * See if there an improvement in the "exactly right" proportion.
# COMMAND ----------
display(holdout2.selectExpr("sum(equal)/sum(1)"))
| 36.493333 | 400 | 0.693168 | 1,809 | 13,685 | 5.228856 | 0.260365 | 0.017761 | 0.034042 | 0.008458 | 0.127075 | 0.083518 | 0.075907 | 0.058357 | 0.05138 | 0.05138 | 0 | 0.010347 | 0.173694 | 13,685 | 374 | 401 | 36.590909 | 0.826141 | 0.677457 | 0 | 0.122449 | 0 | 0 | 0.179674 | 0.043664 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.081633 | 0 | 0.081633 | 0.193878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb043514e63c8cfdd3fa49afc65ccc94cff33698 | 581 | py | Python | gui.py | arthur-ronconi/woodpecker | ce5cf771c77706b76369bc70d007df4af16af164 | [
"MIT"
] | 1 | 2020-03-03T02:14:45.000Z | 2020-03-03T02:14:45.000Z | gui.py | arthur-ronconi/woodpecker | ce5cf771c77706b76369bc70d007df4af16af164 | [
"MIT"
] | null | null | null | gui.py | arthur-ronconi/woodpecker | ce5cf771c77706b76369bc70d007df4af16af164 | [
"MIT"
] | null | null | null | import tkinter as tk
import likert
import config
def runScreen():
window = tk.Tk()
window.geometry("300x300")
window.title("Woodpecker")
tk.Label(text="Woodpecker", height="3", font=(
"Ubuntu", 24), fg="#0280F7").pack()
    btn = tk.Button
    # .pack() returns None, so don't keep its result as the widget handle
    btn(text="Run", command=run, width="25", relief="flat",
        fg="white", bg="#0280F7").pack()
    btn(text="Quit", command=window.destroy, width="25", relief="flat",
        fg="white", bg="#CC3B3B").pack()
window.mainloop()
def run():
    # `app` is not defined in this module; it is assumed to be provided by
    # one of the imports above (e.g. an app object from the `likert` module).
    app.run()
runScreen()
| 23.24 | 81 | 0.583477 | 72 | 581 | 4.708333 | 0.555556 | 0.058997 | 0.076696 | 0.100295 | 0.153392 | 0.153392 | 0.153392 | 0 | 0 | 0 | 0 | 0.055432 | 0.223752 | 581 | 24 | 82 | 24.208333 | 0.696231 | 0 | 0 | 0 | 0 | 0 | 0.144578 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb082a506171bb7d8832a2d70f6fb1e61b3c4912 | 4,167 | py | Python | 184431/shooter_game.py | Ili-py/Shooter- | b02759496e90c1571607fb66ffb67384ece41cde | [
"CC0-1.0"
] | null | null | null | 184431/shooter_game.py | Ili-py/Shooter- | b02759496e90c1571607fb66ffb67384ece41cde | [
"CC0-1.0"
] | null | null | null | 184431/shooter_game.py | Ili-py/Shooter- | b02759496e90c1571607fb66ffb67384ece41cde | [
"CC0-1.0"
] | null | null | null |
from pygame import *
from random import randint
from time import time as timer
class GameSprite(sprite.Sprite):
def __init__(self, player_image, player_x, player_y,size_x,size_y,player_speed):
super().__init__()
self.image = transform.scale(image.load(player_image), (size_x,size_y))
self.speed = player_speed
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
def reset(self):
window.blit(self.image,(self.rect.x, self.rect.y))
class Player(GameSprite):
def update(self):
keys = key.get_pressed()
if keys [K_LEFT]and self.rect.x >10:
self.rect.x -= self.speed
if keys [K_RIGHT]and self.rect.x < 630:
self.rect.x += self.speed
def fire(self):
bullet = Bullet('bullet.png', self.rect.centerx, self.rect.top, 20,20,20)
bullets.add(bullet)
class Enemy(GameSprite):
def update(self):
self.rect.y += self.speed
global lost
if self.rect.y > 500:
self.rect.x = randint(0,620)
self.rect.y = 0
lost += 1
class Bullet(GameSprite):
def update(self):
self.rect.y -= self.speed
if self.rect.y < 0:
self.kill()
window = display.set_mode((700,500))
display.set_caption('Pygame Window')
background = transform.scale(image.load('fon.jpg'),(700,500))
player = Player('stard.png',500,410,150,150,10)
game = True
finish = False
clock = time.Clock()
FPS = 60
mixer.init()
mixer.music.load('space.ogg')
mixer.music.play()
lost = 0
score = 0
font.init()
font1 = font.SysFont('Arial',36)
bullets = sprite.Group()
num_fire = 0
reck_time = False
life = 5
monsters = sprite.Group()
for i in range(5):
monster = Enemy('ship2.png',randint(0,620),0,65,65,randint(1,3))
monsters.add(monster)
while game:
for e in event.get():
if e.type == QUIT:
game = False
elif e.type == KEYDOWN:
if e.key == K_SPACE:
if num_fire <= 5 and reck_time == False:
num_fire = num_fire + 1
player.fire()
elif num_fire >= 5 and reck_time == False:
last_time = timer()
reck_time = True
if finish != True:
window.blit(background,(0,0))
player.update()
player.reset()
monsters.draw(window)
monsters.update()
bullets.update()
bullets.draw(window)
if reck_time == True:
now_time = timer()
if now_time - last_time <1:
text_reload = font1.render('ПЕРЕЗАРЯДКА',1,(255,255,255))
window.blit(text_reload,(250,450))
else:
reck_time = False
num_fire = 0
text_life = font1.render('Жизни:'+ str(life), 1,(255,255,255))
window.blit(text_life,(10,10))
text_score = font1.render('Сбито:'+ str(score),1,(255,255,255))
window.blit(text_score,(10,40))
collides = sprite.groupcollide(monsters,bullets,True,True)
for coll in collides:
monster = Enemy('ship2.png',randint(0,620),0,65,65,randint(1,3))
monsters.add(monster)
score += 1
if sprite.spritecollide(player,monsters,True):
life = life - 1
monster = Enemy('ship2.png',randint(0,620),0,65,65,randint(1,3))
monsters.add(monster)
if score >=10:
finish = True
win = font1.render(' YOU WIN!!! ',1,(255,0,0))
window.blit(win,(250,250))
if life == 0:
finish = True
text_lose1 = font1.render('You Lose =(',1,(255,0,0))
window.blit(text_lose1,(250,250))
display.update()
else:
finish = False
score = 0
num_fire = 0
life = 5
for bullet in bullets:
bullet.kill()
for monster in monsters:
monster.kill()
for i in range(5):
monster = Enemy('ship2.png',randint(0,600),0,65,65,randint(1,3))
monsters.add(monster)
clock.tick(FPS) | 26.04375 | 84 | 0.557475 | 562 | 4,167 | 4.040925 | 0.233096 | 0.059886 | 0.027741 | 0.035227 | 0.250991 | 0.22325 | 0.209159 | 0.156319 | 0.156319 | 0.106121 | 0 | 0.068127 | 0.309575 | 4,167 | 160 | 85 | 26.04375 | 0.721237 | 0 | 0 | 0.221311 | 0 | 0 | 0.032397 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04918 | false | 0 | 0.02459 | 0 | 0.106557 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb092df57c51b4f56349fb8e8b6983b120fe34cd | 270 | py | Python | relay.py | sankao/rpi_ventilator | c6ada37f17267f80e4c54ca8ecb4b1b7a1385151 | [
"MIT"
] | null | null | null | relay.py | sankao/rpi_ventilator | c6ada37f17267f80e4c54ca8ecb4b1b7a1385151 | [
"MIT"
] | null | null | null | relay.py | sankao/rpi_ventilator | c6ada37f17267f80e4c54ca8ecb4b1b7a1385151 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import piplates.RELAYplate as rel
relay_id = int(sys.argv[1])
is_on = int(sys.argv[2])
on_off = 'on' if is_on else 'off'
print ('relay : ',relay_id,' ',on_off)
if is_on:
rel.relayON(0, relay_id)
else:
rel.relayOFF(0, relay_id)
| 19.285714 | 38 | 0.681481 | 51 | 270 | 3.431373 | 0.490196 | 0.16 | 0.114286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017621 | 0.159259 | 270 | 13 | 39 | 20.769231 | 0.753304 | 0.074074 | 0 | 0 | 0 | 0 | 0.05668 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb0a9eba4249d2b3fac8d37e5e649d54503dcf16 | 2,068 | py | Python | effunet/unet.py | pranshu97/effunet | 71dfae6a7a2a9558b08389ea8025e9477e18b32a | [
"MIT"
] | 19 | 2020-12-17T17:01:55.000Z | 2022-02-08T10:10:05.000Z | effunet/unet.py | pranshu97/effunet | 71dfae6a7a2a9558b08389ea8025e9477e18b32a | [
"MIT"
] | 2 | 2020-12-24T14:02:52.000Z | 2021-05-27T06:35:50.000Z | effunet/unet.py | pranshu97/effunet | 71dfae6a7a2a9558b08389ea8025e9477e18b32a | [
"MIT"
] | 3 | 2021-01-19T06:26:24.000Z | 2021-12-06T11:33:35.000Z | import torch
import torch.nn as nn
import torchvision.transforms as T
def double_conv(in_,out_,drop):
conv = nn.Sequential(
nn.Conv2d(in_,out_,kernel_size=3,padding=(1,1)),
nn.ReLU(inplace=True),
nn.Conv2d(out_,out_,kernel_size=3,padding=(1,1)),
nn.ReLU(inplace=True),
nn.Dropout(drop)
)
return conv
def crop(tensor,target_tensor):
target_shape = target_tensor.shape[2]
return T.CenterCrop(target_shape)(tensor)
class UNet(nn.Module):
def __init__(self,dropout=0.1):
super(UNet,self).__init__()
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.enc_conv_1 = double_conv(1,64,dropout)
self.enc_conv_2= double_conv(64,128,dropout)
self.enc_conv_3 = double_conv(128,256,dropout)
self.enc_conv_4 = double_conv(256,512,dropout)
self.enc_conv_5 = double_conv(512,1024,dropout)
self.up_trans_1 = nn.ConvTranspose2d(1024,512,kernel_size=2,stride=2)
self.dec_conv_1 = double_conv(1024,512,dropout)
self.up_trans_2 = nn.ConvTranspose2d(512,256,kernel_size=2,stride=2)
self.dec_conv_2 = double_conv(512,256,dropout)
self.up_trans_3 = nn.ConvTranspose2d(256,128,kernel_size=2,stride=2)
self.dec_conv_3 = double_conv(256,128,dropout)
self.up_trans_4 = nn.ConvTranspose2d(128,64,kernel_size=2,stride=2)
self.dec_conv_4 = double_conv(128,64,dropout)
self.out = nn.Conv2d(64,2,kernel_size=1)
def forward(self, image):
# Encoder
x1 = self.enc_conv_1(image)
x = self.pool(x1)
x2 = self.enc_conv_2(x)
x = self.pool(x2)
x3 = self.enc_conv_3(x)
x = self.pool(x3)
x4 = self.enc_conv_4(x)
x = self.pool(x4)
x = self.enc_conv_5(x)
#Decoder
x = self.up_trans_1(x)
x = self.dec_conv_1(torch.cat([x,crop(x4,x)],axis=1))
x = self.up_trans_2(x)
x = self.dec_conv_2(torch.cat([x,crop(x3,x)],axis=1))
x = self.up_trans_3(x)
x = self.dec_conv_3(torch.cat([x,crop(x2,x)],axis=1))
x = self.up_trans_4(x)
x = self.dec_conv_4(torch.cat([x,crop(x1,x)],axis=1))
#out
x = self.out(x)
return x
if __name__ == '__main__':
    # quick smoke test: one single-channel 576x576 image in, 2-channel map out
    image = torch.rand((1, 1, 576, 576))
    model = UNet()
    out = model(image)
    print(out.shape)
| 26.512821 | 71 | 0.707447 | 386 | 2,068 | 3.562176 | 0.176166 | 0.050909 | 0.08 | 0.061818 | 0.238545 | 0.200727 | 0.184727 | 0.145455 | 0.061091 | 0.061091 | 0 | 0.086908 | 0.132012 | 2,068 | 77 | 72 | 26.857143 | 0.679109 | 0.049323 | 0 | 0.037736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.056604 | 0 | 0.207547 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb0b10b2dccdcb7ffb9d10aeb78f7e52bca58281 | 2,141 | py | Python | data/depth_dataset.py | sherry0037/pytorch-CycleGAN-and-pix2pix | 4e4c09070183c8ddf74cc0c3d499d0b2e9171bfd | [
"BSD-3-Clause"
] | 1 | 2018-12-14T04:50:05.000Z | 2018-12-14T04:50:05.000Z | data/depth_dataset.py | sherry0037/pytorch-CycleGAN-and-pix2pix | 4e4c09070183c8ddf74cc0c3d499d0b2e9171bfd | [
"BSD-3-Clause"
] | null | null | null | data/depth_dataset.py | sherry0037/pytorch-CycleGAN-and-pix2pix | 4e4c09070183c8ddf74cc0c3d499d0b2e9171bfd | [
"BSD-3-Clause"
] | 1 | 2018-12-14T04:50:11.000Z | 2018-12-14T04:50:11.000Z | ### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import os.path
from data.base_dataset import BaseDataset, get_transform
import h5py
import numpy as np
import torch
max_depth = np.inf
class DepthDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.data_dir = os.path.join(opt.dataroot, 'train')
# self.A_paths = sorted(make_dataset(self.dir_A))
self.data_paths = []
self.dataset_size = 0
assert os.path.isdir(self.data_dir), '%s is not a valid directory' % self.data_dir
for root, _, fnames in sorted(os.walk(self.data_dir)):
for fname in fnames:
path = os.path.join(root, fname)
self.data_paths.append(path)
self.dataset_size += 1
self.transform = get_transform(opt)
def __getitem__(self, index):
data_path = self.data_paths[index]
try:
h5f = h5py.File(data_path, "r")
except OSError:
return dict()
rgb = np.array(h5f['rgb'])
depth = np.array(h5f['depth'])
depth = np.dstack((depth, depth, depth))
"""if self.opt.sparse:
rgbd = self.create_sparse_depth(rgb, depth)
rgbd = np.transpose(rgbd, (2, 0, 1))
#print("rgbd", rgbd.shape)
rgbd = torch.tensor(rgbd, dtype=torch.float)
"""
rgb = torch.tensor(rgb, dtype=torch.float)
depth = np.transpose(depth, (2, 0, 1)) # chanel first
depth = torch.tensor(depth, dtype=torch.float)
rgb = self.transform(rgb)
depth = self.transform(depth)
input_dict = {'A': rgb, 'B': depth,
'A_paths': data_path, 'B_paths': data_path}
"""if self.opt.sparse:
input_dict['A'] = rgbd
"""
return input_dict
def __len__(self):
return len(self.data_paths) // self.opt.batch_size * self.opt.batch_size
def name(self):
return 'DepthDataset'
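
# Sketch of an .h5 file this dataset can consume, kept as comments (the field
# names 'rgb' and 'depth' come from __getitem__ above; the array sizes are
# purely illustrative):
#
#   import h5py, numpy as np
#   with h5py.File('train/sample.h5', 'w') as f:
#       f.create_dataset('rgb', data=np.zeros((228, 304, 3), np.float32))
#       f.create_dataset('depth', data=np.zeros((228, 304), np.float32))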
| 33.453125 | 109 | 0.590845 | 283 | 2,141 | 4.332155 | 0.360424 | 0.052202 | 0.035889 | 0.011419 | 0.013051 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013699 | 0.283979 | 2,141 | 63 | 110 | 33.984127 | 0.78604 | 0.106025 | 0 | 0 | 0 | 0 | 0.043179 | 0 | 0 | 0 | 0 | 0 | 0.02439 | 1 | 0.097561 | false | 0 | 0.121951 | 0.04878 | 0.341463 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb0b99f98836cd0103f82d5c2f3f99fe0ad5363b | 574 | py | Python | Python/problem0246.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | Python/problem0246.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | Python/problem0246.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | class Solution:
def isStrobogrammatic(self, num: str) -> bool:
if len(num) == 1:
return num == '0' or num == '8' or num == '1'
if num[-1] == '0':
return False
d = {'0':'0', '6':'9', '8':'8', '9':'6', '1':'1'}
tmp = list(num)
for k in range(len(tmp)):
            try:
                tmp[k] = d[tmp[k]]
            except KeyError:
                return False
tmp.reverse()
return num == ''.join(tmp)
solu = Solution()
# exercise every case; the original reassigned num so only '1' was tested
for num in ('69', '88', '962', '1'):
    print(num, '->', solu.isStrobogrammatic(num))
cb0d53bcb4642526db0d5a1a191124b5d53404d0 | 9,741 | py | Python | sdl2/rwops.py | papagiannakis/py-sdl2 | c8ff267761ce19d7714e72a4a3eb97a375c06fc6 | [
"CC0-1.0"
] | 1 | 2021-09-11T20:54:14.000Z | 2021-09-11T20:54:14.000Z | sdl2/rwops.py | Sahil-pixel/py-sdl2 | e5c8cbaccfda4f20f35f58bc8d00e0f533b30c3b | [
"CC0-1.0"
] | null | null | null | sdl2/rwops.py | Sahil-pixel/py-sdl2 | e5c8cbaccfda4f20f35f58bc8d00e0f533b30c3b | [
"CC0-1.0"
] | null | null | null | import io
import sys
from ctypes import Structure, POINTER, CFUNCTYPE, c_int, c_size_t, c_void_p, \
c_char_p, memmove, string_at, Union, _Pointer
from .dll import _bind, version
from .stdinc import Sint64, Uint8, Uint16, Uint32, Uint64, SDL_bool
__all__ = ["SDL_RWOPS_UNKNOWN", "SDL_RWOPS_WINFILE", "SDL_RWOPS_STDFILE",
"SDL_RWOPS_JNIFILE", "SDL_RWOPS_MEMORY", "SDL_RWOPS_MEMORY_RO",
"SDL_RWops", "SDL_RWFromFile", "SDL_RWFromFP", "SDL_RWFromMem",
"SDL_RWFromConstMem", "SDL_AllocRW", "SDL_FreeRW", "RW_SEEK_SET",
"RW_SEEK_CUR", "RW_SEEK_END", "SDL_RWsize", "SDL_RWseek",
"SDL_RWtell", "SDL_RWread", "SDL_RWwrite", "SDL_RWclose",
"SDL_ReadU8", "SDL_ReadLE16", "SDL_ReadBE16", "SDL_ReadLE32",
"SDL_ReadBE32", "SDL_ReadLE64", "SDL_ReadBE64", "SDL_WriteU8",
"SDL_WriteLE16", "SDL_WriteBE16", "SDL_WriteLE32", "SDL_WriteBE32",
"SDL_WriteLE64", "SDL_WriteBE64", "rw_from_object",
"SDL_LoadFile_RW", "SDL_LoadFile"
]
SDL_RWOPS_UNKNOWN = 0
SDL_RWOPS_WINFILE = 1
SDL_RWOPS_STDFILE = 2
SDL_RWOPS_JNIFILE = 3
SDL_RWOPS_MEMORY = 4
SDL_RWOPS_MEMORY_RO = 5
class SDL_RWops(Structure):
pass
class _hidden(Union):
pass
_sdlsize = CFUNCTYPE(Sint64, POINTER(SDL_RWops))
_sdlseek = CFUNCTYPE(Sint64, POINTER(SDL_RWops), Sint64, c_int)
_sdlread = CFUNCTYPE(c_size_t, POINTER(SDL_RWops), c_void_p, c_size_t, c_size_t)
_sdlwrite = CFUNCTYPE(c_size_t, POINTER(SDL_RWops), c_void_p, c_size_t, c_size_t)
_sdlclose = CFUNCTYPE(c_int, POINTER(SDL_RWops))
SDL_RWops._fields_ = [("size", _sdlsize),
("seek", _sdlseek),
("read", _sdlread),
("write", _sdlwrite),
("close", _sdlclose),
("type", Uint32),
("hidden", _hidden)
]
SDL_RWFromFile = _bind("SDL_RWFromFile", [c_char_p, c_char_p], POINTER(SDL_RWops))
SDL_RWFromFP = _bind("SDL_RWFromFP", [c_void_p, SDL_bool], POINTER(SDL_RWops))
SDL_RWFromMem = _bind("SDL_RWFromMem", [c_void_p, c_int], POINTER(SDL_RWops))
SDL_RWFromConstMem = _bind("SDL_RWFromConstMem", [c_void_p, c_int], POINTER(SDL_RWops))
SDL_AllocRW = _bind("SDL_AllocRW", None, POINTER(SDL_RWops))
SDL_FreeRW = _bind("SDL_FreeRW", [POINTER(SDL_RWops)])
SDL_LoadFile_RW = _bind("SDL_LoadFile_RW", [POINTER(SDL_RWops), POINTER(c_size_t), c_int], c_void_p, added='2.0.6')
# SDL_LoadFile was a macro in SDL <= 2.0.9, added as a function in 2.0.10
if version >= 2010:
SDL_LoadFile = _bind("SDL_LoadFile", [c_char_p, c_size_t], c_void_p)
else:
SDL_LoadFile = lambda fname, ds: SDL_LoadFile_RW(SDL_RWFromFile(fname, "rb"), ds, 1)
RW_SEEK_SET = 0
RW_SEEK_CUR = 1
RW_SEEK_END = 2
def _ptr2obj(ptr):
"""If a pointer, returns its contents. Otherwise, returns the passed object.
"""
if isinstance(ptr, _Pointer):
return ptr.contents
return ptr
# The following set of functions were macros in SDL <= 2.0.9 but became full
# functions in SDL 2.0.10. Lambda functions are to mimic macro behaviour with
# earlier SDL2 versions.
if version >= 2010:
SDL_RWsize = _bind("SDL_RWsize", [POINTER(SDL_RWops)], Sint64)
SDL_RWseek = _bind("SDL_RWseek", [POINTER(SDL_RWops), Sint64, c_int], Sint64)
SDL_RWtell = _bind("SDL_RWtell", [POINTER(SDL_RWops)], Sint64)
SDL_RWread = _bind("SDL_RWread", [POINTER(SDL_RWops), c_void_p, c_size_t, c_size_t], c_size_t)
SDL_RWwrite = _bind("SDL_RWwrite", [POINTER(SDL_RWops), c_void_p, c_size_t, c_size_t], c_size_t)
SDL_RWclose = _bind("SDL_RWclose", [POINTER(SDL_RWops)], c_int)
else:
_p = _ptr2obj # allow pointers to be passed directly to these functions
SDL_RWsize = lambda ctx: _p(ctx).size(_p(ctx))
SDL_RWseek = lambda ctx, offset, whence: _p(ctx).seek(_p(ctx), offset, whence)
SDL_RWtell = lambda ctx: _p(ctx).seek(_p(ctx), 0, RW_SEEK_CUR)
SDL_RWread = lambda ctx, ptr, size, n: _p(ctx).read(_p(ctx), ptr, size, n)
SDL_RWwrite = lambda ctx, ptr, size, n: _p(ctx).write(_p(ctx), ptr, size, n)
SDL_RWclose = lambda ctx: _p(ctx).close(_p(ctx))
SDL_ReadU8 = _bind("SDL_ReadU8", [POINTER(SDL_RWops)], Uint8)
SDL_ReadLE16 = _bind("SDL_ReadLE16", [POINTER(SDL_RWops)], Uint16)
SDL_ReadBE16 = _bind("SDL_ReadBE16", [POINTER(SDL_RWops)], Uint16)
SDL_ReadLE32 = _bind("SDL_ReadLE32", [POINTER(SDL_RWops)], Uint32)
SDL_ReadBE32 = _bind("SDL_ReadBE32", [POINTER(SDL_RWops)], Uint32)
SDL_ReadLE64 = _bind("SDL_ReadLE64", [POINTER(SDL_RWops)], Uint64)
SDL_ReadBE64 = _bind("SDL_ReadBE64", [POINTER(SDL_RWops)], Uint64)
SDL_WriteU8 = _bind("SDL_WriteU8", [POINTER(SDL_RWops), Uint8], c_size_t)
SDL_WriteLE16 = _bind("SDL_WriteLE16", [POINTER(SDL_RWops), Uint16], c_size_t)
SDL_WriteBE16 = _bind("SDL_WriteBE16", [POINTER(SDL_RWops), Uint16], c_size_t)
SDL_WriteLE32 = _bind("SDL_WriteLE32", [POINTER(SDL_RWops), Uint32], c_size_t)
SDL_WriteBE32 = _bind("SDL_WriteBE32", [POINTER(SDL_RWops), Uint32], c_size_t)
SDL_WriteLE64 = _bind("SDL_WriteLE64", [POINTER(SDL_RWops), Uint64], c_size_t)
SDL_WriteBE64 = _bind("SDL_WriteBE64", [POINTER(SDL_RWops), Uint64], c_size_t)
if sys.version_info[0] >= 3:
try:
from collections.abc import Callable
except ImportError:
from collections import Callable
callable = lambda x: isinstance(x, Callable)
def rw_from_object(obj):
"""Creats a SDL_RWops from any Python object.
The Python object must at least support the following methods:
read(length) -> data
length is the size in bytes to be read. A call to len(data) must
return the correct amount of bytes for the data, so that
len(data) / [size in bytes for a single element from data] returns
the amount of elements.
Must raise an error on failure.
seek(offset, whence) -> int
offset denotes the offset to move the read/write pointer of the
object to. whence indicates the movement behaviour and can be one
of the following values:
RW_SEEK_SET - move to offset from the start of the file
RW_SEEK_CUR - move by offset from the relative location
RW_SEEK_END - move to offset from the end of the file
If it could not move read/write pointer to the desired location,
an error must be raised.
tell() -> int
Must return the current offset. This method must only be
provided, if seek() does not return any value.
close() -> None
        Closes the object (or its internal data access methods). Must raise
an error on failure.
write(data) -> None
        Writes the passed data (which is a string of bytes) to the object.
Must raise an error on failure.
Note: The write() method is optional and only necessary, if the passed
object should be able to write data.
The returned SDL_RWops is a pure Python object and must not be freed via
free_rw().
"""
if not hasattr(obj, "read"):
raise TypeError("obj must have a read(len) -> data method")
if not hasattr(obj, "seek") or not callable(obj.seek):
raise TypeError("obj must have a seek(offset, whence) method")
if not hasattr(obj, "close") or not callable(obj.close):
raise TypeError("obj must have a close() -> int method")
rwops = SDL_RWops()
def _rwsize(context):
try:
if hasattr(obj, "size"):
if callable(obj.size):
return obj.size()
else:
return obj.size
else:
cur = obj.seek(0, RW_SEEK_CUR)
length = obj.seek(0, RW_SEEK_END)
obj.seek(cur, RW_SEEK_CUR)
return length
except Exception:
#print(e)
return -1
rwops.size = _sdlsize(_rwsize)
def _rwseek(context, offset, whence):
try:
retval = obj.seek(offset, whence)
if retval is None:
retval = obj.tell()
return retval
except Exception:
#print(e)
return -1
rwops.seek = _sdlseek(_rwseek)
def _rwread(context, ptr, size, maxnum):
try:
data = obj.read(size * maxnum)
num = len(data)
memmove(ptr, data, num)
return num // size
except Exception:
#print(e)
return 0
rwops.read = _sdlread(_rwread)
def _rwclose(context):
try:
retval = obj.close()
if retval is None:
# No return value; we assume that everything is okay.
return 0
return retval
except Exception:
#print(e)
return -1
rwops.close = _sdlclose(_rwclose)
def _rwwrite(context, ptr, size, num):
try:
# string_at feels wrong, since we access a raw byte buffer...
retval = obj.write(string_at(ptr, size * num))
if issubclass(type(obj), io.IOBase):
if retval is None: # Means write error
return 0
return retval // size
# If not an io object, try to interpret retval as bytes written
# and, failing that, just assume success if no exception raised
# and return num
try:
return int(retval) // size
except TypeError:
return num
except Exception:
#print(e)
return 0
if hasattr(obj, "write") and callable(obj.write):
rwops.write = _sdlwrite(_rwwrite)
else:
rwops.write = _sdlwrite()
return rwops
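
# Example, kept as comments: an io.BytesIO satisfies the protocol described
# in the docstring above, so it can be wrapped directly (ctypes converts the
# returned struct to a pointer argument automatically):
#
#   import io
#   rw = rw_from_object(io.BytesIO(b'\x01\x02'))
#   value = SDL_ReadU8(rw)  # reads the first byte, i.e. 1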
| 40.757322 | 115 | 0.63474 | 1,347 | 9,741 | 4.323682 | 0.185598 | 0.068681 | 0.082418 | 0.010817 | 0.222527 | 0.15625 | 0.098729 | 0.07658 | 0.055975 | 0.030907 | 0 | 0.024851 | 0.260548 | 9,741 | 238 | 116 | 40.928571 | 0.783701 | 0.229237 | 0 | 0.203704 | 0 | 0 | 0.138025 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04321 | false | 0.012346 | 0.049383 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb0d9c6932d743aeed7dc6fe06e706d6e456a86b | 3,180 | py | Python | MEPDesign.tab/Split Pipes.panel/SplitPipes.pushbutton/script.py | andrerdsilva/MEPDesign | 37227bed68f50fda83fb925c338b599e190d17ce | [
"Apache-2.0"
] | 6 | 2021-02-03T04:36:40.000Z | 2022-02-18T04:02:30.000Z | MEPDesign.tab/Split Pipes.panel/SplitPipes.pushbutton/script.py | andrerdsilva/MEPDesign | 37227bed68f50fda83fb925c338b599e190d17ce | [
"Apache-2.0"
] | 3 | 2021-02-02T21:24:22.000Z | 2022-03-09T00:16:50.000Z | MEPDesign.tab/Split Pipes.panel/SplitPipes.pushbutton/script.py | andrerdsilva/MEPDesign | 37227bed68f50fda83fb925c338b599e190d17ce | [
"Apache-2.0"
] | 3 | 2021-01-25T15:06:40.000Z | 2022-03-09T00:12:50.000Z | # -*- coding: utf-8 -*-
""" Split ALL Pipes by PipeType and distance."""
__title__ = 'Split Pipes'
__author__ = 'André Rodrigues da Silva'
from rpw import revit, db
from rpw import revit, db
from rpw.ui.forms import (FlexForm, Label, ComboBox, TextBox, Separator, Button, CheckBox)
from Autodesk.Revit.DB.Plumbing.PlumbingUtils import BreakCurve
from rpw.db.xyz import XYZ
try:
Tubos = db.Collector(of_category='OST_PipeCurves',is_not_type=True)
TipoTubo = db.Collector(of_category='OST_PipeCurves',is_type=True)
PipeTypes = []
for i in range(0,len(Tubos)):
PipeTypes.append(Tubos[i].Name)
PipeTypes = list(dict.fromkeys(PipeTypes))
PipeTypes = dict(zip(PipeTypes, PipeTypes))
components = [Label('Select the pipe type:'),
ComboBox('PipeType', PipeTypes),
Label('Distance:'),
TextBox('distance', Text="3.0"),
Label('Parameters separated by ",":'),
TextBox('parameters', Text=""),
Separator(),
Button('Process')]
form = FlexForm('Split Pipes', components)
form.show()
TuboMaterial = form.values['PipeType']
L = float(form.values['distance'])*3.28084
P = form.values['parameters']
P = P.split(",")
TuboSelecionado = []
for i in range(0,len(Tubos)):
if(Tubos[i].Name == TuboMaterial):
TuboSelecionado.append(Tubos[i])
    # pipes longer than the selected length
TuboSecionado3 = []
for i in range(0,len(TuboSelecionado)):
if(TuboSelecionado[i].Location.Curve.Length > L):
TuboSecionado3.append(TuboSelecionado[i])
points = []
pointsAUX = []
for i in range(0,len(TuboSecionado3)):
if((TuboSecionado3[i].Location.Curve.Length/L)>int(TuboSecionado3[i].Location.Curve.Length/L)):
t = int(TuboSecionado3[i].Location.Curve.Length/L) + 1
else:
t = int(TuboSecionado3[i].Location.Curve.Length/L)
d = L *TuboSecionado3[i].Location.Curve.Direction
for n in range(0,t):
if(n ==0):
continue
else:
pointsAUX.append(TuboSecionado3[i].Location.Curve.GetEndPoint(0) + n*d)
points.append(pointsAUX)
pointsAUX = []
pipes = TuboSecionado3
# Typical Transaction in Revit Python Shell / pyRevit
doc = __revit__.ActiveUIDocument.Document
    transaction = Transaction(doc, 'Split Pipes')
transaction.Start()
try:
for t in range(0,len(pipes),1):
for i in range(0,len(points[t]),1):
dbPoint = points[t][i]
pipe = pipes[t]
newPipeId = BreakCurve(doc, pipe.Id, dbPoint)
newPipe = doc.GetElement(newPipeId)
if(P[0]!=''):
for z in range(0,len(P)):
newPipe.LookupParameter(P[z]).Set(str(pipe.LookupParameter(P[z]).AsString()))
newPipeConnectors = newPipe.ConnectorManager.Connectors
connA = None
connB = None
for c in pipe.ConnectorManager.Connectors:
pc = c.Origin
nearest = [x for x in newPipeConnectors if pc.DistanceTo(x.Origin) < 0.01]
if nearest:
connA = c
connB = nearest[0]
takeoff = doc.Create.NewUnionFitting(connA, connB)
if(P[0]!=''):
for z in range(0,len(P)):
takeoff.LookupParameter(P[z]).Set(str(pipe.LookupParameter(P[z]).AsString()))
except:
transaction.RollBack()
else:
transaction.Commit()
except Exception:
    # swallow form/processing errors so the dialog can simply be cancelled
    pass
cb0ef5b3e0c491167accd9cba8f4ce705028fed3 | 956 | py | Python | test/units/sinks/test_ppjson.py | larsborn/refinery | c8b19156b17e5fa5de5c72bc668a14d646584560 | [
"BSD-3-Clause"
] | null | null | null | test/units/sinks/test_ppjson.py | larsborn/refinery | c8b19156b17e5fa5de5c72bc668a14d646584560 | [
"BSD-3-Clause"
] | null | null | null | test/units/sinks/test_ppjson.py | larsborn/refinery | c8b19156b17e5fa5de5c72bc668a14d646584560 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from .. import TestUnitBase
class TestJSON(TestUnitBase):
def test_trailing_commas(self):
unit = self.load()
test = json.loads(unit(B'{"foo": 9, "bar": [1,2,3,], "baz": 11,}'))
self.assertEqual(test, {'foo': 9, 'bar': [1, 2, 3], 'baz': 11})
def test_leave_string_literals_unchanged(self):
unit = self.load()
test = json.loads(unit(
BR'''{
"[key,]": 9,
"{\"foo\": 7, \"bar\":6,}": 10
}
'''
))
self.assertIn('[key,]', test)
self.assertIn('{"foo": 7, "bar":6,}', test)
def test_minify_json(self):
unit = self.load(indent=0)
data = {"A": [1, 2, 3], "B": {"C": "Yes", "D": "No"}}
test = unit(json.dumps(data, indent=4).encode(unit.codec))
self.assertEqual(len(test), 38)
self.assertEqual(json.loads(test), data)
| 28.969697 | 75 | 0.498954 | 121 | 956 | 3.876033 | 0.46281 | 0.044776 | 0.076759 | 0.102345 | 0.204691 | 0.204691 | 0.204691 | 0.204691 | 0 | 0 | 0 | 0.041116 | 0.287657 | 956 | 32 | 76 | 29.875 | 0.647577 | 0.044979 | 0 | 0.1 | 0 | 0 | 0.10362 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb12e3fbb03aef3968221deca8a6c8059bebf905 | 26,983 | py | Python | itsh5py/hdf_support.py | max3-2/itsh5py | 6100cb181951202552d60093a9db070c8d824674 | [
"MIT"
] | null | null | null | itsh5py/hdf_support.py | max3-2/itsh5py | 6100cb181951202552d60093a9db070c8d824674 | [
"MIT"
] | null | null | null | itsh5py/hdf_support.py | max3-2/itsh5py | 6100cb181951202552d60093a9db070c8d824674 | [
"MIT"
] | null | null | null | """
Functions to handle h5 save and load with all types present in python.
Currently, deepdish is still used due to dependency issues with old files;
however, it will be deprecated in future releases
"""
import os
import platform
from pathlib import Path, PureWindowsPath
from collections import UserDict
from datetime import datetime
import h5py
import numpy as np
import pandas as pd
import yaml
from logging import getLogger
from .queue_handler import add_open_file, is_open, remove_from_queue
from . import config
logger = getLogger(__package__)
TYPEID = '_TYPE_'
def _tree(hdf, levels=[], max_depth=None, buffer=None, printout=True):
"""
Displays the hdf tree for lazy dicts.
This function displays a representation of the hdf file tree without
loading the actual datasets. Basic information is printed.
"""
if max_depth and len(levels) > max_depth:
return
markers = ''.join(' ' if last else '│ ' for last in levels[:-1])
markers += '' if not levels else '└─ ' if levels[-1] else '├─ '
if buffer is None:
buffer = ''
if isinstance(hdf, h5py.File):
msg = f'{markers}{os.path.basename(hdf.filename)}'
if printout:
print(msg)
buffer += msg + '\n'
children = hdf.keys()
last = len(children) - 1
for (index, child) in enumerate(children):
buffer = _tree(
hdf[child], levels + [index == last], max_depth, buffer=buffer,
printout=printout)
elif isinstance(hdf, h5py.Group):
msg = f'{markers}Group {hdf.name}'
if printout:
print(msg)
buffer += msg + '\n'
children = hdf.keys()
last = len(children) - 1
for (index, child) in enumerate(children):
buffer = _tree(
hdf[child], levels + [index == last], max_depth, buffer=buffer,
printout=printout)
elif isinstance(hdf, h5py.Dataset):
if hdf.ndim == 0 and TYPEID not in hdf.attrs:
msg = f'{markers}{hdf.name}::{hdf[()]}'
else:
msg = f'{markers}{hdf.name}::{hdf.shape}'
if TYPEID in hdf.attrs:
msg += f' (py-type: {hdf.attrs[TYPEID]})'
if printout:
print(msg)
buffer += msg + '\n'
else:
...
return buffer
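# Usage sketch for _tree (the file name below is a placeholder, not part of
# this module):
#     with h5py.File('data.h5', 'r') as f:
#         _tree(f)                          # prints the tree to stdout
#         text = _tree(f, printout=False)   # returns the tree as a string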
class LazyHdfDict(UserDict):
"""
    Loads data only when values from the dict are requested. This is
    done by reimplementing the __getitem__ method from dict. Other convenience
    functions are added to work with the hdf files as backend.
Parameters
------------
_h5file: 'h5py.File', optional
h5py File object or None
group: `str`, optional
Group to anchor the LazyHdfDict into.
args, kwargs:
        Passed to the parent `UserDict` type.
"""
def __init__(self, _h5file=None, group='/', *args, **kwargs):
super().__init__(*args, **kwargs)
self._h5file = None
self._h5filename = None
self.h5file = _h5file
self.group = group
def __str__(self):
return self.__repr__()
def __repr__(self):
buffer = _tree(self.h5file, printout=False)
return buffer
@property
def h5file(self):
"""File handle of the `h5py.File()` object behind the `LazyHdfDict`."""
return self._h5file
@h5file.setter
def h5file(self, handle):
if handle is not None:
if not isinstance(handle, (h5py.File, h5py.Dataset)):
raise TypeError('Invalid h5file handle type')
self._h5file = handle
self._h5filename = handle.filename
logger.debug(f'Added handle and file to LazyDict: {handle}::{handle.filename}')
@property
def group(self):
"""Root group of the `LazyHdfDict`."""
return self._group
@group.setter
def group(self, group):
if isinstance(group, str):
if group.startswith('/'):
self._group = group
return
        logger.warning("Can't set group, must be a string that starts with a /")
def __getitem__(self, key):
"""
        Returns the item and loads the dataset if needed. An emergency
        fallback for accessing an already closed file (e.g. when long,
        preloaded file lists are used) is included."""
if not self.h5file:
# Check if this was unwrapped anyway...catching tuples etc.
item = super().__getitem__(key)
if not isinstance(item, h5py.Dataset):
return item
if config.allow_fallback_open:
logger.debug(f'File {self._h5filename} was already closed, reopening...')
self.h5file = h5py.File(self._h5filename, 'r')
sub = self.h5file
if self._group != '/':
for level in [g for g in self._group.split('/') if g != '']:
logger.debug(f'Access to subgroup iter.: {level}')
sub = sub[level]
item = unpack_dataset(sub[key])
else:
item = unpack_dataset(self.h5file[key])
self.h5file.close()
else:
                logger.error("Can't access data in a closed file which is "
                             "not unwrapped.")
return None
else:
item = super().__getitem__(key)
if isinstance(item, h5py.Dataset):
try:
item = unpack_dataset(item)
self.__setitem__(key, item)
except ValueError:
logger.exception(f'Error reading {key} from {self.group} in {self.h5file}')
return item
def unlazy(self):
"""Unpacks all datasets and closes the Lazy reference
"""
unlazied = dict(self)
self.close()
return unlazied
def close(self):
"""Closes the h5file if provided at initialization.
        Unpacking will keep working via the fallback routine if enabled.
"""
if self._h5file is not None: # set
if self._h5file: # ...and open
if self._group == '/': # Only if this is a root file...
remove_from_queue(self._h5file.filename)
def __del__(self):
try:
self.close()
except ImportError: # this can happen on ipython crtl+D
...
def _ipython_key_completions_(self):
"""Returns a tuple of keys.
Special Method for ipython to get key completion support.
"""
return tuple(self.keys())
def unpack_dataset(item):
"""Reconstruct a hdfdict dataset.
This holds all special **unpacking** procedures for types not natively
supported by `h5py`.
Parameters
----------
item: `h5py.Dataset`
The dataset to unpack
Returns
-------
value:
Unpacked Data
"""
if TYPEID in item.attrs:
if item.attrs[TYPEID] == 'datetime':
value = item[()]
if hasattr(value, '__iter__'):
value = [datetime.fromtimestamp(
ts) for ts in value]
else:
value = datetime.fromtimestamp(value)
elif item.attrs[TYPEID] == 'yaml':
value = item[()]
try:
value = yaml.safe_load(value.decode())
except AttributeError: # already decoded string
value = yaml.safe_load(value)
elif item.attrs[TYPEID] == 'tuple':
value = 0
elif item.attrs[TYPEID] == 'list_str':
try:
value = [it.decode() for it in item[()]]
except UnicodeDecodeError:
try:
value = [it.decode('latin-1') for it in item[()]]
except UnicodeDecodeError:
                logger.exception(f"Can't decode bytes in {item.name}")
value = None
elif item.attrs[TYPEID] == 'strArray':
logger.warning('The strArray typeID is deprecated!')
value = item[()]
try:
value = yaml.safe_load(value.decode())
except AttributeError: # already decoded string
value = yaml.safe_load(value)
value = np.array(value)
elif item.attrs[TYPEID] == 'str_array':
value = item[()]
init_shape = value.shape
try:
value = np.array(
[v.decode() for v in value.ravel()]).reshape(init_shape)
except UnicodeDecodeError:
try:
                value = np.array(
                    [v.decode('latin-1') for v in value.ravel()]).reshape(init_shape)
except UnicodeDecodeError:
                logger.exception(f"Can't decode bytes in {item.name}")
value = None
elif item.attrs[TYPEID] == 'list_arr':
value = list(item[()])
elif item.attrs[TYPEID] == 'path':
value = Path(item[()].decode())
else:
raise RuntimeError('Invalid TYPEID in h5 database')
else:
value = item[()]
if isinstance(value, bytes):
# This is most likely a str...trying to decode that right away
try:
value = item.asstr()[()]
except Exception as e:
logger.warning(f'Converting bytes to str failed: {e}')
value = item[()]
return value
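# For reference, the TYPEID values handled by unpack_dataset above are
# 'datetime', 'yaml', 'tuple', 'list_str', 'strArray' (deprecated),
# 'str_array', 'list_arr' and 'path'; any other TYPEID raises RuntimeError.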
def load(hdf, unpack_attrs=False, unpacker=unpack_dataset):
"""Returns a dictionary containing the groups as keys and the datasets as
values from given hdf file.
Parameters
----------
hdf: `string, Path`
Path to hdf file.
unpack_attrs : `bool`, optional
If True attrs from h5 file will be unpacked and are available as dict
key attrs, no matter if lazy or not. Defaults to False.
unpacker : `callable`
Unpack function gets `value` of type h5py.Dataset.
Must return the data you would like to have it in the returned dict.
Returns
-------
result : `dict`, `LazyHdfDict`
The dictionary containing all groupnames as keys and datasets as
values. Can be lazy and thus not unwrapped.
"""
lazy = config.use_lazy
def _recurse_iter_data(value, is_tuple=False):
dl = list()
for _, v in value.items():
            # Tuples won't work lazily, so we have to unpack them right
            # away; anything else is way too complicated
if TYPEID in v.attrs:
if v.attrs[TYPEID] == 'tuple':
dl.append(_recurse_iter_data(v, True))
elif v.attrs[TYPEID] == 'list':
dl.append(_recurse_iter_data(v))
elif v.attrs[TYPEID] == 'path_list' or v.attrs[TYPEID] == 'path_tuple':
dl.append(_recurse_iter_data(v))
else:
dl.append(unpacker(v))
else:
dl.append(unpacker(v))
if is_tuple:
dl = tuple(dl)
return dl
def _recurse(hdfobject, datadict):
for key, value in hdfobject.items():
if 'pandas_type' in value.attrs:
# This is a dataframe or a series...might be in subgroup
if isinstance(hdfobject, h5py.File):
datadict[key] = pd.read_hdf(hdfobject.filename, key)
else:
datadict[key] = pd.read_hdf(hdfobject.file.filename,
f'{hdfobject.name}/{key}')
else:
if TYPEID in value.attrs:
if value.attrs[TYPEID] == 'tuple':
datadict[key] = _recurse_iter_data(value, True)
elif value.attrs[TYPEID] == 'list':
datadict[key] = _recurse_iter_data(value)
elif value.attrs[TYPEID] == 'path_list' or value.attrs[TYPEID] == 'path_tuple':
datadict[key] = _recurse_iter_data(value, 'tuple' in value.attrs[TYPEID])
else:
if lazy:
datadict[key] = value
else:
datadict[key] = unpacker(value)
elif isinstance(value, h5py.Group) or isinstance(value, LazyHdfDict):
if lazy:
datadict[key] = LazyHdfDict()
if isinstance(value, h5py.Group):
logger.debug('LazyDict from Group - searching parent...')
datadict[key].h5file = value.file
datadict[key].group = value.name
logger.debug(
f'Created child LazyDict of Group {datadict[key].group} in File {datadict[key].h5file}')
else:
datadict[key].h5file = hdfobject
else:
datadict[key] = {}
datadict[key] = _recurse(value, datadict[key])
elif isinstance(value, h5py.Dataset):
if lazy:
datadict[key] = value
else:
datadict[key] = unpacker(value)
return datadict
if isinstance(hdf, str):
        # Fixing Windows issues with manually specified paths
if platform.system() == 'Windows':
hdf = PureWindowsPath(hdf)
hdf = Path(hdf)
if not hdf.suffix:
hdf = hdf.parent / (hdf.name + config.default_suffix)
# First check if lazy and file is already loaded
if lazy:
data = is_open(hdf)
if data is not None:
if 'attrs' not in data and unpack_attrs:
logger.debug('Reloading file attributes to unwrap...')
data['attrs'] = {k: v for k, v in data.h5file.attrs.items()}
return data
else:
return data
# Else open the file and go on
hdf_handle = h5py.File(hdf, 'r')
if lazy:
data = LazyHdfDict(_h5file=hdf_handle)
add_open_file(data)
else:
data = {}
# Attributes are loaded into a dict if asked for. Else they will remain
# in the h5file
if unpack_attrs:
data['attrs'] = {k: v for k, v in hdf_handle.attrs.items()}
# Finally, add the rest from the file. If not lazy, close it right away.
# If lazy, the file must stay open.
data = _recurse(hdf_handle, data)
if lazy:
return data
hdf_handle.close()
# squeeze singleton data from dict, only if enabled. Default is off
if config.squeeze_single and len(data.keys()) == 1:
data = data[list(data.keys())[0]]
return data
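# Minimal usage sketch for load (the file name is a placeholder):
#     data = load('measurements.h5')   # LazyHdfDict when config.use_lazy is set
#     arr = data['some_dataset']       # datasets are unpacked on first access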
def pack_dataset(hdfobject, key, value, compress):
"""Packs a given key value pair into a dataset in the given hdfobject.
This holds all special **packing** procedures for types not natively
    supported by `h5py`. If a value exists that is not conformable with hdf,
    the function tries to adapt or serialize the value using yaml as a last
    resort, raising a TypeWarning along the way.
    If yaml fails, the exception is raised unhandled and the code fails,
    i.e. saving only succeeds if all datasets were packable!
Parameters
------------
hdfobject: `h5py.File` or similar to save the data to.
        The object to pack the key-value pair into.
    key: `string`
        Identifier to write the data to.
value: `any`
Data value
compress: `tuple`
Tuple of (bool compress, 0-9 level) which specifies the compression.
"""
def _dump_array(name, array, group, compress, type_id=None):
if len(array) == 0:
return
# This is a string array - to avoid unicode this will be made binary
# and stored with a unique typeid
if array.dtype.str.startswith('<U'):
logger.debug('(unicode) str array found, making list')
init_shape = array.shape
array = np.array([str(v).encode() for v in array.ravel()]).reshape(init_shape)
if compress[0]:
subset = group.create_dataset(
name=name, data=array, compression='gzip',
compression_opts=compress[1])
else:
subset = group.create_dataset(
name=name, data=array)
subset.attrs.create(
name=TYPEID,
data=str('str_array'))
return
logger.debug(f'Dumping array {name} to file')
if compress[0]:
subset = group.create_dataset(
name=name, data=array, compression='gzip',
compression_opts=compress[1])
else:
subset = group.create_dataset(
name=name, data=array)
if type_id is not None:
subset.attrs.create(
name=TYPEID,
data=str(type_id))
def _iterate_iter_data(hdfobject, key, value, typeID, inner_id=None):
ds = hdfobject.create_group(key)
elementsOrder = int(np.floor(np.log10(len(value))) + 1)
fmt = 'i_{:0' + str(elementsOrder) + 'd}'
for i, v in enumerate(value):
if isinstance(v, tuple):
_iterate_iter_data(ds, fmt.format(i), v, "tuple", inner_id)
elif isinstance(v, list):
# check for mixed type, if yes, dump to group as tuple
if not all([isinstance(v, type(value[0])) for v in value]):
_iterate_iter_data(hdfobject, key, value, "list", inner_id)
else:
_iterate_iter_data(ds, fmt.format(i), v, "list", inner_id)
else:
if isinstance(v, np.ndarray):
_dump_array(fmt.format(i), v, ds, compress)
else:
if isinstance(v, np.str_):
v = str(v)
inner = ds.create_dataset(name=fmt.format(i), data=v)
if inner_id is not None:
logger.debug(f'Adding innermost id {inner_id} to {inner}')
inner.attrs.create(
name=TYPEID,
data=str(inner_id))
ds.attrs.create(
name=TYPEID,
data=str(typeID))
logger.debug(f'Packing {key}, with type {type(value)}')
isdt = False
if isinstance(value, datetime):
value = value.timestamp()
isdt = True
elif hasattr(value, '__iter__'):
if all(isinstance(i, datetime) for i in value):
value = [item.timestamp() for item in value]
isdt = True
try:
manual_type = None
# Catch a list or tuple of Path as a special cases
if isinstance(value, tuple) or isinstance(value, list):
if isinstance(value[0], Path):
if not all([isinstance(v, type(value[0])) for v in value]):
                    error = 'Path iterables are only supported in homogeneous packs'
logger.error(error)
raise RuntimeError(error)
if isinstance(value, tuple): path_type = 'tuple'
elif isinstance(value, list): path_type = 'list'
else:
error = 'Unsupported Path iterable'
logger.error(error)
raise RuntimeError(error)
_iterate_iter_data(
hdfobject, key, [str(v) for v in value], path_type, inner_id='path')
return
if isinstance(value, tuple):
_iterate_iter_data(hdfobject, key, value, "tuple")
return
# Catching list of strings or list of np.str_ or mixed lists..
if isinstance(value, list):
# check if all float or all int, then its ok to pass on
if all([isinstance(v, (int, float)) for v in value]):
value = np.array(value)
manual_type = 'list_arr'
# check for mixed type if yes, dump to group
# using the same as tuple
elif not all([isinstance(v, type(value[0])) for v in value]):
_iterate_iter_data(hdfobject, key, value, "list")
return
# check for nested list if yes, dump to group
# using the same as tuple
elif (all([isinstance(v, type(value[0])) for v in value])
and isinstance(value[0], list)):
logger.debug('Packing list of lists')
_iterate_iter_data(hdfobject, key, value, "list")
return
# List of (np) string
elif all([isinstance(v, (str, np.str_)) for v in value]):
value = np.array([str(v).encode() for v in value])
                logger.debug('List of strings will be binarized as array, adding type '
                             f'attribute for later decoding of {key}...')
manual_type = 'list_str'
# List of numpy arrays (changing shape possible)
elif all([isinstance(v, np.ndarray) for v in value]):
_iterate_iter_data(hdfobject, key, value, "list")
return
logger.debug(f'Trying to save {key} with type {type(value)}')
if isinstance(value, np.ndarray):
_dump_array(key, value, hdfobject, compress, type_id=manual_type)
isdt = False
elif isinstance(value, Path):
ds = hdfobject.create_dataset(name=key, data=str(value))
ds.attrs.create(
name=TYPEID,
data=str('path'))
else:
if compress[0]:
if isdt:
logger.debug('No compression for datetime...')
else:
logger.debug('No compression for unknown type...')
ds = hdfobject.create_dataset(name=key, data=value)
if isdt:
ds.attrs.create(
name=TYPEID,
data=str("datetime"))
except TypeError:
# Typecast to def. string for yaml. If it was a string, no action
# needed but to dump it
if isinstance(value, np.str_) or isinstance(value, str):
value = str(value)
ds = hdfobject.create_dataset(
name=key,
data=value
)
else:
# Obviously the data was not serializable. To give it
# a last try; serialize it to yaml but expect this to go down the
# crapper
try:
ds = hdfobject.create_dataset(
name=key,
data=yaml.safe_dump(value)
)
ds.attrs.create(
name=TYPEID,
data=str("yaml"))
except yaml.representer.RepresenterError:
logger.error(
'Cannot dump {:s} to h5, incompatible data format '
'even when using serialization.'.format(key))
logger.error(50*'-')
                raise RuntimeError(f"Can't save {key}")
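# Round-trip sketch for pack_dataset via save/load (names are illustrative):
#     save('demo.h5', {'a': np.arange(3), 'when': datetime.now()})
#     back = load('demo.h5')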
def save(hdf, data, compress=config.default_compression, packer=pack_dataset,
*args, **kwargs):
"""
Adds keys of given dict as groups and values as datasets to the given
hdf-file (by string or object) or group object. Iterative dicts are
supported.
The dict can have the `attrs` key containing a dict of key, value pairs
which are added as root level attributes to the hdf file. Those must be
scalar, else exceptions will occur.
`\*args` and `\*\*kwargs` will be passed to the `h5py.File` constructor.
Parameters
-----------
hdf: `string`, `Path`
Path to File
data: `dict`
The dictionary containing *only string or tuple* keys and
data values or dicts as above again.
packer: `callable`
Callable gets `hdfobject, key, value` as input.
`hdfobject` is considered to be either a h5py.File or a h5py.Group.
`key` is the name of the dataset.
`value` is the dataset to be packed and accepted by h5py.
Defaults to `pack_dataset()`
compress: `tuple`
Try to compress arrays, use carefully. If on, gzip mode is used in
every case. Defaults to `(False, 0)`. When `(True,...)` the second
element specifies the level from `0-9`, see h5py doc.
Returns
--------
hdf: `string`
Path to new file
"""
def _recurse(datadict, hdfobject):
for key, value in datadict.items():
if isinstance(key, tuple):
key = '_'.join((str(i) for i in key))
if isinstance(value, (dict, LazyHdfDict)):
hdfgroup = hdfobject.create_group(key)
_recurse(value, hdfgroup)
else:
if isinstance(value, (pd.DataFrame, pd.Series)):
raise TypeError('pandas Data must be stored in root group')
else:
packer(hdfobject, key, value, compress)
if isinstance(hdf, str):
        # Fixing Windows issues with manually specified paths
if platform.system() == 'Windows':
hdf = PureWindowsPath(hdf)
hdf = Path(hdf)
if not hdf.suffix == config.default_suffix:
hdf = hdf.parent / (hdf.name + config.default_suffix)
# Single dataframe
if isinstance(data, (pd.DataFrame, pd.Series)):
if compress[0]:
store = pd.HDFStore(hdf, compress=compress[1], complib='zlib')
else:
store = pd.HDFStore(hdf, compress=None)
store.put('pd_dataframe', data)
store.close()
return hdf
if config.allow_overwrite:
file_mode = 'w'
else:
file_mode = 'a'
# Dataframe in dict. Pandas is stored in advance...stupid file lock in
# pandas prevents otherwise.
pandas_keys = list()
for k, v in data.items():
if isinstance(v, (pd.DataFrame, pd.Series)):
if compress[0]:
v.to_hdf(hdf, key=k, mode=file_mode, complevel=compress[1], complib='zlib')
else:
v.to_hdf(hdf, key=k, mode=file_mode, complib=None)
pandas_keys.append(k)
file_mode = 'r+'
    data = data.copy()  # this is needed so popping won't change the input data
for k in pandas_keys:
_ = data.pop(k)
with h5py.File(hdf, file_mode, *args, **kwargs) as hdf_handle:
# Handle manual attrs setup
if 'attrs' in data:
for k, v in data['attrs'].items():
hdf_handle.attrs[k] = v
_ = data.pop('attrs')
# Finally save the data
_recurse(data, hdf_handle)
return hdf
| 35.088427 | 120 | 0.54538 | 3,158 | 26,983 | 4.576631 | 0.157695 | 0.019927 | 0.005397 | 0.009133 | 0.270532 | 0.23566 | 0.20321 | 0.171729 | 0.142808 | 0.130215 | 0 | 0.005764 | 0.35704 | 26,983 | 768 | 121 | 35.134115 | 0.827022 | 0.217174 | 0 | 0.38809 | 0 | 0.002053 | 0.086779 | 0.009475 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045175 | false | 0 | 0.026694 | 0.002053 | 0.131417 | 0.020534 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb13b099c181375e6bb8cebbcd89c8c91b3a360e | 12,759 | py | Python | utils/test/reporting/functest/reporting-status.py | hwoarang/releng | fc8cb9f1bc3a37039d19ae7a5a17699d8a36bfb8 | [
"Apache-2.0"
] | null | null | null | utils/test/reporting/functest/reporting-status.py | hwoarang/releng | fc8cb9f1bc3a37039d19ae7a5a17699d8a36bfb8 | [
"Apache-2.0"
] | null | null | null | utils/test/reporting/functest/reporting-status.py | hwoarang/releng | fc8cb9f1bc3a37039d19ae7a5a17699d8a36bfb8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
import jinja2
import os
import requests
import sys
import time
import yaml
import testCase as tc
import scenarioResult as sr
# manage conf
import utils.reporting_utils as rp_utils
# Logger
logger = rp_utils.getLogger("Functest-Status")
# Initialization
testValid = []
otherTestCases = []
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
# init just tempest to get the list of scenarios
# as all the scenarios run Tempest
tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)
# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
cf = rp_utils.get_config('functest.test_conf')
period = rp_utils.get_config('general.period')
versions = rp_utils.get_config('general.versions')
installers = rp_utils.get_config('general.installers')
blacklist = rp_utils.get_config('functest.blacklist')
log_level = rp_utils.get_config('general.log.log_level')
response = requests.get(cf)
functest_yaml_config = yaml.safe_load(response.text)
logger.info("*******************************************")
logger.info("* *")
logger.info("* Generating reporting scenario status *")
logger.info("* Data retention: %s days *" % period)
logger.info("* Log level: %s *" % log_level)
logger.info("* *")
logger.info("*******************************************")
# Retrieve test cases of Tier 1 (smoke)
config_tiers = functest_yaml_config.get("tiers")
# we consider Tiers 1 (smoke), 2 (sdn suites) and 3 (features)
# to validate scenarios
# Tiers > 3 are not used to validate scenarios but we display the results anyway
# tricky thing for the API as some tests are Functest tests
# other tests are declared directly in the feature projects
for tier in config_tiers:
if tier['order'] > 0 and tier['order'] < 3:
for case in tier['testcases']:
if case['name'] not in blacklist:
testValid.append(tc.TestCase(case['name'],
"functest",
case['dependencies']))
elif tier['order'] == 3:
for case in tier['testcases']:
if case['name'] not in blacklist:
testValid.append(tc.TestCase(case['name'],
case['name'],
case['dependencies']))
elif tier['order'] > 3:
for case in tier['testcases']:
if case['name'] not in blacklist:
otherTestCases.append(tc.TestCase(case['name'],
"functest",
case['dependencies']))
logger.debug("Functest reporting start")
# For all the versions
for version in versions:
# For all the installers
for installer in installers:
# get scenarios
scenario_results = rp_utils.getScenarios(tempest, installer, version)
scenario_stats = rp_utils.getScenarioStats(scenario_results)
items = {}
scenario_result_criteria = {}
scenario_file_name = ("./display/" + version +
"/functest/scenario_history.txt")
# initiate scenario file if it does not exist
if not os.path.isfile(scenario_file_name):
with open(scenario_file_name, "a") as my_file:
logger.debug("Create scenario file: %s" % scenario_file_name)
my_file.write("date,scenario,installer,detail,score\n")
# For all the scenarios get results
for s, s_result in scenario_results.items():
logger.info("---------------------------------")
logger.info("installer %s, version %s, scenario %s:" %
(installer, version, s))
logger.debug("Scenario results: %s" % s_result)
# Green or Red light for a given scenario
nb_test_runnable_for_this_scenario = 0
scenario_score = 0
# url of the last jenkins log corresponding to a given
# scenario
s_url = ""
if len(s_result) > 0:
build_tag = s_result[len(s_result)-1]['build_tag']
logger.debug("Build tag: %s" % build_tag)
                s_url = rp_utils.getJenkinsUrl(build_tag)
logger.info("last jenkins url: %s" % s_url)
testCases2BeDisplayed = []
# Check if test case is runnable / installer, scenario
# for the test case used for Scenario validation
try:
# 1) Manage the test cases for the scenario validation
# concretely Tiers 0-3
for test_case in testValid:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
logger.debug("testcase %s (%s) is %s" %
(test_case.getDisplayName(),
test_case.getName(),
test_case.isRunnable))
time.sleep(1)
if test_case.isRunnable:
dbName = test_case.getDbName()
name = test_case.getName()
displayName = test_case.getDisplayName()
project = test_case.getProject()
nb_test_runnable_for_this_scenario += 1
logger.info(" Searching results for case %s " %
(displayName))
result = rp_utils.getResult(dbName, installer,
s, version)
# if no result set the value to 0
if result < 0:
result = 0
logger.info(" >>>> Test score = " + str(result))
test_case.setCriteria(result)
test_case.setIsRunnable(True)
testCases2BeDisplayed.append(tc.TestCase(name,
project,
"",
result,
True,
1))
scenario_score = scenario_score + result
# 2) Manage the test cases for the scenario qualification
# concretely Tiers > 3
for test_case in otherTestCases:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
logger.debug("testcase %s (%s) is %s" %
(test_case.getDisplayName(),
test_case.getName(),
test_case.isRunnable))
time.sleep(1)
if test_case.isRunnable:
dbName = test_case.getDbName()
name = test_case.getName()
displayName = test_case.getDisplayName()
project = test_case.getProject()
logger.info(" Searching results for case %s " %
(displayName))
result = rp_utils.getResult(dbName, installer,
s, version)
# at least 1 result for the test
if result > -1:
test_case.setCriteria(result)
test_case.setIsRunnable(True)
testCases2BeDisplayed.append(tc.TestCase(name,
project,
"",
result,
True,
4))
else:
logger.debug("No results found")
items[s] = testCases2BeDisplayed
except:
logger.error("Error: installer %s, version %s, scenario %s" %
(installer, version, s))
logger.error("No data available: %s " % (sys.exc_info()[0]))
# **********************************************
# Evaluate the results for scenario validation
# **********************************************
# the validation criteria = nb runnable tests x 3
# because each test case = 0,1,2 or 3
scenario_criteria = nb_test_runnable_for_this_scenario * 3
# if 0 runnable tests set criteria at a high value
if scenario_criteria < 1:
scenario_criteria = 50 # conf.MAX_SCENARIO_CRITERIA
s_score = str(scenario_score) + "/" + str(scenario_criteria)
s_score_percent = rp_utils.getScenarioPercent(scenario_score,
scenario_criteria)
s_status = "KO"
if scenario_score < scenario_criteria:
logger.info(">>>> scenario not OK, score = %s/%s" %
(scenario_score, scenario_criteria))
s_status = "KO"
else:
logger.info(">>>>> scenario OK, save the information")
s_status = "OK"
path_validation_file = ("./display/" + version +
"/functest/" +
"validated_scenario_history.txt")
with open(path_validation_file, "a") as f:
time_format = "%Y-%m-%d %H:%M"
info = (datetime.datetime.now().strftime(time_format) +
";" + installer + ";" + s + "\n")
f.write(info)
# Save daily results in a file
with open(scenario_file_name, "a") as f:
info = (reportingDate + "," + s + "," + installer +
"," + s_score + "," +
str(round(s_score_percent)) + "\n")
f.write(info)
scenario_result_criteria[s] = sr.ScenarioResult(s_status,
s_score,
s_score_percent,
s_url)
logger.info("--------------------------")
templateLoader = jinja2.FileSystemLoader(".")
templateEnv = jinja2.Environment(
loader=templateLoader, autoescape=True)
TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(scenario_stats=scenario_stats,
scenario_results=scenario_result_criteria,
items=items,
installer=installer,
period=period,
version=version,
date=reportingDate)
with open("./display/" + version +
"/functest/status-" + installer + ".html", "wb") as fh:
fh.write(outputText)
logger.info("Manage export CSV & PDF")
rp_utils.export_csv(scenario_file_name, installer, version)
logger.error("CSV generated...")
# Generate outputs for export
# pdf
# TODO Change once web site updated...use the current one
# to test pdf production
url_pdf = rp_utils.get_config('general.url')
pdf_path = ("./display/" + version +
"/functest/status-" + installer + ".html")
pdf_doc_name = ("./display/" + version +
"/functest/status-" + installer + ".pdf")
rp_utils.export_pdf(pdf_path, pdf_doc_name)
logger.info("PDF generated...")
| 46.565693 | 79 | 0.477545 | 1,166 | 12,759 | 5.078902 | 0.222985 | 0.039176 | 0.01182 | 0.018913 | 0.32945 | 0.28926 | 0.260723 | 0.227964 | 0.218507 | 0.218507 | 0 | 0.006899 | 0.420644 | 12,759 | 273 | 80 | 46.736264 | 0.794237 | 0.137628 | 0 | 0.358586 | 0 | 0 | 0.13228 | 0.027935 | 0 | 0 | 0 | 0.003663 | 0 | 1 | 0 | false | 0 | 0.050505 | 0 | 0.050505 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb14e5a358e387d4eec0d62f92ec4110f726d8b6 | 2,346 | py | Python | tests/example_labeller/management/commands/populate.py | claytonbrown/image-labelling-tool | 4a8f046d729f68a2fb214104a7522111a88c100a | [
"MIT"
] | 61 | 2017-07-31T00:38:49.000Z | 2022-01-22T02:09:36.000Z | tests/example_labeller/management/commands/populate.py | JinyongJeong/image-labelling-tool | 4620fcc73d88f8df4261c62267e768595de96ed1 | [
"MIT"
] | 4 | 2017-08-24T21:51:16.000Z | 2020-04-03T09:32:59.000Z | tests/example_labeller/management/commands/populate.py | JinyongJeong/image-labelling-tool | 4620fcc73d88f8df4261c62267e768595de96ed1 | [
"MIT"
] | 22 | 2017-08-09T14:36:53.000Z | 2022-02-07T01:37:54.000Z | import os, mimetypes, json, datetime
from django.core.management.base import BaseCommand, CommandError
from django.core.files import File
from image_labelling_tool import labelling_tool
from image_labelling_tool import models as lt_models
from ... import models
class Command(BaseCommand):
help = 'Populates the image database from a directory'
def add_arguments(self, parser):
parser.add_argument('dir', type=str)
def handle(self, *args, **options):
images_dir = options['dir']
image_and_label_files = []
for filename in os.listdir(images_dir):
path = os.path.join(images_dir, filename)
if os.path.isfile(path):
mt, encoding = mimetypes.guess_type(path)
if mt is not None and mt.startswith('image/'):
image_path = path
labels_path = os.path.splitext(path)[0] + '__labels.json'
if os.path.exists(labels_path) and os.path.isfile(labels_path):
image_and_label_files.append((image_path, labels_path))
else:
image_and_label_files.append((image_path, None))
for image_path, labels_path in image_and_label_files:
if labels_path is not None:
self.stdout.write('Adding image {} with labels from {}'.format(image_path, labels_path))
wrapped_labels = json.load(open(labels_path, 'r'))
labels, complete = labelling_tool.PersistentLabelledImage._unwrap_labels(
image_path, wrapped_labels)
complete = complete if isinstance(complete, bool) else False
labels_model = lt_models.Labels(
labels_json_str=json.dumps(labels.labels_json), complete=complete,
creation_date=datetime.date.today())
labels_model.save()
else:
self.stdout.write('Adding image {}'.format(image_path))
labels_model = lt_models.Labels(creation_date=datetime.date.today())
labels_model.save()
image_model = models.ImageWithLabels(labels=labels_model)
image_model.image.save(os.path.basename(image_path),
File(open(image_path, 'rb')))
image_model.save()
| 47.877551 | 104 | 0.615942 | 274 | 2,346 | 5.043796 | 0.321168 | 0.058611 | 0.037627 | 0.052098 | 0.22576 | 0.111433 | 0.111433 | 0.063676 | 0 | 0 | 0 | 0.000604 | 0.294118 | 2,346 | 48 | 105 | 48.875 | 0.833937 | 0 | 0 | 0.093023 | 0 | 0 | 0.05243 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.139535 | 0 | 0.232558 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb17c897f38cc57a6e3c0aa92ef3453974c411f2 | 2,214 | py | Python | safespace_tests/test_safespace.py | valohai/django-safespace | 9cfe790abdced53e16fddc70b019d989db886370 | [
"MIT"
] | 4 | 2017-02-07T13:14:58.000Z | 2020-10-24T06:48:07.000Z | safespace_tests/test_safespace.py | valohai/django-safespace | 9cfe790abdced53e16fddc70b019d989db886370 | [
"MIT"
] | 4 | 2016-11-22T12:39:28.000Z | 2020-10-12T15:10:18.000Z | safespace_tests/test_safespace.py | valohai/django-safespace | 9cfe790abdced53e16fddc70b019d989db886370 | [
"MIT"
] | 1 | 2017-04-23T19:56:49.000Z | 2017-04-23T19:56:49.000Z | import json
import pytest
from django.db.utils import DatabaseError
@pytest.mark.parametrize('code', (False, True))
@pytest.mark.parametrize('ajax', (False, True))
def test_basic_usage(client, code, ajax):
response = client.get(
'/problem/',
{'code': ('oops' if code else '')},
HTTP_X_REQUESTED_WITH=('XMLHttpRequest' if ajax else 'An Unicorn'),
)
assert response.status_code == 406
assert b'woeful error' in response.content
assert (b'oops' in response.content) == bool(code)
if code:
assert response['X-Error-Code'] == 'oops'
if ajax:
data = json.loads(response.content.decode())
assert data['error'] == 'A woeful error'
if code:
assert data['code'] == 'oops'
def test_custom(client):
response = client.get('/custom/')
assert response.status_code == 406
assert b'Oopsy daisy' in response.content
def test_raised_404(client):
"""
Test that raised Http404s can be caught.
"""
response = client.get('/404/')
assert response.status_code == 406
def test_natural_404(client):
"""
Test that "natural" 404s from the router aren't caught by the middleware.
"""
response = client.get('/dsfargeg/')
assert response.status_code == 404
def test_passthrough(client):
"""
Test that exceptions that we don't want to catch are passed through.
"""
with pytest.raises(DatabaseError):
client.get('/db/', {'exc': 'db'})
def test_arbitrary_response(client):
"""
Test that exceptions may carry `response`s
"""
response = client.get('/exception-response/')
assert response.content == b'nice.'
def test_custom_template(client, settings):
"""
Test that SAFESPACE_TEMPLATE_NAMES can be used for customization.
"""
response = client.get('/problem/', {'exc': 'problem', 'code': 'foo'})
assert response.status_code == 406
assert b'a foo error occurred, boo' in response.content
def test_accept_json(client):
response = client.get('/problem/', HTTP_ACCEPT=('application/json; text/html'))
assert response.status_code == 406
assert json.loads(response.content.decode())['error'] == 'A woeful error'
| 28.384615 | 83 | 0.65402 | 281 | 2,214 | 5.060498 | 0.341637 | 0.039381 | 0.083685 | 0.101266 | 0.189873 | 0.094937 | 0.07173 | 0 | 0 | 0 | 0 | 0.018814 | 0.207769 | 2,214 | 77 | 84 | 28.753247 | 0.791904 | 0.131888 | 0 | 0.155556 | 0 | 0 | 0.152763 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.177778 | false | 0.022222 | 0.066667 | 0 | 0.244444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb1923de3d600b20a1e65b511a89bb42eed24140 | 928 | py | Python | 2017/02.py | GillesArcas/Advent_of_Code | 1f57eb1686875df2684b0d56916b1d20724e9fb9 | [
"MIT"
] | null | null | null | 2017/02.py | GillesArcas/Advent_of_Code | 1f57eb1686875df2684b0d56916b1d20724e9fb9 | [
"MIT"
] | null | null | null | 2017/02.py | GillesArcas/Advent_of_Code | 1f57eb1686875df2684b0d56916b1d20724e9fb9 | [
"MIT"
] | null | null | null | import itertools
EXAMPLES1 = (
('02-exemple1.txt', 18),
)
EXAMPLES2 = (
('02-exemple2.txt', 9),
)
INPUT = '02-input.txt'
def read_mat(fn):
mat = list()
with open(fn) as f:
for line in f:
mat.append([int(x) for x in line.split()])
return mat
def code1(mat):
return sum(max(line) - min(line) for line in mat)
def code2(mat):
sigma = 0
for line in mat:
for x, y in itertools.combinations(sorted(line, reverse=True), 2):
r = x // y
if r == x / y:
sigma += r
break
return sigma
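# For the classic puzzle example row "5 9 2 8", the only evenly dividing
# pair is (8, 2), so code2 adds 8 // 2 == 4 to the checksum for that line.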
def test(n, code, examples, myinput):
for fn, result in examples:
data = read_mat(fn)
assert code(data) == result, (data, result, code(data))
print(f'{n}>', code(read_mat(myinput)))
test(1, code1, EXAMPLES1, INPUT)
test(2, code2, EXAMPLES2, INPUT)
| 19.333333 | 75 | 0.523707 | 127 | 928 | 3.80315 | 0.425197 | 0.043478 | 0.055901 | 0.049689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037643 | 0.341595 | 928 | 47 | 76 | 19.744681 | 0.752864 | 0 | 0 | 0 | 0 | 0 | 0.052213 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 1 | 0.125 | false | 0 | 0.03125 | 0.03125 | 0.25 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb19d45881c89c8569a6dbcadd244b23ccc7639f | 40,053 | py | Python | mustache/mustache.py | sa501428/mustache | c831f736d077acd862a5559caf64bf43dde7d983 | [
"MIT"
] | null | null | null | mustache/mustache.py | sa501428/mustache | c831f736d077acd862a5559caf64bf43dde7d983 | [
"MIT"
] | null | null | null | mustache/mustache.py | sa501428/mustache | c831f736d077acd862a5559caf64bf43dde7d983 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import sys
import re
import math
import warnings
import time
import struct
from collections import defaultdict
import pandas as pd
import numpy as np
import hicstraw
import cooler
from scipy.stats import expon
from scipy.ndimage import gaussian_filter
from scipy.ndimage.filters import maximum_filter
from scipy.signal import convolve2d
import scipy.ndimage.measurements as scipy_measurements
from scipy import sparse
from statsmodels.stats.multitest import multipletests
from multiprocessing import Process, Manager
def parseBP(s):
"""
:param s: string
:return: string converted to number, taking account for kb or mb
"""
if not s:
return False
if s.isnumeric():
return int(s)
s = s.lower()
if "kb" in s:
n = s.split("kb")[0]
if not n.isnumeric():
return False
return int(n) * 1000
elif "mb" in s:
n = s.split("mb")[0]
if not n.isnumeric():
return False
return int(n) * 1000000
return False
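# A minimal sanity check for parseBP; the literals below are illustrative
# assumptions, not taken from the original file.
def _parseBP_examples():
    assert parseBP('500') == 500
    assert parseBP('5kb') == 5000
    assert parseBP('2Mb') == 2000000
    assert parseBP('oops') is False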
def parse_args(args):
parser = argparse.ArgumentParser(description="Check the help flag")
parser.add_argument("-f",
"--file",
dest="f_path",
help="REQUIRED: Contact map",
required=False)
parser.add_argument("-d",
"--distance",
dest="distFilter",
help="REQUIRED: Maximum distance (in bp) allowed between loop loci",
required=False)
parser.add_argument("-o",
"--outfile",
dest="outdir",
help="REQUIRED: Name of the output file.\
Output is a numpy binary.",
required=True)
parser.add_argument("-r",
"--resolution",
dest="resolution",
help="REQUIRED: Resolution used for the contact maps",
required=True)
parser.add_argument("-bed", "--bed", dest="bed",
help="BED file for HiC-Pro type input",
default="",
required=False)
parser.add_argument("-m", "--matrix", dest="mat",
help="MATRIX file for HiC-Pro type input",
default="",
required=False)
parser.add_argument("-b", "--biases", dest="biasfile",
help="RECOMMENDED: biases calculated by\
ICE or KR norm for each locus for contact map are read from BIASFILE",
required=False)
parser.add_argument(
"-cz",
"--chromosomeSize",
default="",
dest="chrSize_file",
help="RECOMMENDED: .hic corressponfing chromosome size file.",
required=False)
parser.add_argument(
"-norm",
"--normalization",
default=False,
dest="norm_method",
help="RECOMMENDED: Hi-C normalization method (KR, VC,...).",
required=False)
# parser.add_argument("-cb",
# '--cooler-balance',
# dest='cooler_balance',
# default=False,
# #action='store_false',
# required=False,
# help="OPTIONAL: The cooler data was normalized prior to creating the .cool file.")
# parser.set_defaults(cooler_balance=False)
parser.add_argument(
"-st",
"--sparsityThreshold",
dest="st",
type=float,
default=0.88,
help="OPTIONAL: Mustache filters out contacts in sparse areas, you can relax this for sparse datasets(i.e. -st 0.8). Default value is 0.88.",
required=False)
parser.add_argument(
"-pt",
"--pThreshold",
dest="pt",
type=float,
default=0.2,
help="OPTIONAL: P-value threshold for the results in the final output. Default is 0.2",
required=False)
parser.add_argument(
"-sz",
"--sigmaZero",
dest="s_z",
type=float,
default=1.6,
help="OPTIONAL: sigma0 value for the method. DEFAULT is 1.6. \
Experimentally chosen for 5Kb resolution",
required=False)
parser.add_argument("-oc", "--octaves", dest="octaves", default=2,
type=int,
help="OPTIONAL: Octave count for the method. \
DEFAULT is 2.",
required=False)
parser.add_argument("-i", "--iterations", dest="s", default=10,
type=int,
help="OPTIONAL: iteration count for the method. \
DEFAULT is 10. Experimentally chosen for \
5Kb resolution",
required=False)
parser.add_argument("-p", "--processes", dest="nprocesses", default=4, type=int,
help="OPTIONAL: Number of parallel processes to run. DEFAULT is 4. Increasing this will also increase the memory usage",
required=False)
# parser.add_argument("-c",
# "--changefile",
# dest="changedir",
# help="...",
# required=False,
# default="")
parser.add_argument(
"-ch",
"--chromosome",
dest="chromosome",
nargs='+',
help="REQUIRED: Specify which chromosome to run the program for. Optional for cooler files.",
default='n',
required=False)
parser.add_argument(
"-ch2",
"--chromosome2",
dest="chromosome2",
nargs='+',
help="Optional: Specify the second chromosome for interchromosomal analysis.",
default='n',
required=False)
parser.add_argument("-v",
"--verbose",
dest="verbose",
type=bool,
default=True,
help="OPTIONAL: Verbosity of the program",
required=False)
return parser.parse_args()
def kth_diag_indices(a, k):
rows, cols = np.diag_indices_from(a)
if k < 0:
return rows[-k:], cols[:k]
elif k > 0:
return rows[:-k], cols[k:]
else:
return rows, cols
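# Hypothetical demo (not part of the original module): on a 4x4 matrix,
# kth_diag_indices(a, 1) selects the first superdiagonal.
def _kth_diag_example():
    a = np.arange(16).reshape(4, 4)
    rows, cols = kth_diag_indices(a, 1)
    return a[rows, cols]  # array([ 1,  6, 11])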
def is_chr(s, c):
# if 'X' == c or 'chrX':
# return 'X' in c
# if 'Y' == c:
# return 'Y' in c
return str(c).replace('chr', '') == str(s).replace('chr', '') # re.findall("[1-9][0-9]*", str(s))
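# e.g. is_chr('chr7', '7') and is_chr('7', 'chr7') both evaluate to True,
# since the 'chr' prefix is stripped from both sides before comparison.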
def get_sep(f):
"""
:param f: file path
:return: Guesses the value separator in the file.
"""
with open(f) as file:
for line in file:
if "\t" in line:
return '\t'
if " " in line.strip():
return ' '
if "," in line:
return ','
if len(line.split(' ')) == 1:
return ' '
break
raise FileNotFoundError
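# Sketch of get_sep on a tab-separated file (the path is a placeholder):
def _get_sep_example(path='contacts_example.txt'):
    with open(path, 'w') as fh:
        fh.write('chr1\t10000\t5.0\n')
    return get_sep(path)  # '\t'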
def read_bias(f, chromosome, res):
"""
:param f: Path to the bias file
:return: Dictionary where keys are the bin coordinates and values are the bias values.
"""
d = defaultdict(lambda: 1.0)
if f:
sep = get_sep(f)
with open(f) as file:
for pos, line in enumerate(file):
line = line.strip().split(sep)
if len(line) == 3:
if is_chr(line[0], chromosome):
val = float(line[2])
if not np.isnan(val):
if val < 0.2:
d[(float(line[1]) // res)] = np.Inf
else:
d[(float(line[1]) // res)] = val
else:
d[(float(line[1]) // res)] = np.Inf
elif len(line) == 1:
val = float(line[0])
if not np.isnan(val):
if val < 0.2:
d[pos] = np.Inf
else:
d[pos] = val
else:
d[pos] = np.Inf
return d
return False
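# A bias file accepted by read_bias has either three columns
# (chromosome, position, bias) or a single bias value per line, e.g.:
#     chr1    0       1.02
#     chr1    5000    0.97
# Biases below 0.2 or NaN are stored as np.Inf so that dividing the counts
# by them effectively removes those bins downstream.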
def read_pd(f, distance_in_bp, bias, chromosome, res):
sep = get_sep(f)
df = pd.read_csv(f, sep=sep, header=None)
df.dropna(inplace=True)
if df.shape[1] == 5:
df = df[np.vectorize(is_chr)(df[0], chromosome)]
if df.shape[0] == 0:
            print('Couldn\'t read any interaction for this chromosome!')
return
df = df[np.vectorize(is_chr)(df[2], chromosome)]
df = df.loc[np.abs(df[1] - df[3]) <= ((distance_in_bp / res + 1) * res), :]
df[1] //= res
df[3] //= res
bias = read_bias(bias, chromosome, res)
if bias:
factors = np.vectorize(bias.get)(df[1], 1)
df[4] = np.divide(df[4], factors)
factors = np.vectorize(bias.get)(df[3], 1)
df[4] = np.divide(df[4], factors)
df = df.loc[df[4] > 0, :]
x = np.min(df.loc[:, [1, 3]], axis=1)
y = np.max(df.loc[:, [1, 3]], axis=1)
val = np.array(df[4])
elif df.shape[1] == 3:
df = df.loc[np.abs(df[1] - df[0]) <= ((distance_in_bp / res + 1) * res), :]
df[0] //= res
df[1] //= res
bias = read_bias(bias, chromosome, res)
if bias:
factors = np.vectorize(bias.get)(df[0], 1)
df[2] = np.divide(df[2], factors)
factors = np.vectorize(bias.get)(df[1], 1)
df[2] = np.divide(df[2], factors)
df = df.loc[df[2] > 0, :]
x = np.min(df.loc[:, [0, 1]], axis=1)
y = np.max(df.loc[:, [0, 1]], axis=1)
val = np.array(df[2])
return x, y, val
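# read_pd accepts two plain-text layouts: 5 columns
# (chr1, pos1, chr2, pos2, count) or 3 columns (pos1, pos2, count).
# Positions are turned into bin indices by integer division with res, and
# counts are divided by the bias of both anchors when a bias file is given.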
def read_hic_file(f, norm_method, CHRM_SIZE, distance_in_bp, chr1, chr2, res):
"""
:param f: .hic file path
:param chr: Which chromosome to read the file for
:param res: Resolution to extract information from
    :return: bin coordinates x, y and contact values val as numpy arrays
"""
if not CHRM_SIZE:
hic = hicstraw.HiCFile(f)
chromosomes = hic.getChromosomes()
chrSize_in_bp = {}
for i in range(1, len(chromosomes)):
chrSize_in_bp["chr" + str(chromosomes[i].name).replace("chr", '')] = chromosomes[i].length
CHRM_SIZE = chrSize_in_bp["chr" + chr1.replace("chr", '')]
CHUNK_SIZE = max(2 * distance_in_bp / res, 2000)
start = 0
end = min(CHRM_SIZE, CHUNK_SIZE * res) # CHUNK_SIZE*res
result = []
val = []
while start < CHRM_SIZE:
print(int(start), int(end))
if not norm_method:
temp = hicstraw.straw("observed", "KR", f, str(chr1) + ":" + str(int(start)) + ":" + str(int(end)),
str(chr2) + ":" + str(int(start)) + ":" + str(int(end)), "BP", res)
else:
temp = hicstraw.straw("observed", str(norm_method), f,
str(chr1) + ":" + str(int(start)) + ":" + str(int(end)),
str(chr2) + ":" + str(int(start)) + ":" + str(int(end)), "BP", res)
if len(temp) == 0:
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
continue
        if result == []:
            # keep three parallel column lists (x bins, y bins, counts) so
            # that later chunks can extend result[0], result[1], result[2]
            result = [[int(record.binX) for record in temp],
                      [int(record.binY) for record in temp],
                      [record.counts for record in temp]]
            prev_block = set([(int(record.binX), int(record.binY), record.counts) for record in temp])
else:
cur_block = set([(int(record.binX), int(record.binY), record.counts) for record in temp])
to_add_list = list(cur_block - prev_block)
del prev_block
result[0] += [x[0] for x in to_add_list]
result[1] += [x[1] for x in to_add_list]
result[2] += [x[2] for x in to_add_list]
prev_block = cur_block
del cur_block
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
    if len(result) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
        return [], [], []
    x = np.array(result[0]) // res
y = np.array(result[1]) // res
val = np.array(result[2])
nan_indx = np.logical_or.reduce((np.isnan(result[0]), np.isnan(result[1]), np.isnan(result[2])))
x = x[~nan_indx]
y = y[~nan_indx]
val = val[~nan_indx]
x = x.astype(int)
y = y.astype(int)
if len(val) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
else:
val[np.isnan(val)] = 0
if (chr1 == chr2):
dist_f = np.logical_and(np.abs(x - y) <= distance_in_bp / res, val > 0)
x = x[dist_f]
y = y[dist_f]
val = val[dist_f]
    if len(val) > 0:
return x, y, val
else:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
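# Hypothetical call (path, chromosomes and resolution are placeholders):
#     x, y, val = read_hic_file('sample.hic', 'KR', None, 2000000,
#                               'chr1', 'chr1', 5000)
# The chromosome is read in overlapping chunks of at least 2000 bins so that
# no contact within the distance filter is lost at chunk borders.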
def read_cooler(f, distance_in_bp, chr1, chr2, cooler_balance):
"""
:param f: .cool file path
:param chr: Which chromosome to read the file for
    :return: bin coordinates x, y, contact values val and the cooler resolution
"""
clr = cooler.Cooler(f)
res = clr.binsize
print(f'Your cooler data resolution is {res}')
if chr1 not in clr.chromnames or chr2 not in clr.chromnames:
raise NameError('wrong chromosome name!')
CHRM_SIZE = clr.chromsizes[chr1]
CHUNK_SIZE = max(2 * distance_in_bp / res, 2000)
start = 0
end = min(CHUNK_SIZE * res, CHRM_SIZE) # CHUNK_SIZE*res
result = []
val = []
###########################
if chr1 == chr2:
# try:
# normVec = clr.bins()['weight'].fetch(chr1)
# result = clr.matrix(balance=True,sparse=True).fetch(chr1)#as_pixels=True, join=True
while start < CHRM_SIZE:
print(int(start), int(end))
if not cooler_balance:
temp = clr.matrix(balance=True, sparse=True).fetch((chr1, int(start), int(end)))
else:
temp = clr.matrix(balance=cooler_balance, sparse=True).fetch((chr1, int(start), int(end)))
temp = sparse.triu(temp)
np.nan_to_num(temp, copy=False, nan=0, posinf=0, neginf=0)
start_in_px = int(start / res)
if len(temp.row) == 0:
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
continue
if result == []:
result += [list(start_in_px + temp.row), list(start_in_px + temp.col), list(temp.data)]
prev_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
else:
cur_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
to_add_list = list(cur_block - prev_block)
del prev_block
result[0] += [x[0] for x in to_add_list]
result[1] += [x[1] for x in to_add_list]
result[2] += [x[2] for x in to_add_list]
prev_block = cur_block
del cur_block
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
# except:
# raise NameError('Reading from the file failed!')
if len(result) == 0:
            print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], [], res
x = np.array(result[0])
y = np.array(result[1])
val = np.array(result[2])
else:
result = clr.matrix(balance=True, sparse=True).fetch(chr1, chr2)
result = sparse.triu(result)
np.nan_to_num(result, copy=False, nan=0, posinf=0, neginf=0)
x = result.row
y = result.col
val = result.data
##########################
if len(val) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], [], res
else:
val[np.isnan(val)] = 0
if (chr1 == chr2):
dist_f = np.logical_and(np.abs(x - y) <= distance_in_bp / res, val > 0)
x = x[dist_f]
y = y[dist_f]
val = val[dist_f]
# return np.array(x),np.array(y),np.array(val), res, normVec
    if len(val) > 0:
return np.array(x), np.array(y), np.array(val), res
else:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], [], res
def read_mcooler(f, distance_in_bp, chr1, chr2, res, cooler_balance):
"""
    :param f: .mcool file path
    :param chr: Which chromosome to read the file for
    :param res: Resolution to extract information from
    :return: bin coordinates x, y and contact values val as numpy arrays
"""
uri = '%s::/resolutions/%s' % (f, res)
# uri = '%s::/7' % (f)
clr = cooler.Cooler(uri)
# print(clr.bins()[:100])
if chr1 not in clr.chromnames or chr2 not in clr.chromnames:
raise NameError('wrong chromosome name!')
CHRM_SIZE = clr.chromsizes[chr1]
CHUNK_SIZE = max(2 * distance_in_bp / res, 2000)
start = 0
end = min(CHRM_SIZE, CHUNK_SIZE * res) # CHUNK_SIZE*res
result = []
val = []
if chr1 == chr2:
try:
# result = clr.matrix(balance=True,sparse=True).fetch(chr1)#as_pixels=True, join=True
while start < CHRM_SIZE:
print(int(start), int(end))
if not cooler_balance:
temp = clr.matrix(balance=True, sparse=True).fetch((chr1, int(start), int(end)))
else:
temp = clr.matrix(balance=cooler_balance, sparse=True).fetch((chr1, int(start), int(end)))
temp = sparse.triu(temp)
np.nan_to_num(temp, copy=False, nan=0, posinf=0, neginf=0)
start_in_px = int(start / res)
if len(temp.row) == 0:
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
continue
if result == []:
result += [list(start_in_px + temp.row), list(start_in_px + temp.col), list(temp.data)]
prev_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
# print('result==[]')
else:
cur_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
to_add_list = list(cur_block - prev_block)
del prev_block
result[0] += [x[0] for x in to_add_list]
result[1] += [x[1] for x in to_add_list]
result[2] += [x[2] for x in to_add_list]
prev_block = cur_block
del cur_block
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
except:
raise NameError('Reading from the file failed!')
if len(result) == 0:
            print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
x = np.array(result[0])
y = np.array(result[1])
val = np.array(result[2])
else:
result = clr.matrix(balance=True, sparse=True).fetch(chr1, chr2)
result = sparse.triu(result)
np.nan_to_num(result, copy=False, nan=0, posinf=0, neginf=0)
x = result.row
y = result.col
val = result.data
if len(val) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
else:
val[np.isnan(val)] = 0
if (chr1 == chr2):
dist_f = np.logical_and(np.abs(x - y) <= distance_in_bp / res, val > 0)
x = x[dist_f]
y = y[dist_f]
val = val[dist_f]
    if len(val) > 0:
return np.array(x), np.array(y), np.array(val)
else:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
def get_diags(map):
"""
:param map: Contact map, numpy matrix
:return: 2 Dictionaries where keys are the diagonal number and values are the mean of that diagonal in one dictionary and the std. in the other dictionary.
"""
means = {}
stds = {}
for i in range(len(map)):
diag = map.diagonal(i)
diag = diag[diag != 0]
if len(diag) > 0:
mean = np.mean(diag)
std = np.std(diag) if np.std(diag) != 0 else 1
if math.isnan(mean):
means[i] = 0
else:
means[i] = mean
if math.isnan(std):
stds[i] = 1
else:
stds[i] = std
else:
means[i] = 0
stds[i] = 1
return means, stds
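# Small demo of get_diags (hypothetical values): zeros are ignored when
# computing each diagonal's statistics, and a zero std is replaced by 1.
def _get_diags_example():
    m = np.array([[1., 2.], [0., 3.]])
    means, stds = get_diags(m)
    return means[0], stds[0]  # (2.0, 1.0): diagonal 0 holds [1, 3]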
def normalize_sparse(x, y, v, resolution, distance_in_px):
n = max(max(x), max(y)) + 1
# distance_in_px = min(distance_in_px, n)
pval_weights = []
distances = np.abs(y - x)
if (n - distance_in_px) * resolution > 2000000:
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
filter_size = int(2000000 / resolution)
for d in range(2 + distance_in_px):
indices = distances == d
vals = np.zeros(n - d)
vals[x[indices]] = v[indices] + 0.001
if vals.size == 0:
continue
std = np.std(v[indices])
mean = np.mean(v[indices])
if math.isnan(mean):
mean = 0
if math.isnan(std):
std = 1
kernel = np.ones(filter_size)
counts = np.convolve(vals != 0, kernel, mode='same')
s = np.convolve(vals, kernel, mode='same')
s2 = np.convolve(vals ** 2, kernel, mode='same')
local_var = (s2 - s ** 2 / counts) / (counts - 1)
std2 = std ** 2
np.nan_to_num(local_var, copy=False,
neginf=std2, posinf=std2, nan=std2)
local_mean = s / counts
local_mean[counts < 30] = mean
local_var[counts < 30] = std2
np.nan_to_num(local_mean, copy=False,
neginf=mean, posinf=mean, nan=mean)
local_std = np.sqrt(local_var)
vals[x[indices]] -= local_mean[x[indices]]
vals[x[indices]] /= local_std[x[indices]]
np.nan_to_num(vals, copy=False, nan=0, posinf=0, neginf=0)
vals = vals * (1 + math.log(1 + mean, 30))
pval_weights += [1 + math.log(1 + mean, 30)]
v[indices] = vals[x[indices]]
else:
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
np.nan_to_num(v, copy=False, neginf=0, posinf=0, nan=0)
distance_in_px = min(distance_in_px, n)
for d in range(distance_in_px):
indices = distances == d
std = np.std(v[indices])
mean = np.mean(v[indices])
if math.isnan(mean):
mean = 0
if math.isnan(std):
std = 1
# print(std)
v[indices] = (v[indices] - mean) / std
np.nan_to_num(v, copy=False, nan=0, posinf=0, neginf=0)
return pval_weights
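# normalize_sparse turns raw counts into per-diagonal z-scores. For long
# chromosomes it uses a sliding ~2 Mb window per diagonal (local mean/std,
# falling back to the global statistics where fewer than 30 bins are
# non-zero); otherwise it applies a plain global per-diagonal z-score.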
def inter_nrmalize_map(vals):
    # z-score normalize an interchromosomal map in place
    m = np.mean(vals)
    s = np.std(vals)
    vals -= m
    vals /= s
    np.nan_to_num(vals, copy=False, nan=0, posinf=0, neginf=0)
def mustache(c, chromosome, chromosome2, res, pval_weights, start, end, mask_size, distance_in_px, octave_values, st,
pt):
nz = np.logical_and(c != 0, np.triu(c, 4))
nz_temp = np.logical_and.reduce((c != 0, np.triu(c, 4) > 0, np.tril(c, distance_in_px) > 0))
if np.sum(nz) < 50:
return []
c[np.tril_indices_from(c, 4)] = 2
if chromosome == chromosome2:
c[np.triu_indices_from(c, k=(distance_in_px + 1))] = 2
pAll = np.ones_like(c[nz]) * 2
Scales = np.ones_like(pAll)
vAll = np.zeros_like(pAll)
s = 10
# curr_filter = 1
scales = {}
for o in octave_values:
scales[o] = {}
sigma = o
w = 2 * math.ceil(2 * sigma) + 1
t = (((w - 1) / 2) - 0.5) / sigma
Gp = gaussian_filter(c, o, truncate=t, order=0)
scales[o][1] = sigma
sigma = o * 2 ** ((2 - 1) / s)
w = 2 * math.ceil(2 * sigma) + 1
t = (((w - 1) / 2) - 0.5) / sigma
Gc = gaussian_filter(c, sigma, truncate=t, order=0)
scales[o][2] = sigma
Lp = Gp - Gc
Gp = []
sigma = o * 2 ** ((3 - 1) / s)
w = 2 * math.ceil(2 * sigma) + 1
t = (((w - 1) / 2) - 0.5) / sigma
Gn = gaussian_filter(c, sigma, truncate=t, order=0)
scales[o][3] = sigma
# Lp = Gp - Gc
Lc = Gc - Gn
locMaxP = maximum_filter(
Lp, footprint=np.ones((3, 3)), mode='constant')
locMaxC = maximum_filter(
Lc, footprint=np.ones((3, 3)), mode='constant')
for i in range(3, s + 2):
# curr_filter += 1
Gc = Gn
sigma = o * 2 ** ((i) / s)
w = 2 * math.ceil(2 * sigma) + 1
t = ((w - 1) / 2 - 0.5) / sigma
Gn = gaussian_filter(c, sigma, truncate=t, order=0)
scales[o][i + 1] = sigma
Ln = Gc - Gn
dist_params = expon.fit(np.abs(Lc[nz]))
pval = 1 - expon.cdf(np.abs(Lc[nz]), *dist_params)
locMaxN = maximum_filter(
Ln, footprint=np.ones((3, 3)), mode='constant')
willUpdate = np.logical_and \
.reduce((Lc[nz] > vAll, Lc[nz] == locMaxC[nz],
np.logical_or(Lp[nz] == locMaxP[nz],
Ln[nz] == locMaxN[nz]),
Lc[nz] > locMaxP[nz],
Lc[nz] > locMaxN[nz]))
vAll[willUpdate] = Lc[nz][willUpdate]
Scales[willUpdate] = scales[o][i]
pAll[willUpdate] = pval[willUpdate]
Lp = Lc
Lc = Ln
locMaxP = locMaxC
locMaxC = locMaxN
pFound = pAll != 2
if len(pFound) < 10000:
return []
_, pCorrect, _, _ = multipletests(pAll[pFound], method='fdr_bh')
pAll[pFound] = pCorrect
#################
# o = np.ones_like(c)
# o[nz] = pAll
# x, y = np.where(nz_temp)
# o[x,y]*=np.array(pval_weights)[y-x]
# o[x,y]/=10
# pAll = o[nz]
#################
o = np.ones_like(c)
o[nz] = pAll
sig_count = np.sum(o < pt) # change
x, y = np.unravel_index(np.argsort(o.ravel()), o.shape)
so = np.ones_like(c)
so[nz] = Scales
x = x[:sig_count]
y = y[:sig_count]
xyScales = so[x, y]
nonsparse = x != 0
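    # Sparsity filter: each candidate must have enough nonzero neighbours in
    # windows of half-width s and 2*s around it, otherwise it is discarded.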
for i in range(len(xyScales)):
s = math.ceil(xyScales[i])
c1 = np.sum(nz[x[i] - s:x[i] + s + 1, y[i] - s:y[i] + s + 1]) / \
((2 * s + 1) ** 2)
s = 2 * s
c2 = np.sum(nz[x[i] - s:x[i] + s + 1, y[i] - s:y[i] + s + 1]) / \
((2 * s + 1) ** 2)
if c1 < st or c2 < 0.6:
nonsparse[i] = False
x = x[nonsparse]
y = y[nonsparse]
if len(x) == 0:
return []
def nz_mean(vals):
return np.mean(vals[vals != 0])
def diag_mean(k, map):
return nz_mean(map[kth_diag_indices(map, k)])
if chromosome == chromosome2:
means = np.vectorize(diag_mean, excluded=['map'])(k=y - x, map=c)
passing_indices = c[x, y] > 2 * means # change
if len(passing_indices) == 0 or np.sum(passing_indices) == 0:
return []
x = x[passing_indices]
y = y[passing_indices]
label_matrix = np.zeros((np.max(y) + 2, np.max(y) + 2), dtype=np.float32)
label_matrix[x, y] = o[x, y] + 1
label_matrix[x + 1, y] = 2
label_matrix[x + 1, y + 1] = 2
label_matrix[x, y + 1] = 2
label_matrix[x - 1, y] = 2
label_matrix[x - 1, y - 1] = 2
label_matrix[x, y - 1] = 2
label_matrix[x + 1, y - 1] = 2
label_matrix[x - 1, y + 1] = 2
num_features = scipy_measurements.label(
label_matrix, output=label_matrix, structure=np.ones((3, 3)))
out = []
for label in range(1, num_features + 1):
indices = np.argwhere(label_matrix == label)
i = np.argmin(o[indices[:, 0], indices[:, 1]])
_x, _y = indices[i, 0], indices[i, 1]
out.append([_x + start, _y + start, o[_x, _y], so[_x, _y]])
return out
def regulator(f, norm_method, CHRM_SIZE, outdir, bed="",
res=5000,
sigma0=1.6,
s=10,
pt=0.1,
st=0.88,
octaves=2,
verbose=True,
nprocesses=4,
distance_filter=2000000,
bias=False,
chromosome='n',
chromosome2=None):
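    """Read and normalize the contact map, then run mustache on overlapping blocks."""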
if not chromosome2 or chromosome2 == 'n':
chromosome2 = chromosome
if (chromosome != chromosome2) and not ((('.hic' in f) or ('.cool' in f) or ('.mcool' in f))):
print(
"Interchromosomal analysis is only supported for .hic and .cool input formats.")
raise FileNotFoundError
octave_values = [sigma0 * (2 ** i) for i in range(octaves)]
distance_in_bp = distance_filter
print("Reading contact map...")
if f.endswith(".hic"):
x, y, v = read_hic_file(f, norm_method, CHRM_SIZE, distance_in_bp, chromosome, chromosome2, res)
elif f.endswith(".cool"):
x, y, v, res = read_cooler(f, distance_in_bp, chromosome, chromosome2, norm_method)
elif f.endswith(".mcool"):
x, y, v = read_mcooler(f, distance_in_bp, chromosome, chromosome2, res, norm_method)
else:
x, y, v = read_pd(f, distance_in_bp, bias, chromosome, res)
if len(v) == 0:
return []
print("Normalizing contact map...")
    distance_in_px = int(math.ceil(distance_in_bp / res))
if chromosome == chromosome2:
n = max(max(x), max(y)) + 1
pval_weights = normalize_sparse(x, y, v, res, distance_in_px)
CHUNK_SIZE = max(2 * distance_in_px, 2000)
overlap_size = distance_in_px
if n <= CHUNK_SIZE:
start = [0]
end = [n]
else:
start = [0]
end = [CHUNK_SIZE]
while end[-1] < n:
start.append(end[-1] - overlap_size)
end.append(start[-1] + CHUNK_SIZE)
end[-1] = n
start[-1] = end[-1] - CHUNK_SIZE
print("Loop calling...")
with Manager() as manager:
o = manager.list()
i = 0
processes = []
for i in range(len(start)):
                # create the current block
indx = np.logical_and.reduce((x >= start[i], x < end[i], y >= start[i], y < end[i]))
xc = x[indx] - start[i]
yc = y[indx] - start[i]
vc = v[indx]
cc = np.zeros((CHUNK_SIZE, CHUNK_SIZE))
cc[xc, yc] = vc
#
p = Process(target=process_block, args=(
i, start, end, overlap_size, cc, chromosome, chromosome2, res, pval_weights, distance_in_px,
octave_values, o, st, pt))
p.start()
processes.append(p)
if len(processes) >= nprocesses or i == (len(start) - 1):
for p in processes:
p.join()
processes = []
# o_corrected = [[e[0],e[1],e[2]/pval_weights[e[1]-e[0]],e[3]] for e in list(o)]
return list(o)
else: # interchromosomal
n1 = max(x) + 1
n2 = max(y) + 1
        inter_normalize_map(v)
def process_block(i, start, end, overlap_size, cc, chromosome, chromosome2, res, pval_weights, distance_in_px,
octave_values, o, st, pt):
print("Starting block ", i + 1, "/", len(start), "...", sep='')
if i == 0:
mask_size = -1
elif i == len(start) - 1:
mask_size = end[i - 1] - start[i]
else:
mask_size = overlap_size
loops = mustache(
cc, chromosome, chromosome2, res, pval_weights, start[i], end[i], mask_size, distance_in_px, octave_values, st,
pt)
for loop in list(loops):
if loop[0] >= start[i] + mask_size or loop[1] >= start[i] + mask_size:
o.append([loop[0], loop[1], loop[2], loop[3]])
print("Block", i + 1, "done.")
def main():
start_time = time.time()
args = parse_args(sys.argv[1:])
print("\n")
f = args.f_path
if args.bed and args.mat:
f = args.mat
if not os.path.exists(f):
print("Error: Couldn't find the specified contact files")
return
res = parseBP(args.resolution)
if not res:
print("Error: Invalid resolution")
return
CHR_LIST_FLAG = False
CHR_COOL_FLAG = False
CHR_HIC_FLAG = False
if not args.chromosome or args.chromosome == 'n':
if f.endswith(".cool") or f.endswith(".mcool"):
CHR_COOL_FLAG = True
elif f.endswith(".hic"):
CHR_HIC_FLAG = True
        elif args.chromosome and len(args.chromosome) > 1:
print("Error: For this data type you should enter only one chromosome name.")
return
else:
print("Error: Please enter the chromosome name.")
return
elif len(args.chromosome) > 1:
CHR_LIST_FLAG = True
distFilter = parseBP(args.distFilter) # change
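    # Default or clamp the distance limit to between 200 and 2000 bins (~2Mbp).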
if not distFilter:
if 200 * res >= 2000000:
distFilter = 200 * res
print("The distance limit is set to {}bp".format(200 * res))
elif 2000 * res <= 2000000:
distFilter = 2000 * res
print("The distance limit is set to {}bp".format(2000 * res))
else:
distFilter = 2000000
print("The distance limit is set to 2Mbp")
elif distFilter < 200 * res:
print("The distance limit is set to {}bp".format(200 * res))
distFilter = 200 * res
elif distFilter > 2000 * res:
print("The distance limit is set to {}bp".format(2000 * res))
distFilter = 2000 * res
elif distFilter > 2000000:
distFilter = 2000000
print("The distance limit is set to 2Mbp")
# distFilter = 4000000
chrSize_in_bp = False
if CHR_COOL_FLAG:
# extract all the chromosome names big enough to run mustache on
chr_list = []
if f.endswith(".cool"):
clr = cooler.Cooler(f)
else: # mcooler
uri = '%s::/resolutions/%s' % (f, res)
clr = cooler.Cooler(uri)
for i, chrm in enumerate(clr.chromnames):
if clr.chromsizes[i] > 1000000:
chr_list.append(chrm)
elif CHR_HIC_FLAG:
hic = hicstraw.HiCFile(f)
chromosomes = hic.getChromosomes()
chrSize_in_bp = {}
for i in range(1, len(chromosomes)):
chrSize_in_bp["chr" + str(chromosomes[i].name).replace("chr", '')] = chromosomes[i].length
else:
chr_list = args.chromosome.copy()
if (args.chromosome2 and args.chromosome2 != 'n') and (len(chr_list) != len(args.chromosome2)):
print("Error: the same number of chromosome1 and chromosome2 should be provided.")
return
elif type(args.chromosome2) == list:
chr_list2 = args.chromosome2.copy()
else:
chr_list2 = chr_list.copy()
CHRM_SIZE = False
if args.chrSize_file and (not chrSize_in_bp):
csz_file = args.chrSize_file
csz = pd.read_csv(csz_file, header=None, sep='\t')
chrSize_in_bp = {}
for i in range(csz.shape[0]):
chrSize_in_bp["chr" + str(csz.iloc[i, 0]).replace('chr', '')] = csz.iloc[i, 1]
first_chr_to_write = True
for i, (chromosome, chromosome2) in enumerate(zip(chr_list, chr_list2)):
if chrSize_in_bp:
CHRM_SIZE = chrSize_in_bp["chr" + str(chromosome).replace('chr', '')]
biasf = False
if args.biasfile:
if os.path.exists(args.biasfile):
biasf = args.biasfile
else:
print("Error: Couldn't find specified bias file")
return
o = regulator(f, args.norm_method, CHRM_SIZE, args.outdir,
bed=args.bed,
res=res,
sigma0=args.s_z,
s=args.s,
verbose=args.verbose,
pt=args.pt,
st=args.st,
distance_filter=distFilter,
nprocesses=args.nprocesses,
bias=biasf,
chromosome=chromosome,
chromosome2=chromosome2,
octaves=args.octaves)
if i == 0:
with open(args.outdir, 'w') as out_file:
out_file.write(
"BIN1_CHR\tBIN1_START\tBIN1_END\tBIN2_CHROMOSOME\tBIN2_START\tBIN2_END\tFDR\tDETECTION_SCALE\n")
if o == []:
print("{0} loops found for chrmosome={1}, fdr<{2} in {3}sec".format(len(o), chromosome, args.pt,
"%.2f" % (time.time() - start_time)))
start_time = time.time()
continue
# if first_chr_to_write:
# first_chr_to_write = False
print("{0} loops found for chrmosome={1}, fdr<{2} in {3}sec".format(len(o), chromosome, args.pt,
"%.2f" % (time.time() - start_time)))
with open(args.outdir, 'a') as out_file:
# out_file.write( "BIN1_CHR\tBIN1_START\tBIN1_END\tBIN2_CHROMOSOME\tBIN2_START\tBIN2_END\tFDR\tDETECTION_SCALE\n")
for significant in o:
out_file.write(
str(chromosome) + '\t' + str(significant[0] * res) + '\t' + str((significant[0] + 1) * res) + '\t' +
str(chromosome2) + '\t' + str(significant[1] * res) + '\t' + str(
(significant[1] + 1) * res) + '\t' + str(significant[2]) +
'\t' + str(significant[3]) + '\n')
# else:
# print("{0} loops found for chrmosome={1}, fdr<{2} in {3}sec".format(len(o),chromosome,args.pt,"%.2f" % (time.time()-old_time)))
# with open(args.outdir, 'a') as out_file:
# for significant in o:
# out_file.write(str(chromosome)+'\t' + str(significant[0]*res) + '\t' + str((significant[0]+1)*res) + '\t' +
# str(chromosome2) + '\t' + str(significant[1]*res) + '\t' + str((significant[1]+1)*res) + '\t' + str(significant[2]) +
# '\t' + str(significant[3]) + '\n')
start_time = time.time()
if __name__ == '__main__':
main()
| 36.312783 | 159 | 0.506978 | 5,151 | 40,053 | 3.830907 | 0.099786 | 0.024832 | 0.018852 | 0.017838 | 0.504738 | 0.473572 | 0.433943 | 0.410632 | 0.39401 | 0.372574 | 0 | 0.027234 | 0.358275 | 40,053 | 1,102 | 160 | 36.345735 | 0.740497 | 0.077173 | 0 | 0.425676 | 0 | 0.054054 | 0.077888 | 0.002537 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021396 | false | 0.004505 | 0.023649 | 0.003378 | 0.099099 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb1db30cde8c29ada1218746bb3ea83316ec2bbc | 5,667 | py | Python | python-backend/app/api/documents/expected/resources/expected_document_uploads.py | jeznorth/mds | bf50e56206034a8bc5f1d30a609b33982c392894 | [
"Apache-2.0"
] | null | null | null | python-backend/app/api/documents/expected/resources/expected_document_uploads.py | jeznorth/mds | bf50e56206034a8bc5f1d30a609b33982c392894 | [
"Apache-2.0"
] | null | null | null | python-backend/app/api/documents/expected/resources/expected_document_uploads.py | jeznorth/mds | bf50e56206034a8bc5f1d30a609b33982c392894 | [
"Apache-2.0"
] | null | null | null | import decimal
import uuid
import requests
import json
from datetime import datetime
from flask import request, current_app
from flask_restplus import Resource, reqparse
from werkzeug.datastructures import FileStorage
from werkzeug import exceptions
from sqlalchemy.exc import DBAPIError
from ..models.mine_expected_document import MineExpectedDocument
from ....mines.mine.models.mine import Mine
from ...expected.models.mine_expected_document import MineExpectedDocument
from ...expected.models.mine_expected_document_xref import MineExpectedDocumentXref
from ...mines.models.mine_document import MineDocument
from app.extensions import api, db
from ....utils.access_decorators import requires_any_of, MINE_CREATE, MINESPACE_PROPONENT
from ....utils.resources_mixins import UserMixin, ErrorMixin
class ExpectedDocumentUploadResource(Resource, UserMixin, ErrorMixin):
parser = reqparse.RequestParser()
@api.expect(parser)
@api.doc(
params={
'expected_document_guid':
'Required: The guid of the expected document that this upload will be satisfying.'
})
@requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
def post(self, expected_document_guid):
self.parser.add_argument('file', type=FileStorage, location='files', action='append')
self.parser.add_argument('mine_document_guid', type=str)
try:
data = self.parser.parse_args()
except exceptions.RequestEntityTooLarge:
return self.create_error_payload(
413,
f'The maximum file upload size is {current_app.config["MAX_CONTENT_LENGTH"]/1024/1024}MB please ensure all files are this size.'
), 413
expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
if not expected_document:
return self.create_error_payload(400, f'expected document not found'), 400
mine = Mine.find_by_mine_guid(str(expected_document.mine_guid))
document_category = expected_document.required_document.req_document_category.req_document_category
if data.get('mine_document_guid'):
existing_mine_doc = MineDocument.find_by_mine_document_guid(
data.get('mine_document_guid'))
if not existing_mine_doc:
return self.create_error_payload(400, 'mine_document not found'), 400
expected_document.mine_documents.append(existing_mine_doc)
db.session.commit()
result = expected_document.json()
else: #expecting a new file
if not data['file']:
return self.create_error_payload(
400, 'expecting mine_document_guid or new file, neither found'), 400
if document_category:
folder = 'mines/' + str(mine.mine_guid) + '/' + str(document_category)
pretty_folder = 'mines/' + str(mine.mine_no) + '/' + str(document_category)
else:
folder = 'mines/' + str(mine.mine_guid) + '/documents'
pretty_folder = 'mines/' + str(mine.mine_no) + '/documents'
document_manager_URL = current_app.config['DOCUMENT_MANAGER_URL'] + '/document-manager'
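            # Forward the uploaded files to the external document manager
            # service in a single multipart request, reusing the caller's
            # Authorization header.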
files = []
for file in data['file']:
files.append(('file', (file.filename, file, file.mimetype)))
args = {'folder': folder, 'pretty_folder': pretty_folder}
headers = {'Authorization': request.headers.get('Authorization')}
response = requests.post(
url=document_manager_URL, data=args, files=files, headers=headers)
json_response = response.json()
errors = json_response['errors']
document_guids = json_response['document_manager_guids']
filenames = []
try:
for key, value in document_guids.items():
doc = MineDocument(
mine_guid=expected_document.mine_guid,
document_manager_guid=key,
document_name=value,
**self.get_create_update_dict())
expected_document.mine_documents.append(doc)
db.session.add(expected_document)
filenames.append(value)
db.session.commit()
except DBAPIError:
#log the error here and return a pretty error message
db.session.rollback()
                return self.create_error_payload(500, 'An unexpected error occurred'), 500
result = {'status': 200, 'errors': errors, 'files': filenames}
return result
@requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
def delete(self, expected_document_guid=None, mine_document_guid=None):
if expected_document_guid is None or mine_document_guid is None:
return self.create_error_payload(
400, 'Must provide a expected document guid and a mine document guid.'), 400
expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
mine_document = MineDocument.find_by_mine_document_guid(mine_document_guid)
if expected_document is None or mine_document is None:
return self.create_error_payload(
400,
f'Failed to remove the document either the expected document or the mine document was not found.'
), 400
expected_document.mine_documents.remove(mine_document)
expected_document.save()
        return {'status': 200, 'message': 'The document was removed successfully.'}
| 42.609023 | 144 | 0.659608 | 636 | 5,667 | 5.633648 | 0.256289 | 0.116104 | 0.044655 | 0.041027 | 0.31175 | 0.242813 | 0.179179 | 0.088194 | 0.042981 | 0.042981 | 0 | 0.012619 | 0.258867 | 5,667 | 132 | 145 | 42.931818 | 0.840476 | 0.012705 | 0 | 0.135922 | 0 | 0.009709 | 0.146612 | 0.017522 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019417 | false | 0 | 0.174757 | 0 | 0.300971 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb1e2f46f4efe1a11a194470e636286b2007dd4c | 5,084 | py | Python | leonhard/leonhard.py | migzpogi/leonhard | 0ddc99d2be4b0039c7ca944794450d41a30e6390 | [
"MIT"
] | 2 | 2020-07-27T02:36:55.000Z | 2021-05-13T12:39:07.000Z | leonhard/leonhard.py | migzpogi/leonhard | 0ddc99d2be4b0039c7ca944794450d41a30e6390 | [
"MIT"
] | null | null | null | leonhard/leonhard.py | migzpogi/leonhard | 0ddc99d2be4b0039c7ca944794450d41a30e6390 | [
"MIT"
] | null | null | null | from collections import deque
from functools import reduce
from leonhard.helpers import raise_if_not_positive_int
def get_factors_of_positive_integer(n):
"""
Gets the factors of a positive integer n
:param int n: number to get the factors of
:return [int]: a unique list of factors of n arranged in ascending order
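    Example: get_factors_of_positive_integer(12) -> [1, 2, 3, 4, 6, 12]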
"""
try:
if n == 0:
return [0]
midpoint = int(n**0.5)
list_of_factor_pairs = ([i, n//i] for i in range(1, midpoint+1) if n % i == 0)
factors = sorted(list(set(reduce((lambda a, b: a+b), list_of_factor_pairs))))
return factors
except TypeError:
raise TypeError("Input must be >= 0.")
def generate_fibonacci_sequence(number_of_terms=2, first_term=0, second_term=1):
"""
Generates a Fibonacci sequence
:param int number_of_terms: the number of terms to be generated including the first and second
:param int first_term: first number in the sequence, must be >= 0
:param int second_term: second number in the sequence, must be >= first_term
:return [int]: Fibonacci sequence
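    Example: generate_fibonacci_sequence(6) -> [0, 1, 1, 2, 3, 5]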
"""
try:
if number_of_terms < 2:
raise ValueError("Number of terms must be >= 2")
if first_term < 0:
raise ValueError("First term must be >= 0")
if second_term < first_term:
raise ValueError("Second term must be >= first term")
sequence = [first_term, second_term]
while len(sequence) != number_of_terms:
next_number = sequence[-1] + sequence[-2]
sequence.append(next_number)
return sequence
except TypeError:
raise TypeError("Input parameters must be positive integers")
def is_prime(n):
"""
Checks if the given number n is prime or not
:param int n: positive number to be checked
:return bool: returns true if prime, else false
"""
try:
if n < 0:
raise ValueError("Input must be >= 0")
if n == 0 or n == 1:
return False
factors = get_factors_of_positive_integer(n)
if len(factors) > 2:
return False
else:
return True
except TypeError:
raise TypeError("Input must be positive integers")
def is_pythagorean_triplet(a, b, c):
"""
A Pythagorean triplet is a set of three natural numbers a < b < c for which:
a^2 + b^2 = c^2
Example: 3,4,5
3^2 + 4^2 = 5^2
9 + 16 = 25
:param int a: first number
:param int b: second number
:param int c: third number
:return bool: returns True if a,b, and c are triplets
"""
try:
if a < b < c:
if (a**2 + b**2) == c**2:
return True
else:
return False
else:
return False
except TypeError:
raise TypeError("Input must be positive integers")
def count_digits(n):
"""
Returns the number of digits of a given positive integer
:param int n: number to count the digits of
:return int: number of digits
"""
raise_if_not_positive_int(n)
return len(str(n))
def generate_collatz_sequence(n, sequence):
"""
A sequence defined by:
n -> n/2 (n is even)
n -> 3n + 1 (n is odd)
:param int n: term
:param list sequence: list that will contain the collatz sequence
:return:
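    Example: generate_collatz_sequence(6, []) -> [6, 3, 10, 5, 16, 8, 4, 2, 1]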
"""
raise_if_not_positive_int(n)
if n == 0:
raise ValueError("N must be a positive integer")
sequence.append(n)
get_next = lambda x: int(x / 2) if x % 2 == 0 else (3 * x) + 1
next_number = get_next(n)
if next_number != 1:
generate_collatz_sequence(next_number, sequence)
else:
sequence.append(1)
return sequence
def generate_cyclic_permutation(n):
"""
A permutation which shifts all elements of a set by a fixed offset, with the elements shifted off the end inserted
back at the beginning
Given 123, its cyclic permutations are: 123, 312, and 231
:param int n: number to get the permutations
:return [int]: list of cyclic permutations of n
"""
raise_if_not_positive_int(n)
deque_object = deque([char for char in str(n)])
cyclic_permutations = []
for i in range(len(deque_object)):
cyclic_permutations.append(list(deque_object))
deque_object.rotate(1)
return [int("".join(p)) for p in cyclic_permutations]
def is_triangle_number(n):
"""
A triangular number or triangle number counts objects arranged in an equilateral triangle.
0, 1, 3, 6, 10, 15, 21 ... 210, 231, etc.
    Explanation of (((8 * n) + 1) ** 0.5) % 1 == 0
* An integer x is triangular if and only if 8x + 1 is a square
* We get the square root by using `** 0.5`
* To check if it is a whole number, we use `% 1`
:param int n: number to check if it is triangular or not
:return bool: True if triangular, else False
"""
raise_if_not_positive_int(n)
if (((8 * n) + 1) ** 0.5) % 1 == 0:
return True
else:
return False
| 28.723164 | 118 | 0.61192 | 750 | 5,084 | 4.041333 | 0.212 | 0.031673 | 0.017816 | 0.029693 | 0.231607 | 0.150775 | 0.08776 | 0.038931 | 0.038931 | 0.038931 | 0 | 0.028027 | 0.29819 | 5,084 | 176 | 119 | 28.886364 | 0.821469 | 0.380606 | 0 | 0.382716 | 0 | 0 | 0.087483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098765 | false | 0 | 0.037037 | 0 | 0.308642 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb206f79923040853552b02951db676dfdf0b6c4 | 12,818 | py | Python | python/cogs/spam_blocker.py | stilte/felix | 99d9c42d3b91233f07e2db1b35b876c8e4b485e6 | [
"MIT"
] | null | null | null | python/cogs/spam_blocker.py | stilte/felix | 99d9c42d3b91233f07e2db1b35b876c8e4b485e6 | [
"MIT"
] | null | null | null | python/cogs/spam_blocker.py | stilte/felix | 99d9c42d3b91233f07e2db1b35b876c8e4b485e6 | [
"MIT"
] | null | null | null | """This is a cog for a discord.py bot.
It will provide commands to jail users for sending spam phishing links.
Commands:
    spam Commands to add/update/remove spam list entries
        ├ add add a spam link that automatically jails a user if posted
├ create create spam database
├ drop drop spam database
├ list show list of all spam links
├ remove remove a spam link that automatically jails a user if posted
├ update update/edit existing spam rule by rule id
└ who Show who created the rule
spammer
└ list last 10 spam rule breakers in desc order
Only users that have an admin role can use the commands.
"""
import re
import json
from db.config import engine, Base, async_session
from db.models.dals import SpamDAL, SpammerDAL
from discord.ext import commands, tasks
from discord import DMChannel, Embed, NotFound
class SpamBlocker(commands.Cog, name='Spam'):
def __init__(self, client):
self.client = client
self.jail_roles = self.client.config['jail_roles']
self.REPORT_CHANNEL_ID = self.client.config['report_channel']
self.JAIL_CHANNEL_ID = self.client.config['jail_channel']
self.REPORT_ROLE = self.client.config['report_role']
self.TEAM_ROLE = self.client.config['team_role']
self.spam_dict = None
# init database and tables
self.init_database.start()
self.construct_spam_dict.start()
@tasks.loop(count=1)
async def init_database(self):
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all)
@tasks.loop(count=1, reconnect=True)
async def construct_spam_dict(self):
async with async_session() as db:
async with db.begin():
scd = SpamDAL(db)
rows = await scd.get_all_spam()
self.spam_dict = {rule.regex:re.compile(rule.regex, re.I) for rule in rows}
async def cog_check(self, ctx):
return self.client.user_is_admin(ctx.author)
# ----------------------------------------------
# Helper Functions
# ----------------------------------------------
def reload_spam_dict(self):
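        """Restart the background task that rebuilds the compiled regex cache."""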
self.construct_spam_dict.stop()
self.construct_spam_dict.start()
def load_state(self):
with open("../state.json", "r") as statefile:
return json.load(statefile)
def load_perma_jail(self):
state = self.load_state()
return state.get('jailed', [])
def save_perma_jail(self, perma_jail):
state = self.load_state()
state['jailed'] = perma_jail
with open("../state.json", "w") as statefile:
return json.dump(state, statefile, indent=1)
async def send_to_jail(self, member, reason=None, permanent=True):
"""Jail a user
Arguments:
member {discord.Member} -- The Member to jail
Keyword Arguments:
reason {string} -- The Reason that will show in the
Audit Log (default: {None})
permanent {bool} -- Add the users id to the
state.json (default: {True})
Returns:
str -- Status message
"""
status = f'{member} successfully jailed'
get_role = member.guild.get_role
jail_roles = [get_role(x) for x in self.jail_roles if get_role(x)]
try:
await member.add_roles(*jail_roles, reason=reason)
except NotFound:
status = f'{member} not in guild'
if permanent:
perma_jail = self.load_perma_jail()
if member.id not in perma_jail:
perma_jail.append(member.id)
self.save_perma_jail(perma_jail)
else:
status = f'{member} is already jailed'
return status
async def post_spam_report(self, msg, matched_line):
"""Post spam report of auto jailing to report channel"""
target = self.client.get_channel(self.REPORT_CHANNEL_ID)
embed = Embed(
title='Phishing Link Detected!',
description=f'{msg.content}\nRule: `{matched_line}`',
color=0xFFFFFF
)
await target.send(
f'<@&{self.REPORT_ROLE}> I jailed a user\n'
f'User {msg.author.mention} spammed in {msg.channel.mention}',
embed=embed
)
return True
# ----------------------------------------------
# Cog Event listeners
# ----------------------------------------------
@commands.Cog.listener()
async def on_message(self, msg):
member = msg.author
if msg.author.bot:
# Dont run on any bot messages
return
if isinstance(msg.channel, DMChannel):
# Ignore DM
return
if self.client.user_is_admin(member):
# Dont jail friends on after adding a new spam link
return
if self.spam_dict and msg.channel.id != self.JAIL_CHANNEL_ID:
for regex_string, regex in self.spam_dict.items():
if regex.findall(msg.content):
await self.send_to_jail(member, reason='Sent illegal spam')
await self.post_spam_report(msg, regex_string)
async with async_session() as db:
async with db.begin():
scd = SpammerDAL(db)
await scd.add_spammer(member=member.id, regex=regex_string)
await msg.delete()
break
# ----------------------------------------------
# Spam Cog Commands
# ----------------------------------------------
@commands.group(
pass_context=True,
name='spam',
hidden=True,
invoke_without_command=True,
)
async def spam(self, ctx):
"Commands to add/remove to spam list"
await ctx.send_help('spam')
@spam.command(
name='reset'
)
async def rebuild_spam_db(self, ctx):
"""WARNING!!! this will drop all tables and recreate them"""
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
await conn.run_sync(Base.metadata.create_all)
self.spam_dict = None
await ctx.send(f'```✅ Spam Database reinitialized!```')
@spam.command(
name='add',
aliases=['new']
)
async def add_spam(self, ctx, *args):
"""Add a spam link to automatically jail a user if posted"""
regex = ' '.join((x for x in args))
member = ctx.message.author
async with async_session() as db:
async with db.begin():
scd = SpamDAL(db)
# check rule not already in database before adding.
check_dupe = await scd.check_duplicate(regex)
if check_dupe:
await ctx.send(f'```❌ Sorry {member.name}, {regex} is already in spam database!```')
return
# commit new spam rule and return updated rule set
rows = await scd.add_spam(member.id, regex)
self.spam_dict = {rule.regex:re.compile(rule.regex, re.I) for rule in rows}
embed = Embed(
color=0x13DC51,
title=f'New Phishing Rule Added',
description=f'```✅ {regex}```',
)
embed.set_footer(
text=member.name,
icon_url=member.display_avatar
)
await ctx.send(embed=embed)
@spam.command(
name='remove',
aliases=['rm']
)
async def remove_spam_item(self, ctx, _id:int):
"""Remove an item from spam list by its ID"""
member = ctx.message.author
async with async_session() as db:
async with db.begin():
scd = SpamDAL(db)
row = await scd.spam_by_id(_id)
if not row:
await ctx.send(f'```❌ Sorry {member.name}, cannot remove Rule {_id} it does not exist!```')
return
await scd.delete_spam(_id)
# reload spam dict on item removal
self.reload_spam_dict()
embed = Embed(
color=0xA0F1B9,
title=f'Rule {row.id} | Removed By {member.name}',
description=f'```❌ {row.regex}```'
)
embed.set_footer(
text=member.name,
icon_url=member.display_avatar
)
await ctx.send(embed=embed)
@spam.command(
name='update',
aliases=['mv'],
)
async def update_regex_rule(self, ctx, _id, *args):
"""Update an existing spam rule by rule ID"""
regex = ' '.join((x for x in args))
member = ctx.message.author
async with async_session() as db:
async with db.begin():
scd = SpamDAL(db)
await scd.update_spam_rule(_id, member.id, regex)
# reload spam dict on change
self.reload_spam_dict()
embed = Embed(
color=0xA0F1B9,
title=f'Rule {_id} | Updated By {member.name}',
description=f'```✅ {regex}```'
)
embed.set_footer(
text=member.name,
icon_url=member.display_avatar
)
await ctx.send(embed=embed)
@spam.command(
name='list',
aliases=['ls']
)
async def current_spam_list(self, ctx):
"""Lists all current items in the spam database"""
async with async_session() as db:
async with db.begin():
scd = SpamDAL(db)
res = await scd.get_all_spam()
NUM_SPAM = 25
NUM_LEN = 25
        all_spam = [f'{row.id:>3} | {row.regex}' for row in res]  # right-align ids
response = []
for _ in range(len(all_spam)):
response.append('\n'.join(all_spam[NUM_SPAM - NUM_LEN:NUM_SPAM]))
NUM_SPAM += NUM_LEN
for block in response:
await ctx.send(f'```{"".join(block)}```') if len(block) > 0 else None
@spam.command(
name='who',
aliases=['w']
)
async def spam_added_by(self, ctx, _id: str):
"""Show who added spam link by ID"""
member = ctx.message.author
async with async_session() as db:
async with db.begin():
scd = SpamDAL(db)
row = await scd.spam_by_id(_id)
if not row:
await ctx.send(f'```❌ Sorry {member.name}, Rule: {_id} does not exist!```')
return
user = await self.client.fetch_user(row.member)
embed = Embed(
color=0x59E685,
title=f'Rule {row.id} | Created By {user.name}',
description=f'```Rule: {row.regex}```',
)
embed.set_footer(
text=user.name,
icon_url=user.display_avatar
)
await ctx.send(embed=embed)
# ----------------------------------------------
# Spammer Cog Commands
# ----------------------------------------------
@commands.group(
pass_context=True,
name='spammer',
hidden=True,
invoke_without_command=True,
)
async def spammer(self, ctx):
"Commands to view spam rule breakers"
await ctx.send_help('spammer')
@spammer.command(
name='List',
aliases=['ls']
)
async def list_rule_breakers(self, ctx):
"""Last 10 Rule Breakers and Rule Desc order"""
async with async_session() as db:
async with db.begin():
scd = SpammerDAL(db)
rows = await scd.get_all_spammers()
NUM_SPAM = 10
NUM_LEN = 10
all_spammers = [f' {row.id} | {await self.client.fetch_user(row.member)} | {row.regex}' for row in rows]
response = []
for _ in range(len(all_spammers)):
response.append('\n'.join(all_spammers[NUM_SPAM - NUM_LEN:NUM_SPAM]))
NUM_SPAM += NUM_LEN
for block in response:
await ctx.send(f'```{"".join(block)}```') if len(block) > 0 else None
# ----------------------------------------------
# Cog Tasks
# ----------------------------------------------
def setup(client):
"""This is called when the cog is loaded via load_extension"""
client.add_cog(SpamBlocker(client))
| 35.311295 | 120 | 0.525433 | 1,507 | 12,818 | 4.353019 | 0.175182 | 0.024695 | 0.021951 | 0.02561 | 0.372256 | 0.323171 | 0.298628 | 0.265396 | 0.228049 | 0.228049 | 0 | 0.004619 | 0.341239 | 12,818 | 362 | 121 | 35.40884 | 0.770488 | 0.12147 | 0 | 0.388235 | 0 | 0.003922 | 0.104671 | 0.011726 | 0 | 0 | 0.003845 | 0 | 0 | 1 | 0.023529 | false | 0.007843 | 0.023529 | 0 | 0.098039 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb22f63a11406a88783cd2680cfa3def119f6475 | 4,277 | py | Python | server.py | mthpower/async_tcp | 3c03a89594b897223a4111000fc092022de882b9 | [
"MIT"
] | null | null | null | server.py | mthpower/async_tcp | 3c03a89594b897223a4111000fc092022de882b9 | [
"MIT"
] | null | null | null | server.py | mthpower/async_tcp | 3c03a89594b897223a4111000fc092022de882b9 | [
"MIT"
] | null | null | null | # Echo server program
from select import select
import socket
HOST = ''
PORT = 50007
# Sockets are not meant to be inherited, so we have to key into this by
# file descriptor number to be able to get retrieve the BufferedSocket.
SOCKETS = {}
class BufferedSocket(object):
"""Wrapper around a socket to hold a "buffer"."""
buffer = b''
closed = False
def __init__(self, sock, addr):
self.sock = sock
self.addr = addr
self.fileno = sock.fileno()
@property
def readable(self):
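        """Poll (timeout=0) whether the socket has data ready to read."""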
read, _, __ = select([self.sock], [], [], 0)
return bool(read)
@property
def writeable(self):
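        """Poll (timeout=0) whether the socket can be written without blocking."""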
_, write, __ = select([], [self.sock], [], 0)
return bool(write)
@property
def has_error(self):
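        """Poll (timeout=0) whether the socket is in an error state."""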
_, __, error = select([], [], [self.sock], 0)
return bool(error)
def receive(self):
# Read until we would block
while self.readable:
chunk = self.sock.recv(4096)
# Check to see if the socket has been closed.
if chunk == b'':
self.closed = True
break
self.buffer += chunk
def echo(self):
# Write until we would block and
# as long as we can see a newline, keep sending.
while self.writeable and b'\n' in self.buffer:
position = self.buffer.find(b'\n')
msg = self.buffer[:position + 1]
sent = self.send(msg)
# Check to see if the client has disconnected
if sent == 0:
self.closed = True
break
self.buffer = self.buffer[sent:]
def send(self, msg):
sent = self.sock.send(msg)
return sent
def close(self):
print('Disconnected: ', self.addr)
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.closed = True
def accept(socket):
"""Accept a new connection."""
conn, addr = socket.accept()
print('Connected: ', addr)
return BufferedSocket(sock=conn, addr=addr)
def read_loop(sockets):
"""
Loop over readable sockets and see if we
can do any work on them in turn.
"""
for sock in sockets:
buff_sock = SOCKETS[sock.fileno()]
buff_sock.receive()
def write_loop(sockets):
"""
Loop over writeable sockets and see if we
can do any work on them in turn.
"""
for sock in sockets:
buff_sock = SOCKETS[sock.fileno()]
buff_sock.echo()
def closed():
"""Handle closed sockets and remove them from the loop."""
closed = [sock for sock in SOCKETS.values() if sock.closed]
for sock in closed:
sock.close()
SOCKETS.pop(sock.fileno)
def create_server_socket():
"""Special case to create our server socket and have it listen."""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen(10)
return server
def main_loop(server):
server_fileno = server.fileno()
while True:
# Grab all the sockets we know about
        read = [s.sock for s in SOCKETS.values()]
# Add our server socket to the read list
read += [server]
# Block here until *something* is ready to read from
readable, _, __ = select(read, [], [])
# First see if we can accept a new connection
readable_descriptors = [sock.fileno() for sock in readable]
# Is our server in the readable sockets list?
if server_fileno in readable_descriptors:
# accept and add the new connection to our sockets loop
new_sock = accept(server)
SOCKETS.update({new_sock.fileno: new_sock})
# Regardless of whether we accepted a new connection,
# poll (timeout=0) to see what we can work on.
read = write = [s.sock for s in SOCKETS.values()]
readable, writeable, _ = select(read, write, [], 0)
read_loop(readable)
write_loop(writeable)
closed()
if __name__ == '__main__':
server = create_server_socket()
try:
main_loop(server)
except KeyboardInterrupt:
server.shutdown(socket.SHUT_RDWR)
server.close()
for sock in SOCKETS.values():
sock.close()
| 26.899371 | 71 | 0.593173 | 549 | 4,277 | 4.528233 | 0.273224 | 0.028962 | 0.021722 | 0.025744 | 0.18222 | 0.152454 | 0.098954 | 0.098954 | 0.098954 | 0.072405 | 0 | 0.006063 | 0.305822 | 4,277 | 158 | 72 | 27.06962 | 0.831256 | 0.245032 | 0 | 0.170213 | 0 | 0 | 0.011727 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148936 | false | 0 | 0.021277 | 0 | 0.265957 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb23a67053c4b451023f962fe312c1c1d12f26ec | 1,345 | py | Python | models/minusz/make_minusz.py | jzuhone/xija | 1e423d0c48056cc4ea9e4993d28e34794c1420fa | [
"BSD-3-Clause"
] | 2 | 2016-01-05T19:20:43.000Z | 2021-06-04T08:23:08.000Z | models/minusz/make_minusz.py | jzuhone/xija | 1e423d0c48056cc4ea9e4993d28e34794c1420fa | [
"BSD-3-Clause"
] | 61 | 2015-02-24T02:27:11.000Z | 2022-03-23T13:52:15.000Z | models/minusz/make_minusz.py | jzuhone/xija | 1e423d0c48056cc4ea9e4993d28e34794c1420fa | [
"BSD-3-Clause"
] | 1 | 2016-01-04T21:08:17.000Z | 2016-01-04T21:08:17.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Replicate (mostly) the minusz TEPHIN node model. (dPs set to zero though).
env PYTHONPATH=$PWD python minusz/minusz.py
"""
import xija
import json
P_pitches = [45, 60, 90, 120, 145, 170]
P_pitches2 = [45, 60, 90, 120, 145, 171]
minusz = json.load(open('/proj/sot/ska/share/nmass/minusz/pars_minusz.json'))
sigmas = {'tephin': -10}
mdl = xija.ThermalModel(name='minusz', start='2010:001', stop='2010:002')
nodes = {}
pitch = mdl.add(xija.Pitch)
eclipse = mdl.add(xija.Eclipse)
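# For each MSID node: add the node, its pitch-dependent solar heating
# (powers pf_045..pf_170 from the pars file) and an external heat sink.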
for msid in minusz:
pars = minusz[msid]
Ps = [pars['pf_{0:03d}'.format(p)] for p in P_pitches]
nodes[msid] = mdl.add(xija.Node, msid, sigma=sigmas.get(msid, -20))
mdl.add(xija.SolarHeat, msid, pitch, eclipse, P_pitches2, Ps,
ampl=pars['p_ampl'])
mdl.add(xija.HeatSink, msid, T=pars['T_e'], tau=pars['tau_ext'])
for msid in minusz:
pars = minusz[msid]
coupled_nodes = [x for x in pars if x.startswith('tau_t')]
for parname in coupled_nodes:
mdl.add(xija.Coupling, msid, node2=parname[4:], tau=pars[parname])
#mdl.make_pars()
#mdl.make_mvals()
#mdl.make_tmal()
mdl.make()
mdl.write('minusz/minusz2.json')
# 128 ms for 180 days prediction (250 ms/year)
# Matches results from fit_nmass qualitatively well (visually compared
# residual plots).
| 30.568182 | 77 | 0.685502 | 217 | 1,345 | 4.175115 | 0.506912 | 0.039735 | 0.066225 | 0.019868 | 0.090508 | 0.064018 | 0.064018 | 0 | 0 | 0 | 0 | 0.058252 | 0.157621 | 1,345 | 43 | 78 | 31.27907 | 0.741395 | 0.267658 | 0 | 0.166667 | 0 | 0 | 0.130793 | 0.050463 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb2465c7dc2d1b6325693cd2fe4e70a7cdb9685b | 10,049 | py | Python | feediverse.py | edsu/feediverse | f3daed0bfb8a9377a9909e8b19d26090a2af2e25 | [
"MIT"
] | 35 | 2018-06-15T06:19:20.000Z | 2022-03-17T11:13:13.000Z | feediverse.py | edsu/feediverse | f3daed0bfb8a9377a9909e8b19d26090a2af2e25 | [
"MIT"
] | 23 | 2019-03-08T08:22:35.000Z | 2021-11-27T20:26:10.000Z | feediverse.py | edsu/feediverse | f3daed0bfb8a9377a9909e8b19d26090a2af2e25 | [
"MIT"
] | 9 | 2019-01-31T02:17:28.000Z | 2021-11-01T14:07:23.000Z | #!/usr/bin/env python3
import os
import re
import sys
import yaml
import codecs
import argparse
import urllib3
import dateutil
import feedparser
from bs4 import BeautifulSoup
from mastodon import Mastodon
from datetime import datetime, timezone, MINYEAR
DEFAULT_CONFIG_FILE = os.path.join("~", ".feediverse")
MAX_IMAGES = 4 # Mastodon allows attaching 4 images max.
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',)
# encoding error-handler for buggy wordpress urls
def __urlencodereplace_errors(exc):
bs = exc.object[exc.start:exc.end].encode("utf-8")
bs = b"".join(b'%%%X' % b for b in bs)
return (bs, exc.end)
codecs.register_error("urlencodereplace", __urlencodereplace_errors)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--dry-run", action="store_true",
help=("perform a trial run with no changes made: "
"don't toot, don't save config"))
parser.add_argument("-v", "--verbose", action="store_true",
help="be verbose")
parser.add_argument("-c", "--config",
help="config file to use",
default=os.path.expanduser(DEFAULT_CONFIG_FILE))
args = parser.parse_args()
config_file = args.config
if args.verbose:
print("using config file", config_file)
if not os.path.isfile(config_file):
setup(config_file)
config = read_config(config_file)
masto = Mastodon(
api_base_url=config['url'],
client_id=config['client_id'],
client_secret=config['client_secret'],
access_token=config['access_token']
)
newest_post = config['updated']
for feed in config['feeds']:
if args.verbose:
print(f"fetching {feed['url']} entries since {config['updated']}")
for entry in get_feed(feed['url'], config['updated'],
config['include_images'],
generator=feed.get('generator')):
newest_post = max(newest_post, entry['updated'])
if args.verbose:
try:
print(entry)
except UnicodeEncodeError:
# work-around for non-unicode terminals
print(dict(
(k, v.encode("utf-8") if hasattr(v, "encode") else v)
for k, v in entry.items()))
if args.dry_run:
print("trial run, not tooting ", entry["title"][:50])
continue
media_ids = []
for img in entry.get("images", []):
media = masto.media_post(img, img.headers['content-type'])
img.release_conn() # deferred from collect_images()
if not 'error' in media:
media_ids.append(media)
entry.pop("images", None)
masto.status_post(feed['template'].format(**entry)[:499],
media_ids=media_ids)
config['updated'] = newest_post.isoformat()
if args.dry_run:
print("trial run, not saving the config")
else:
if args.verbose:
print("saving the config", config_file)
save_config(config, config_file)
def save_config(config, config_file):
copy = dict(config)
with open(config_file, 'w') as fh:
fh.write(yaml.dump(copy, default_flow_style=False))
def read_config(config_file):
config = {
'updated': datetime(MINYEAR, 1, 1, 0, 0, 0, 0, timezone.utc),
'include_images': False,
}
with open(config_file) as fh:
cfg = yaml.load(fh, yaml.SafeLoader)
if 'updated' in cfg:
cfg['updated'] = dateutil.parser.parse(cfg['updated'])
config.update(cfg)
return config
def detect_generator(feed):
# For RSS the generator tag holds the URL, while for ATOM it holds the name
generator = feed.feed.get("generator", "")
if "/wordpress.org/" in generator:
return "wordpress"
elif "wordpress" == generator.lower():
return "wordpress"
return None
def get_feed(feed_url, last_update, include_images, generator=None):
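    """Yield entries newer than last_update, oldest first."""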
new_entries = 0
feed = feedparser.parse(feed_url)
if last_update:
entries = [e for e in feed.entries
if dateutil.parser.parse(e['updated']) > last_update]
else:
entries = feed.entries
entries.sort(key=lambda e: e.updated_parsed)
generator = generator or detect_generator(feed)
for entry in entries:
new_entries += 1
yield get_entry(entry, include_images, generator)
return new_entries
def collect_images(entry, generator=None):
def find_urls(part):
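        """Collect href/src URLs from <a> and <img> tags in an HTML fragment."""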
if not part:
return
soup = BeautifulSoup(part, 'html.parser')
for tag in soup.find_all(["a", "img"]):
if tag.name == "a":
url = tag.get("href")
elif tag.name == "img":
url = tag.get("src")
if url and url not in urls:
urls.append(url)
urls = []
find_urls(entry.get("summary", ""))
for c in entry.get("content", []):
find_urls(c.value)
for e in (entry.enclosures
+ [l for l in entry.links if l.get("rel") == "enclosure"]):
if (e["type"].startswith(("image/", "video/")) and
e["href"] not in urls):
urls.append(e["href"])
if generator == "wordpress":
urls = (u for u in urls if not "/wp-content/plugins/" in u)
# Work around a wordpress bug: If the filename contains an
# umlaut, this will not be encoded using %-escape, as the
# standard demands. This will break encoding in http.request()
urls = (u.encode("ascii", "urlencodereplace").decode()
for u in urls)
images = []
for url in urls:
try:
resp = http.request('GET', url, preload_content=False)
if resp.headers.get('content-type', '').startswith(("image/", "video/")):
images.append(resp)
# IMPORTANT: Need to release_conn() later!
if len(images) >= MAX_IMAGES:
break
else:
resp.release_conn()
except urllib3.exceptions.HTTPError:
# ignore http errors, maybe they should be logged?
pass
return images
def get_entry(entry, include_images, generator=None):
def cleanup(text):
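        """Strip HTML, drop read-more stubs and collapse whitespace."""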
html = BeautifulSoup(text, 'html.parser')
# Remove all elements of class read-more or read-more-*
for more in html.find_all(None, re.compile("^read-more($|-.*)")):
more.extract()
text = html.get_text()
text = re.sub('\xa0+', ' ', text)
text = re.sub(' +', ' ', text)
text = re.sub(' +\n', '\n', text)
text = re.sub('\n\n\n+', '\n\n', text, flags=re.M)
return text.strip()
hashtags = []
for tag in entry.get('tags', []):
t = tag['term'].replace(' ', '_').replace('.', '').replace('-', '')
hashtags.append('#{}'.format(t))
summary = entry.get('summary', '')
content = entry.get('content', '') or ''
if content:
content = cleanup(content[0].get('value', ''))
url = entry.id
if generator == "wordpress":
links = [l for l in entry.links if l.get("rel") == "alternate"]
if len(links) > 1:
links = [l for l in entry.links if l.get("type") == "text/html"]
if links:
url = links[0]["href"]
return {
'url': url,
'link': entry.link,
'title': cleanup(entry.title),
'summary': cleanup(summary),
'content': content,
'hashtags': ' '.join(hashtags),
'updated': dateutil.parser.parse(entry['updated']),
'images': collect_images(entry, generator) if include_images else [],
'__generator__': generator,
}
def setup(config_file):
def yes_no(question):
res = input(question + ' [y/n] ')
return res.lower() in "y1"
url = input('What is your Mastodon Instance URL? ')
have_app = yes_no('Do you have your app credentials already?')
if have_app:
name = 'feediverse'
client_id = input('What is your app\'s client id: ')
client_secret = input('What is your client secret: ')
access_token = input('access_token: ')
else:
print("Ok, I'll need a few things in order to get your access token")
name = input('app name (e.g. feediverse): ')
client_id, client_secret = Mastodon.create_app(
api_base_url=url,
client_name=name,
#scopes=['read', 'write'],
website='https://github.com/edsu/feediverse'
)
username = input('mastodon username (email): ')
password = input('mastodon password (not stored): ')
m = Mastodon(client_id=client_id, client_secret=client_secret, api_base_url=url)
access_token = m.log_in(username, password)
feed_url = input('RSS/Atom feed URL to watch: ')
old_posts = yes_no('Shall already existing entries be tooted, too?')
include_images = yes_no('Shall images be included in the toot?')
config = {
'name': name,
'url': url,
'client_id': client_id,
'client_secret': client_secret,
'access_token': access_token,
'include_images': include_images,
'feeds': [
{'url': feed_url, 'template': '{title} {url}'}
]
}
if not old_posts:
config['updated'] = datetime.now(tz=timezone.utc).isoformat()
save_config(config, config_file)
print("")
print("Your feediverse configuration has been saved to {}".format(config_file))
print("Add a line line this to your crontab to check every 15 minutes:")
print("*/15 * * * * /usr/local/bin/feediverse")
print("")
if __name__ == "__main__":
main()
| 35.889286 | 88 | 0.574883 | 1,228 | 10,049 | 4.584691 | 0.262215 | 0.031972 | 0.017407 | 0.017762 | 0.095382 | 0.074778 | 0.057016 | 0.057016 | 0.03286 | 0.03286 | 0 | 0.004342 | 0.289581 | 10,049 | 279 | 89 | 36.017921 | 0.784284 | 0.05921 | 0 | 0.118143 | 0 | 0 | 0.174981 | 0.002648 | 0 | 0 | 0 | 0.003584 | 0 | 1 | 0.050633 | false | 0.012658 | 0.050633 | 0 | 0.147679 | 0.054852 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb267354be603b27ee9c3740f32a386b1ade0dc7 | 8,710 | py | Python | khbr/KH2/SpawnManager.py | thundrio-kh/khbr | d8cdbe3f402ef5b0eb11b283573af3dd37a248dc | [
"MIT"
] | 2 | 2021-04-01T05:10:02.000Z | 2021-04-16T17:10:02.000Z | khbr/KH2/SpawnManager.py | thundrio-kh/khbr | d8cdbe3f402ef5b0eb11b283573af3dd37a248dc | [
"MIT"
] | 1 | 2022-03-20T19:13:54.000Z | 2022-03-20T19:13:54.000Z | khbr/KH2/SpawnManager.py | thundrio-kh/khbr | d8cdbe3f402ef5b0eb11b283573af3dd37a248dc | [
"MIT"
] | 1 | 2022-03-02T05:25:45.000Z | 2022-03-02T05:25:45.000Z |
import os
import yaml
from khbr.randutils import pick_boss_to_replace, pick_enemy_to_replace
from khbr._config import KH2_DIR
import random
class SpawnManager:
def __init__(self):
self.boss_enemies = [] # Gets filled as a cache
self.spawns = None
self.roommodedits = {
"ax2_99": self.ax2_99,
"ax2_40": self.ax2_40,
"ax2_50": self.ax2_50,
"stormrider_61": self.stormrider_61
}
# TODO
# I don't think this is needed
# def set_spawns(self):
# if not self.spawns:
# self.spawns = self.get_locations()
def modify_spawn(self, editname, spawnpoint):
if editname not in self.roommodedits:
print("Warning: could not find {} method for editing spawn, leaving unmodified".format(editname))
return
        self.roommodedits[editname](spawnpoint)
def ax2_99(self, spawnpoint):
        # set the characters' Y and Z values properly
sora = spawnpoint[0]["Entities"][0]
sora["PositionY"] = 14940
# track for later
bossz = float(sora["PositionZ"])
sora["PositionZ"] = float(spawnpoint[0]["Entities"][2]["PositionZ"])
riku = spawnpoint[0]["Entities"][1]
riku["PositionY"] = 14940
riku["PositionZ"] = float(spawnpoint[0]["Entities"][2]["PositionZ"])
boss = spawnpoint[0]["Entities"][2]
boss["PositionY"] = 14940
boss["PositionZ"] = bossz
def ax2_40(self, spawnpoint):
# remove the buildings
for spid in spawnpoint:
spid["Entities"] = []
def ax2_50(self, spawnpoint):
# remove the dragon
spawnpoint[0]["Entities"] = []
def stormrider_61(self, spawnpoint):
# move enemies height to the bottom
sora = spawnpoint[0]["Entities"][0]
sora["PositionY"] = 0
donald = spawnpoint[0]["Entities"][1]
donald["PositionY"] = 0
goofy = spawnpoint[0]["Entities"][2]
goofy["PositionY"] = 0
boss = spawnpoint[0]["Entities"][3]
boss["PositionY"] = 0
def apply_room_mods(self, basespawns, ardname):
roommods = {}
if "roommodedits" in basespawns:
for rm in basespawns["roommodedits"]:
existing_rm = self.getSpawnpoint(ardname, rm)
self.roommodedits[basespawns["roommodedits"][rm]](existing_rm)
roommods[rm] = existing_rm
return roommods
@staticmethod
def subtract_spawns(original_yaml, entities_to_remove):
# TODO might need another pass
if entities_to_remove:
for sp_instance in original_yaml:
toremove = []
for e in range(len(sp_instance["Entities"])):
ent = sp_instance["Entities"][e]
for etr in entities_to_remove:
if ent["ObjectId"] == etr["ObjectId"]:
if "Argument1" in etr and etr["Argument1"] != ent["Argument1"]:
continue
if "Argument2" in etr and etr["Argument2"] != ent["Argument2"]:
continue
toremove.append(e)
for e in sorted(list(set(toremove)))[::-1]:
sp_instance["Entities"].pop(e)
@staticmethod
def add_new_object(original_spawns, new_spawn_descriptor):
# adding new entity to list, defaulting all values to the first entity in the list
new_ent = dict(original_spawns["Entities"][0])
# TODO Make a unique serial for the spawnpoint?? Maybe 6xx
for attr in new_spawn_descriptor:
if attr.startswith("mod"):
baseattr = attr[3:]
new_ent[baseattr] = new_ent[baseattr] + new_spawn_descriptor[attr]
elif attr in new_ent:
new_ent[attr] = new_spawn_descriptor[attr]
# put the new entity in the existing sp_instance
original_spawns["Entities"].append(new_ent)
# set the ent index to the proper value
new_spawn_descriptor["index"] = len(original_spawns["Entities"])-1
@staticmethod
def set_object_by_id(old_spawn, new_spawn_descriptor):
for k in new_spawn_descriptor:
if k == "name":
old_spawn["ObjectId"] = new_spawn_descriptor[k]
elif k == "index":
pass
else:
old_spawn[k] = new_spawn_descriptor[k]
@staticmethod
def set_object_by_rec(old_spawn, obj):
oid = obj["obj_id"]
vrs = obj["vars"]
old_spawn["ObjectId"] = oid
old_spawn["Argument1"] = vrs[0]
old_spawn["Argument2"] = vrs[1]
@staticmethod
def getSpawnpoint(ardname, spawnpoint, altspawns={}):
if spawnpoint in altspawns.keys():
return altspawns[spawnpoint]
with open(os.path.join(KH2_DIR, "subfiles", "spawn", "ard", ardname, "{}.spawn".format(spawnpoint))) as f:
return yaml.load(f, Loader=yaml.SafeLoader)
@staticmethod
def should_replace_enemy(old_enemy_object):
if not old_enemy_object["source_replace_allowed"]:
return False
return True
@staticmethod
def should_replace_boss(old_boss, old_boss_parent, rand_seed):
if old_boss["name"] in ["Final Xemnas (Clone)", "Final Xemnas (Clone) (Data)"]:
return False # He gets removed later by subtracts, so don't replace
if not old_boss["source_replace_allowed"] and old_boss["name"] != "Seifer (2)":
return False
if rand_seed.config.bossmode == "Wild" and "onetooneonly" in old_boss["tags"]:
return False
if rand_seed.bossmapping and old_boss_parent["name"] not in rand_seed.bossmapping:
return False
return True
@staticmethod
def get_new_boss(old_boss_object, old_boss_parent, config, rand_seed, enemy_records):
# TODO SEIFER Can't be replaced here normally because it wants an enemy, so just put shadow roxas here
if old_boss_object["name"] == "Seifer (2)":
return "Shadow Roxas"
elif config.selected_boss:
return config.selected_boss
else:
new_boss_parent = None
if rand_seed.bossmapping:
new_boss_parent = rand_seed.bossmapping[old_boss_parent["name"]]
if new_boss_parent:
bosspicklist = [new_boss_parent]
elif old_boss_parent["name"] in rand_seed.data_replacements:
bosspicklist = [rand_seed.data_replacements[old_boss_parent["name"]]]
else:
bosspicklist = old_boss_parent["available"]
new_boss = pick_boss_to_replace(enemy_records, bosspicklist)
if "roxas" in old_boss_object["tags"]:
if new_boss == "Axel (Data)":
# This fight is probably not very winnable as roxas, so force to normal axel II
return "Axel II"
if "solo" in old_boss_object["tags"]:
if new_boss == "Demyx (Data)":
return "Demyx" # Actual fix would be to just mod the ai to increase the time for destroying clones
return new_boss
@staticmethod
def get_new_enemy(rand_seed, old_enemy_object):
if rand_seed.config.selected_enemy:
new_enemy = rand_seed.config.selected_enemy
elif rand_seed.config.enemymode == "Wild":
#TODO pretty sure this is broken, but also not safe to run in the game anyway
new_enemy = pick_enemy_to_replace(old_enemy_object, rand_seed.config.enemies)
elif rand_seed.enemymapping:
if old_enemy_object["name"] not in rand_seed.enemymapping:
return None # if it's not in mapping it's not enabled
new_enemy = rand_seed.enemymapping[old_enemy_object["name"]]
if rand_seed.config.bosses_replace_enemies and rand_seed.config.bosses:
chance = 0.005
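            # 0.5% chance to swap this regular enemy for a randomly chosen boss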
if random.random() < chance:
if not rand_seed.config.boss_enemies:
for boss_name in rand_seed.config.bosses:
boss = rand_seed.config.bosses[boss_name]
if boss["enabled"] and boss["can_be_enemy"]:
rand_seed.config.boss_enemies.append(boss_name)
new_enemy = random.choice(rand_seed.config.boss_enemies)
return new_enemy
@staticmethod
def getSpId(spawnpoint, idnum):
for spid in spawnpoint:
if spid["Id"] == idnum:
return spid
raise Exception("Spid not found!") | 40.138249 | 118 | 0.595982 | 1,042 | 8,710 | 4.78215 | 0.242802 | 0.038531 | 0.033715 | 0.016055 | 0.127835 | 0.057797 | 0.043347 | 0.011238 | 0 | 0 | 0 | 0.015591 | 0.307807 | 8,710 | 217 | 119 | 40.138249 | 0.810914 | 0.111022 | 0 | 0.154762 | 0 | 0 | 0.107009 | 0.0057 | 0 | 0 | 0 | 0.004608 | 0 | 1 | 0.10119 | false | 0.005952 | 0.029762 | 0 | 0.25 | 0.005952 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb2740d9bb7f96446a060bada52c3cecc2d922f3 | 66,587 | py | Python | raleigh/core/solver.py | evgueni-ovtchinnikov/raleigh | 620cff4a848cb98034671edc1ebdc6b108fe88b4 | [
"BSD-3-Clause"
] | 5 | 2019-09-25T13:45:36.000Z | 2021-05-28T15:16:51.000Z | raleigh/core/solver.py | evgueni-ovtchinnikov/raleigh | 620cff4a848cb98034671edc1ebdc6b108fe88b4 | [
"BSD-3-Clause"
] | null | null | null | raleigh/core/solver.py | evgueni-ovtchinnikov/raleigh | 620cff4a848cb98034671edc1ebdc6b108fe88b4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 United Kingdom Research and Innovation
# Author: Evgueni Ovtchinnikov (evgueni.ovtchinnikov@stfc.ac.uk)
'''
RALEIGH (RAL EIGensolvers for real symmetric and Hermitian problems) core
solver.
For advanced users only - consider using more user-friendly interfaces in
raleigh/interfaces first.
Implements a block Conjugate Gradient algorithm for the computation of
several eigenpairs (eigenvalues and corresponding eigenvectors) of real
symmetric or Hermitian problems, namely:
- standard eigenvalue problem
A x = lambda x
- generalized eigenvalue problems
A x = lambda B x
A B x = lambda x
where A and B are real symmetric or Hermitian operators, B being positive
definite.
The algorithm operates on sets of vectors (v_1, ..., v_m) encapsulated by
an abstract data type Vectors with the following methods:
---------------------------------
new_vectors(self, nv=0, dim=None)
returns a new Vectors object encapsulating nv vectors of dimension dim
if dim is not None or else the same dimension as self
---------------------------------
dimension(self):
returns the dimension of vectors encapsulated by self
---------------------------------
select(self, nv, first=0)
selects a subset of nv encapsulated vectors starting from first;
all subsequent operations on self will involve these vectors only
---------------------------------
selected(self)
returns current subset selection as tuple (first, nv)
---------------------------------
clone(self)
returns a copy of (selected part of) self
---------------------------------
append(self, other):
appends vectors from Vectors object other to those of self
---------------------------------
nvec(self)
returns the number of currently selected vectors
---------------------------------
data_type(self)
returns the data type of vectors' elements
(numpy.float32, numpy.float64, numpy.complex64 or numpy.complex128)
---------------------------------
fill_random(self):
fills vectors with random values uniformly distributed between -1 and 1
---------------------------------
copy(self, other, ind=None)
copies vectors from self to other:
if ind is None, previously selected vectors of self copied into previously
selected vectors of other (the numbers of selected vectors must coincide),
otherwise vectors specified by the array of indices ind are copied
---------------------------------
scale(self, s, multiply=False)
if multiply is True, previously selected vectors of self are multiplied by
the respective elements of numpy ndarray s, otherwise the former are divided
by the latter, skipping division for zero elements
---------------------------------
dots(self, other, transp=False)
if transp is False, returns the numpy ndarray of dot products of previously
selected vectors of self with the respective selected vectors of other,
otherwise returns a numpy ndarray, i-th element of which is the dot product
of the vector of i-th components of selected vectors of self by the vector
of i-th components of selected vectors of other (note that for complex
vectors the complex dot products are computed, i.e. complex conjugation is
applied to the components of vectors in other)
---------------------------------
dot(self, other)
returns the ndarray of shape (m, n), where m and n are the numbers of the
selected vectors in other and self respectively, containing dot products of
the selected vectors of self by those of other (again, dot products are
complex in the complex vectors case)
---------------------------------
multiply(self, q, other)
for each column q[:, j] of ndarray q, assigns the linear combination of the
selected vectors of self with coefficients q[0, j], q[1, j], ... to j-th
selected vector of other
---------------------------------
add(self, other, s, q=None)
if s is a scalar:
if q is None, adds the selected vectors of other multiplied by s to the
respective selected vectors of self, otherwise
for each column q[:, j] of ndarray q, adds the linear combination of
selected vectors of other with coefficients q[0, j], q[1, j], ...
multiplied by s to j-th selected vector of self,
otherwise adds the selected vectors of other multiplied by the respective
elements of one-dimensional ndarray s to respective selected vectors of self
(q is ignored)
---------------------------------
The folder raleigh/algebra contains three implementations of Vectors type:
(i) numpy implementation, (ii) mkl implementation (requires MKL 10.3 or later:
needs mkl_rt.dll on Windows, libmkl_rt.so on Linux), and (iii) CUDA GPU
implementation (requires CUDA-enabled GPU and NVIDIA Toolkit). These may be
used as templates for further implementations - MPI, out of core etc.
The number of wanted eigenpairs does not need to be set before calling the
core solver - instead, the user may provide an object responsible for stopping
the computation based on the data computed so far.
An object responsible for deciding whether a particular eigenpair has converged
can also be supplied by the user (default convergence criteria object is
available).
If some eigenvectors are already available, they can be passed to the core
solver, which then will compute further eigenpairs. Initial guesses to
eigenvectors may also be supplied by the user.
'''
import math
import numpy
import numpy.linalg as nla
import scipy.linalg as sla
RECORDS = 100  # length of the per-iterate eigenvalue-decrement history (see dlmd in Solver._solve)
class DefaultConvergenceCriteria:
'''
Convergence criteria to be used if not specified by the user via Options
(see Options.convergence_criteria below).
'''
def __init__(self):
self.tolerance = 1e-3
self.error = 'kinematic eigenvector error'
def set_error_tolerance(self, error, tolerance):
self.error = error
self.tolerance = tolerance
def satisfied(self, solver, i):
err = solver.convergence_data(self.error, i)
return err >= 0 and err <= self.tolerance
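# A minimal sketch of a user-supplied criteria object with the same interface,
# here stopping an eigenpair once its residual-based eigenvector error
# estimate drops below 1e-6 (the error name is parsed by
# Solver.convergence_data below):
#
#   class TightEigenvectorCriteria:
#       def satisfied(self, solver, i):
#           err = solver.convergence_data('residual eigenvector error', i)
#           return 0 <= err <= 1e-6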
class Options:
'''
Solver options.
Attributes
----------
verbosity : int
printout level
<0 : no output
            0 : error and warning messages
1 : iteration number, converged eigenvalues
2 : convergence data for all current iterates
max_iter : int
maximal number of iterations per eigenpair;
if negative, set by solver
min_iter : int
minimal number of iterations per eigenpair
block_size : int
        the number of simultaneously iterated eigenvector approximations;
if negative, set by solver
threads : int
the number of CPU threads, to be used to determine the block size if
not set by the user
sigma : float
if not None, indicates that the solver is used in shift-invert context
convergence_criteria : object
if not None, must be an object with method satisfied(self, solver, i)
that returns True if i-th approximate eigenpair converged and False
otherwise, based on the information provided by solver.convergence_data
(see below)
stopping_criteria : object
if not None, must be an object with method satisfied(self, solver)
that returns True if sufficient number of eigenpairs have been computed
and False otherwise, based on the values of solver attributes (e.g.
solver.eigenvalues) and possibly the user input, see
interfaces.partial_svd.DefaultStoppingCriteria for an example
detect_stagnation : bool
if set to True, detects the loss of convergence, i.e. impossibility to
significantly improve the accuracy of the approximation (set to False
and request very high accuracy if you want to test the solver for
numerical stability)
max_quota : float
the iterations will stop as soon as the number of computed eigenpairs
exceeds max_quota multiplied by the problem size, and the rest of
eigenpairs will be computed by scipy.linalg.eigh
'''
def __init__(self):
self.verbosity = 0
self.max_iter = -1
self.min_iter = 0
self.block_size = -1
self.threads = -1
self.sigma = None
self.convergence_criteria = None
self.stopping_criteria = None
self.detect_stagnation = True
self.max_quota = 0.75
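# Typical configuration sketch using only names defined in this module
# (the tolerance value is an example only):
#
#   opt = Options()
#   opt.verbosity = 1
#   opt.block_size = 16
#   opt.convergence_criteria = DefaultConvergenceCriteria()
#   opt.convergence_criteria.set_error_tolerance(
#       'residual eigenvector error', 1e-6)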
class EstimatedErrors:
'''
Estimated errors container.
Attributes
----------
kinematic : one-dimensional ndarray of floats
error estimates based on the convergence history
residual : one-dimensional ndarray of floats
error estimates based on the residuals
'''
def __init__(self):
self.kinematic = numpy.ndarray((0,), dtype=numpy.float32)
self.residual = numpy.ndarray((0,), dtype=numpy.float32)
def __getitem__(self, item):
return self.kinematic[item], self.residual[item]
def append(self, est):
self.kinematic = numpy.concatenate((self.kinematic, est[0, :]))
self.residual = numpy.concatenate((self.residual, est[1, :]))
def reorder(self, ind):
self.kinematic = self.kinematic[ind]
self.residual = self.residual[ind]
class Problem:
'''
Eigenvalue problem specification.
Attributes
----------
__A : object
operator A (stiffness matrix)
__B : object
operator B (mass matrix)
__type : string
problem type
'std' : standard A x = lambda x
'gen' : generalized A x = lambda B x
'pro' : generalized A B x = lambda x
'''
def __init__(self, v, A, B=None, prod=None):
self.__vector = v
self.__A = A
self.__B = B
if B is None:
self.__type = 'std'
else:
if prod is None:
self.__type = 'gen'
else:
self.__type = 'pro'
def A(self):
return self.__A
def B(self):
return self.__B
def type(self):
return self.__type[0]
def vector(self):
return self.__vector
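# Problem construction sketch: v is a Vectors object of the problem's
# dimension, and A and B are operators with an apply(x, y) method, as assumed
# throughout this module:
#
#   problem = Problem(v, A)                 # standard:    A x = lambda x
#   problem = Problem(v, A, B)              # generalized: A x = lambda B x
#   problem = Problem(v, A, B, prod=True)   # product:     A B x = lambda x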
class Solver:
'''
Eigenvalue problem solver specification.
Attributes
----------
__problem : instance of Problem
problem specification
__P : object
preconditioner
iteration : int
the current iteration number
lcon : int
the number of computed eigenpairs on the left margin of the spectrum
rcon : int
the number of computed eigenpairs on the right margin of the spectrum
eigenvalues : one-dimensional ndarray of dtype numpy.float64
converged eigenvalues
eigenvalue_errors : one-dimensional ndarray of dtype EstimatedErrors
estimated errors for computed eigenvalues
eigenvector_errors : one-dimensional ndarray of dtype EstimatedErrors
estimated errors for computed eigenvectors
residual_norms : one-dimensional ndarray of dtype numpy.float32
residual norms for computed eigenpairs
convergence_status : one-dimensional ndarray of dtype numpy.int32
computed eigenpairs convergence status
> 0 : the number of iterations taken to converge
< 0 : the negative of the number of iterations taken to stagnate
cnv : one-dimensional ndarray of dtype numpy.int32
current convergence status
0 : has not converged yet
i > 0 : converged after i iterations
i < 0 : stopped converging after i iterations
lmd : one-dimensional ndarray of dtype numpy.float64
current eigenvalue iterates
res : one-dimensional ndarray of dtype numpy.float32
current residuals
err_lmd : two-dimensional ndarray of dtype numpy.float32
error estimates for current eigenvalue iterates
err_X : two-dimensional ndarray of dtype numpy.float32
error estimates for current eigenvector iterates
'''
def __init__(self, problem):
self.__problem = problem
self.__P = None
self.iteration = 0
self.lcon = 0
self.rcon = 0
self.eigenvalues = numpy.ndarray((0,), dtype=numpy.float64)
self.eigenvalue_errors = EstimatedErrors()
self.eigenvector_errors = EstimatedErrors()
self.residual_norms = numpy.ndarray((0,), dtype=numpy.float32)
self.convergence_status = numpy.ndarray((0,), dtype=numpy.int32)
# data to be set by solver
self.eigenvectors = None
self.eigenvectors_im = None
self.block_size = None
self.cnv = None
self.lmd = None
self.res = None
self.err_lmd = None
self.err_X = None
def set_preconditioner(self, P):
self.__P = P
def problem(self):
return self.__problem
def preconditioner(self):
return self.__P
def convergence_data(self, what='residual', which=0):
'''Reports current convergence data.
Parameters
----------
what : string
convergence data to report (can be abbreviated, full names below)
which : int
for which eigenpair iterate to report
'''
if what.find('block') > -1:
'''block size
'''
return self.block_size
elif what.find('res') > -1 and what.find('vec') == -1:
'''relative residual.
WARNING: use only for the largest eigenvalues computation.
'''
max_lmd = numpy.amax(abs(self.lmd))
if self.lcon + self.rcon > 0:
max_lmd = max(max_lmd, numpy.amax(abs(self.eigenvalues)))
return self.res[which]/max_lmd
elif what.find('val') > -1:
if what.find('max') > -1:
max_lmd = numpy.amax(abs(self.lmd))
if self.lcon + self.rcon > 0:
max_lmd = max(max_lmd, numpy.amax(abs(self.eigenvalues)))
return max_lmd
if what.find('err') > -1:
err = self.err_lmd[:, which]
                if what.find('k') > -1:
'''kinematic eigenvalue error estimate.
'''
return err[0]
else:
'''residual-based eigenvalue error estimate.
'''
return err[1]
else:
'''current eigenvalue iterate.
'''
return self.lmd[which]
elif what.find('vec') > -1:
err = self.err_X[:, which]
if what.find('k') > -1:
'''kinematic eigenvector error estimate.
'''
return err[0]
else:
'''residual-based eigenvector error estimate.
'''
return err[1]
else:
raise ValueError('convergence data %s not found' % what)
def solve(self, eigenvectors, options=Options(), which=(-1, -1), \
extra=(-1, -1), init=(None, None)):
'''Main core solver routine.
Parameters
----------
eigenvectors : object of abstract Vectors type
eigenvectors container; normally empty (eigenvectors.nvec() = 0),
if not, then assumed by the solver to contain previously computed
eigenvectors of the solved problem; on return contains all computed
eigenvectors (previous and new)
options : object of type Options
solver options
which : int or tuple of two ints
if int, the number of the largest eigenvalues wanted;
if tuple, the numbers of eigenvalues wanted on the left (which[0])
            and right (which[1]) margin of the spectrum;
negative values mark unknown number of wanted eigenvalues: in this
case, the user must provide stopping_criteria
extra : tuple of two ints
numbers of extra eigenvectors corresponding to eigenvalues on the
margins of the spectrum to iterate purely for the sake of better
convergence: convergence criteria will not be applied to these
extra eigenpairs, i.e. iterations will stop when all wanted
eigenpairs converge
init : tuple of two Vectors objects
each tuple item, if not None, contains initial guesses for
eigenvectors corresponding to eigenvalues on the respective margin
of the spectrum
Returns
-------
status : int
execution status
0 : success
1 : maximal number of iterations exceeded
2 : no search directions left (bad problem data or preconditioner)
3 : some of the requested left eigenvalues may not exist
4 : some of the requested right eigenvalues may not exist
<0 : fatal error - exception thrown
'''
verb = options.verbosity
try:
l = len(which)
if l != 2:
raise ValueError\
('which must be either integer or tuple of 2 integers')
largest = False
except:
largest = True
if largest:
if which >= 0:
left = which//2
right = which - left
else:
left = -1
right = -1
else:
left = int(which[0])
right = int(which[1])
if left == 0 and right == 0:
if verb > -1:
print('No eigenpairs requested, quit')
return 0
m = int(options.block_size)
if m < 0:
m = _default_block_size(left, right, extra, init, options.threads)
else:
if (left == 0 or right == 0) and not largest:
if m < 3:
if verb > -1:
print('Block size %d too small, will use 3 instead' % m)
m = 3
else:
if m < 4:
if verb > -1:
print('Block size %d too small, will use 4 instead' % m)
m = 4
self.block_size = m
n = eigenvectors.dimension()
#output
self.iteration = 0
self.lcon = 0
self.rcon = 0
self.eigenvalues = numpy.ndarray((0,), dtype=numpy.float64)
self.eigenvalue_errors = EstimatedErrors()
self.eigenvector_errors = EstimatedErrors()
self.residual_norms = numpy.ndarray((0,), dtype=numpy.float32)
self.convergence_status = numpy.ndarray((0,), dtype=numpy.int32)
if m < n//2:
try:
status = self._solve(eigenvectors, options, which, extra, init)
if status > 1:
if verb > -1:
print('core solver return status %d' % status)
return status - 1
except _Error as err:
if verb > -1:
print('%s' % err.value)
return -1
else:
status = 1
if status == 0:
return 0 # success
Xc = eigenvectors
nc = Xc.nvec()
m = n - nc
if verb > -1:
msg = '%d eigenpairs not computed by CG, applying ' + \
'Rayleigh-Ritz procedure'
print( msg % m)
print('in the complement subspace...')
X = eigenvectors.new_vectors(m)
X.fill_random()
Y = X.new_vectors(m)
Z = X.new_vectors(m)
std = (self.__problem.type() == 's')
pro = (self.__problem.type() == 'p')
opA = self.problem().A()
A = lambda x, y: opA.apply(x, y)
opB = self.problem().B()
if opB is not None:
B = lambda x, y: opB.apply(x, y)
data_type = eigenvectors.data_type()
if nc > 0:
if not std:
BXc = eigenvectors.clone()
if nc > 0:
B(Xc, BXc)
else:
BXc = Xc
if nc > 0:
Gc = BXc.dot(Xc)
#Gci = nla.inv(Gc)
# approximate inverse of Gc
Gci = 2*numpy.identity(nc, dtype=data_type) - Gc
Q = numpy.dot(Gci, X.dot(BXc))
X.add(Xc, -1.0, Q)
Q = numpy.dot(Gci, X.dot(BXc))
X.add(Xc, -1.0, Q)
if not std:
B(X, Y)
XBX = Y.dot(X)
else:
XBX = X.dot(X)
lmd, Q = sla.eigh(-XBX)
lmd = -lmd
epsilon = 100*numpy.finfo(data_type).eps
k = numpy.sum(lmd <= epsilon*lmd[0])
if k > 0:
if verb > -1:
#print(lmd[-k], lmd[0])
                msg = 'dropping %d linearly dependent vectors ' + \
'from the Rayleigh-Ritz procedure...'
print(msg % k)
X.multiply(Q, Z)
Z.copy(X)
Y.multiply(Q, Z)
Z.copy(Y)
m -= k
X.select(m)
Y.select(m)
Z.select(m)
if not std:
B(X, Y)
XBX = Y.dot(X)
else:
XBX = X.dot(X)
if pro:
A(Y, Z)
XAX = Z.dot(Y)
else:
A(X, Z)
XAX = Z.dot(X)
lmdx, Q = sla.eigh(XAX, XBX, turbo=False, overwrite_a=True, \
overwrite_b=True)
X.multiply(Q, Z)
Z.copy(X)
eigenvectors.append(X)
self.eigenvalues = numpy.concatenate((self.eigenvalues, lmdx))
return 0
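    # End-to-end usage sketch (assumes a Vectors implementation, e.g. the
    # numpy one from raleigh/algebra, and an operator A with apply(x, y)):
    #
    #   opt = Options()
    #   v = ...                        # prototype Vectors object
    #   solver = Solver(Problem(v, A))
    #   X = v.new_vectors(0)           # empty eigenvector container
    #   status = solver.solve(X, options=opt, which=(3, 3))
    #   print(solver.eigenvalues)      # 6 eigenvalues if status == 0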
def _solve(self, eigenvectors, options, which, extra, init):
verb = options.verbosity
sigma = options.sigma
try:
l = len(which)
if l != 2:
raise ValueError\
('which must be either integer or tuple of 2 integers')
largest = False
left = int(which[0])
right = int(which[1])
except:
largest = True
left = which
right = which
m = self.block_size
if left == 0 and not largest:
r = 0.0
l = 1
elif right == 0:
r = 1.0
l = m - 1
elif left > 0 and right > 0:
r = left/(left + 1.0*right)
l = int(round(r*m))
if l < 2:
l = 2
if l > m - 2:
l = m - 2
else:
r = 0.5
l = m//2
left_ratio = r
block_size = m
left_block_size = l
extra_left = int(extra[0])
extra_right = int(extra[1])
if left >= 0:
if extra_left > 0:
left_total = left + extra_left
else:
left_total = max(left + 1, left_block_size)
if verb > 2:
print('left total: %d' % left_total)
if right >= 0:
if extra_right > 0:
right_total = right + extra_right
else:
right_total = max(right + 1, block_size - left_block_size)
if verb > 2:
print('right_total: %d' % right_total)
if verb > 0:
print('left block size %d, right block size %d' % (l, m - l))
# problem
problem = self.__problem
vector = problem.vector()
problem_type = problem.type()
std = (problem_type == 's')
gen = (problem_type == 'g')
pro = (problem_type == 'p')
data_type = vector.data_type()
epsilon = numpy.finfo(data_type).eps
single = (data_type == numpy.float32 or data_type == numpy.complex64)
# convergence data
self.cnv = numpy.zeros((m,), dtype=numpy.int32)
self.lmd = numpy.zeros((m,), dtype=numpy.float64)
self.res = -numpy.ones((m,), dtype=numpy.float32)
self.err_lmd = -numpy.ones((2, m,), dtype=numpy.float32)
self.err_X = -numpy.ones((2, m,), dtype=numpy.float32)
# convergence criteria
if options.convergence_criteria is None:
convergence_criteria = DefaultConvergenceCriteria()
else:
convergence_criteria = options.convergence_criteria
# convergence history data
iterations = numpy.zeros((m,), dtype=numpy.int32)
dlmd = numpy.zeros((m, RECORDS), dtype=numpy.float32)
dX = numpy.ones((m,), dtype=numpy.float32)
acf = numpy.ones((2, m,), dtype=numpy.float32)
cluster = numpy.zeros((2, m), dtype=numpy.int32)
# workspace
X = vector.new_vectors(m)
X.fill_random()
Y = vector.new_vectors(m)
Z = vector.new_vectors(m)
W = vector.new_vectors(m)
AX = vector.new_vectors(m)
AY = vector.new_vectors(m)
if not std:
BX = vector.new_vectors(m)
BY = vector.new_vectors(m)
else:
BX = X
BY = Y
AZ = AY
BZ = BY
# copy initial vectors if present
l = left_block_size
m = block_size
init_lX = init[0]
if init_lX is not None:
init_left = min(l, init_lX.nvec())
X.select(init_left)
init_lX.select(init_left)
init_lX.copy(X)
else:
init_left = 0
init_rX = init[1]
if init_rX is not None:
init_right = min(m - l, init_rX.nvec())
X.select(init_right, init_left)
init_rX.select(init_right)
init_rX.copy(X)
# check for zero initial vectors
X.select(m)
s = X.dots(X)
for i in range(m):
if s[i] == 0.0:
if verb > -1:
print('Zero initial guess, replacing with random')
X.select(1, i)
X.fill_random()
s[i : i + 1] = X.dots(X)
X.select(m)
s = numpy.sqrt(X.dots(X))
X.scale(s)
        # shortcuts
detect_stagn = options.detect_stagnation
lmd = self.lmd
res = self.res
err_lmd = self.err_lmd
err_X = self.err_X
A = lambda x, y: problem.A().apply(x, y)
opB = problem.B()
if opB is not None:
B = lambda x, y: opB.apply(x, y)
else:
B = None
opP = self.__P
if opP is not None:
P = lambda x, y: opP.apply(x, y)
else:
P = None
# constraints (already available eigenvectors e.g. from previous run)
self.eigenvectors = eigenvectors
Xc = eigenvectors
nc = Xc.nvec()
if not std:
BXc = eigenvectors.clone()
if nc > 0:
B(Xc, BXc)
self.eigenvectors_im = BXc
else:
BXc = Xc
if nc > 0:
Gc = BXc.dot(Xc)
# approximate inverse of Gc
Gci = 2*numpy.identity(nc, dtype=data_type) - Gc
# initialize
leftX = left_block_size
rightX = block_size - leftX
rec = 0
ix = 0 # first X
nx = block_size
ny = block_size
nz = 0
lmdz = None
if Xc.nvec() > 0:
# orthogonalize X to Xc
# std: X := X - Xc Xc* X
# gen: X := X - Xc BXc* X
# pro: X := X - Xc BXc* X
Q = numpy.dot(Gci, X.dot(BXc))
X.add(Xc, -1.0, Q)
if not std:
B(X, BX)
XBX = BX.dot(X)
        # do pivoted Cholesky for XBX to eliminate linearly dependent X
U = XBX.copy()
ind, dropped = _piv_chol(U, 0, 1e-2)
if dropped > 0:
if verb > 0:
print('dropped %d initial vectors out of %d' % (dropped, nx))
            # drop linearly dependent initial vectors
nx -= dropped
if nx > 0:
W.select(nx)
X.copy(W, ind)
W.copy(X)
X.select(dropped, nx)
X.fill_random()
if not std:
if nx > 0:
BX.copy(W, ind)
W.copy(BX)
BX.select(dropped, nx)
B(X, BX)
if Xc.nvec() > 0:
# orthogonalize X to Xc
Q = numpy.dot(Gci, X.dot(BXc))
Xc.multiply(Q, W)
X.add(W, -1.0)
if not std:
BXc.multiply(Q, W)
BX.add(W, -1.0)
nx = m
X.select(nx)
if not std:
BX.select(nx)
XBX = BX.dot(X)
# Rayleigh-Ritz in the initial space
if pro:
A(BX, AX)
XAX = AX.dot(BX)
else:
A(X, AX)
XAX = AX.dot(X)
lmdx, Q = sla.eigh(XAX, XBX, turbo=False)
W.select(m)
X.multiply(Q, W)
W.copy(X)
AX.multiply(Q, W)
W.copy(AX)
if not std:
BX.multiply(Q, Z)
Z.copy(BX)
# ===== main CG loop
max_iter = options.max_iter
min_iter = options.min_iter
if max_iter < 0:
max_iter = 100
self.iteration = 0
while True:
maxit = 0
if left != 0 and left_block_size > 0:
maxit = numpy.amax(iterations[:left_block_size])
if right != 0 and left_block_size < block_size:
maxit = max(maxit, numpy.amax(iterations[left_block_size:]))
if maxit >= max_iter:
if verb > -1:
msg = 'iterations limit of %d exceeded, terminating'
print(msg % max_iter)
break
if verb > 0:
print('------------- iteration %d' % self.iteration)
if pro:
XAX = AX.dot(BX)
else:
XAX = AX.dot(X)
XBX = BX.dot(X)
da = XAX.diagonal()
db = XBX.diagonal()
new_lmd = _real(da/db)
# estimate error in residual computation due to the error in
# computing AX, to be used in detecting convergence stagnation
Lmd = numpy.zeros((nx, nx))
Lmd[range(nx), range(nx)] = lmdx #new_lmd
RX = XAX - numpy.dot(XBX, Lmd)
delta_R = _norm(RX, 0)
if gen:
s = numpy.sqrt(abs(X.dots(X)))
delta_R /= s
rv_err = numpy.amax(abs(new_lmd - lmdx))/numpy.amax(abs(lmdx))
rv_no = numpy.amax(abs(XBX - numpy.eye(nx)))
if verb > 2:
print('Ritz values error: %.1e' % rv_err)
print('Ritz vectors non-orthonormality: %.1e' % rv_no)
if max(rv_err, rv_no) > math.sqrt(epsilon):
if verb > 0:
if verb < 3:
print('Ritz values error: %.1e' % rv_err)
print('Ritz vectors non-orthonormality: %.1e' % rv_no)
print('restarting...')
rec = 0
nz = 0
                sv, Q = X.svd() # renamed from 'sigma': must not clobber the shift-invert flag
if std:
XBX = X.dot(X)
else:
B(X, BX)
XBX = BX.dot(X)
if pro:
A(BX, AX)
XAX = AX.dot(BX)
else:
A(X, AX)
XAX = AX.dot(X)
#rv_no = numpy.amax(abs(XBX - numpy.eye(nx)))
#print('Ritz vectors non-orthonormality: %.1e' % rv_no)
lmdx, Q = sla.eigh(XAX, XBX, turbo=False)
W.select(nx)
X.multiply(Q, W)
W.copy(X)
AX.multiply(Q, W)
W.copy(AX)
if not std:
BX.multiply(Q, W)
W.copy(BX)
if pro:
XAX = AX.dot(BX)
else:
XAX = AX.dot(X)
if std:
XBX = X.dot(X)
else:
XBX = BX.dot(X)
rv_no = numpy.amax(abs(XBX - numpy.eye(nx)))
#print('Ritz vectors non-orthonormality: %.1e' % rv_no)
da = XAX.diagonal()
db = XBX.diagonal()
new_lmd = _real(da/db)
for i in range(nx):
iterations[ix + i] += 1
# if self.iteration > 0:
if rec > 0:
# compute eigenvalue decrements
for i in range(nx):
if iterations[ix + i]:
delta = lmd[ix + i] - new_lmd[i]
eps = math.sqrt(epsilon)
eps *= max(abs(lmd[ix + i]), abs(new_lmd[i]))
if abs(delta) > eps:
dlmd[ix + i, rec - 1] = delta
# iterations[ix + i] += 1
if verb > 3:
print('eigenvalues shifts history:')
print(numpy.array_str(dlmd[ix : ix + nx, :rec].T, \
precision=2))
lmd[ix : ix + nx] = new_lmd
# compute residuals
# std: A X - X lmd
# gen: A X - B X lmd
# pro: A B X - X lmd
W.select(nx, ix)
Y.select(nx)
AX.copy(W)
if gen:
W.add(BX, -lmd[ix : ix + nx])
else:
W.add(X, -lmd[ix : ix + nx])
if Xc.nvec() > 0:
# orthogonalize W to Xc
# std: W := W - Xc Xc* W
# gen: W := W - BXc Xc* W
# pro: W := W - Xc BXc* W
if pro:
Q = numpy.dot(Gci, W.dot(BXc))
else:
Q = numpy.dot(Gci, W.dot(Xc))
if gen:
W.add(BXc, -1.0, Q)
else:
W.add(Xc, -1.0, Q)
if pro:
W.copy(Y)
B(Y, W)
s = W.dots(Y)
else:
s = W.dots(W)
res[ix : ix + nx] = numpy.sqrt(abs(s))
# kinematic error estimates
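            # (These assume asymptotically geometric convergence: if the
            # eigenvalue decrements behave like C*q**j with a.c.f. q < 1, the
            # remaining eigenvalue error is roughly the geometric tail
            # q/(1 - q) times the last decrement, and the eigenvector error
            # contracts by about sqrt(q) per iteration.)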
if rec > 3: # sufficient history available
for i in range(nx):
if dX[ix + i] > 0.01:
err_X[0, ix + i] = -1.0
continue
k = 0
s = 0
# go through the last 1/3 of the history
for r in range(rec - 1, rec - rec//3 - 2, -1):
d = abs(dlmd[ix + i, r])
if d == 0:
break
k = k + 1
s = s + d
if k < 2 or s == 0:
continue
# estimate asymptotic convergence factor (a.c.f)
qi = abs(dlmd[ix + i, rec - 1])/s
if qi <= 0:
continue
qi = qi**(1.0/(k - 1))
acf[1, ix + i] = acf[0, ix + i]
acf[0, ix + i] = qi # a.c.f. estimate
if qi >= 1.0:
continue
                    # estimate error based on a.c.f.
theta = qi/(1 - qi)
d = theta*dlmd[ix + i, rec - 1]
err_lmd[0, ix + i] = abs(d)
qx = math.sqrt(qi)
err_X[0, ix + i] = dX[ix + i]*qx/(1 - qx)
if not gen:
# residual-based error estimates:
# asymptotic Lehmann for eigenvalues
# generalized (extended gap) Davis-Kahan for eigenvectors;
# not valid for the generalized eigenvalue problem
l = 0
for k in range(1, leftX):
i = ix + k
if dX[i] > 0.01:
break
if lmd[i] - lmd[i - 1] > res[i]:
l = k
if l > 0:
i = ix + l
t = lmd[i]
if verb > 2:
print('using left pole at lmd[%d] = %e' % (i, t))
m = block_size
for k in range(l):
i = ix + k
s = res[i]
err_lmd[1, i] = s*s/(t - lmd[i])
err_X[1, i] = s/(t - lmd[i])
l = 0
for k in range(1, rightX):
i = ix + nx - k - 1
if dX[i] > 0.01:
break
if lmd[i + 1] - lmd[i] > res[i]:
l = k
if l > 0:
i = ix + nx - l - 1
t = lmd[i]
if verb > 2:
print('using right pole at lmd[%d] = %e' % (i, t))
m = block_size
for k in range(l):
i = ix + nx - k - 1
s = res[i]
err_lmd[1, i] = s*s/(lmd[i] - t)
err_X[1, i] = s/(lmd[i] - t)
if verb > 1:
msg = ' eigenvalue residual ' + \
'estimated errors (kinematic/residual)' + \
' a.c.f.'
print(msg)
msg = ' ' + \
' eigenvalue eigenvector '
print(msg)
for i in range(block_size):
print('%14e %8.1e %8.1e / %8.1e %.1e / %.1e %.3e %d' % \
(lmd[i], res[i], \
err_lmd[0, i], err_lmd[1, i], \
abs(err_X[0, i]), abs(err_X[1, i]), \
acf[0, i], self.cnv[i]))
eps = epsilon**0.67
            lbs = left_block_size
            # defaults guard against one side of the spectrum being empty
            dlmd_min_lft = 0.0
            dlmd_min_rgt = 0.0
            if lbs > 0:
                dlmd_min_lft = eps*numpy.amax(abs(dlmd[:lbs, rec - 1]))
            if lbs < block_size:
                dlmd_min_rgt = eps*numpy.amax(abs(dlmd[lbs:, rec - 1]))
if self.iteration == 2:
dlmd_min_left = dlmd_min_lft
dlmd_min_right = dlmd_min_rgt
if self.iteration >= 2:
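                # Mark clusters of (nearly) equal eigenvalues: a stagnation
                # flag on one cluster member is only kept if the whole
                # cluster stagnates (see the cancellation logic below).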
cluster[:, :] = 0
nc = 0
for i in range(left_block_size - 1):
if abs(lmd[i + 1] - lmd[i]) <= dlmd_min_lft:
if cluster[0, i] == 0:
nc += 1
cluster[0, i] = nc
cluster[1, i] = 1
cluster[0, i + 1] = cluster[0, i]
cluster[1, i + 1] = cluster[1, i] + 1
for j in range(m - left_block_size - 1):
i = m - j - 1
if abs(lmd[i - 1] - lmd[i]) <= dlmd_min_rgt:
if cluster[0, i] == 0:
nc += 1
cluster[0, i] = nc
cluster[1, i] = 1
cluster[0, i - 1] = cluster[0, i]
cluster[1, i - 1] = cluster[1, i] + 1
if verb > 2:
print(cluster[0, :])
print(cluster[1, :])
lcon = 0
for i in range(leftX - leftX//4):
if left == 0:
break
j = self.lcon + i
k = ix + i
if sigma is not None and lmd[k] > 0:
break
it = iterations[k]
if it < min_iter:
break
dlmd1 = abs(dlmd[k, max(0, rec - 1)])
dlmd2 = abs(dlmd[k, max(0, rec - 3)])
if convergence_criteria.satisfied(self, k):
if verb > 0:
msg = 'left eigenpair %d converged' + \
' after %d iterations,\n' + \
' eigenvalue %e, error %.1e / %.1e'
it = iterations[k]
print(msg % (j, it, lmd[k], err_X[0, k], err_X[1, k]))
lcon += 1
self.cnv[k] = self.iteration + 1
elif detect_stagn and it > 2 and dlmd1 <= dlmd_min_left \
and (dlmd1 > dlmd2 or dlmd1 == 0.0):
if verb > 0:
msg = 'left eigenpair %d stagnated,\n' + \
' eigenvalue %e, error %.1e / %.1e'
print(msg % (j, lmd[k], err_X[0, k], err_X[1, k]))
lcon += 1
self.cnv[k] = -self.iteration - 1
else:
if cluster[0, k] > 0:
for l in range(k - 1, k - cluster[1, k], -1):
if self.cnv[l] == -self.iteration - 1:
self.cnv[l] = 0
lcon -= 1
if verb > 0:
msg = 'stagnation of %e cancelled'
print(msg % lmd[l])
break
rcon = 0
for i in range(rightX - rightX//4):
if right == 0:
break
j = self.rcon + i
k = ix + nx - i - 1
if sigma is not None and lmd[k] < 0:
break
it = iterations[k]
if it < min_iter:
break
dlmd1 = abs(dlmd[k, max(0, rec - 1)])
dlmd2 = abs(dlmd[k, max(0, rec - 3)])
if convergence_criteria.satisfied(self, k):
if verb > 0:
msg = 'right eigenpair %d converged' + \
' after %d iterations,\n' + \
' eigenvalue %e, residual %.1e, error %.1e / %.1e'
print(msg % (j, it, lmd[k], res[k], err_X[0, k], err_X[1, k]))
rcon += 1
self.cnv[k] = self.iteration + 1
elif detect_stagn and it > 2 and dlmd1 <= dlmd_min_right \
and (dlmd1 > dlmd2 or dlmd1 == 0.0):
if verb > 0:
msg = 'right eigenpair %d stagnated,\n' + \
' eigenvalue %e, error %.1e / %.1e'
print(msg % (j, lmd[k], err_X[0, k], err_X[1, k]))
rcon += 1
self.cnv[k] = -self.iteration - 1
else:
if cluster[0, k] > 0:
for l in range(k + 1, k + cluster[1, k]):
if self.cnv[l] == -self.iteration - 1:
self.cnv[l] = 0
rcon -= 1
if verb > 0:
msg = 'stagnation of %e cancelled'
print(msg % lmd[l])
break
if largest: # ensure the largest converge first
if lcon > 0:
i = ix + lcon - 1
j = ix + nx - rcon - 1
while lcon > 0 and abs(lmd[i]) < abs(lmd[j]):
self.cnv[i] = 0
lcon -= 1
i -= 1
if rcon > 0:
i = ix + lcon
j = ix + nx - rcon
while rcon > 0 and abs(lmd[i]) > abs(lmd[j]):
self.cnv[j] = 0
rcon -= 1
j += 1
# move converged X to Xc, update Gram matrix for Xc
ncon = Xc.nvec()
if lcon > 0:
self.eigenvalues = numpy.concatenate \
((self.eigenvalues, lmd[ix : ix + lcon]))
self.eigenvalue_errors.append(err_lmd[:, ix : ix + lcon])
self.eigenvector_errors.append(err_X[:, ix : ix + lcon])
self.residual_norms = numpy.concatenate \
((self.residual_norms, res[ix : ix + lcon]))
self.convergence_status = numpy.concatenate \
((self.convergence_status, self.cnv[ix : ix + lcon]))
X.select(lcon, ix)
                if std and ncon > 0:
                    Gu = X.dot(Xc)
Xc.append(X)
if not std:
if ncon > 0:
Gu = X.dot(BXc)
BX.select(lcon, ix)
BXc.append(BX)
if ncon < 1:
Gc = BXc.dot(Xc)
else:
Gl = BXc.dot(X)
else:
if ncon < 1:
Gc = Xc.dot(Xc)
else:
Gl = Xc.dot(X)
if ncon > 0:
Gc = numpy.concatenate((Gc, Gu), axis=1)
Gc = numpy.concatenate((Gc, Gl))
ncon += lcon
if rcon > 0:
jx = ix + nx
self.eigenvalues = numpy.concatenate \
((self.eigenvalues, lmd[jx - rcon : jx]))
self.eigenvalue_errors.append(err_lmd[:, jx - rcon : jx])
self.eigenvector_errors.append(err_X[:, jx - rcon : jx])
self.residual_norms = numpy.concatenate \
((self.residual_norms, res[jx - rcon : jx]))
self.convergence_status = numpy.concatenate \
((self.convergence_status, self.cnv[jx - rcon : jx]))
X.select(rcon, jx - rcon)
                if std and ncon > 0:
                    Gu = X.dot(Xc)
Xc.append(X)
if not std:
if ncon > 0:
Gu = X.dot(BXc)
BX.select(rcon, jx - rcon)
BXc.append(BX)
if ncon < 1:
Gc = BXc.dot(Xc)
else:
Gl = BXc.dot(X)
else:
if ncon < 1:
Gc = Xc.dot(Xc)
else:
Gl = Xc.dot(X)
if ncon > 0:
Gc = numpy.concatenate((Gc, Gu), axis=1)
Gc = numpy.concatenate((Gc, Gl))
ncon += rcon
if ncon > 0:
H = Gc - numpy.identity(ncon, dtype=data_type)
if verb > 2:
print('Gram error: %e' % nla.norm(H))
                # approximate inverse, good enough if the Gram matrix's
                # off-diagonal entries are less than sqrt(epsilon)
Gci = 2*numpy.identity(ncon, dtype=data_type) - Gc
self.lcon += lcon
self.rcon += rcon
if options.stopping_criteria is not None:
if options.stopping_criteria.satisfied(self):
return 0
if largest and right > 0 and self.lcon + self.rcon >= right:
return 0
left_converged = left >= 0 and self.lcon >= left
right_converged = right >= 0 and self.rcon >= right
if left_converged and right_converged:
return 0
if sigma is not None:
if right_converged:
i = ix + lcon
lmd_i = lmd[i]
err_i = err_lmd[0, i]
if lmd_i > 0 and err_i != -1.0 and err_i < lmd_i/4:
return 4
if left_converged:
i = ix + nx - rcon - 1
lmd_i = lmd[i]
err_i = err_lmd[0, i]
if lmd_i < 0 and err_i != -1.0 and err_i < -lmd_i/4:
return 5
lim = options.max_quota * eigenvectors.dimension()
if eigenvectors.nvec() > lim:
return 1
leftX -= lcon
rightX -= rcon
# re-select Xs, AXs, BXs accordingly
iy = ix
ny = nx
ix += lcon
nx -= lcon + rcon
X.select(nx, ix)
AX.select(nx, ix)
if not std:
BX.select(nx, ix)
XAX = XAX[lcon : lcon + nx, lcon : lcon + nx]
XBX = XBX[lcon : lcon + nx, lcon : lcon + nx]
if not pro:
if P is None:
W.copy(Y)
else:
P(W, Y)
if nz > 0:
# compute the conjugation matrix
if pro:
ZAY = W.dot(AZ)
else:
ZAY = Y.dot(AZ)
if std:
ZBY = Y.dot(Z)
else:
ZBY = Y.dot(BZ)
Num = ZAY - numpy.dot(ZBY, numpy.diag(lmd[iy : iy + ny]))
ny = Y.nvec()
Lmd = numpy.ndarray((1, ny))
Mu = numpy.ndarray((nz, 1))
Lmd[0, :] = lmd[iy : iy + ny]
Mu[:, 0] = lmdz
Den = Mu - Lmd
sy = numpy.sqrt(abs(Y.dots(Y)))
sz = numpy.sqrt(abs(Z.dots(Z)))
Beta = numpy.ndarray((nz, ny), dtype=data_type)
                for iz in range(nz):
                    for jy in range(ny): # 'jy' avoids shadowing the outer iy
                        s = sy[jy]/sz[iz]
                        if abs(Num[iz, jy]) >= 100*s*abs(Den[iz, jy]):
                            Beta[iz, jy] = 0.0
                        else:
                            Beta[iz, jy] = Num[iz, jy]/Den[iz, jy]
# conjugate search directions
AZ.select(ny)
Y.add(Z, -1.0, Beta)
if pro: # if gen or nz == 0, BY computed later
W.add(BZ, -1.0, Beta)
BY.select(ny)
W.copy(BY)
elif pro:
BY.select(ny)
W.copy(BY)
Q = Y.dot(BX)
Y.add(X, -1.0, Q)
if pro:
BY.add(BX, -1.0, Q)
if Xc.nvec() > 0:
# orthogonalize Y to Xc
# std: W := W - Xc Xc* W (not needed if P is None)
# gen: W := W - Xc BXc* W
# pro: W := W - Xc BXc* W (not needed if P is None)
Q = numpy.dot(Gci, Y.dot(BXc))
Y.add(Xc, -1.0, Q)
if pro:
BY.add(BXc, -1.0, Q)
# compute (B-)Gram matrix for (X,Y)
if std:
s = numpy.sqrt(abs(Y.dots(Y)))
Y.scale(s)
if nx > 0:
XBY = Y.dot(X)
YBY = Y.dot(Y)
else:
BY.select(Y.nvec())
if not pro: # or nz == 0:
B(Y, BY)
s = numpy.sqrt(abs(BY.dots(Y)))
Y.scale(s)
BY.scale(s)
if nx > 0:
XBY = BY.dot(X)
YBY = BY.dot(Y)
if nx > 0:
YBX = _conjugate(XBY)
GB = numpy.concatenate((XBX, YBX))
H = numpy.concatenate((XBY, YBY))
GB = numpy.concatenate((GB, H), axis=1)
else:
GB = YBY
# do pivoted Cholesky for GB
U = GB
ny = Y.nvec()
if single:
eps = 1e-3
else:
eps = 1e-8
ind, dropped = _piv_chol(U, nx, eps)
if dropped > 0:
if verb > 0:
print('dropped %d search directions out of %d' \
% (dropped, ny))
ny -= dropped
if ny < 1:
if verb > -1:
print('no search directions left, terminating')
return 3
            # re-arrange and drop linearly dependent search directions
nxy = nx + ny
U = U[:nxy, :nxy]
indy = ind[nx: nxy]
for i in range(ny):
indy[i] -= nx
W.select(ny)
Y.copy(W, indy[:ny])
Y.select(ny)
W.copy(Y)
AY.select(ny)
if not std:
BY.copy(W, indy[:ny])
BY.select(ny)
W.copy(BY)
# compute A-Gram matrix for (X,Y)
if pro:
A(BY, AY)
if nx > 0:
XAY = AY.dot(BX)
YAY = AY.dot(BY)
else:
A(Y, AY)
if nx > 0:
XAY = AY.dot(X)
YAY = AY.dot(Y)
if nx > 0:
YAX = _conjugate(XAY)
GA = numpy.concatenate((XAX, YAX))
H = numpy.concatenate((XAY, YAY))
GA = numpy.concatenate((GA, H), axis=1)
else:
GA = YAY
# solve Rayleigh-Ritz eigenproblem
G = _transform(GA, U)
YAY = G[nx : nxy, nx : nxy]
lmdy, Qy = sla.eigh(YAY)
G[:, nx : nxy] = numpy.dot(G[:, nx : nxy], Qy)
if nx > 0:
G[nx : nxy, :nx] = _conjugate(G[:nx, nx : nxy])
G[nx : nxy, nx : nxy] = numpy.dot(_conjugate(Qy), G[nx : nxy, nx : nxy])
if G.dtype.kind == 'c':
G = G.astype(numpy.complex128)
else:
G = G.astype(numpy.float64)
lmdxy, Q = sla.eigh(G, turbo = False)
lmdxy = lmdxy.astype(lmdy.dtype)
Q = Q.astype(Qy.dtype)
# estimate changes in eigenvalues and eigenvectors
lmdx = numpy.concatenate \
((lmdxy[:leftX], lmdxy[nxy - rightX:]))
lmdy = lmdxy[leftX : nxy - rightX]
QX = numpy.concatenate \
((Q[:, :leftX], Q[:, nxy - rightX:]), axis=1)
QYX = QX[nx:, :].copy()
lmdX = numpy.ndarray((1, nx))
lmdY = numpy.ndarray((ny, 1))
lmdX[0, :] = lmdx
lmdY[:, 0] = lmdy
Delta = (lmdY - lmdX)*QYX*QYX
dX[ix : ix + nx] = _norm(QYX, 0)
if rec == RECORDS:
for i in range(rec - 1):
dlmd[:, i] = dlmd[:, i + 1]
else:
rec += 1
dlmd[ix : ix + nx, rec - 1] = _real(numpy.sum(Delta, axis=0))
# select new numbers of left and right eigenpairs
if left < 0:
shift_left = ix
elif lcon > 0:
shift_left = max(0, left_total - self.lcon - leftX)
shift_left = min(shift_left, ix)
else:
shift_left = 0
if right < 0:
shift_right = block_size - ix - nx
elif rcon > 0:
shift_right = max(0, right_total - self.rcon - rightX)
shift_right = min(shift_right, block_size - ix - nx)
else:
shift_right = 0
if shift_left + shift_right > ny:
shift_left = min(shift_left, int(round(left_ratio*ny)))
shift_right = min(shift_right, ny - shift_left)
if left > 0 and lcon > 0 and self.lcon >= left:
if verb > 0:
print('left-hand side converged')
leftX_new = 0
l = left_block_size
rightX_new = min(nxy, l + rightX + shift_right)
left_block_size_new = l + rightX + shift_right - rightX_new
shift_left = -leftX - lcon
left_ratio = 0.0
ix_new = left_block_size_new
elif right > 0 and rcon > 0 and self.rcon >= right:
if verb > 0:
print('right-hand side converged')
ix_new = ix - shift_left
leftX_new = min(nxy, block_size - ix_new)
rightX_new = 0
shift_right = -rightX - rcon
left_block_size_new = ix_new + leftX_new
left_ratio = 1.0
else:
leftX_new = leftX + shift_left
rightX_new = rightX + shift_right
left_block_size_new = left_block_size
ix_new = ix - shift_left
nx_new = leftX_new + rightX_new
if verb > 2:
print('left X: was %d, now %d' % (leftX, leftX_new))
print('right X: was %d, now %d' % (rightX, rightX_new))
print('new ix %d, new nx %d, nxy %d' % (ix_new, nx_new, nxy))
# shift eigenvalues etc.
m = block_size
l = left_block_size
nl = left_block_size_new
cnv = self.cnv
if shift_left > 0:
for i in range(l - shift_left):
j = i + shift_left
cnv[i] = cnv[j]
lmd[i] = lmd[j]
res[i] = res[j]
acf[:, i] = acf[:, j]
err_lmd[:, i] = err_lmd[:, j]
dlmd[i, :] = dlmd[j, :]
err_X[:, i] = err_X[:, j]
dX[i] = dX[j]
iterations[i] = iterations[j]
if shift_left >= 0:
for i in range(l - shift_left, nl):
_reset_cnv_data \
(i, cnv, res, acf, err_lmd, dlmd, err_X, dX, iterations)
else:
for i in range(l):
_reset_cnv_data \
(i, cnv, res, acf, err_lmd, dlmd, err_X, dX, iterations)
if shift_right > 0:
for i in range(m - 1, l + shift_right - 1, -1):
j = i - shift_right
cnv[i] = cnv[j]
lmd[i] = lmd[j]
res[i] = res[j]
acf[:, i] = acf[:, j]
err_lmd[:, i] = err_lmd[:, j]
dlmd[i, :] = dlmd[j, :]
err_X[:, i] = err_X[:, j]
dX[i] = dX[j]
iterations[i] = iterations[j]
if shift_right >= 0:
for i in range(l + shift_right - 1, nl - 1, -1):
_reset_cnv_data \
(i, cnv, res, acf, err_lmd, dlmd, err_X, dX, iterations)
else:
for i in range(l, block_size):
_reset_cnv_data \
(i, cnv, res, acf, err_lmd, dlmd, err_X, dX, iterations)
# compute RR coefficients for X and 'old search directions' Z
# by re-arranging columns of Q
Q[nx : nxy, :] = numpy.dot(Qy, Q[nx : nxy, :])
Q = sla.solve_triangular(U, Q)
lmdx = numpy.concatenate \
((lmdxy[:leftX_new], lmdxy[nxy - rightX_new:]))
QX = numpy.concatenate \
((Q[:, :leftX_new], Q[:, nxy - rightX_new:]), axis=1)
lft = leftX_new
rgt = rightX_new
nz = nxy - lft - rgt
lmdz = lmdxy[lft : nxy - rgt]
QZ = Q[:, lft : nxy - rgt]
if nx > 0:
QXX = QX[:nx, :].copy()
QYX = QX[nx:, :].copy()
if nx > 0:
QXZ = QZ[:nx, :].copy()
QYZ = QZ[nx:, :].copy()
# update X and 'old search directions' Z and their A- and B-images
W.select(nx_new)
Z.select(nx_new)
if nx > 0:
AX.multiply(QXX, W)
W.add(AY, 1.0, QYX)
else:
AY.multiply(QYX, W)
if nz > 0:
Z.select(nz)
AY.multiply(QYZ, Z)
AZ.select(nz)
if nx > 0:
Z.add(AX, 1.0, QXZ)
Z.copy(AZ)
AX.select(nx_new, ix_new)
W.copy(AX)
if not std:
Z.select(nx_new)
if nx > 0:
BX.multiply(QXX, W)
W.add(BY, 1.0, QYX)
else:
BY.multiply(QYX, W)
if nz > 0:
Z.select(nz)
BY.multiply(QYZ, Z)
BZ.select(nz)
if nx > 0:
Z.add(BX, 1.0, QXZ)
Z.copy(BZ)
BX.select(nx_new, ix_new)
W.copy(BX)
else:
BZ = Z
Z.select(nx_new)
if nx > 0:
X.multiply(QXX, W)
W.add(Y, 1.0, QYX)
else:
Y.multiply(QYX, W)
if nz > 0:
Z.select(nz)
Y.multiply(QYZ, Z)
if nx > 0:
Z.add(X, 1.0, QXZ)
X.select(nx_new, ix_new)
W.copy(X)
nx = nx_new
ix = ix_new
leftX = leftX_new
rightX = rightX_new
left_block_size = left_block_size_new
self.iteration += 1
return 2
class _Error(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return '??? ' + repr(self.value)
def _conjugate(a):
if a.dtype.kind == 'c':
return a.conj().T
else:
return a.T
def _real(a):
if a.dtype.kind == 'c':
return a.real
else:
return a
def _transform(A, U):
B = sla.solve_triangular(_conjugate(U), _conjugate(A), lower=True)
A = sla.solve_triangular(_conjugate(U), _conjugate(B), lower=True)
return A
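# Note: _transform(A, U) returns inv(U)^H * A * inv(U), the congruence
# transformation that reduces the Rayleigh-Ritz pencil (G_A, G_B) with
# Cholesky factor G_B = U^H U to a standard eigenvalue problem.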
def _default_block_size(left, right, extra, init, threads):
extra_left = int(extra[0])
extra_right = int(extra[1])
init_left = 0
init_right = 0
if init[0] is not None:
init_left = int(init[0].nvec())
if init[1] is not None:
init_right = int(init[1].nvec())
if threads <= 8:
threads = 8
if left == 0 and right == 0:
return 0
if left <= 0 and right <= 0:
if init_left == 0 and init_right == 0:
if left < 0 and right < 0:
return 2*threads
else:
return threads
m = init_left + init_right
m = threads*((m - 1)//threads + 1)
if left < 0 or right < 0:
m = max(m, 2*threads)
return m
left_total = 0
right_total = 0
if left > 0:
if extra_left >= 0:
left_total = max(left + extra_left, init_left)
else:
left_total = int(math.floor(max(left, init_left)*1.2))
if right > 0:
if extra_right >= 0:
right_total = max(right + extra_right, init_right)
else:
right_total = int(math.floor(max(right, init_right)*1.2))
if left < 0:
left_total = right_total
if right < 0:
right_total = left_total
m = int(left_total + right_total)
m = threads*((m - 1)//threads + 1)
if left < 0 or right < 0:
m = max(m, 2*threads)
return m
def _reset_cnv_data(i, cnv, res, acf, err_lmd, dlmd, err_X, dX, iterations):
cnv[i] = 0
res[i] = -1.0
acf[:, i] = 1.0
err_lmd[:, i] = -1.0
dlmd[i, :] = 0
err_X[:, i] = -1.0
dX[i] = 1.0
iterations[i] = 0
def _norm(a, axis):
return numpy.apply_along_axis(nla.norm, axis, a)
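# Pivoted Cholesky factorization of a Gram matrix A, used above to detect and
# drop (nearly) linearly dependent vectors: the leading k-by-k block is
# assumed well conditioned and is factorized without pivoting; the remaining
# rows and columns are pivoted, and the factorization stops once the pivot or
# the estimated condition number of the computed factor signals rank
# deficiency. Returns the pivot order 'ind' and the number of 'dropped'
# trailing vectors.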
def _piv_chol(A, k, eps, blk=64, verb=0):
n = A.shape[0]
buff = A[0, :].copy()
ind = [i for i in range(n)]
drop_case = 0
dropped = 0
last_check = -1
if k > 0:
U = sla.cholesky(A[:k, :k])
A[:k, :k] = U.copy()
A[:k, k : n] = sla.solve_triangular \
(_conjugate(U), A[:k, k : n], lower=True)
A[k : n, :k].fill(0.0)
A[k : n, k : n] -= numpy.dot(_conjugate(A[:k, k : n]), A[:k, k : n])
l = k
for i in range(k, n):
s = numpy.diag(A[i : n, i : n]).copy()
if i > l:
t = _norm(A[l : i, i : n], 0)
s -= t*t
j = i + numpy.argmax(s)
if i != j:
buff[:] = A[i, :]
A[i, :] = A[j, :]
A[j, :] = buff
buff[:] = A[:, i]
A[:, i] = A[:, j]
A[:, j] = buff
ind[i], ind[j] = ind[j], ind[i]
if i > l:
A[i, i : n] -= numpy.dot(_conjugate(A[l : i, i]), A[l : i, i : n])
last_piv = A[i, i].real
if last_piv <= eps:
A[i : n, :].fill(0.0)
drop_case = 1
dropped = n - i
break
A[i, i] = math.sqrt(last_piv)
A[i, i + 1 : n] /= A[i, i]
A[i + 1 : n, i].fill(0.0)
if i - l == blk - 1 or i == n - 1:
last_check = i
lmin = _estimate_lmin(A[: i + 1, : i + 1])
lmax = _estimate_lmax(A[: i + 1, : i + 1])
if verb > 0:
print('%e %e %e' % (A[i, i], lmin, lmax))
if lmin/lmax <= eps:
A[i : n, :].fill(0.0)
drop_case = 2
dropped = n - i
break
if i - l == blk - 1 and i < n - 1:
j = i + 1
A[j : n, j : n] -= numpy.dot(_conjugate(A[l : j, j : n]), \
A[l : j, j : n])
l += blk
if last_check < n - 1 and drop_case != 2:
i = last_check
j = n - dropped - 1
while i < j:
m = i + (j - i + 1)//2
lmin = _estimate_lmin(A[: m + 1, : m + 1])
lmax = _estimate_lmax(A[: m + 1, : m + 1])
if verb > 0:
print('%d %e %e' % (m, lmin, lmax))
if lmin/lmax <= eps:
if j > m:
j = m
continue
else:
A[j : n, :].fill(0.0)
dropped = n - j
last_piv = A[j - 1, j - 1]**2
break
else:
i = m
continue
return ind, dropped
def _estimate_lmax(U):
U = numpy.triu(U)
return sla.norm(numpy.dot(_conjugate(U), U), ord=1)
def _estimate_lmin(U):
n = U.shape[0]
if U.dtype.kind == 'c':
tr = 2
else:
tr = 1
x = numpy.ones((n,), dtype=U.dtype)
s = numpy.dot(x, x)
    for _ in range(3):
y = sla.solve_triangular(U, x, trans=tr)
t = numpy.dot(y, y)
rq = s/t
x = sla.solve_triangular(U, y)
s = numpy.dot(x, x)
return rq
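# _estimate_lmin applies a few steps of inverse iteration with U^H U and
# returns the final Rayleigh quotient as a cheap estimate of the smallest
# eigenvalue; together with _estimate_lmax this provides the condition-number
# test used by _piv_chol.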
| 36.070964 | 86 | 0.454894 | 8,106 | 66,587 | 3.660375 | 0.087466 | 0.016076 | 0.010077 | 0.007044 | 0.390314 | 0.315999 | 0.265849 | 0.225169 | 0.202555 | 0.179805 | 0 | 0.020654 | 0.437938 | 66,587 | 1,845 | 87 | 36.090515 | 0.772137 | 0.20492 | 0 | 0.414545 | 0 | 0.000727 | 0.033758 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022545 | false | 0 | 0.002909 | 0.006545 | 0.061818 | 0.032727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb27dae5cc67b086060a1c9ce94ea921cee14d4e | 9,096 | py | Python | imaginenc/imaginenc.py | martinkozle/Imaginenc | 4f0f24e53561f696638f5db1f128fa84fa64f998 | [
"MIT"
] | 1 | 2021-08-19T11:34:50.000Z | 2021-08-19T11:34:50.000Z | imaginenc/imaginenc.py | martinkozle/Imaginenc | 4f0f24e53561f696638f5db1f128fa84fa64f998 | [
"MIT"
] | null | null | null | imaginenc/imaginenc.py | martinkozle/Imaginenc | 4f0f24e53561f696638f5db1f128fa84fa64f998 | [
"MIT"
] | 1 | 2021-08-16T16:16:35.000Z | 2021-08-16T16:16:35.000Z | """Convert any file into an image."""
import os
import math
import sys
import argparse
from pathlib import Path
from typing import List, Tuple, Optional, Iterable, Union
import numpy as np
from PIL import Image, ImageColor
METADATA_INFO = {
'metadata_size': (0, 3, int),
'extra_bytes': (3, 4, int),
'sign': (4, 54, str),
'file_name': (54, 310, str)
}
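# Byte layout of the metadata block that prefixes every encoded image:
# bytes 0-2 hold the total metadata size, byte 3 the number of zero bytes
# padded onto the file payload (one pixel encodes 3 bytes), bytes 4-53 an
# optional signature and bytes 54-309 the original file name, both stored as
# NUL-padded strings.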
def parse_metadata(data: bytes) -> dict:
"""Parse metadata bytes
:param data: metadata bytes
:return: parsed metadata dictionary
"""
bytes_converters = {
int: bytes_to_int,
str: bytes_to_str
}
metadata = {}
for key, (fr, to, type_) in METADATA_INFO.items():
val = bytes_converters[type_](data[fr: to])
if isinstance(val, str):
val = val.rstrip('\0')
metadata[key] = val
return metadata
def decode_image_to_bytes(image: Union[Image.Image, Iterable[np.uint8]]
) -> Tuple[bytes, dict]:
"""Decode an encoded image into bytes
:param image: PIL image or iterable of unsigned 8bit ints
:return: decoded bytes and metadata
"""
data = bytes(list(np.asarray(image, dtype=np.uint8).flatten()))
metadata = parse_metadata(data)
file_data_hex = ''.join(
f'{pixel:0>2x}' for pixel in data[metadata['metadata_size']:]
)
file_bytes = bytes.fromhex(
file_data_hex[:len(file_data_hex) - metadata['extra_bytes'] * 2]
)
return file_bytes, metadata
def decode_image_name(input_file_path: str, output_file_path: str) -> dict:
"""Decode an encoded image into a file
:param input_file_path: filepath to image
:param output_file_path: path to output directory
:return: encoded image metadata
"""
if not input_file_path.endswith('.png'):
input_file_path += '.png'
image = Image.open(input_file_path)
file_bytes, metadata = decode_image_to_bytes(image)
Path(output_file_path).mkdir(parents=True, exist_ok=True)
with open(f'{output_file_path}/{metadata["file_name"]}', 'wb') as f:
f.write(file_bytes)
return metadata
def int_to_bytes(num: int, num_bytes: int, signed: bool = False) -> bytes:
return num.to_bytes(num_bytes, byteorder='big', signed=signed)
def bytes_to_int(bytes_: bytes, signed: bool = False) -> int:
return int.from_bytes(bytes_, 'big', signed=signed)
def str_to_bytes(string: str, num_bytes: int = 0) -> bytes:
return string.encode()[:num_bytes].ljust(num_bytes, b'\0')
def bytes_to_str(bytes_: bytes) -> str:
return bytes_.decode()
def bytes_to_hex(bytes_: bytes) -> List[str]:
return list(map(lambda b: f'{b:02x}', bytes_))
def int_to_n_hex(num: int, num_hex: int) -> List[str]:
return bytes_to_hex(int_to_bytes(num, num_hex))
def str_to_hex(string: str, num_hex: int = 0) -> List[str]:
return bytes_to_hex(str_to_bytes(string, num_hex))
def colors_to_image(colors: List[str]) -> Image.Image:
"""Create an image with minimum side difference
:param colors: list of hex pixel colors
:return: PIL image
"""
size = len(colors)
root = math.ceil(math.sqrt(size))
x = 1
for i in range(root + 1, 1, -1):
if size % i == 0:
x = i
break
image_data = []
image_row = []
for color in colors:
image_row.append(list(ImageColor.getcolor(color, 'RGB')))
if len(image_row) == x:
image_data.append(image_row)
image_row = []
image = Image.fromarray((np.array(image_data)).astype(np.uint8))
return image
def hex_bytes_to_colors(hex_bytes: List[str]) -> List[str]:
"""Convert hex bytes into list of hex pixel colors
:param hex_bytes: list of hex bytes
:return: list of hex pixel colors
"""
colors = []
color = '#'
for byte_hex in hex_bytes:
color += byte_hex
if len(color) == 7:
colors.append(color)
color = '#'
if color != '#':
while len(color) < 7:
color += '0'
colors.append(color)
return colors
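# Note: a trailing partial color is padded with '0' nibbles here; the
# 'extra_bytes' metadata field records how many padding bytes were added so
# that decoding can strip them again.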
def encode_bytes_to_colors(file_bytes: bytes, file_name: str,
sign: str = '') -> List[str]:
"""Encode bytes into image pixel colors
:param file_bytes: input file bytes
:param file_name: input file name
:param sign: signature for the output image
:return: list of hex pixel colors
"""
input_file_hex = bytes_to_hex(file_bytes)
extra_bytes = -len(input_file_hex) % 3
metadata_hex = [
*int_to_n_hex(extra_bytes, 1),
*str_to_hex(sign, 50),
*str_to_hex(file_name, 256)
]
metadata_hex += int_to_n_hex(0, -len(metadata_hex) % 3)
metadata_hex = int_to_n_hex(len(metadata_hex) + 3, 3) + metadata_hex
return hex_bytes_to_colors(metadata_hex + input_file_hex)
def encode_bytes_to_image(file_bytes: bytes, file_name: str,
sign: str = '') -> Image.Image:
"""Encode bytes into an image
:param file_bytes: input file bytes
:param file_name: input file name
:param sign: signature for the output image
:return: PIL image
"""
colors = encode_bytes_to_colors(file_bytes, file_name, sign)
return colors_to_image(colors)
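# Round-trip sketch using only the helpers defined above:
#
#   image = encode_bytes_to_image(b'hello world', 'hello.txt', sign='me')
#   data, meta = decode_image_to_bytes(image)
#   assert data == b'hello world' and meta['file_name'] == 'hello.txt'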
def get_file_bytes(input_file_path: str) -> Optional[bytes]:
with open(input_file_path, 'rb') as f:
return f.read()
def encode_file_name(input_file_path: str, output_file_path: str,
sign: str = ''):
"""Encode a file into an image
:param input_file_path: filepath to input file
:param output_file_path: path to output directory
:param sign: signature for the output image
"""
input_file_bytes = get_file_bytes(input_file_path)
file_name = os.path.basename(input_file_path)
colors = encode_bytes_to_colors(input_file_bytes, file_name, sign)
image = colors_to_image(colors)
Path(output_file_path).mkdir(parents=True, exist_ok=True)
image.save(f'{output_file_path}/{file_name}.png')
def parse_args_command_line() -> argparse.Namespace:
"""Parse command line arguments
:return: dict of parsed arguments
"""
parser = argparse.ArgumentParser(
description='Convert any file into an image,'
' and images back to files.'
' Run without args for interactive input mode.'
)
encode_decode_group = parser.add_mutually_exclusive_group(required=True)
encode_decode_group.add_argument(
'-e', '--encode',
action='store_true',
help='encode file to image'
)
encode_decode_group.add_argument(
'-d', '--decode',
action='store_true',
help='decode image to file'
)
parser.add_argument(
'-i', '--input',
type=str,
help='input file',
required=True
)
parser.add_argument(
'-o', '--output',
type=str,
help='output folder',
default='.'
)
parser.add_argument(
'-s', '--sign',
type=str,
help='sign the encoded image (max 50 characters)',
default=''
)
return parser.parse_args()
def process_args_interactive() -> dict:
"""Parse arguments with interactive standard input prompts
:return: dict of parsed arguments
"""
print("""
███████████████████████▀█████████████████████████████████
█▄─▄█▄─▀█▀─▄██▀▄─██─▄▄▄▄█▄─▄█▄─▀█▄─▄█▄─▄▄─█▄─▀█▄─▄█─▄▄▄─█
██─███─█▄█─███─▀─██─██▄─██─███─█▄▀─███─▄█▀██─█▄▀─██─███▀█
▀▄▄▄▀▄▄▄▀▄▄▄▀▄▄▀▄▄▀▄▄▄▄▄▀▄▄▄▀▄▄▄▀▀▄▄▀▄▄▄▄▄▀▄▄▄▀▀▄▄▀▄▄▄▄▄▀
""")
    while (mode := input('(E)ncode or (D)ecode? ').strip().lower()[:1]) not in ('d', 'e'):
print('Invalid input.')
if mode == 'e':
input_file_path = input(
'Enter the input filepath of the file to encode: '
)
else:
input_file_path = input(
'Enter the input filename of the image to decode: '
)
output_file_path = input('Enter the output path (ENTER for .): ') or '.'
if mode == 'e':
sign = input(
'Sign the encoded image (max 50 characters, ENTER for blank): '
)
else:
sign = ''
return {
'mode': mode,
'input': input_file_path,
'output': output_file_path,
'sign': sign
}
def parse_args() -> dict:
"""Parse command line args if passed else parse interactive args
:return: dict of parsed arguments
"""
    if len(sys.argv) <= 1:
return process_args_interactive()
args = parse_args_command_line()
return {
'mode': 'd' if args.decode else 'e',
'input': args.input,
'output': args.output,
'sign': args.sign
}
def main():
args = parse_args()
try:
if args['mode'] == 'e':
encode_file_name(args['input'], args['output'], args['sign'])
elif args['mode'] == 'd':
metadata = decode_image_name(args['input'], args['output'])
sign = metadata['sign']
if sign:
print(f'This image has been signed: {sign}')
except OSError as err:
print(err.strerror, file=sys.stderr)
| 28.968153 | 76 | 0.602902 | 1,202 | 9,096 | 4.543261 | 0.171381 | 0.041201 | 0.033327 | 0.006592 | 0.262956 | 0.198132 | 0.130196 | 0.099982 | 0.064823 | 0.051273 | 0 | 0.007379 | 0.255057 | 9,096 | 313 | 77 | 29.060703 | 0.764906 | 0.154024 | 0 | 0.136585 | 0 | 0 | 0.139992 | 0.040647 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.039024 | 0.034146 | 0.229268 | 0.019512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb287331a851f7aa42680fe705caff05b09ac9f5 | 9,935 | py | Python | src/python/packages/visr_bst/tracker/razor_ahrs.py | s3a-spatialaudio/VISR | 55f6289bc5058d4898106f3520e1a60644ffb3ab | [
"ISC"
] | 17 | 2019-03-12T14:52:22.000Z | 2021-11-09T01:16:23.000Z | src/python/packages/visr_bst/tracker/razor_ahrs.py | s3a-spatialaudio/VISR | 55f6289bc5058d4898106f3520e1a60644ffb3ab | [
"ISC"
] | null | null | null | src/python/packages/visr_bst/tracker/razor_ahrs.py | s3a-spatialaudio/VISR | 55f6289bc5058d4898106f3520e1a60644ffb3ab | [
"ISC"
] | 2 | 2019-08-11T12:53:07.000Z | 2021-06-22T10:08:08.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2017-2018 Andreas Franck and Giacomo Costantini
# Copyright (C) 2017-2018 University of Southampton
# VISR Binaural Synthesis Toolkit (BST)
# Authors: Andreas Franck and Giacomo Costantini
# Project page: http://cvssp.org/data/s3a/public/BinauralSynthesisToolkit/
# The Binaural Synthesis Toolkit is provided under the ISC (Internet Systems Consortium) license
# https://www.isc.org/downloads/software-support-policy/isc-license/ :
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# We kindly ask to acknowledge the use of this software in publications or software.
# Paper citation:
# Andreas Franck, Giacomo Costantini, Chris Pike, and Filippo Maria Fazi,
# “An Open Realtime Binaural Synthesis Toolkit for Audio Research,” in Proc. Audio Eng.
# Soc. 144th Conv., Milano, Italy, 2018, Engineering Brief.
# http://www.aes.org/e-lib/browse.cfm?elib=19525
# The Binaural Synthesis Toolkit is based on the VISR framework. Information about the VISR,
# including download, setup and usage instructions, can be found on the VISR project page
# http://cvssp.org/data/s3a/public/VISR .
import visr
import pml
import numpy as np
import serial
from ..util.rotation_functions import deg2rad
class RazorAHRS(visr.AtomicComponent ):
"""
Component to receive tracking data from a Razor AHRS device through a serial port.
"""
def __init__( self,
context, name, parent,
port,
yawOffset=0,
pitchOffset=0,
rollOffset=0,
yawRightHand=False,
pitchRightHand=False,
rollRightHand=False,
calibrationInput = False # Whether to instantiate an input port to set the orientation.
):
"""
Constructor.
Parameters
----------
context : visr.SignalFlowContext
Standard visr.Component construction argument, a structure holding the block size and the sampling frequency
name : string
Name of the component, Standard visr.Component construction argument
parent : visr.CompositeComponent
Containing component if there is one, None if this is a top-level component of the signal flow.
yawOffset : float
Initial offset for the yaw component, in degree. Default: 0.0
pitchOffset : float
Offset for the pitch value, in degree. Default: 0.0
rollOffset : float
Initial offset for the roll component, in degree. Default: 0.0
yawRightHand: bool
Whether the yaw coordinate is interpreted as right-hand
(mathematically negative) rotation. Default: False
pitchRightHand: bool
Whether the pitch coordinate is interpreted as right-hand
(mathematically negative) rotation. Default: False
rollRightHand: bool
Whether the roll coordinate is interpreted as right-hand
(mathematically negative) rotation. Default: False
calibrationInput: bool
Flag to determine whether the component has an additional input "calibration"
that resets the orientation offsets. At the moment, this input is of
type StringParameter, and the value is ignored.
TODO: Check whether to support ListenerPosition objects as calibration triggers
to set the orientation to an arbitrary value
"""
super( RazorAHRS, self ).__init__( context, name, parent )
self.yprVec = np.zeros( 3, dtype = np.float32 )
baudRate = 57600
self.ser = serial.Serial(port, baudRate, timeout=0)
self.message = ""
self.sent = False
self.trackingOutput = visr.ParameterOutput( "orientation", self,
pml.ListenerPosition.staticType,
pml.DoubleBufferingProtocol.staticType,
pml.EmptyParameterConfig() )
self.trackingOutputProtocol = self.trackingOutput.protocolOutput()
if calibrationInput:
self.calibrationInput = visr.ParameterInput( "calibration", self,
pml.StringParameter.staticType,
pml.MessageQueueProtocol.staticType,
pml.EmptyParameterConfig() )
else:
self.calibrationInput = None
self.sentN = 0
self.parsedN = 0
self.ser.read() #necessary for the .in_waiting to work
self.procN =0
self.yawOffset = yawOffset
self.pitchOffset = pitchOffset
self.rollOffset = rollOffset
self.yawRightHand = yawRightHand
self.pitchRightHand = pitchRightHand
self.rollRightHand= rollRightHand
self.orientation = np.array( [0.0, 0.0, 0.0 ] ) # Current orientation, unadjusted, in radian
def send_data(self, newdata):
# Strip the "#YPR=" framing characters and trailing whitespace before parsing.
data = newdata.replace("#","").replace("Y","").replace("P","").replace("R","").replace("=","").rstrip()
try:
yprvec = [float(i) for i in data.split(',')]
# Validate the length before indexing, so a malformed message raises a
# ValueError (caught below) instead of an unhandled IndexError.
if len(yprvec) != 3:
raise ValueError('yaw pitch roll bad format: ' + str(yprvec))
if self.yawRightHand:
yprvec[0] *= -1
if self.pitchRightHand:
yprvec[1] *= -1
if self.rollRightHand:
yprvec[2] *= -1
self.orientation = yprvec # Store the current position
ypr = self.trackingOutput.protocolOutput().data()
# [deg2rad(yprvec[0]+self.yawOffset),deg2rad(yprvec[1]+self.pitchOffset),deg2rad(yprvec[2]+self.rollOffset)]
ypr.orientation = [deg2rad(self.orientation[0] + self.yawOffset),
deg2rad(self.orientation[1] + self.pitchOffset),
deg2rad(self.orientation[2] + self.rollOffset)]
self.sentN = self.sentN+1
# print("%d serial parsing %f sec"%(self.procN,time.time()-self.start))
# print("[%d,%d,%d]"%(yprvec[0]+self.yawOffset,yprvec[1]+self.pitchOffset,yprvec[2]+self.rollOffset))
self.trackingOutput.protocolOutput().swapBuffers()
except ValueError:
print ("Parsing went wrong because of a wrongly formatted string...")
def parse_message(self, read):
# Accumulate raw serial input and forward the most recent complete
# CRLF-terminated line to send_data().
last = read.rfind("\r\n")
if last == -1:
self.message+=read
#print(" no endl: "+repr(self.message))
else:
ndlast = read.rfind("\r\n", 0, last)
if ndlast == -1:
#print(" just one endl bef: "+repr(self.message))
self.message+= read[:last+2]
#print(" just one endl: "+repr(self.message))
if self.message.count('\r\n') >= 2:
lastM = self.message.rfind("\r\n")
ndlastM = self.message.rfind("\r\n", 0, lastM)
lastN = self.message[ndlastM+2:lastM].rfind("\n")
lastR = self.message[ndlastM+2:lastM].rfind("\r")
if lastN != -1:
ndlastM = lastN
if lastR != -1:
ndlastM = lastR
#print(" message sent: "+repr(self.message[ndlastM+2:lastM+2]))
self.send_data(self.message[ndlastM+2:lastM+2])
self.message = read[last:]
else:
self.message+= read[last+2:]
#print(" not sent: "+repr(self.message))
else:
#print(" message sent: "+repr(read[ndlast+2:last+2]))#+" ( in mem "+repr(read[last:])+" ) over total "+(repr(read)))
self.send_data(read[ndlast+2:last+2])
self.sent = True
self.message = read[last:]
self.parsedN = self.parsedN+1
def process( self ):
if self.calibrationInput is not None:
calInputProtocol = self.calibrationInput.protocolInput()
if not calInputProtocol.empty():
# We are not interested in the content, but just use it as a trigger to use the
# most recent orientation to set the compensation values.
self.yawOffset = - self.orientation[0]
self.pitchOffset = - self.orientation[1]
self.rollOffset = - self.orientation[2]
calInputProtocol.clear()
self.procN = self.procN + 1
inBuffer = self.ser.in_waiting
if inBuffer == 0:
return
try:
# Read the pending bytes; ISO-8859-1 maps every byte value to a
# character, so the decode cannot fail on binary noise.
read = self.ser.read(inBuffer).decode('iso-8859-1')
if len(read) == 0:
return
self.parse_message(read)
except Exception as ex:
print( "Error while decoding tracking data message: %s" % str(ex) )
| 44.352679 | 134 | 0.588727 | 1,092 | 9,935 | 5.341575 | 0.332418 | 0.032059 | 0.012858 | 0.013029 | 0.127893 | 0.07989 | 0.061375 | 0.051432 | 0.039088 | 0.039088 | 0 | 0.016726 | 0.31998 | 9,935 | 223 | 135 | 44.55157 | 0.846655 | 0.428888 | 0 | 0.12069 | 0 | 0 | 0.036021 | 0 | 0 | 0 | 0 | 0.004484 | 0 | 1 | 0.034483 | false | 0 | 0.043103 | 0 | 0.103448 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb2df572b9837f3c3d86ae8056012b552ac2dfe8 | 670 | py | Python | modelexp/data/_xysData.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | modelexp/data/_xysData.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | modelexp/data/_xysData.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | import numpy as np
from ._xyeData import XyeData
class XysData(XyeData):
def loadFromFile(self, filename):
"""Load three-column ASCII data (x, y, error) from filename, skipping
'!'-prefixed comment lines and any line without exactly three
whitespace-separated fields, and store the columns sorted by x."""
self.filename = filename
x = []
y = []
e = []
with open(filename, 'r') as f:
for line in f:
if line.startswith('!'):
continue
splitLine = line.strip().split()
if len(splitLine) != 3:
continue
x.append(float(splitLine[0]))
y.append(float(splitLine[1]))
e.append(float(splitLine[2]))
x = np.array(x)
y = np.array(y)
e = np.array(e)
sortedArgs = np.argsort(x)
x = x[sortedArgs]
y = y[sortedArgs]
e = e[sortedArgs]
self.setData(x, y, e) | 23.103448 | 40 | 0.555224 | 89 | 670 | 4.168539 | 0.438202 | 0.016173 | 0.161725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008547 | 0.301493 | 670 | 29 | 41 | 23.103448 | 0.784188 | 0 | 0 | 0.076923 | 0 | 0 | 0.002981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb303aa9a9e35048fd364aab67b910b9b13e5a5f | 5,252 | py | Python | lcm/ns_vnfs/views/vnf_views.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 4 | 2018-08-29T02:51:38.000Z | 2021-11-16T11:36:11.000Z | lcm/ns_vnfs/views/vnf_views.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | null | null | null | lcm/ns_vnfs/views/vnf_views.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 1 | 2019-05-12T08:21:19.000Z | 2019-05-12T08:21:19.000Z | # Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from lcm.ns_vnfs.biz.grant_vnf import GrantVnf
from lcm.ns_vnfs.serializers.grant_vnf_serializer import GrantRequestSerializer
from lcm.ns_vnfs.serializers.grant_vnf_serializer import GrantSerializer
from lcm.ns_vnfs.biz.handle_notification import HandleVnfLcmOocNotification, HandleVnfIdentifierCreationNotification, HandleVnfIdentifierDeletionNotification
from lcm.ns_vnfs.serializers.grant_vnf_serializer import VnfLcmOperationOccurrenceNotificationSerializer, VnfIdentifierCreationNotificationSerializer, VnfIdentifierDeletionNotificationSerializer
logger = logging.getLogger(__name__)
class VnfGrantView(APIView):
@swagger_auto_schema(
request_body=GrantRequestSerializer(),
responses={
status.HTTP_201_CREATED: GrantSerializer(
help_text="The grant was created successfully (synchronous mode)."
),
status.HTTP_500_INTERNAL_SERVER_ERROR: "Inner error"
}
)
def post(self, request):
logger.debug("VnfGrantView Post: %s" % request.data)
try:
grant_request = GrantRequestSerializer(data=request.data)
if not grant_request.is_valid():
raise Exception(grant_request.errors)
grant_resp = GrantVnf(request.data).exec_grant()
resp_serializer = GrantSerializer(data=grant_resp)
if not resp_serializer.is_valid():
raise Exception(grant_resp)
return Response(data=grant_resp, status=status.HTTP_201_CREATED)
except Exception as e:
logger.error(traceback.format_exc())
logger.error("Exception in VnfGrant: %s", e.args[0])
return Response(data={'error': e.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class VnfNotifyView(APIView):
@swagger_auto_schema(
request_body=VnfLcmOperationOccurrenceNotificationSerializer(
help_text="A notification about lifecycle changes triggered by a VNF LCM operation occurrence."
),
responses={
status.HTTP_204_NO_CONTENT: "The notification was delivered successfully.",
status.HTTP_500_INTERNAL_SERVER_ERROR: "Inner error"
}
)
def post(self, request, vnfmId, vnfInstanceId):
logger.debug("VnfNotifyView post: %s" % request.data)
logger.debug("vnfmId: %s vnfInstanceId: %s", vnfmId, vnfInstanceId)
# Use .get() so a missing notificationType falls through to the error
# branch below instead of raising an uncaught KeyError.
notification_type = request.data.get('notificationType')
try:
if notification_type == 'VnfLcmOperationOccurrenceNotification':
notification = VnfLcmOperationOccurrenceNotificationSerializer(data=request.data)
if not notification.is_valid():
logger.warning(notification.errors)
HandleVnfLcmOocNotification(vnfmId, vnfInstanceId, notification.data).do_biz()
elif notification_type == 'VnfIdentifierCreationNotification':
notification = VnfIdentifierCreationNotificationSerializer(data=request.data)
if not notification.is_valid():
logger.warning(notification.errors)
HandleVnfIdentifierCreationNotification(vnfmId, vnfInstanceId, notification.data).do_biz()
elif notification_type == 'VnfIdentifierDeletionNotification':
notification = VnfIdentifierDeletionNotificationSerializer(data=request.data)
if not notification.is_valid():
logger.warning(notification.errors)
HandleVnfIdentifierDeletionNotification(vnfmId, vnfInstanceId, notification.data).do_biz()
else:
raise Exception('Unexpected notification type value.')
return Response(data={}, status=status.HTTP_204_NO_CONTENT)
except Exception as e:
logger.error(traceback.format_exc())
logger.error("Exception in VnfLcmOoc Notification: %s", e.args[0])
return Response(data={'error': e.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
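# Illustrative notification payload for the POST handler above (field names
# follow the notification serializers imported at the top of this file; the
# concrete values are hypothetical):
#   {
#       "notificationType": "VnfLcmOperationOccurrenceNotification",
#       "vnfInstanceId": "5f02b8a8-0000-0000-0000-000000000000",
#       "operation": "INSTANTIATE"
#   }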
@swagger_auto_schema(
responses={
status.HTTP_204_NO_CONTENT: "The notification endpoint was tested successfully.",
status.HTTP_500_INTERNAL_SERVER_ERROR: "Inner error"
}
)
def get(self, request, vnfmId, vnfInstanceId):
logger.debug("VnfNotifyView get")
logger.debug("vnfmId: %s vnfInstanceId: %s", vnfmId, vnfInstanceId)
return Response(data={}, status=status.HTTP_204_NO_CONTENT)
| 47.745455 | 194 | 0.706969 | 544 | 5,252 | 6.654412 | 0.308824 | 0.030387 | 0.012431 | 0.017956 | 0.424586 | 0.395856 | 0.36547 | 0.338674 | 0.285083 | 0.18674 | 0 | 0.010933 | 0.216299 | 5,252 | 109 | 195 | 48.183486 | 0.868562 | 0.106055 | 0 | 0.345238 | 0 | 0 | 0.129887 | 0.022004 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.130952 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb30a88e6ee61f39cad41da1f9c9e343720cc8c6 | 3,819 | py | Python | vif_plug_ovs/ovsdb/ovsdb_lib.py | petrutlucian94/os-vif | be44c603959452b345dae81f7b50a69667c83b5f | [
"Apache-2.0"
] | null | null | null | vif_plug_ovs/ovsdb/ovsdb_lib.py | petrutlucian94/os-vif | be44c603959452b345dae81f7b50a69667c83b5f | [
"Apache-2.0"
] | null | null | null | vif_plug_ovs/ovsdb/ovsdb_lib.py | petrutlucian94/os-vif | be44c603959452b345dae81f7b50a69667c83b5f | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log as logging
from vif_plug_ovs import constants
from vif_plug_ovs import linux_net
from vif_plug_ovs.ovsdb import api as ovsdb_api
LOG = logging.getLogger(__name__)
class BaseOVS(object):
def __init__(self, config):
self.timeout = config.ovs_vsctl_timeout
self.connection = config.ovsdb_connection
self.interface = config.ovsdb_interface
self.ovsdb = ovsdb_api.get_instance(self)
def _ovs_supports_mtu_requests(self):
return bool(self.ovsdb.db_list(
'Interface', columns=['mtu_request']).execute(check_error=True,
log_errors=True))
def _set_mtu_request(self, dev, mtu):
self.ovsdb.db_set('Interface', dev, ('mtu_request', mtu)).execute()
def update_device_mtu(self, dev, mtu, interface_type=None):
if not mtu:
return
if interface_type not in [
constants.OVS_VHOSTUSER_INTERFACE_TYPE,
constants.OVS_VHOSTUSER_CLIENT_INTERFACE_TYPE]:
if sys.platform != constants.PLATFORM_WIN32:
# Hyper-V with OVS does not support external programming of
# virtual interface MTUs via netsh or other Windows tools.
# When plugging an interface on Windows, we therefore skip
# programming the MTU and fallback to DHCP advertisement.
linux_net.set_device_mtu(dev, mtu)
elif self._ovs_supports_mtu_requests():
self._set_mtu_request(dev, mtu)
else:
LOG.debug("MTU not set on %(interface_name)s interface "
"of type %(interface_type)s.",
{'interface_name': dev,
'interface_type': interface_type})
def ensure_ovs_bridge(self, bridge, datapath_type):
return self.ovsdb.add_br(bridge, may_exist=True,
datapath_type=datapath_type).execute()
def create_ovs_vif_port(self, bridge, dev, iface_id, mac, instance_id,
mtu=None, interface_type=None,
vhost_server_path=None):
external_ids = {'iface-id': iface_id,
'iface-status': 'active',
'attached-mac': mac,
'vm-uuid': instance_id}
col_values = [('external_ids', external_ids)]
if interface_type:
col_values.append(('type', interface_type))
if vhost_server_path:
col_values.append(('options',
{'vhost-server-path': vhost_server_path}))
with self.ovsdb.transaction() as txn:
txn.add(self.ovsdb.add_port(bridge, dev))
txn.add(self.ovsdb.db_set('Interface', dev, *col_values))
self.update_device_mtu(dev, mtu, interface_type=interface_type)
def update_ovs_vif_port(self, dev, mtu=None, interface_type=None):
self.update_device_mtu(dev, mtu, interface_type=interface_type)
def delete_ovs_vif_port(self, bridge, dev, delete_netdev=True):
self.ovsdb.del_port(dev, bridge=bridge, if_exists=True).execute()
if delete_netdev:
linux_net.delete_net_dev(dev)
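# Usage sketch (assumed wiring; `conf` stands for a config object exposing
# ovs_vsctl_timeout, ovsdb_connection and ovsdb_interface, and the
# bridge/device/MAC/UUID values are hypothetical):
#   ovs = BaseOVS(conf)
#   ovs.ensure_ovs_bridge('br-int', 'system')
#   ovs.create_ovs_vif_port('br-int', 'tap123', 'iface-id-1',
#                           'fa:16:3e:aa:bb:cc', 'instance-uuid', mtu=1450)
#   ovs.delete_ovs_vif_port('br-int', 'tap123')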
| 41.967033 | 78 | 0.632888 | 482 | 3,819 | 4.771784 | 0.348548 | 0.084783 | 0.036957 | 0.018261 | 0.163043 | 0.089565 | 0.046957 | 0.046957 | 0.046957 | 0.046957 | 0 | 0.00219 | 0.282535 | 3,819 | 90 | 79 | 42.433333 | 0.837226 | 0.202671 | 0 | 0.033898 | 0 | 0 | 0.076999 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0 | 0.084746 | 0.033898 | 0.288136 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb32274487e56191df83b272421f3e8d4f0c978f | 2,878 | py | Python | modules/request_luis.py | microsoft/cai-advanced-processing-service | 91fce8048fe275aa870083bed22d452d330ef535 | [
"MIT"
] | 9 | 2021-11-17T11:50:47.000Z | 2022-02-15T14:48:32.000Z | modules/request_luis.py | microsoft/cai-advanced-processing-service | 91fce8048fe275aa870083bed22d452d330ef535 | [
"MIT"
] | 7 | 2021-12-03T17:05:26.000Z | 2022-03-02T04:59:50.000Z | modules/request_luis.py | microsoft/cai-advanced-processing-service | 91fce8048fe275aa870083bed22d452d330ef535 | [
"MIT"
] | null | null | null | '''MODULES USED ACROSS THE SOLUTION'''
import requests
import logging
import json
import os
# Define logger
logger = logging.getLogger(__name__)
# LUIS Scoring
def score_luis(text, luis_creds):
"""Score LUIS endpoint (REST API v3) and return parsed JSON"""
# Set the request header carrying the subscription key.
headers = {
'Ocp-Apim-Subscription-Key': luis_creds['luis_key'],
}
# Set URL parameters to use in this REST call.
params ={
'query': text,
'timezoneOffset': '0',
'verbose': 'true',
'show-all-intents': 'true',
'spellCheck': 'false',
'staging': 'false',
'subscription-key': luis_creds['luis_key']
}
# Make the REST call; r stays None if the request or the JSON parsing fails.
r = None
try:
r = requests.get(f'https://{luis_creds["luis_prediction_endpoint"]}.cognitiveservices.azure.com/luis/prediction/v3.0/apps/{luis_creds["luis_id"]}/slots/{luis_creds["luis_slot"]}/predict', headers=headers, params=params)
r = json.loads(r.text)
logger.info('[INFO] - successfully processed LUIS request')
except Exception as e:
logger.error(f"[ERROR] LUIS encountered an issue -> {e}.")
return r
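# Example round trip (hedged: the env variables or config.ini must provide the
# LUIS credentials; the utterance is hypothetical and the v3 response shape
# shown is the typical one, not guaranteed for every app):
#   creds = get_luis_creds('westeurope')
#   result = score_luis('book a flight to Berlin', creds)
#   top_intent = result['prediction']['topIntent']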
def get_luis_creds(region=None):
"""Retrieve LUIS credentials from env variables or local config and return as dict"""
try:
luis_creds = dict()
# If region is given, append an underscore and the region to the variable names
if region:
_region = f'_{region}'
else:
_region = ""
luis_creds['luis_id'] = os.environ.get(f'LUIS_ID{_region}')
luis_creds['luis_key'] = os.environ.get(f'LUIS_KEY{_region}')
luis_creds['luis_prediction_endpoint'] = os.environ.get(f'LUIS_PREDICTION_ENDPOINT{_region}')
luis_creds['luis_slot'] = os.environ.get(f'LUIS_SLOT{_region}')
# Local debugging
if luis_creds['luis_key'] is None:
logger.info(f'[INFO] Entering local debugging')
import sys
sys.path.append('./')
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
luis_creds['luis_id'] = config[f'LUIS{_region}']['appid']
luis_creds['luis_key'] = config[f'LUIS{_region}']['key']
luis_creds['luis_prediction_endpoint'] = config[f'LUIS{_region}']['prediction_endpoint']
luis_creds['luis_slot'] = config[f'LUIS{_region}']['slot']
except KeyError:
logger.error(f'[ERROR] - No valid luis credentials found. Please make sure you are using the correct region or whether there is region-neutral information set.')
luis_creds = None
return luis_creds | 38.891892 | 228 | 0.604934 | 347 | 2,878 | 4.84438 | 0.363112 | 0.101725 | 0.108269 | 0.047591 | 0.132659 | 0.036883 | 0 | 0 | 0 | 0 | 0 | 0.002364 | 0.265115 | 2,878 | 74 | 229 | 38.891892 | 0.792435 | 0.100417 | 0 | 0.172414 | 0 | 0.034483 | 0.375983 | 0.043892 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.103448 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb33c0fc1fcce1bc18a1d38298e45efb2e472d9f | 8,958 | py | Python | mlbench_core/controlflow/tensorflow/train_validation.py | mlbench/mlbench-core | 4fd3c7e6f1a5be69e52383ab2eb64cad257218c2 | [
"Apache-2.0"
] | 14 | 2018-11-12T17:23:32.000Z | 2022-03-11T22:45:49.000Z | mlbench_core/controlflow/tensorflow/train_validation.py | mlbench/mlbench-core | 4fd3c7e6f1a5be69e52383ab2eb64cad257218c2 | [
"Apache-2.0"
] | 330 | 2018-10-09T12:15:56.000Z | 2022-03-01T18:07:40.000Z | mlbench_core/controlflow/tensorflow/train_validation.py | mlbench/mlbench-core | 4fd3c7e6f1a5be69e52383ab2eb64cad257218c2 | [
"Apache-2.0"
] | 9 | 2019-02-28T19:11:58.000Z | 2020-08-17T17:52:37.000Z | r"""A controlflow which train and evaluate a model."""
import logging
from mlbench_core.utils import AverageMeter, Tracker
def train_round(
session,
train_set_init_op,
train_op,
loss_op,
metrics,
batch_size,
num_batches_per_epoch_for_train,
tracker,
lr_scheduler_level=None,
lr_tensor=None,
):
"""Performs num_batches_per_epoch_for_train batches of training (or full trainset if
not specified)
Args:
session (obj): The tensorflow session
train_set_init_op (obj): The trainset initialisation tf operation
train_op (obj): The tensorflow training operation
loss_op (obj): The tensorflow loss operation
metrics (list): List of metrics to track
batch_size (int): The batch size
num_batches_per_epoch_for_train (int): Maximum number of batches tot rain for per epoch,
default: `None` (all batches)
tracker (`obj`:mlbench_core.utils.Tracker): Tracker object to use
lr_scheduler_level (str): Learning Rate scheduler mode, one of `batch` or `epoch`
lr_tensor (obj): The learningrate schedule tensorflow operation
"""
logging.info("Initialize training dataset.")
session.run(train_set_init_op)
tracker.train()
loss_meter = AverageMeter()
metrics_meter = [AverageMeter() for _ in metrics]
if lr_scheduler_level == "epoch" and lr_tensor is not None:
lr = session.run(lr_tensor)
logging.debug(
"Epoch {} Learning Rate : {:10.3e}".format(tracker.current_epoch, lr)
)
for i_batch in range(num_batches_per_epoch_for_train):
# for i_batch in range(1):
tracker.batch_start()
if lr_scheduler_level == "batch" and lr_tensor is not None:
lr = session.run(lr_tensor)
logging.debug(
"Epoch {} Learning Rate : {:10.3e}".format(tracker.current_epoch, lr)
)
out = session.run(
{
"metrics": [m.metric_op for m in metrics],
"loss": loss_op,
"train_op": train_op,
}
)
tracker.batch_end()
# Update tracker
loss_meter.update(out["loss"], n=batch_size)
tracker.record_loss(loss_meter.avg, log_to_api=True)
for metric, meter, o in zip(metrics, metrics_meter, out["metrics"]):
meter.update(o, n=batch_size)
tracker.record_metric(metric, meter.avg, log_to_api=True)
# Print logging information.
progress = i_batch / num_batches_per_epoch_for_train
progress += tracker.current_epoch
status = "Epoch {:5.2f} Batch {:4}: ".format(progress, i_batch)
logging.info(status + str(tracker))
# Record training loss and metrics.
tracker.record_loss(loss_meter.avg, log_to_api=True)
for metric, meter in zip(metrics, metrics_meter):
tracker.record_metric(metric, meter.avg, log_to_api=True)
logging.info("Finish training for one epoch.")
def validation_round(
session,
validation_set_init_op,
loss_op,
metrics,
batch_size,
num_batches_per_epoch_for_validation,
tracker,
):
"""Handles one full iteration of validation on the whole validation set.
Args:
session (obj): The tensorflow session
validation_set_init_op (obj): The trainset initialisation tf operation
loss_op (obj): The tensorflow loss operation
metrics (list): List of metrics to track
batch_size (int): The batch size
num_batches_per_epoch_for_validation (int): Maximum number of batches to validate
for per epoch, default: `None` (all batches)
tracker (`obj`:mlbench_core.utils.Tracker): Tracker object to use
"""
session.run(validation_set_init_op)
tracker.validation()
loss_meter = AverageMeter()
metrics_meter = [AverageMeter() for _ in metrics]
for i_batch in range(num_batches_per_epoch_for_validation):
out = session.run({"metrics": [m.metric_op for m in metrics], "loss": loss_op})
# Update tracker
loss_meter.update(out["loss"], n=batch_size)
for meter, o in zip(metrics_meter, out["metrics"]):
meter.update(o, n=batch_size)
logging.debug(
"{}/{} Validation loss={:10.3e} | metrics: [{}]".format(
tracker.current_epoch,
i_batch,
loss_meter.avg,
",".join([format(m.avg, "10.3e") for m in metrics_meter]),
)
)
tracker.record_loss(loss_meter.avg, log_to_api=True)
if tracker.rank == 0:
tracker.record_stat("global_loss", loss_meter.avg, log_to_api=True)
for i, metric, meter in zip(range(len(metrics)), metrics, metrics_meter):
tracker.record_metric(metric, meter.avg, log_to_api=True)
if tracker.rank == 0:
tracker.record_stat(
"global_{}".format(metric.name), meter.avg, log_to_api=True
)
class TrainValidation(object):
"""A control flow to train and evaluate a model.
Args:
train_op (:obj:`tf.Operation`): An operation for training models.
sess (:obj:`tf.Session`): A session which the control flow will communicate.
loss (:obj:`tf.Tensor`): The loss tensor.
metrics (list of :obj:`tf.Tensor`): A list of metrics tensors.
max_train_steps (int): Number of steps for training (independent of lr)
train_epochs (int): Number of steps for training (may related to lr).
batch_size (int): Size of a batch.
num_batches_per_epoch_for_train (int): Number of batches in one training epoch
num_batches_per_epoch_for_validation (int): Number of batches in one validation epoch
train_set_init_op (:obj:`tf.Operation`): Op for initializing training dataset.
validation_set_init_op (:obj:`tf.Operation`): Op for initializing validation dataset.
run_id (str): the id of the run in the dashboard
rank (int): the rank of the current worker
lr_scheduler_level (str): Learning rate is updated based on `epoch` or `batch`.
"""
def __init__(
self,
train_op,
sess,
loss,
metrics,
max_train_steps,
train_epochs,
batch_size,
num_batches_per_epoch_for_train,
num_batches_per_epoch_for_validation,
train_set_init_op,
validation_set_init_op,
run_id,
rank,
lr_scheduler_level="epoch",
tracker=None,
):
self.batch_size = batch_size
self.num_batches_per_epoch_for_train = num_batches_per_epoch_for_train
self.num_batches_per_epoch_for_validation = num_batches_per_epoch_for_validation
self.sess = sess
self.loss = loss
self.metrics = metrics
self.train_op = train_op
self.lr_scheduler_level = lr_scheduler_level
self.max_train_steps = max_train_steps
self.train_epochs = train_epochs
self.train_set_init_op = train_set_init_op
self.validation_set_init_op = validation_set_init_op
self.run_id = run_id
self.rank = rank
if tracker:
self.tracker = tracker
else:
self.tracker = Tracker(metrics, run_id, rank)
def train_one_epoch(self, lr_tensor_name=None):
"""Train a model for an epoch and use tracker to log stats.
Args:
lr_tensor (obj): The learningrate schedule tensorflow operation"""
train_round(
self.sess,
self.train_set_init_op,
self.train_op,
self.loss,
self.metrics,
self.batch_size,
self.num_batches_per_epoch_for_train,
self.tracker,
lr_tensor=lr_tensor_name,
lr_scheduler_level=self.lr_scheduler_level,
)
def valid_one_epoch(self):
"""Validate a model for an epoch and use tracker to log stats."""
validation_round(
self.sess,
self.validation_set_init_op,
self.loss,
self.metrics,
self.batch_size,
self.num_batches_per_epoch_for_validation,
self.tracker,
)
def train_and_eval(self, initial_epoch=0, lr_tensor_name=None):
"""Train and evaluate one epoch.
Args:
initial_epoch (int, optional): Defaults to 0. Initial epoch of training.
lr_tensor_name (:obj:`tf.Tensor`, optional): Defaults to None.
A (scalar) float tensor representing name of learning rate
"""
final_epoch = min(self.max_train_steps, self.train_epochs)
for i_epoch in range(initial_epoch, final_epoch):
logging.debug("=> Epoch {}".format(i_epoch))
self.train_one_epoch()
self.valid_one_epoch()
self.tracker.epoch_end()
return self.tracker
| 34.992188 | 96 | 0.634963 | 1,165 | 8,958 | 4.616309 | 0.134764 | 0.029751 | 0.043511 | 0.060245 | 0.589067 | 0.524359 | 0.459093 | 0.411863 | 0.384901 | 0.32354 | 0 | 0.003093 | 0.278187 | 8,958 | 255 | 97 | 35.129412 | 0.828642 | 0.329649 | 0 | 0.371795 | 0 | 0 | 0.052228 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.012821 | 0 | 0.064103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb34105bad39d903197e306923b700b79a9ad305 | 1,295 | py | Python | galaxy/main/migrations/0050_auto_20171024_0354.py | akaRem/galaxy | 567947171579fcdf7c0192316812ee0c59ccce6e | [
"Apache-2.0"
] | null | null | null | galaxy/main/migrations/0050_auto_20171024_0354.py | akaRem/galaxy | 567947171579fcdf7c0192316812ee0c59ccce6e | [
"Apache-2.0"
] | null | null | null | galaxy/main/migrations/0050_auto_20171024_0354.py | akaRem/galaxy | 567947171579fcdf7c0192316812ee0c59ccce6e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import galaxy.main.fields
import galaxy.main.mixins
class Migration(migrations.Migration):
dependencies = [
('main', '0049_auto_20161013_1744'),
]
operations = [
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('description', galaxy.main.fields.TruncatingCharField(default=b'', max_length=255, blank=True)),
('active', models.BooleanField(default=True, db_index=True)),
('url', models.CharField(unique=True, max_length=256)),
],
options={
'verbose_name': 'videos',
},
bases=(models.Model, galaxy.main.mixins.DirtyMixin),
),
migrations.AddField(
model_name='role',
name='videos',
field=models.ManyToManyField(related_name='videos', verbose_name=b'videos', editable=False, to='main.Video', blank=True),
),
]
| 35 | 133 | 0.59305 | 128 | 1,295 | 5.835938 | 0.523438 | 0.053548 | 0.042838 | 0.069612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024339 | 0.27027 | 1,295 | 36 | 134 | 35.972222 | 0.766138 | 0.016216 | 0 | 0.066667 | 0 | 0 | 0.095126 | 0.018082 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb367219d87b6bd3a73ff34c230fd097975b32d3 | 793 | py | Python | pyhelp.py | Decripter/Py_Help | 7fe290fcfe3374d064cc8e04d17763c4e19b88f4 | [
"RSA-MD"
] | null | null | null | pyhelp.py | Decripter/Py_Help | 7fe290fcfe3374d064cc8e04d17763c4e19b88f4 | [
"RSA-MD"
] | null | null | null | pyhelp.py | Decripter/Py_Help | 7fe290fcfe3374d064cc8e04d17763c4e19b88f4 | [
"RSA-MD"
] | null | null | null | #!/usr/local/bin/python3
import sys
from topics import python
def inicio():
print(
"""
This is a help program.
The following topics are available:
"""
)
topicos()
def topicos():
print(
"""
python
"""
)
def opcao_invalida(nome_opcao):
print(nome_opcao, " - Este tópico ainda não está disponível.")
print("Verifique a lista de tópicos disponíveis:")
topicos()
def main(argumentos):
if len(argumentos) > 0:
if argumentos[1] == "python":
if len(argumentos) > 2:
python.exibi_ajuda(argumentos[2])
else:
python.subtopicos()
else:
opcao_invalida(argumentos[1])
else:
inicio()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 17.23913 | 66 | 0.578815 | 89 | 793 | 5.011236 | 0.539326 | 0.040359 | 0.09417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01083 | 0.301387 | 793 | 45 | 67 | 17.622222 | 0.794224 | 0.029004 | 0 | 0.269231 | 0 | 0 | 0.141384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.076923 | 0 | 0.230769 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb37be9e4f8db458688c4984efa6cd3c91b44c7b | 1,636 | py | Python | cli/aws_orbit/services/secretsmanager.py | srinivasreddych/aws-orbit-workbench | 2d154addff58d26f5459a73c06148aaf5e9fad46 | [
"Apache-2.0"
] | 94 | 2021-03-19T19:55:11.000Z | 2022-03-31T19:50:01.000Z | cli/aws_orbit/services/secretsmanager.py | srinivasreddych/aws-orbit-workbench | 2d154addff58d26f5459a73c06148aaf5e9fad46 | [
"Apache-2.0"
] | 410 | 2021-03-19T18:04:48.000Z | 2022-03-22T13:56:53.000Z | cli/aws_orbit/services/secretsmanager.py | srinivasreddych/aws-orbit-workbench | 2d154addff58d26f5459a73c06148aaf5e9fad46 | [
"Apache-2.0"
] | 24 | 2021-03-19T23:16:23.000Z | 2022-03-04T01:05:18.000Z | import json
import logging
from typing import Any, Dict, cast
from botocore.exceptions import ClientError
from aws_orbit.utils import boto3_client
_logger: logging.Logger = logging.getLogger(__name__)
def get_secret_value(secret_id: str) -> Dict[str, Any]:
client = boto3_client("secretsmanager")
try:
_logger.debug("Getting Secret: %s", secret_id)
get_secret_value_response = client.get_secret_value(SecretId=secret_id)
except ClientError as e:
_logger.exception(e)
return {}
else:
return cast(Dict[str, Any], json.loads(get_secret_value_response.get("SecretString", "{}")))
def put_secret_value(secret_id: str, secret: Dict[str, Any]) -> None:
client = boto3_client("secretsmanager")
try:
_logger.debug("Creating Secret: %s", secret_id)
client.create_secret(Name=secret_id, SecretString="{}")
except ClientError as e:
if e.response["Error"]["Code"] == "ResourceExistsException":
_logger.info("Secret %s exists, ignoring", secret_id)
else:
_logger.exception(e)
raise e
try:
_logger.debug("Putting Secret Value: %s", secret_id)
client.put_secret_value(SecretId=secret_id, SecretString=json.dumps(secret))
except ClientError as e:
_logger.exception(e)
def delete_docker_credentials(secret_id: str) -> None:
client = boto3_client("secretsmanager")
try:
_logger.debug("Deleting Secret: %s", secret_id)
client.delete_secret(SecretId=secret_id, ForceDeleteWithoutRecovery=True)
except ClientError as e:
_logger.exception(e)
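# Example round trip (the secret id is hypothetical; AWS credentials and
# region are resolved by boto3_client from the environment):
#   put_secret_value('orbit/docker-creds', {'username': 'u', 'password': 'p'})
#   creds = get_secret_value('orbit/docker-creds')
#   delete_docker_credentials('orbit/docker-creds')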
| 30.867925 | 100 | 0.685208 | 200 | 1,636 | 5.36 | 0.3 | 0.089552 | 0.052239 | 0.074627 | 0.364739 | 0.234142 | 0.234142 | 0.091418 | 0 | 0 | 0 | 0.003079 | 0.20599 | 1,636 | 52 | 101 | 31.461538 | 0.822171 | 0 | 0 | 0.435897 | 0 | 0 | 0.119804 | 0.014059 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.128205 | 0 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb39430918dc60481b0b98b664b64731bbf4708f | 4,516 | py | Python | sdk/keyvault/azure-keyvault-keys/samples/key_rotation_async.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/keyvault/azure-keyvault-keys/samples/key_rotation_async.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/keyvault/azure-keyvault-keys/samples/key_rotation_async.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import os
from azure.identity.aio import DefaultAzureCredential
from azure.keyvault.keys import KeyRotationLifetimeAction, KeyRotationPolicyAction
from azure.keyvault.keys.aio import KeyClient
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-keys and azure-identity libraries (pip install these)
#
# 3. Set environment variable VAULT_URL with the URL of your key vault
#
# 4. Set up your environment to use azure-identity's DefaultAzureCredential. To authenticate a service principal with
# environment variables, set AZURE_CLIENT_ID, AZURE_CLIENT_SECRET, and AZURE_TENANT_ID
# (See https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# 5. Key rotation permissions for your service principal in your vault
#
# ----------------------------------------------------------------------------------------------------------
# Sample - creates and updates a key's automated rotation policy, and rotates a key on-demand
#
# 1. Create a new key rotation policy (update_key_rotation_policy)
#
# 2. Get a key's current rotation policy (get_key_rotation_policy)
#
# 3. Update a key's rotation policy (update_key_rotation_policy)
#
# 4. Rotate a key on-demand (rotate_key)
#
# 5. Delete a key (delete_key)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a key client that will be used to call the service.
# Here we use the DefaultAzureCredential, but any azure-identity credential can be used.
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = KeyClient(vault_url=VAULT_URL, credential=credential)
# First, create a key
key_name = "rotation-sample-key"
key = await client.create_rsa_key(key_name)
print("\nCreated a key; new version is {}".format(key.properties.version))
# Set the key's automated rotation policy to rotate the key two months after the key was created
actions = [KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE, time_after_create="P2M")]
updated_policy = await client.update_key_rotation_policy(key_name, lifetime_actions=actions)
# The created policy should only have one action
assert len(updated_policy.lifetime_actions) == 1, "There should be exactly one rotation policy action"
policy_action = updated_policy.lifetime_actions[0]
print(
"\nCreated a new key rotation policy: {} after {}".format(policy_action.action, policy_action.time_after_create)
)
# Get the key's current rotation policy
current_policy = await client.get_key_rotation_policy(key_name)
policy_action = current_policy.lifetime_actions[0]
print("\nCurrent rotation policy: {} after {}".format(policy_action.action, policy_action.time_after_create))
# Update the key's automated rotation policy to notify 30 days before the key expires
new_actions = [KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY, time_before_expiry="P30D")]
# You may also specify the duration after which the newly rotated key will expire
# In this example, any new key versions will expire after 90 days
new_policy = await client.update_key_rotation_policy(key_name, expires_in="P90D", lifetime_actions=new_actions)
# The updated policy should only have one action
assert len(new_policy.lifetime_actions) == 1, "There should be exactly one rotation policy action"
policy_action = new_policy.lifetime_actions[0]
print(
"\nUpdated rotation policy: {} {} before expiry".format(policy_action.action, policy_action.time_before_expiry)
)
# Finally, you can rotate a key on-demand by creating a new version of the key
rotated_key = await client.rotate_key(key_name)
print("\nRotated the key on-demand; new version is {}".format(rotated_key.properties.version))
# To clean up, delete the key
await client.delete_key(key_name)
print("\nDeleted the key")
await credential.close()
await client.close()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(run_sample())
loop.close()
| 47.041667 | 123 | 0.691541 | 582 | 4,516 | 5.202749 | 0.286942 | 0.083223 | 0.044914 | 0.030383 | 0.288639 | 0.212021 | 0.187583 | 0.153236 | 0.128137 | 0.097094 | 0 | 0.006221 | 0.145704 | 4,516 | 95 | 124 | 47.536842 | 0.778642 | 0.48295 | 0 | 0.051282 | 0 | 0 | 0.164192 | 0 | 0 | 0 | 0 | 0 | 0.051282 | 1 | 0 | false | 0 | 0.128205 | 0 | 0.128205 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb3fd4082484b9c13d6fba30ffea31607188e294 | 1,050 | py | Python | lcmlutils/urls.py | cnr-stiima-vci/LCMLUtils | bee3e293de42d28e6c141238728bbc725337109c | [
"BSD-3-Clause"
] | null | null | null | lcmlutils/urls.py | cnr-stiima-vci/LCMLUtils | bee3e293de42d28e6c141238728bbc725337109c | [
"BSD-3-Clause"
] | 3 | 2021-03-19T23:45:42.000Z | 2021-06-10T23:09:18.000Z | lcmlutils/urls.py | cnr-stiima-vci/LCMLUtils | bee3e293de42d28e6c141238728bbc725337109c | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import include, patterns, url
from django.views.generic import TemplateView
from django.contrib import admin
from rest_framework import routers
from lcmlutils.views import LegendViewSet
router = routers.DefaultRouter()
router.register(r'legends', LegendViewSet)
admin.autodiscover()
urlpatterns = patterns(
'lcmlutils.views',
url(r'^accounts/', include('django.contrib.auth.urls')),
url(r'^$', 'index', name='index'),
url(r'^code/', 'code', name='code'),
url(r'^rest/', include(router.urls)),
url(r'^services/list-basic-elements', 'list_basic_elements', name='list-basic-elements'),
url(r'^services/basic-element-schema/(?P<basic_element_name>\w{0,50})', 'basic_element_schema', name='basic-element-schema'),
url(r'^services/derived-classes-list/(?P<basename>\w{0,50})', 'derived_classes_list', name='derived-classes-list'),
url(r'^services/similarity-assessment', 'similarity_assessment', name='similarity-assessment'),
url(r'^admin/', include(admin.site.urls)),
) | 47.727273 | 130 | 0.711429 | 136 | 1,050 | 5.419118 | 0.338235 | 0.048847 | 0.065129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006445 | 0.113333 | 1,050 | 22 | 131 | 47.727273 | 0.785177 | 0 | 0 | 0 | 0 | 0 | 0.418447 | 0.234951 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb42672b945fcc38633501f95df64e3348969a2b | 1,737 | py | Python | rlj/main.py | CoderOJ/RLJ | 20bb7ffd1581bf8cf820e0f594b63d2760ea721c | [
"MIT"
] | 45 | 2017-11-19T06:54:07.000Z | 2022-03-21T14:44:42.000Z | rlj/main.py | CoderOJ/RLJ | 20bb7ffd1581bf8cf820e0f594b63d2760ea721c | [
"MIT"
] | 6 | 2017-12-12T05:33:16.000Z | 2018-08-28T12:30:50.000Z | rlj/main.py | CoderOJ/RLJ | 20bb7ffd1581bf8cf820e0f594b63d2760ea721c | [
"MIT"
] | 12 | 2017-12-12T04:43:04.000Z | 2022-02-23T00:13:47.000Z | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
'''_Rqy's local judge.
Usage:
rlj -h | --help | --version
rlj [-s | --silent] [-j Source] [-c Config] [--O2]
rlj --genConfig [FILE]
rlj -d | --delete
Arguments:
FILE Location of the generated config file; defaults to config.yml if omitted
Options:
-h --help Show this message and exit
--version Show the version number and exit
-s --silent Simplify output messages
-j Source Judge the specified source file
-c Config Specify the config file [default: config.yml]
--O2 Enable the O2 option when compiling
--genConfig Generate a config file
-d --delete Delete the temp folder
'''
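# Illustrative invocations, mirroring the usage block above (file names hypothetical):
#   rlj -j main.cpp -c config.yml --O2
#   rlj --genConfig my_config.yml
#   rlj -d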
import os
import docopt
from .constants import __version__
from .judge import Judge
from .config import Config, makeConfig, genConfig
from .output import printResult
def main():
arguments = docopt.docopt(__doc__, help=True, version=__version__)
# print(arguments)
if arguments['--genConfig']:
fileName = arguments['FILE']
if fileName is None:
fileName = 'config.yml'
genConfig(fileName)
return
elif arguments['--delete']:
os.system('rm -rf temp')
return
try:
configFile = arguments['-c']
if not os.path.exists(configFile):
raise FileNotFoundError('Config file {} does not exist!'.format(configFile))
config = makeConfig(configFile, arguments)
if not os.path.exists(config.source):
raise FileNotFoundError('Source file {} does not exist!'.format(config.source))
judger = Judge(config)
if not printResult(config, judger.judge()):
return 1
if not config.silent and judger.runner.firstWA is not None:
print('You got test case {} wrong;\ndiff details are in diff_log'.format(
judger.runner.firstWA))
except FileNotFoundError as e:
print('Error: ' + str(e))
return 1
if __name__ == '__main__':
exit(main())
| 25.544118 | 70 | 0.616005 | 200 | 1,737 | 5.24 | 0.455 | 0.019084 | 0.013359 | 0.020992 | 0.032443 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005426 | 0.25734 | 1,737 | 67 | 71 | 25.925373 | 0.806977 | 0.276914 | 0 | 0.114286 | 0 | 0 | 0.085806 | 0.02486 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.171429 | 0 | 0.314286 | 0.114286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb436e0054384f89492cb45aa0cad687efc380f4 | 4,463 | py | Python | ctd_processing/paths.py | sharksmhi/ctd_processing | 616df4cd7ed626b678622448a08a0356086a8a3f | [
"MIT"
] | null | null | null | ctd_processing/paths.py | sharksmhi/ctd_processing | 616df4cd7ed626b678622448a08a0356086a8a3f | [
"MIT"
] | null | null | null | ctd_processing/paths.py | sharksmhi/ctd_processing | 616df4cd7ed626b678622448a08a0356086a8a3f | [
"MIT"
] | null | null | null | from pathlib import Path
import datetime
import os
class SBEPaths:
def __init__(self):
self._paths = {}
self._year = None
self._sub_dir_list_local = ['source', 'raw', 'cnv', 'nsf', 'cnv_up', 'plot', 'temp']
self._sub_dir_list_server = ['raw', 'cnv', 'nsf', 'cnv_up']
def __call__(self, key, create=False, default=None, **kwargs):
path = self._paths.get(key)
if not path:
if default is not None:
return default
return False
if create and not path.exists():
os.makedirs(str(path))
return path
@property
def year(self):
return self._year
@property
def local_sub_directories(self):
return self._sub_dir_list_local
@property
def server_sub_directories(self):
return self._sub_dir_list_server
def _local_key(self, key=None):
if key not in self.local_sub_directories + ['root']:
raise Exception(f'Not a valid sub directory: {key}')
return f'local_dir_{key}'
def _server_key(self, key=None):
if key not in self.server_sub_directories + ['root']:
raise Exception(f'Not a valid sub directory: {key}')
return f'server_dir_{key}'
def get_local_directory(self, key, create=False, default=None):
return self(self._local_key(key), create=create, default=default)
def get_server_directory(self, key, year=None, create=False, default=None):
# if key not in self.server_sub_directories + ['root']:
# return False
if year:
return self._get_server_directory_for_year(key, year, create=create)
return self(self._server_key(key), create=create, default=default)
def create_local_paths(self):
for key in self._sub_dir_list_local:
self.get_local_directory(key, create=True)
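# Usage sketch (directory names are hypothetical; see set_year() below for
# why the server sub directories need a year):
#   paths = SBEPaths()
#   paths.set_local_root_directory('C:/ctd_data')
#   paths.set_server_root_directory('//server/ctd')
#   paths.set_year(2021)
#   raw_dir = paths.get_server_directory('raw', create=True)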
def create_server_paths(self, year=None):
if not year:
year = datetime.datetime.now().year
for key in self._sub_dir_list_server:
self.get_server_directory(key, year=year, create=True)
def _get_server_directory_for_year(self, key, year, create=False):
if key not in self._sub_dir_list_server:
raise Exception(f'Invalid directory: {key}')
path = Path(self._paths['server_dir_root'], str(year), key)
if create and not path.exists():
os.makedirs(path)
return path
def set_config_root_directory(self, path):
self._paths['config_dir'] = Path(path)
# self._paths['instrumentinfo_file'] = Path(self._paths['config_dir'], 'instrumentinfo.xlsx')
self._paths['instrumentinfo_file'] = Path(self._paths['config_dir'], 'Instruments.xlsx')
def set_local_root_directory(self, directory):
root_directory = Path(directory)
if root_directory.name == 'data':
root_directory = root_directory.parent
self._paths['local_dir_root'] = root_directory
self._paths['working_dir'] = Path(self._paths['local_dir_root'], 'temp')
self._paths['local_dir_temp'] = self._paths['working_dir']
self._paths['local_dir_source'] = Path(self._paths['local_dir_root'], 'source')
self._paths['local_dir_raw'] = Path(self._paths['local_dir_root'], 'raw')
self._paths['local_dir_cnv'] = Path(self._paths['local_dir_root'], 'cnv')
self._paths['local_dir_cnv_up'] = Path(self._paths['local_dir_root'], 'cnv', 'up_cast')
self._paths['local_dir_nsf'] = Path(self._paths['local_dir_root'], 'data')
self._paths['local_dir_plot'] = Path(self._paths['local_dir_root'], 'plots')
def set_server_root_directory(self, directory):
print('set_server_root_directory', directory)
self._paths['server_dir_root'] = Path(directory)
self.set_year()
def set_year(self, year=None):
""" Year is neaded to set sub directories for the different filtypes """
if year:
self._year = str(year)
if self._year and self._paths.get('server_dir_root'):
self._paths['server_dir_raw'] = Path(self._paths['server_dir_root'], self._year, 'raw')
self._paths['server_dir_cnv'] = Path(self._paths['server_dir_root'], self._year, 'cnv')
self._paths['server_dir_nsf'] = Path(self._paths['server_dir_root'], self._year, 'data')
self._paths['server_dir_cnv_up'] = Path(self._paths['server_dir_root'], self._year, 'cnv_up') | 42.504762 | 105 | 0.64665 | 603 | 4,463 | 4.446103 | 0.121061 | 0.117493 | 0.082432 | 0.095114 | 0.500559 | 0.38232 | 0.298396 | 0.251399 | 0.172324 | 0.072361 | 0 | 0 | 0.226753 | 4,463 | 105 | 105 | 42.504762 | 0.776876 | 0.051311 | 0 | 0.154762 | 0 | 0 | 0.166351 | 0.005916 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.035714 | 0.047619 | 0.380952 | 0.011905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb44efdbbcb7fe1a93f020445f196497de771f2b | 14,938 | py | Python | rest_api/build/lib/rest_api/workflow/patients.py | hidura/sawtooth-blockmed | d0e047972557315489de1b308a84f477da6f993d | [
"Apache-2.0"
] | 3 | 2020-06-03T23:09:27.000Z | 2021-05-15T22:21:21.000Z | rest_api/rest_api/workflow/patients.py | hidura/sawtooth-blockmed | d0e047972557315489de1b308a84f477da6f993d | [
"Apache-2.0"
] | 6 | 2020-07-21T00:03:29.000Z | 2021-09-28T03:30:02.000Z | rest_api/rest_api/workflow/patients.py | hidura/sawtooth-blockmed | d0e047972557315489de1b308a84f477da6f993d | [
"Apache-2.0"
] | 1 | 2020-06-26T03:50:50.000Z | 2020-06-26T03:50:50.000Z | from sanic import Blueprint
from sanic import response
# from rest_api.common.protobuf import payload_pb2
from rest_api.common import transaction
# NOTE: grant_access/revoke_access below call `consent_transaction`, which the
# original file never imported; the module path used here is an assumption.
from rest_api.common import consent_transaction
from rest_api.workflow import general, security_messaging
from rest_api.workflow.errors import ApiInternalError, ApiBadRequest
import requests
import logging
import hashlib
from rest_api.GeneralTool import GeneralTool
PATIENTS_BP = Blueprint('patients')
_client_type_ = 2
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
@PATIENTS_BP.get('patients')
async def get_patient_basic_info(request):
"""Fetches complete details of all Accounts in state"""
idcard = request.headers['idcard']
headers = {'Content-Type': 'application/json'}
dataParty = {"username": idcard}
response_load = requests.post('http://validator:8863/getPrivateKey',
data=GeneralTool().parse2JSON(dataParty),
headers=headers)
if response_load.status_code != 200:
raise Exception("There was a problem communicating with the validator.")
elif response_load.status_code == 200:
keys = GeneralTool().parseFromJSON(response_load.content.decode())
public_key = keys['public_key']
party = await security_messaging.get_party(request.app.config.VAL_CONN, public_key)
patientKey='{}|{}'.format(public_key, '0'+str(_client_type_))
patient = await security_messaging.get_patient(request.app.config.VAL_CONN, patientKey)
patient_list_json = []
patient_list_json.append({
'name': party.name,
'lastname':party.lastname,
'telephone':party.telephone,
'birthdate':party.birthdate,
'idcard':party.idcard,
'sex': patient.biological_sex,
'photo': patient.photo,
'insurance': patient.current_insurance,
'blood_type':patient.blood_type
})
return response.json(body={'data': patient_list_json},
headers=general.get_response_headers())
else:
raise Exception("User has no patient registered.")
@PATIENTS_BP.post('patients')
async def register_new_patient(request):
"""Updates auth information for the authorized account"""
# keyfile = common.get_keyfile(request.json.get['signer'])
required_fields = ['first_name', 'last_name', 'idcard_type','idcard']
general.validate_fields(required_fields, request.json)
name = request.json.get('first_name')
surname = request.json.get('last_name')
idcard = request.json.get('idcard')
idcard_type = int(request.json.get('idcard_type'))
party_info = {"name": name,
"lastname": surname,
"idcard": idcard,
"telephone": request.json.get('telephone') if 'telephone' in request.json else " ",
"birthdate": request.json.get('birthdate') if 'birthdate' in request.json else " ",
"idcard_type": idcard_type}
headers = {'Content-Type': 'application/json'}
dataParty = {"username": party_info['idcard'], "_client_type_": _client_type_}
response_load = requests.post('http://validator:8863/getPrivateKey',
data=GeneralTool().parse2JSON(dataParty),
headers=headers)
batch_lst=[]
if response_load.status_code != 200:
raise Exception("There was a problem communicating with the validator.")
elif response_load.status_code == 200 and 'private_key' in GeneralTool().parseFromJSON(response_load.content.decode()):
keys = GeneralTool().parseFromJSON(response_load.content.decode())
privatekey=keys['private_key']
public_key=keys['public_key']
elif response_load.status_code == 200 and 'private_key' not in GeneralTool().parseFromJSON(
response_load.content.decode()):
party_txn, privatekey, public_key = general.addParty(party_info, _client_type_)
batch_lst.append(party_txn)
else:
raise Exception("There was a problem communicating with the validator.")
patient_signer = GeneralTool().addSigner(GeneralTool().ParsePrivateKey(private_key_str=privatekey))
patientKey='{}|{}'.format(public_key, '0'+str(_client_type_))
patient_info={'party_key':public_key,
'record_id':patientKey,
'biological_sex':request.json.get('biological_sex'),
"blood_type":request.json.get('blood_type') if 'blood_type' in request.json else " ",
"critical_info": request.json.get('critical_info') if 'critical_info' in
request.json else " ",
"current_insurance":request.json.get('current_insurance') if 'current_insurance' in
request.json else " ",
"disability_kind": request.json.get('disability_kind') if 'disability_kind' in
request.json else " ",
"disabled_person": request.json.get('disability_kind') if 'disability_kind' in
request.json else '0',
"familiar_antecedents": request.json.get('familiar_antecedents') if 'familiar_antecedents' in
request.json else " ",
"general_info": request.json.get('general_info') if 'general_info' in
request.json else " ",
"history_information": request.json.get('general_info') if 'general_info' in
request.json else {},
"alcohol": request.json.get('alcohol') if 'alcohol' in request.json else '0',
"anticonceptive": request.json.get('anticonceptive')
if 'anticonceptive' in request.json else '0',
"car_child_safety": request.json.get('car_child_safety')
if 'car_child_safety' in request.json else '0',
"car_revision": request.json.get('car_revision')
if 'car_revision' in request.json else '0',
"car_seat_belt": request.json.get('car_seat_belt')
if 'car_seat_belt' in request.json else '0',
"coffee": request.json.get('coffee')
if 'coffee' in request.json else '0',
"diet": request.json.get('diet')
if 'diet' in request.json else '0',
"drug_iv": request.json.get('drug_iv')
if 'drug_iv' in request.json else '0',
"drug_usage": request.json.get('drug_usage')
if 'drug_usage' in request.json else '0',
"eats_alone": request.json.get('eats_alone')
if 'eats_alone' in request.json else '0',
"ex_alcoholic": request.json.get('ex_alcoholic')
if 'ex_alcoholic' in request.json else '0',
"ex_drug_addict": request.json.get('ex_drug_addict')
if 'ex_drug_addict' in request.json else '0',
"ex_smoker": request.json.get('ex_smoker')
if 'ex_smoker' in request.json else '0',
"exercise": request.json.get('exercise')
if 'exercise' in request.json else '0',
"helmet": request.json.get('helmet')
if 'helmet' in request.json else '0',
"home_safety": request.json.get('home_safety')
if 'home_safety' in request.json else '0',
"motorcycle_rider": request.json.get('motorcycle_rider')
if 'motorcycle_rider' in request.json else '0',
"prostitute": request.json.get('prostitute')
if 'prostitute' in request.json else '0',
"salt": request.json.get('salt')
if 'salt' in request.json else '0',
"second_hand_smoker": request.json.get('second_hand_smoker')
if 'second_hand_smoker' in request.json else '0',
"smoking": request.json.get('smoking')
if 'smoking' in request.json else '0',
"soft_drinks": request.json.get('soft_drinks')
if 'soft_drinks' in request.json else '0',
"traffic_laws": request.json.get('traffic_laws')
if 'traffic_laws' in request.json else '0',
"photo": request.json.get('photo')
if 'photo' in request.json else '/static/pictures/man.svg'
if request.json.get('biological_sex') == 'm' else '/static/pictures/woman.svg'
}
patient_txn = transaction.create_patient(
txn_signer=patient_signer,
batch_signer=patient_signer,
patient_info=patient_info
)
batch_lst.append(patient_txn)
evaluation_record_id='{}|{}'.format(patient_info['party_key'], party_info['idcard'])
record_patient_txn = transaction.add_record_patient(
txn_signer=patient_signer,
batch_signer=patient_signer,
record_id=evaluation_record_id,
record_owner=public_key
)
batch_lst.append(record_patient_txn)
batch, batch_id = transaction.make_batch_and_id(batch_lst, patient_signer)
await security_messaging.add_patient(
request.app.config.VAL_CONN,
request.app.config.TIMEOUT,
[batch])
# try:
# await security_messaging.check_batch_status(
# request.app.config.VAL_CONN, [batch_id])
# except (ApiBadRequest, ApiInternalError) as err:
# # await auth_query.remove_auth_entry(
# # request.app.config.DB_CONN, request.json.get('email'))
# raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
@PATIENTS_BP.get('patients/revoke/<doctor_pkey>')
async def revoke_access(request, doctor_pkey):
"""Updates auth information for the authorized account"""
client_key = general.get_request_key_header(request)
client_signer = general.get_signer(request, client_key)
revoke_access_txn = consent_transaction.revoke_access(
txn_signer=client_signer,
batch_signer=client_signer,
doctor_pkey=doctor_pkey)
batch, batch_id = transaction.make_batch_and_id([revoke_access_txn], client_signer)
await security_messaging.revoke_access(
request.app.config.VAL_CONN,
request.app.config.TIMEOUT,
[batch], client_key)
try:
await security_messaging.check_batch_status(
request.app.config.VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
@PATIENTS_BP.get('patients/grant/<doctor_pkey>')
async def grant_access(request, doctor_pkey):
"""Updates auth information for the authorized account"""
client_key = general.get_request_key_header(request)
client_signer = general.get_signer(request, client_key)
grant_access_txn = consent_transaction.grant_access(
txn_signer=client_signer,
batch_signer=client_signer,
doctor_pkey=doctor_pkey)
batch, batch_id = transaction.make_batch_and_id([grant_access_txn], client_signer)
await security_messaging.grant_access(
request.app.config.VAL_CONN,
request.app.config.TIMEOUT,
[batch], client_key)
try:
await security_messaging.check_batch_status(
request.app.config.VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
# @ACCOUNTS_BP.get('accounts/<key>')
# async def get_account(request, key):
# """Fetches the details of particular Account in state"""
# try:
# auth_key = common.deserialize_auth_token(
# request.app.config.SECRET_KEY,
# request.token).get('public_key')
# except (BadSignature, TypeError):
# auth_key = None
# account_resource = await accounts_query.fetch_account_resource(
# request.app.config.DB_CONN, key, auth_key)
# return response.json(account_resource)
#
# @ACCOUNTS_BP.patch('accounts')
# @authorized()
# async def update_account_info(request):
# """Updates auth information for the authorized account"""
# token = common.deserialize_auth_token(
# request.app.config.SECRET_KEY, request.token)
#
# update = {}
# if request.json.get('password'):
# update['hashed_password'] = bcrypt.hashpw(
# bytes(request.json.get('password'), 'utf-8'), bcrypt.gensalt())
# if request.json.get('email'):
# update['email'] = request.json.get('email')
#
# if update:
# updated_auth_info = await auth_query.update_auth_info(
# request.app.config.DB_CONN,
# token.get('email'),
# token.get('public_key'),
# update)
# new_token = common.generate_auth_token(
# request.app.config.SECRET_KEY,
# updated_auth_info.get('email'),
# updated_auth_info.get('publicKey'))
# else:
# updated_auth_info = await accounts_query.fetch_account_resource(
# request.app.config.DB_CONN,
# token.get('public_key'),
# token.get('public_key'))
# new_token = request.token
#
# return response.json(
# {
# 'authorization': new_token,
# 'account': updated_auth_info
# })
#
# def _create_account_dict(body, public_key):
# keys = ['label', 'description', 'email']
#
# account = {k: body[k] for k in keys if body.get(k) is not None}
#
# account['publicKey'] = public_key
# account['holdings'] = []
#
# return account
# def _create_auth_dict(request, public_key, private_key):
# auth_entry = {
# 'public_key': public_key,
# 'email': request.json['email']
# }
#
# auth_entry['encrypted_private_key'] = common.encrypt_private_key(
# request.app.config.AES_KEY, public_key, private_key)
# auth_entry['hashed_password'] = bcrypt.hashpw(
# bytes(request.json.get('password'), 'utf-8'), bcrypt.gensalt())
#
# return auth_entry
| 43.55102 | 123 | 0.611662 | 1,664 | 14,938 | 5.250601 | 0.145433 | 0.107016 | 0.078517 | 0.066155 | 0.483805 | 0.439052 | 0.400366 | 0.34703 | 0.330892 | 0.301934 | 0 | 0.005075 | 0.274468 | 14,938 | 342 | 124 | 43.678363 | 0.80107 | 0.200763 | 0 | 0.31401 | 0 | 0 | 0.170394 | 0.009204 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.062802 | 0.009662 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb473c10d24ec39e04dd53e4e0b6f82cbade7b5d | 3,596 | py | Python | CODE/losses/Hausdorff_loss.py | vivek-r-2000/BoundaryNet | fce8d51a516646c1001116d03872dbba9e4c5082 | [
"MIT"
] | 17 | 2021-06-07T12:30:23.000Z | 2022-03-07T06:32:25.000Z | CODE/losses/Hausdorff_loss.py | vivek-r-2000/BoundaryNet | fce8d51a516646c1001116d03872dbba9e4c5082 | [
"MIT"
] | 2 | 2021-07-13T13:24:14.000Z | 2022-03-08T07:21:09.000Z | CODE/losses/Hausdorff_loss.py | vivek-r-2000/BoundaryNet | fce8d51a516646c1001116d03872dbba9e4c5082 | [
"MIT"
] | 4 | 2021-06-26T15:12:44.000Z | 2021-11-08T16:36:52.000Z | import math
import torch
from sklearn.utils.extmath import cartesian
import numpy as np
from torch.nn import functional as F
import os
import time
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import KernelDensity  # 'sklearn.neighbors.kde' was removed in newer scikit-learn
import skimage.io
from torch import nn
torch.set_default_dtype(torch.float32)
def _assert_no_grad(variables):
for var in variables:
assert not var.requires_grad, \
"nn criterions don't compute the gradient w.r.t. targets - please " \
"mark these variables as volatile or not requiring gradients"
# def cdist(x, y):
# '''
# Input: x is a Nxd Tensor
# y is a Mxd Tensor
# Output: dist is a NxM matrix where dist[i,j] is the norm
# between x[i,:] and y[j,:]
# i.e. dist[i,j] = ||x[i,:]-y[j,:]||
# '''
# differences = x.unsqueeze(1) - y.unsqueeze(0)
# distances = torch.sum(torch.abs(differences), -1)
# return distances
def cdist(x, y):
'''
Input: x is a Nxd Tensor
y is a Mxd Tensor
Output: dist is a NxM matrix where dist[i,j] is the norm
between x[i,:] and y[j,:]
i.e. dist[i,j] = ||x[i,:]-y[j,:]||
'''
differences = x.unsqueeze(1) - y.unsqueeze(0)
distances = torch.sum(differences**2, -1).sqrt()
return distances
def averaged_hausdorff_distance(set1, set2, max_ahd=np.inf):
"""
Compute the Averaged Hausdorff Distance function
between two unordered sets of points (the function is symmetric).
Batches are not supported, so squeeze your inputs first!
:param set1: Array/list where each row/element is an N-dimensional point.
:param set2: Array/list where each row/element is an N-dimensional point.
:param max_ahd: Maximum AHD possible to return if any set is empty. Default: inf.
:return: The Averaged Hausdorff Distance between set1 and set2.
"""
if len(set1) == 0 or len(set2) == 0:
return max_ahd
set1 = np.array(set1)
set2 = np.array(set2)
assert set1.ndim == 2, 'got %s' % set1.ndim
assert set2.ndim == 2, 'got %s' % set2.ndim
    assert set1.shape[1] == set2.shape[1], \
        'The points in both sets must have the same number of dimensions, got %s and %s.'\
        % (set1.shape[1], set2.shape[1])
d2_matrix = pairwise_distances(set1, set2, metric='euclidean')
res = np.average(np.min(d2_matrix, axis=0)) + \
np.average(np.min(d2_matrix, axis=1))
return res
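
# Worked example (illustrative, not part of the original module):
# averaged_hausdorff_distance([[0, 0], [1, 1]], [[0, 1]]) returns 2.0 --
# every point's nearest neighbour in the other set is at distance 1.0,
# and the two directional averages (1.0 each) are summed.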
class AveragedHausdorffLoss(nn.Module):
def __init__(self):
super(AveragedHausdorffLoss, self).__init__()
def forward(self, set1, set2):
"""
Compute the Averaged Hausdorff Distance function
between two unordered sets of points (the function is symmetric).
Batches are not supported, so squeeze your inputs first!
:param set1: Tensor where each row is an N-dimensional point.
:param set2: Tensor where each row is an N-dimensional point.
:return: The Averaged Hausdorff Distance between set1 and set2.
"""
assert set1.ndimension() == 2, 'got %s' % set1.ndimension()
assert set2.ndimension() == 2, 'got %s' % set2.ndimension()
        assert set1.size()[1] == set2.size()[1], \
            'The points in both sets must have the same number of dimensions, got %s and %s.'\
            % (set1.size()[1], set2.size()[1])
d2_matrix = cdist(set1, set2)
term_1 = torch.sum(torch.min(d2_matrix, 1)[0])
term_2 = torch.sum(torch.min(d2_matrix, 0)[0])
res = (term_1+term_2)*0.5
return res
| 32.107143 | 94 | 0.637375 | 531 | 3,596 | 4.258004 | 0.284369 | 0.007961 | 0.055285 | 0.049536 | 0.535604 | 0.509951 | 0.488722 | 0.459973 | 0.459973 | 0.379478 | 0 | 0.029151 | 0.246385 | 3,596 | 111 | 95 | 32.396396 | 0.805166 | 0.377642 | 0 | 0.081633 | 0 | 0 | 0.15043 | 0 | 0 | 0 | 0 | 0 | 0.163265 | 1 | 0.102041 | false | 0 | 0.22449 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb484886b726aa53b9e6093b5153c7144fea1e19 | 3,366 | py | Python | tardis/energy_input/gamma_ray_estimators.py | sonachitchyan/tardis | a8bad890d8ccd906993012e954ea7bcd683a96b7 | [
"BSD-3-Clause"
] | null | null | null | tardis/energy_input/gamma_ray_estimators.py | sonachitchyan/tardis | a8bad890d8ccd906993012e954ea7bcd683a96b7 | [
"BSD-3-Clause"
] | null | null | null | tardis/energy_input/gamma_ray_estimators.py | sonachitchyan/tardis | a8bad890d8ccd906993012e954ea7bcd683a96b7 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from numba import njit
from tardis.montecarlo.montecarlo_numba import njit_dict_no_parallel
from tardis.energy_input.util import (
angle_aberration_gamma,
doppler_gamma,
H_CGS_KEV,
ELECTRON_MASS_ENERGY_KEV,
kappa_calculation,
)
from tardis.energy_input.calculate_opacity import (
compton_opacity_calculation,
SIGMA_T,
photoabsorption_opacity_calculation,
)
def compton_emissivity_estimator(packet, distance):
"""Compton scattering emissivity estimator for integral
calculations
Parameters
----------
packet : GXPacket
Packet that needs its emissivity calculated
distance : float64
Distance packet has travelled
Returns
-------
float64, int
Unnormalized emissivity estimator, line index
"""
cmf_direction = angle_aberration_gamma(
packet.get_direction_vector(), packet.location_r
)
cmf_angle = np.dot(cmf_direction, [1, 0, 0])
frequency_factor = (
1
+ H_CGS_KEV * packet.nu_cmf / ELECTRON_MASS_ENERGY_KEV
+ (1.0 - cmf_angle)
)
    # NOTE: GET_NEAREST_LINE_REDWARD_FUNCTION is not defined or imported in
    # this module; it must be bound elsewhere before this estimator is called.
    line_index = GET_NEAREST_LINE_REDWARD_FUNCTION(
        packet.nu_cmf / frequency_factor
    )
partial_cross_section = (
3.0
/ 16.0
/ np.pi
* SIGMA_T
/ frequency_factor
/ frequency_factor
* (frequency_factor + (1.0 / frequency_factor) + cmf_angle**2.0 - 1.0)
)
doppler_factor = doppler_gamma(
packet.get_direction_vector(), packet.location_r
)
emissivity = (
packet.energy_rf
* partial_cross_section
* distance
* doppler_factor**2.0
/ frequency_factor
)
return emissivity, line_index
def pair_creation_estimator(packet, pair_creation_opacity, distance):
"""Calculates the emissivity for pair creation gamma-rays
Parameters
----------
packet : GXPacket
Packet that needs its emissivity calculated
pair_creation_opacity : float64
Opacity of the pair creation process
distance : float64
Distance packet has travelled
Returns
-------
float64
Emissivity estimator
"""
normalized_energy = (
2 * ELECTRON_MASS_ENERGY_KEV / (H_CGS_KEV * packet.nu_cmf)
)
emissivity = (
pair_creation_opacity * normalized_energy * packet.energy_rf * distance
)
return emissivity
@njit(**njit_dict_no_parallel)
def get_average_compton_fraction(energy):
def f(x, mu):
return 1.0 / (1.0 + x * (1.0 - mu))
def cross_section(x, mu):
return (
(3.0 * SIGMA_T)
/ (16.0 * np.pi)
* f(x, mu) ** 2.0
* (f(x, mu) + 1.0 / f(x, mu) - (1.0 - mu**2))
)
    x = kappa_calculation(energy)
    mus = np.linspace(-1, 1, 100)
    dmu = mus[1] - mus[0]
    weighted = 0  # cross-section-weighted energy ratio; avoids shadowing builtin sum
    norm = 0
    for mu in mus:
        weighted += cross_section(x, mu) * f(x, mu) * dmu
        norm += cross_section(x, mu) * dmu
    integral = 1.0 - weighted / norm
    # 1 - integral simplifies to weighted / norm: the cross-section-weighted
    # mean of f(x, mu), i.e. the average scattered-to-incident energy ratio
    return 1 - integral
@njit(**njit_dict_no_parallel)
def deposition_estimator_kasen(energy, ejecta_density, iron_group_fraction):
return get_average_compton_fraction(energy) * compton_opacity_calculation(
energy, ejecta_density
) + photoabsorption_opacity_calculation(
energy, ejecta_density, iron_group_fraction
)
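
# Illustrative call (hypothetical values; units follow the module's constants,
# with photon energy in keV and the opacity helpers consuming the ejecta
# density and iron-group mass fraction):
# d = deposition_estimator_kasen(energy=1000.0, ejecta_density=1e-14,
#                                iron_group_fraction=0.5)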
| 24.042857 | 79 | 0.637552 | 403 | 3,366 | 5.047146 | 0.260546 | 0.009833 | 0.009833 | 0.026549 | 0.299902 | 0.24238 | 0.158309 | 0.158309 | 0.060964 | 0 | 0 | 0.02451 | 0.272727 | 3,366 | 139 | 80 | 24.215827 | 0.806373 | 0.180333 | 0 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.059524 | 0.035714 | 0.202381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb4a77b5bf284f55b92447995a091eca79d4b70a | 5,875 | py | Python | smartsim/utils/helpers.py | neuralvis/SmartSim | 0248ffe8a61be58afe376c5c363172f3e91d227b | [
"BSD-2-Clause"
] | 117 | 2021-04-02T19:14:05.000Z | 2022-03-30T16:46:23.000Z | smartsim/utils/helpers.py | neuralvis/SmartSim | 0248ffe8a61be58afe376c5c363172f3e91d227b | [
"BSD-2-Clause"
] | 92 | 2021-04-02T22:02:31.000Z | 2022-03-30T18:28:30.000Z | smartsim/utils/helpers.py | neuralvis/SmartSim | 0248ffe8a61be58afe376c5c363172f3e91d227b | [
"BSD-2-Clause"
] | 20 | 2021-04-02T20:16:53.000Z | 2022-02-10T20:19:14.000Z | # BSD 2-Clause License
#
# Copyright (c) 2021, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
A file of helper functions for SmartSim
"""
import os
import socket
from os import environ
from shutil import which
import psutil
from ..error import SSConfigError
def get_ip_from_interface(interface):
"""Get IPV4 address of a network interface
:param interface: interface name
:type interface: str
:raises ValueError: if the interface does not exist
:raises ValueError: if interface does not have an IPV4 address
:return: ip address of interface
:rtype: str
"""
net_if_addrs = psutil.net_if_addrs()
if interface not in net_if_addrs:
available = list(net_if_addrs.keys())
raise ValueError(
f"{interface} is not a valid network interface. "
f"Valid network interfaces are: {available}"
)
for info in net_if_addrs[interface]:
if info.family == socket.AF_INET:
return info.address
raise ValueError(f"interface {interface} doesn't have an IPv4 address")
def init_default(default, init_value, expected_type=None):
if init_value is None:
return default
if expected_type is not None and not isinstance(init_value, expected_type):
raise TypeError(f"Argument was of type {type(init_value)}, not {expected_type}")
return init_value
def expand_exe_path(exe):
"""Takes an executable and returns the full path to that executable
:param exe: exectable or file
:type exe: str
"""
# which returns none if not found
in_path = which(exe)
if not in_path:
if os.path.isfile(exe) and os.access(exe, os.X_OK):
return os.path.abspath(exe)
if os.path.isfile(exe) and not os.access(exe, os.X_OK):
raise SSConfigError(f"File, {exe}, is not an executable")
raise SSConfigError(f"Could not locate executable {exe}")
return os.path.abspath(in_path)
def is_valid_cmd(command):
try:
expand_exe_path(command)
return True
except SSConfigError:
return False
def get_env(env_var):
"""Retrieve an environment variable through os.environ
:param str env_var: environment variable to retrieve
:throws: SSConfigError
"""
try:
value = environ[env_var]
return value
except KeyError as e:
raise SSConfigError("SmartSim environment not set up!") from e
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38,
)
def colorize(string, color, bold=False, highlight=False):
"""
Colorize a string.
This function was originally written by John Schulman.
And then borrowed from spinningup
https://github.com/openai/spinningup/blob/master/spinup/utils/logx.py
"""
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(str(num))
if bold:
attr.append("1")
return "\x1b[%sm%s\x1b[0m" % (";".join(attr), string)
def delete_elements(dictionary, key_list):
"""Delete elements from a dictionary.
:param dictionary: the dictionary from which the elements must be deleted.
:type dictionary: dict
:param key_list: the list of keys to delete from the dictionary.
    :type key_list: list
"""
for key in key_list:
if key in dictionary:
del dictionary[key]
def cat_arg_and_value(arg_name, value):
"""Concatenate a command line argument and its value
    This function returns ``arg_name`` and ``value``
concatenated in the best possible way for a command
line execution, namely:
- if arg_name starts with `--` (e.g. `--arg`):
`arg_name=value` is returned (i.e. `--arg=val`)
- if arg_name starts with `-` (e.g. `-a`):
`arg_name value` is returned (i.e. `-a val`)
- if arg_name does not start with `-` and it is a
long option (e.g. `arg`):
`--arg_name=value` (i.e., `--arg=val`)
- if arg_name does not start with `-` and it is a
short option (e.g. `a`):
      `-arg_name value` (i.e., `-a val`)
:param arg_name: the command line argument name
:type arg_name: str
:param value: the command line argument value
:type value: str
"""
if arg_name.startswith("--"):
return "=".join((arg_name, str(value)))
elif arg_name.startswith("-"):
return " ".join((arg_name, str(value)))
elif len(arg_name) == 1:
return " ".join(("-" + arg_name, str(value)))
else:
return "=".join(("--" + arg_name, str(value)))
| 31.756757 | 88 | 0.676255 | 834 | 5,875 | 4.685851 | 0.340528 | 0.034033 | 0.012794 | 0.0174 | 0.155834 | 0.14304 | 0.10261 | 0.076766 | 0.076766 | 0.076766 | 0 | 0.008175 | 0.229617 | 5,875 | 184 | 89 | 31.929348 | 0.855281 | 0.513191 | 0 | 0.025641 | 0 | 0 | 0.123382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.076923 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb4af4ba9492cc2fccf5e1c170147154dd221d49 | 7,357 | py | Python | descarteslabs/client/services/metadata/smoke_tests/test_metadata.py | descarteslabs/descarteslabs-python | efc874d6062603dc424c9646287a9b1f8636e7ac | [
"Apache-2.0"
] | 167 | 2017-03-23T22:16:58.000Z | 2022-03-08T09:19:30.000Z | descarteslabs/client/services/metadata/smoke_tests/test_metadata.py | descarteslabs/descarteslabs-python | efc874d6062603dc424c9646287a9b1f8636e7ac | [
"Apache-2.0"
] | 93 | 2017-03-23T22:11:40.000Z | 2021-12-13T18:38:53.000Z | descarteslabs/client/services/metadata/smoke_tests/test_metadata.py | descarteslabs/descarteslabs-python | efc874d6062603dc424c9646287a9b1f8636e7ac | [
"Apache-2.0"
] | 46 | 2017-03-25T19:12:14.000Z | 2021-08-15T18:04:29.000Z | # Copyright 2018-2020 Descartes Labs.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
from descarteslabs.client.services.metadata import Metadata
from descarteslabs.client.exceptions import NotFoundError
class TestMetadata(unittest.TestCase):
instance = None
@classmethod
def setUpClass(cls):
cls.instance = Metadata()
def test_available_products(self):
r = self.instance.available_products()
assert len(r) > 0
def test_search(self):
r = self.instance.search()
assert len(r["features"]) > 0
def test_search_dltile(self):
dltile = "256:16:30.0:15:-11:591"
r = self.instance.search(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
products=["landsat:LC08:PRE:TOAR"],
dltile=dltile,
)
ids = [f["id"] for f in r["features"]]
assert "landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1" in ids
def test_sat_id(self):
r = self.instance.search(
start_datetime="2016-07-06", end_datetime="2016-07-07", sat_ids="LANDSAT_8"
)
assert len(r["features"]) > 0
def test_cloud_fraction(self):
r = self.instance.search(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
sat_ids="LANDSAT_8",
cloud_fraction=0.5,
)
for feature in r["features"]:
assert feature["properties"]["cloud_fraction"] < 0.5
r = self.instance.search(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
sat_ids="LANDSAT_8",
cloud_fraction=0.0,
)
for feature in r["features"]:
assert feature["properties"]["cloud_fraction"] == 0.0
def test_search_by_product(self):
r = self.instance.search(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
products=["landsat:LC08:PRE:TOAR"],
)
assert len(r["features"]) > 0
def test_fields(self):
cases = [
[],
["id", "key"], # ["id", "key"]
["key"], # ["id", "key"]
["geometry"], # ["geometry", "id", "type"]
]
for fields in cases:
r = self.instance.search(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
products="landsat:LC08:PRE:TOAR",
limit=1,
fields=fields,
)
for feature in r["features"]:
if "id" not in fields:
fields.append("id")
if "geometry" in fields:
fields.append("type")
if "key" in fields or "geometry" in fields:
fields.append("properties")
assert sorted(feature.keys()) == sorted(fields)
def test_multiple_products_search(self):
r = self.instance.search(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
products=["landsat:LE07:PRE:TOAR", "landsat:LC08:PRE:TOAR"],
)
assert len(r["features"]) > 0
def test_place(self):
r = self.instance.search(
products=["landsat:LC08:PRE:TOAR"],
place="north-america_united-states_iowa",
limit=1,
)
assert 1 == len(r["features"])
def test_summary(self):
r = self.instance.summary(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
products=["landsat:LC08:PRE:TOAR"],
pixels=True,
)
assert "products" in r
assert "count" in r
assert "pixels" in r
assert "bytes" in r
assert r["count"] > 0
def test_summary_dltile(self):
dltile = "256:16:30.0:15:-11:591"
r = self.instance.summary(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
products=["landsat:LC08:PRE:TOAR"],
dltile=dltile,
pixels=True,
)
assert "products" in r
assert "count" in r
assert "pixels" in r
assert "bytes" in r
assert r["count"] > 0
def test_summary_part(self):
r = self.instance.summary(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
products=["landsat:LC08:PRE:TOAR"],
interval="year",
pixels=True,
)
assert "count" in r
assert "pixels" in r
assert "bytes" in r
assert "items" in r
assert len(r["items"]) == 1
def test_features(self):
r = self.instance.features(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
sat_ids="LANDSAT_8",
batch_size=10,
)
first_21 = itertools.islice(r, 21)
assert len(list(first_21)) > 0
def test_products_search_sort(self):
r = self.instance.search(
start_datetime="2016-07-06",
end_datetime="2016-07-07",
products=["landsat:LC08:PRE:TOAR"],
sort_field="cloud_fraction",
sort_order="desc",
)
assert len(r["features"]) > 0
last = None
for feature in r["features"]:
current = feature["properties"]["cloud_fraction"]
if last is None:
last = current
continue
assert current <= last
def test_search_products(self):
r = self.instance.products(limit=1)
assert len(r) == 1
def test_products_get(self):
product_id = "landsat:LC08:PRE:TOAR"
r = self.instance.get_product(product_id)
assert r["id"] == product_id
def test_bands_get(self):
band_id = "landsat:LC08:PRE:TOAR:red"
try:
band = self.instance.get_band(band_id)
assert band_id == band["id"]
except NotFoundError:
pass
def test_bands_by_product_get(self):
product_id = "landsat:LC08:PRE:TOAR"
bands = self.instance.get_bands_by_product(product_id)
assert "derived:ndvi" in bands
assert "landsat:LC08:PRE:TOAR:swir2" in bands
assert len(bands) > 16 # 16 native bands
def test_derived_bands_get(self):
band_id = "derived:ndvi"
try:
d_band = self.instance.get_derived_band(band_id)
assert "bands" in d_band
except NotFoundError:
pass
def test_derived_bands_search(self):
bands = ["red", "nir"]
bands = self.instance.derived_bands(bands=bands)
def test_get_bands_by_id(self):
self.instance.get_bands_by_id("landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1")
if __name__ == "__main__":
unittest.main()
| 30.911765 | 87 | 0.563681 | 894 | 7,357 | 4.496644 | 0.200224 | 0.071642 | 0.083582 | 0.067164 | 0.536567 | 0.435572 | 0.426866 | 0.39403 | 0.377114 | 0.377114 | 0 | 0.066919 | 0.317521 | 7,357 | 237 | 88 | 31.042194 | 0.733718 | 0.085361 | 0 | 0.439153 | 0 | 0 | 0.166294 | 0.070332 | 0 | 0 | 0 | 0 | 0.185185 | 1 | 0.116402 | false | 0.010582 | 0.021164 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb4b79decffafb6f720122d88e30370b0da72c41 | 4,320 | py | Python | main_q.py | weightan/BohemianEigenvaluesPython | 7734959188861877b0c807e4db12ac0fd707c34f | [
"MIT"
] | 1 | 2021-06-30T13:02:27.000Z | 2021-06-30T13:02:27.000Z | main_q.py | weightan/BohemianEigenvaluesPython | 7734959188861877b0c807e4db12ac0fd707c34f | [
"MIT"
] | null | null | null | main_q.py | weightan/BohemianEigenvaluesPython | 7734959188861877b0c807e4db12ac0fd707c34f | [
"MIT"
] | null | null | null | import quaternion
import numpy as np
import math
import time
import random
from pol_div import *
import cmath
from tqdm import tqdm
import matplotlib.pyplot as plt
from numpy import linalg as LA
iterations = 1000_000
maxV = 40
N = 500
cmap = 'hot'
size = 10
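
# Added summary (a hedged reading of the code below): random quaternion
# coefficient lists are turned into a companion-style complex matrix
# (makeCompM); for each eigenvalue q, the characteristic quadratic
# x^2 - trace*x + norm is divided out of the quaternion polynomial
# (poly_divmod), and any root found is histogrammed into the 3-D `coef` grid.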
def makeCompM(arrq, n):
    """Build an n x n companion-style matrix from n quaternion coefficients.

    Each quaternion a + bi + cj + dk is first replaced (in place) by its
    complex standard representative a + sqrt(b^2 + c^2 + d^2)*1j, then the
    converted coefficients become the last row of the matrix.
    """
    for i in range(n):
        temp = quaternion.as_float_array(arrq[i])
        qval = temp[0] + math.sqrt(temp[1]**2 + temp[2]**2 + temp[3]**2)*1j
        arrq[i] = qval
    M = np.diag(np.ones(n - 1), k=1).astype(complex)
    M[n-1:] = arrq
    return M
def test():
start_main = time.time()
    for i in range(10_000):
ex = np.quaternion(random.randrange(10), random.randrange(10), random.randrange(10), random.randrange(10))
ex2 = np.quaternion(-random.randrange(10), -random.randrange(10), -random.randrange(10), -random.randrange(10))
a1 = ex/ex2
a2 = ex2/ex
a3 = a1*a2*a2*a1
end_main = time.time()
secs = end_main - start_main
print("Main took ", secs)
def run():
coefficints = [np.quaternion(0, 0, 0, 0),
np.quaternion(1, 1, 1, 1 )]
nm = 10
coef = np.zeros((N, N, N), dtype = np.int32)
for i in range(iterations):#tqdm(
roots = []
num = [ random.choice(coefficints) for i in range(nm) ]
M = makeCompM(num, nm)
val, wec = LA.eig(M)
trace = np.array([(np.conj(val[i]) + val[i]).real for i in range(len(val))])
norm = np.array([(np.conj(val[i]) * val[i]).real for i in range(len(val))])
for j in range(nm):
den = [np.quaternion(1,0,0,0), np.quaternion(-trace[j],0,0,0), np.quaternion(norm[j],0,0,0) ]
print(trace[j], norm[j])
quot, rem = poly_divmod(num, den)
            # `quaternion` here is the module, so `type(...) is quaternion`
            # could never hold; test against the scalar type np.quaternion.
            if isinstance(rem[0], np.quaternion) and rem[0] == np.quaternion(0, 0, 0, 0):
                r0 = trace[j]/2 + math.sqrt(norm[j] - 0.25*trace[j]**2)*1j
                # a complex root has no j/k part: pad a zero third coordinate
                # so the 3-D histogram indexing below stays in bounds
                root = [r0.real, r0.imag, 0.0]
try:
x = round(root[0] * N/8.8 + N/2)
y = round(root[1] * N/8.8 + N/2)
z = round(root[2] * N/8.8 + N/2)
except Exception as inst:
pass
if x < N and x > 0 and y < N and y > 0 and z < N and z > 0 and coef[x, y, z] < maxV:
coef[x, y, z] += 1
elif len(rem) >= 2:
f = rem[0]
g = rem[1]
#print(g)
x = -1*g / f
#print(x)
root = quaternion.as_float_array(x)
try:
x = round(root[0] * N/8.8 + N/2)
y = round(root[1] * N/8.8 + N/2)
z = round(root[2] * N/8.8 + N/2)
#print(root)
except Exception as inst:
pass
if x < N and x > 0 and y < N and y > 0 and z < N and z > 0 and coef[x, y, z] < maxV:
coef[x, y, z] += 1
"""
for j in roots:
#if type(j) is list:
#print(j)
try:
x = round(j[0] * N/8.8 + N/2)
y = round(j[1] * N/8.8 + N/2)
except Exception as inst:
pass
if x < N and x > 0 and y < N and y > 0 and coef[x, y] < maxV:
coef[x, y] += 1
"""
filenameArr = f'N_{N}_cmap_{cmap}_maxV_{maxV}_{random.randrange(10000000, 100000000)}'
np.save(filenameArr, coef)
####
"""
for i in range(N):
for j in range(N):
if coef[i, j]:
coef[i, j] += 700
####
plt.figure(num = None, figsize=(size, size), dpi=300)
plt.axis('off')
plot = plt.imshow(coef[], cmap = cmap, interpolation='lanczos' )
####
filenameImage = f'N_{N}_cmap_{cmap}_maxV_{maxV}_{random.randrange(10000000, 100000000)}.png'
plt.savefig(filenameImage, bbox_inches = 'tight', pad_inches=0.0)
####
#plt.show()
plt.close()
"""
if __name__ == "__main__":
run()
| 23.867403 | 121 | 0.45162 | 607 | 4,320 | 3.156507 | 0.227348 | 0.01357 | 0.070981 | 0.016701 | 0.401357 | 0.363779 | 0.346555 | 0.346555 | 0.340292 | 0.340292 | 0 | 0.07079 | 0.404861 | 4,320 | 180 | 122 | 24 | 0.674446 | 0.007407 | 0 | 0.205128 | 0 | 0.012821 | 0.028169 | 0.01784 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.025641 | 0.128205 | 0 | 0.179487 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb4e1960657b5f6922d0c3538a5e43012892dcc2 | 2,190 | py | Python | masks.py | thierrydecker/ipi-pyt010 | be40a58fe9deac2bc039083a827220f9fa9c5d4f | [
"Apache-2.0"
] | null | null | null | masks.py | thierrydecker/ipi-pyt010 | be40a58fe9deac2bc039083a827220f9fa9c5d4f | [
"Apache-2.0"
] | null | null | null | masks.py | thierrydecker/ipi-pyt010 | be40a58fe9deac2bc039083a827220f9fa9c5d4f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Exercice sur les fonctions
#
# Initialiser une variable chaine de caractères contenant un masque réseau
# en notation décimale (aaa.bbb.ccc.ddd).
#
# Ecrire une ou plusieures fonctions permettant de convertir le masque décimal
# dans son équivalent CIDR
#
# Exemple : 255.255.255.0 doit donner /24
# 255.255.0.0 doit donner /16
# 255.255.255.252 doit donner /30
#
# Si le masque donné en notation décimale est invalide, retourner None.
#
# Vous n'etes pas autorisés à utiliser l'instruction import.
#
def decimal_to_cidr(dm):
"""Calcul du masque CIDR à partir du masque décimal
inputs:
- Masque décimal
outputs:
- Masque CIDR
"""
# Did we get a string ?
if not isinstance(dm, str):
print("A string is needed")
return None
# Split the string into a list
splitted_dm = list(dm.split(sep='.'))
# Did we get four fields ?
if len(splitted_dm) != 4:
print("A decimal mask must have four fields")
return None
# Iterate over the mask's fields
for i in range(len(splitted_dm)):
        # Did we get a numeric field ?
        # (isdigit, not isalnum: isalnum would also accept letters and
        # then crash on the int() conversion below)
        if not splitted_dm[i].isdigit():
            print("Fields must be positive integers")
            return None
# Convert the string into int
splitted_dm[i] = int(splitted_dm[i])
# Did we get an int > 255
if splitted_dm[i] > 255:
print("Fields must be positive integers less or equal to 255")
return None
# Format the field into its binary representation
splitted_dm[i] = format(splitted_dm[i], "0>8b")
# Build a string with the four fields concatenated
binary_mask = "".join(splitted_dm)
# Number of ones in the binary mask ?
cidr_mask = binary_mask.count('1')
# Is it a valid mask ?
if cidr_mask != binary_mask[0:cidr_mask].count('1'):
print("Invalid mask")
return None
return cidr_mask
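
# Illustrative results (not part of the original exercise):
#   decimal_to_cidr('255.255.255.0')    -> 24
#   decimal_to_cidr('255.255.255.252')  -> 30
#   decimal_to_cidr('255.0.255.0')      -> None (the set bits are not contiguous)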
def main():
decimal_mask = '255.255.255.128'
print("Le masque CIDR de {} est {}".format(decimal_mask, decimal_to_cidr(decimal_mask)))
if __name__ == '__main__':
main()
| 26.385542 | 92 | 0.63105 | 315 | 2,190 | 4.285714 | 0.438095 | 0.074074 | 0.048889 | 0.013333 | 0.048889 | 0.048889 | 0 | 0 | 0 | 0 | 0 | 0.040176 | 0.272603 | 2,190 | 82 | 93 | 26.707317 | 0.807282 | 0.450228 | 0 | 0.178571 | 0 | 0 | 0.181185 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.285714 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb50fd98f06619dd02694f035ec896250cfc5ca7 | 2,471 | py | Python | examples/linear_classifier_example.py | fhoering/tf-yarn | 890a9adf05d1fe48cff94c066130ba4e46aeef21 | [
"Apache-2.0"
] | null | null | null | examples/linear_classifier_example.py | fhoering/tf-yarn | 890a9adf05d1fe48cff94c066130ba4e46aeef21 | [
"Apache-2.0"
] | null | null | null | examples/linear_classifier_example.py | fhoering/tf-yarn | 890a9adf05d1fe48cff94c066130ba4e46aeef21 | [
"Apache-2.0"
] | null | null | null | """
Example of using LinearClassifier
"""
import logging
logging.basicConfig(level="INFO") # noqa
import os
import pwd
import getpass
import sys
import warnings
import typing
import skein
from functools import partial
from subprocess import check_output
from datetime import datetime
import cluster_pack
from tf_yarn import Experiment, TaskSpec, run_on_yarn
import winequality
USER = getpass.getuser()
"""
1. Download winequality-*.csv from the Wine Quality dataset at UCI
ML repository
(https://archive.ics.uci.edu/ml/datasets/Wine+Quality).
2. Upload it to HDFS
3. Pass a full URI to either of the CSV files to the example
"""
WINE_EQUALITY_FILE = f"{cluster_pack.get_default_fs()}/user/{USER}/tf_yarn_test/winequality-red.csv"
"""
Output path of the learned model on hdfs
"""
HDFS_DIR = (f"{cluster_pack.get_default_fs()}/user/{USER}"
f"/tf_yarn_test/tf_yarn_{int(datetime.now().timestamp())}")
def experiment_fn() -> Experiment:
# To mitigate issue https://github.com/tensorflow/tensorflow/issues/32159 for tf >= 1.15
import tensorflow as tf
def train_input_fn():
dataset = winequality.get_dataset(WINE_EQUALITY_FILE, split="train")
return (dataset.shuffle(1000)
.batch(128)
.repeat())
def eval_input_fn():
dataset = winequality.get_dataset(WINE_EQUALITY_FILE, split="test")
return (dataset.shuffle(1000)
.batch(128))
estimator = tf.estimator.LinearClassifier(
feature_columns=winequality.get_feature_columns(),
model_dir=HDFS_DIR,
n_classes=winequality.get_n_classes())
return Experiment(
estimator,
tf.estimator.TrainSpec(train_input_fn, max_steps=10),
tf.estimator.EvalSpec(
eval_input_fn,
steps=10,
start_delay_secs=0,
throttle_secs=30))
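
# Added note: the Experiment returned above bundles the estimator with its
# TrainSpec/EvalSpec; run_on_yarn below dispatches experiment_fn onto the
# YARN containers declared in task_specs ("chief", "evaluator", "tensorboard").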
if __name__ == "__main__":
pyenv_zip_path, env_name = cluster_pack.upload_env()
editable_requirements = cluster_pack.get_editable_requirements()
run_on_yarn(
pyenv_zip_path,
experiment_fn,
task_specs={
"chief": TaskSpec(memory="2 GiB", vcores=4),
"evaluator": TaskSpec(memory="2 GiB", vcores=1),
"tensorboard": TaskSpec(memory="2 GiB", vcores=1, tb_model_dir=HDFS_DIR)
},
files={
**editable_requirements,
os.path.basename(winequality.__file__): winequality.__file__,
}
)
| 29.070588 | 100 | 0.676244 | 314 | 2,471 | 5.06051 | 0.429936 | 0.034613 | 0.030208 | 0.033984 | 0.197609 | 0.182505 | 0.110761 | 0.110761 | 0.070485 | 0.070485 | 0 | 0.019699 | 0.219344 | 2,471 | 84 | 101 | 29.416667 | 0.804044 | 0.050992 | 0 | 0.034483 | 0 | 0 | 0.1143 | 0.08463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0.034483 | 0.258621 | 0 | 0.362069 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb51d4c4c3f25e1f6a7c449b89c05fa01d3fe351 | 2,954 | py | Python | otp/video.py | NExTplusplus/object-trajectory-proposal | bf11db313ae9d517bb26562db8cd8209548b67cd | [
"MIT"
] | 2 | 2018-02-19T04:12:31.000Z | 2019-03-25T02:38:16.000Z | otp/video.py | NExTplusplus/object-trajectory-proposal | bf11db313ae9d517bb26562db8cd8209548b67cd | [
"MIT"
] | null | null | null | otp/video.py | NExTplusplus/object-trajectory-proposal | bf11db313ae9d517bb26562db8cd8209548b67cd | [
"MIT"
] | 2 | 2020-03-04T08:31:57.000Z | 2020-05-11T08:55:14.000Z | import skvideo.io as skvio
from skimage.transform import resize
from skimage import img_as_ubyte
import h5py
import logging
from .motion_field import get_motion_field_method
class Video():
  """Lazy wrapper around a video file: frames resized to height 240, plus
  per-frame motion fields optionally cached in an HDF5 file."""
  def __init__(self, data_path, motion_path = None):
    self.data_path = data_path
    self.motion_path = motion_path
    self.meta = skvio.ffprobe(self.data_path)
    # ffprobe returns every field as a string, so convert explicitly
    fps = int(self.meta['video']['@r_frame_rate'].split('/')[0])
    assert fps > 0, 'Broken video %s' % self.data_path
    self._size = (int(self.meta['video']['@nb_frames']),
        int(self.meta['video']['@height']), int(self.meta['video']['@width']))

  def scale(self):
    return self._size[1] / int(self.meta['video']['@height'])
def size(self):
return self._size
def frames(self):
if not hasattr(self, '_frames'):
self._frames = skvio.vread(self.data_path)
self._size = self._frames.shape[:3]
height = 240
if self._size[1] > height:
self._size = (self._size[0], height,
height * self._size[2] // self._size[1])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
frame_size = self._size[1:]
rsz_frames = []
for frame in self._frames:
frame = resize(frame, frame_size, mode = 'constant')
frame = img_as_ubyte(frame)
rsz_frames.append(frame)
self._frames = np.asarray(rsz_frames)
return self._frames
def motions(self):
if not hasattr(self, '_motions'):
frames = self.frames()
path = '' if self.motion_path is None else self.motion_path
# load existing one given the path
if os.path.exists(path):
try:
with h5py.File(path, 'r') as fin:
self._motions = fin['flows'][:].astype(np.float32)
            assert self._motions.shape[1:3] == self._size[1:3]  # (H, W) must match
return self._motions
except Exception as err:
print(err, end = ', ')
print('recomputing...')
# compute motions
if 'mpegflow' in path:
compute_motion_field = get_motion_field_method('mpegflow')
elif 'ldof' in path:
compute_motion_field = get_motion_field_method('ldof')
elif 'deepflow' in path:
compute_motion_field = get_motion_field_method('deepflow')
else:
logging.warning('Unknown motion field method, mpegflow is used.')
compute_motion_field = get_motion_field_method('mpegflow')
self._motions = []
for i in range(len(frames) - 1):
motion = compute_motion_field(frames[i], frames[i + 1])
self._motions.append(motion)
self._motions = np.asarray(self._motions, dtype = np.float32)
# save
if not self.motion_path is None:
with h5py.File(path, 'w') as fout:
fout.create_dataset('flows',
data = np.asarray(self._motions, dtype = np.float16),
compression = 'gzip', compression_opts = 9)
return self._motions
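
# Minimal usage sketch (illustrative; 'clip.mp4' and the cache name are
# hypothetical -- the motion_path filename selects the flow backend):
# video = Video('clip.mp4', motion_path='clip_deepflow.h5')
# frames = video.frames()   # (T, H, W, 3) uint8 array, height capped at 240
# flows = video.motions()   # T-1 motion fields, cached to motion_path as float16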
| 36.02439 | 73 | 0.624577 | 385 | 2,954 | 4.574026 | 0.288312 | 0.074957 | 0.057922 | 0.056786 | 0.204429 | 0.136286 | 0.105622 | 0.105622 | 0.074957 | 0 | 0 | 0.012635 | 0.249831 | 2,954 | 81 | 74 | 36.469136 | 0.78204 | 0.025051 | 0 | 0.057143 | 0 | 0 | 0.083217 | 0 | 0 | 0 | 0 | 0 | 0.028571 | 1 | 0.071429 | false | 0 | 0.085714 | 0.028571 | 0.242857 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb51e19f6f4039d51490675c2e9be729062f210b | 4,585 | py | Python | python_sandbox/python_sandbox/effective_python/item26.py | jduan/cosmos | c4bdc5b800c9b51084daf874769ac73e1c4b7dd5 | [
"MIT"
] | null | null | null | python_sandbox/python_sandbox/effective_python/item26.py | jduan/cosmos | c4bdc5b800c9b51084daf874769ac73e1c4b7dd5 | [
"MIT"
] | null | null | null | python_sandbox/python_sandbox/effective_python/item26.py | jduan/cosmos | c4bdc5b800c9b51084daf874769ac73e1c4b7dd5 | [
"MIT"
] | null | null | null | # Things to Remember
#
# Avoid using multiple inheritance if mix-in classes can achieve the same outcome.
#
# Use pluggable behaviors at the instance level to provide per-class customization when mix-in
# classes may require it. Subclass can override certain methods/hooks in the mixin.
#
# Compose mix-ins to create complex functionality from simple behaviors. A mixin can
# depend on another mixin.
# Python is an object-oriented language with built-in facilities for making multiple inheritance
# tractable (see Item 25: “Initialize Parent Classes with super”). However, it’s better to avoid
# multiple inheritance altogether. Mixin should be the only use case.
# If you find yourself desiring the convenience and encapsulation that comes with multiple
# inheritance, consider writing a mix-in instead. A mix-in is a small class that only defines a set
# of additional methods that a class should provide. Mix-in classes don’t define their own instance
# attributes nor require their __init__ constructor to be called. In other words,
# mixins only have behaviors. They don't maintain state.
# Writing mix-ins is easy because Python makes it trivial to inspect the current state of any object
# regardless of its type. Dynamic inspection lets you write generic functionality a single time, in
# a mix-in, that can be applied to many other classes. Mix-ins can be composed and layered to
# minimize repetitive code and maximize reuse.
from pprint import pprint
import json
class ToDictMixin(object):
def to_dict(self):
return self._traverse_dict(self.__dict__)
def _traverse_dict(self, instance_dict):
output = {}
for key, value in instance_dict.items():
output[key] = self._traverse(key, value)
return output
def _traverse(self, key, value):
if isinstance(value, ToDictMixin):
return value.to_dict()
elif isinstance(value, dict):
return self._traverse_dict(value)
elif isinstance(value, list):
return [self._traverse(key, i) for i in value]
elif hasattr(value, '__dict__'):
return self._traverse_dict(value.__dict__)
else:
return value
# Note how the JsonMixin class defines both instance methods and class methods. Mix-ins let you add
# either kind of behavior.
class JsonMixin:
@classmethod
def from_json(cls, data):
kwargs = json.loads(data)
return cls(**kwargs)
def to_json(self):
# this mixin depends on the ToDictMixin to work
return json.dumps(self.to_dict())
class BinaryTree(ToDictMixin):
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
class BinaryTreeWithParent(BinaryTree):
def __init__(self, value, left=None, right=None, parent=None):
super().__init__(value, left=left, right=right)
self.parent = parent
def _traverse(self, key, value):
# Don't visit parent otherwise you will run into a loop
if isinstance(value, BinaryTreeWithParent) and key == 'parent':
return value.value
else:
return super()._traverse(key, value)
# Mix-ins can be composed together.
class DatacenterPack(ToDictMixin, JsonMixin):
def __init__(self, switch=None, machines=None):
self.switch = Switch(**switch)
self.machines = [Machine(**kwargs) for kwargs in machines]
class Switch(ToDictMixin, JsonMixin):
def __init__(self, ports, speed):
self.ports = ports
self.speed = speed
class Machine(ToDictMixin, JsonMixin):
def __init__(self, cores, ram, disk):
self.cores = cores
self.ram = ram
self.disk = disk
def main():
tree = BinaryTree(10,
left=BinaryTree(7, right=BinaryTree(9)),
right=BinaryTree(13, left=BinaryTree(11)))
pprint(tree.to_dict())
root = BinaryTreeWithParent(10)
root.left = BinaryTreeWithParent(7, parent=root)
root.left.right = BinaryTreeWithParent(9, parent=root.left)
pprint(root.to_dict())
serialized = """{
"switch": {"ports": 5, "speed": 1e9},
"machines": [
{"cores": 8, "ram": 32e9, "disk": 5e12},
{"cores": 4, "ram": 16e9, "disk": 1e12},
{"cores": 2, "ram": 4e9, "disk": 500e9}
]
}"""
datacenter_pack = DatacenterPack.from_json(serialized)
roundtrip = datacenter_pack.to_json()
assert json.loads(serialized) == json.loads(roundtrip)
if __name__ == '__main__':
main()
| 35 | 100 | 0.675682 | 598 | 4,585 | 5.056856 | 0.362876 | 0.009921 | 0.018188 | 0.021825 | 0.104167 | 0.045635 | 0.045635 | 0.021825 | 0 | 0 | 0 | 0.010795 | 0.232279 | 4,585 | 130 | 101 | 35.269231 | 0.848295 | 0.360305 | 0 | 0.051948 | 0 | 0 | 0.091191 | 0 | 0 | 0 | 0 | 0 | 0.012987 | 1 | 0.155844 | false | 0 | 0.025974 | 0.025974 | 0.415584 | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb52dea8c19f8625fc8bb46f0dc223fddbae0963 | 1,166 | py | Python | movie-rating-classification/build_vocab.py | lijian10086/nlp-tutorial | 4b3773b13d975e7ca812dec6b9409e43dac44534 | [
"MIT"
] | 1,433 | 2018-12-14T06:20:28.000Z | 2022-03-31T14:12:50.000Z | movie-rating-classification/build_vocab.py | itsshaikaslam/nlp-tutorial-1 | 6e4c74e103f4cdc5e0559d987ae6e41c40e17a5a | [
"MIT"
] | 14 | 2019-04-03T08:30:23.000Z | 2021-07-11T11:41:05.000Z | movie-rating-classification/build_vocab.py | itsshaikaslam/nlp-tutorial-1 | 6e4c74e103f4cdc5e0559d987ae6e41c40e17a5a | [
"MIT"
] | 306 | 2018-12-20T09:41:24.000Z | 2022-03-31T05:07:14.000Z | import argparse
import nltk
def argparser():
p = argparse.ArgumentParser()
p.add_argument('-input', required=True)
p.add_argument('-output', required=True)
p.add_argument('-word_num',
type=int,
required=True,
help='how many words to use. the words are sorted by decreasing frequency.')
config = p.parse_args()
return config
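
# Example invocation (hypothetical file names):
#   python build_vocab.py -input corpus.tok.txt -output vocab.txt -word_num 30000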
if __name__ == "__main__":
config = argparser()
f = open(config.input, 'r')
vocabulary = []
for line in f:
if line.replace('\n', '').strip() != '':
vocabulary += line.replace('\n', '').strip().split()
vocabulary = nltk.Text(vocabulary)
print('build_vocab.py: number of tokens = {}'.format(len(vocabulary.tokens)))
print('build_vocab.py: number of unique tokens = {}'.format(len(set(vocabulary.tokens))))
print('build_vocab.py: frequency of vocabulary(top 10)\n{}'.format(vocabulary.vocab().most_common(10)))
f_out = open(config.output, 'w')
for idx, words in enumerate(dict(vocabulary.vocab().most_common(config.word_num)).keys()):
f_out.write(words + ' ' + str(idx) + '\n')
| 31.513514 | 107 | 0.611492 | 146 | 1,166 | 4.739726 | 0.486301 | 0.017341 | 0.052023 | 0.073699 | 0.212428 | 0.143064 | 0 | 0 | 0 | 0 | 0 | 0.00444 | 0.227273 | 1,166 | 36 | 108 | 32.388889 | 0.763596 | 0 | 0 | 0 | 0 | 0 | 0.204974 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0 | 0.153846 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb55a87434b8c6d5633da93bc643c82605b6eb84 | 3,041 | py | Python | Machine Learning Summer School 2019 (Moscow, Russia)/tutorials/bayesian_deep_learning/mlss2019bdl/dataset.py | xuedong/rlss2019 | d7468c2fcf269d8afd6fb0f44993aa9797867944 | [
"MIT"
] | null | null | null | Machine Learning Summer School 2019 (Moscow, Russia)/tutorials/bayesian_deep_learning/mlss2019bdl/dataset.py | xuedong/rlss2019 | d7468c2fcf269d8afd6fb0f44993aa9797867944 | [
"MIT"
] | null | null | null | Machine Learning Summer School 2019 (Moscow, Russia)/tutorials/bayesian_deep_learning/mlss2019bdl/dataset.py | xuedong/rlss2019 | d7468c2fcf269d8afd6fb0f44993aa9797867944 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch.utils.data import TensorDataset
from torchvision import datasets
from sklearn.utils import check_random_state
from sklearn.model_selection import train_test_split
def get_data(name, path="./data", train=True):
if name == "MNIST":
dataset = datasets.MNIST(path, train=train, download=True)
elif name == "KMNIST":
dataset = datasets.KMNIST(path, train=train, download=True)
images = dataset.data.float().unsqueeze(1)
return TensorDataset(images / 255., dataset.targets)
def get_dataset(n_train=20, n_valid=5000, n_pool=5000,
name="MNIST", path="./data", random_state=None):
random_state = check_random_state(random_state)
dataset = get_data(name, path, train=True)
S_test = get_data(name, path, train=False)
# create an imbalanced class label distribution for the train
targets = dataset.tensors[-1].cpu().numpy()
# split the dataset into validaton and train
ix_all = np.r_[:len(targets)]
ix_train, ix_valid = train_test_split(
ix_all, stratify=targets, shuffle=True,
train_size=max(n_train, 1), test_size=max(n_valid, 1),
random_state=random_state)
# prepare the datasets: pool, train and validation
if n_train < 1:
ix_train = np.r_[:0]
S_train = TensorDataset(*dataset[ix_train])
if n_valid < 1:
ix_valid = np.r_[:0]
S_valid = TensorDataset(*dataset[ix_valid])
# prepare the pool
ix_pool = np.delete(ix_all, np.r_[ix_train, ix_valid])
# we want to have lots of boring/useless examples in the pool
labels, share = (1, 2, 3, 4, 5, 6, 7, 8, 9), 0.95
pool_targets, dropped = targets[ix_pool], []
# deplete the pool of each class
for label in labels:
        ix_cls = np.flatnonzero(pool_targets == label)
        n_drop = int(share * len(ix_cls))  # `share` is the fraction dropped, not kept

        # pick examples at random to drop
        ix_cls = random_state.permutation(ix_cls)
        dropped.append(ix_cls[:n_drop])
ix_pool = np.delete(ix_pool, np.concatenate(dropped))
# select at most `n_pool` examples
if n_pool > 0:
ix_pool = random_state.permutation(ix_pool)[:n_pool]
S_pool = TensorDataset(*dataset[ix_pool])
return S_train, S_pool, S_valid, S_test
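
# Illustrative call (downloads MNIST into ./data on first use):
# S_train, S_pool, S_valid, S_test = get_dataset(n_train=20, n_valid=5000,
#                                                n_pool=5000, random_state=42)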
def collect(indices, dataset):
"""Collect the specified samples from the dataset and remove."""
assert len(dataset) > 0
    # a boolean mask: with uint8 (as originally written) `~mask` flips the
    # bits to 254/255 and would select every row under modern PyTorch
    mask = torch.zeros(len(dataset), dtype=torch.bool)
mask[indices] = True
collected = TensorDataset(*dataset[mask])
dataset.tensors = dataset[~mask]
return collected
def merge(*datasets, out=None):
# Classes derived from Dataset support appending via
# `+` (__add__), but this breaks slicing.
data = [d.tensors for d in datasets if d is not None and d.tensors]
assert all(len(data[0]) == len(d) for d in data)
tensors = [torch.cat(tup, dim=0) for tup in zip(*data)]
if isinstance(out, TensorDataset):
out.tensors = tensors
return out
return TensorDataset(*tensors)
| 30.108911 | 71 | 0.673134 | 444 | 3,041 | 4.441441 | 0.310811 | 0.050203 | 0.016734 | 0.022819 | 0.06288 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015966 | 0.217363 | 3,041 | 100 | 72 | 30.41 | 0.812605 | 0.156527 | 0 | 0 | 0 | 0 | 0.01098 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.068966 | false | 0 | 0.103448 | 0 | 0.258621 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb55c33605126e7bbc70cb10ae271deb89f20640 | 927 | py | Python | JugandoCodewars/differentiatePolynome.py | blukitas/JugandoCodewars | aa4938fbba3911ba2d5f6fea2ff35fe3cbbf37b0 | [
"MIT"
] | null | null | null | JugandoCodewars/differentiatePolynome.py | blukitas/JugandoCodewars | aa4938fbba3911ba2d5f6fea2ff35fe3cbbf37b0 | [
"MIT"
] | null | null | null | JugandoCodewars/differentiatePolynome.py | blukitas/JugandoCodewars | aa4938fbba3911ba2d5f6fea2ff35fe3cbbf37b0 | [
"MIT"
] | null | null | null | # Create a function that differentiates a polynomial for a given value of x.
# Your function will receive 2 arguments: a polynomial as a string, and a point to evaluate the equation as an integer.
# Assumptions:
# There will be a coefficient near each x, unless the coefficient equals 1 or -1.
# There will be an exponent near each x, unless the exponent equals 0 or 1.
# All exponents will be greater or equal to zero
# Examples:
# differenatiate("12x+2", 3) ==> returns 12
# differenatiate("x^2+3x+2", 3) ==> returns 9
# https://www.codewars.com/kata/566584e3309db1b17d000027
import re

def differenatiate(s, p):
    # The original string-substitution/eval approach evaluated the polynomial
    # itself (and let '^' fall through to Python's XOR); instead, parse each
    # x-term with a regex and differentiate it: d/dx of c*x^e is c*e*x^(e-1).
    total = 0
    for coef, exp in re.findall(r'([+-]?\d*)x(?:\^(\d+))?', s):
        c = int(coef) if coef not in ('', '+', '-') else int(coef + '1')
        e = int(exp) if exp else 1
        if e == 0:
            continue  # a term in x^0 is constant and differentiates to zero
        total += c * e * p ** (e - 1)
    return total
print(differenatiate("12x+2", 3))
print(differenatiate("x^2+3x+2", 3)) | 31.965517 | 119 | 0.634304 | 145 | 927 | 4.013793 | 0.482759 | 0.051546 | 0.037801 | 0.051546 | 0.130584 | 0.068729 | 0 | 0 | 0 | 0 | 0 | 0.064748 | 0.25027 | 927 | 29 | 120 | 31.965517 | 0.772662 | 0.612729 | 0 | 0 | 0 | 0 | 0.04298 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.153846 | 0.230769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb581c6464c5f1d980d6ca2264b5b883c4e38256 | 7,177 | py | Python | OLD THINGS/prac.py | AmirQadir/Auto-Object-Detection-and-Tracker | 24c6f4d18b0496ef19250ccc42f53a7f1f42ed3f | [
"MIT"
] | 1 | 2019-05-30T00:59:18.000Z | 2019-05-30T00:59:18.000Z | OLD THINGS/prac.py | AmirQadir/Auto-Object-Detection-and-Tracker | 24c6f4d18b0496ef19250ccc42f53a7f1f42ed3f | [
"MIT"
] | null | null | null | OLD THINGS/prac.py | AmirQadir/Auto-Object-Detection-and-Tracker | 24c6f4d18b0496ef19250ccc42f53a7f1f42ed3f | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018 BIG VISION LLC ALL RIGHTS RESERVED
#
from __future__ import print_function
import sys
import cv2
from random import randint
import argparse
import numpy as np
import cv2 as cv
from yolo_utils import infer_image
#Amir
from mtcnn.mtcnn import MTCNN
from skimage.measure import compare_ssim
from skimage.transform import resize
import glob
from FaceID import FaceID
#import face_recognition
trackerTypes = ['BOOSTING', 'MIL', 'KCF','TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
def createTrackerByName(trackerType):
# Create a tracker based on tracker name
if trackerType == trackerTypes[0]:
tracker = cv2.TrackerBoosting_create()
elif trackerType == trackerTypes[1]:
tracker = cv2.TrackerMIL_create()
elif trackerType == trackerTypes[2]:
tracker = cv2.TrackerKCF_create()
elif trackerType == trackerTypes[3]:
tracker = cv2.TrackerTLD_create()
elif trackerType == trackerTypes[4]:
tracker = cv2.TrackerMedianFlow_create()
elif trackerType == trackerTypes[5]:
tracker = cv2.TrackerGOTURN_create()
elif trackerType == trackerTypes[6]:
tracker = cv2.TrackerMOSSE_create()
elif trackerType == trackerTypes[7]:
tracker = cv2.TrackerCSRT_create()
else:
tracker = None
print('Incorrect tracker name')
print('Available trackers are:')
for t in trackerTypes:
print(t)
return tracker
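
# e.g. createTrackerByName('CSRT') returns a cv2.TrackerCSRT_create() instance;
# an unknown name prints the available tracker types and returns None.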
def yolo():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model-path',
type=str,
default='./yolov3-coco/',
help='The directory where the model weights and \
configuration files are.')
parser.add_argument('-w', '--weights',
type=str,
default='./yolov3-coco/yolov3.weights',
help='Path to the file which contains the weights \
for YOLOv3.')
parser.add_argument('-cfg', '--config',
type=str,
default='./yolov3-coco/yolov3.cfg',
help='Path to the configuration file for the YOLOv3 model.')
parser.add_argument('-vo', '--video-output-path',
type=str,
default='./output.avi',
help='The path of the output video file')
parser.add_argument('-l', '--labels',
type=str,
default='./yolov3-coco/coco-labels',
help='Path to the file having the \
labels in a new-line seperated way.')
parser.add_argument('-c', '--confidence',
type=float,
default=0.5,
                        help='The model will reject boundaries which have a \
                            probability less than the confidence value. \
default: 0.5')
parser.add_argument('-th', '--threshold',
type=float,
default=0.3,
help='The threshold to use when applying the \
                            Non-Max Suppression')
parser.add_argument('--download-model',
type=bool,
default=False,
help='Set to True, if the model weights and configurations \
are not present on your local machine.')
parser.add_argument('-t', '--show-time',
type=bool,
default=False,
help='Show the time taken to infer each image.')
FLAGS, unparsed = parser.parse_known_args()
#print(FLAGS)
# Get the labels
labels = open(FLAGS.labels).read().strip().split('\n')
    # Initializing colors to represent each label uniquely
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
    # Load the weights and configuration to form the pretrained YOLOv3 model
net = cv.dnn.readNetFromDarknet(FLAGS.config, FLAGS.weights)
# Get the output layer names of the model
layer_names = net.getLayerNames()
    # getUnconnectedOutLayers returns 1-based indices; flatten() handles both
    # the older [[i]] and the newer [i] return shapes across OpenCV versions
    layer_names = [layer_names[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
################################
height, width = frame.shape[:2]
img, bboxes, _, classid, _ = infer_image(net, layer_names, height, width, frame, colors, labels, FLAGS)
global boxes
boxes = [] #It's a list now
    for j, cid in enumerate(classid):
        if cid == 0:  # class id 0 is 'person' in the COCO labels
            print("person's bounding box is: ", bboxes[j])
            boxes.append(bboxes[j].copy())
            print(boxes[-1])
    ############################ temp ############################
#for index,value in enumerate(boxes):
itr = 0
for i in range(len(boxes)):
itr = itr + 1
name = 'dataset/' + str("person") + str(itr) + ".jpg"
y = boxes[i][1]
x = boxes[i][0]
h = boxes[i][3]
w = boxes[i][2]
crop_img = img[y:y+h,x:x+w]
cv.imwrite(name,crop_img)
detector = MTCNN()
print("I am a detector phewww !")
        face_cropped = detector.detect_faces(crop_img)  # run detection once, reuse the result
        print(face_cropped, "Debug")
if(len(face_cropped)>0):
boxes_face = (face_cropped[0]['box'])
y1 = boxes_face[1]
x1 = boxes_face[0]
h1 = boxes_face[3]
w1 = boxes_face[2]
crop_img_2 = crop_img[y1:y1+h1, x1:x1+w1]
name = 'dataset/' + str("face")+ str(itr) + '.jpg'
cv.imwrite(name,crop_img_2)
#crop_img_2 = cv2.resize(crop_img_2,(100,100),interpolation=cv2.INTER_AREA)
rec = FaceID()
# Matching Part
images = []
    for img_path in glob.glob("dataset/face*.jpg"):  # img_path avoids clobbering the inference image `img`
        n = cv2.imread(img_path)
images.append(n)
#for img in images:
# img = cv2.resize(img,(100,100),interpolation=cv2.INTER_AREA)
#if(np.linalg.norm(img-crop_img_2)>=0.9):
# val = np.linalg.norm(img-crop_img_2)
#print("Amir won",val)
# Matching Part End
    ########################## temp done ##########################
my_tuple = []
for i in bboxes:
my_tuple.append(tuple(i))
#print(my_tuple)
# Create MultiTracker object
multiTracker = cv2.MultiTracker_create()
# Initialize MultiTracker
colors_multi = []
for bbox in my_tuple:
multiTracker.add(createTrackerByName(trackerType), frame, bbox)
colors_multi.append((randint(64, 255), randint(64, 255), randint(64, 255)))
return multiTracker, colors_multi
if __name__ == '__main__':
print("Default tracking algoritm is CSRT \n"
"Available tracking algorithms are:\n")
for t in trackerTypes:
print(t)
    trackerType = "CSRT"
# Set video to load
videoPath = "webcam.mp4"
# Create a video capture object to read videos
cap = cv2.VideoCapture(0)
# Read first frame
success, frame = cap.read()
# quit if unable to read the video file
if not success:
print('Failed to read video')
sys.exit(1)
## Select boxes
bboxes = []
colors = []
boxes=[]
################# copied code
multiTracker, colors_multi = yolo()
# Process video and track objects
while cap.isOpened():
success, frame = cap.read()
if not success:
break
# get updated location of objects in subsequent frames
success, boxes = multiTracker.update(frame)
# draw tracked objects
for i, newbox in enumerate(boxes):
p1 = (int(newbox[0]), int(newbox[1]))
p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
cv2.rectangle(frame, p1, p2, colors_multi[i], 2, 1)
# show frame
cv2.imshow('MultiTracker', frame)
        # read the keyboard once per frame; two separate waitKey calls can drop keypresses
        key = cv2.waitKey(1) & 0xFF
        if key == 27:  # Esc pressed: quit
            break
        if key == ord('y'):  # keycode 121: re-run YOLO detection
            multiTracker, colors_multi = yolo()
            print("key pressed")
| 23.686469 | 105 | 0.63815 | 954 | 7,177 | 4.712788 | 0.324948 | 0.017126 | 0.03403 | 0.051379 | 0.118105 | 0.05605 | 0.010231 | 0 | 0 | 0 | 0 | 0.025893 | 0.21973 | 7,177 | 302 | 106 | 23.764901 | 0.776964 | 0.147833 | 0 | 0.147929 | 0 | 0 | 0.116373 | 0.01293 | 0 | 0 | 0.001343 | 0 | 0 | 1 | 0.011834 | false | 0 | 0.076923 | 0 | 0.100592 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb5960464ec129fb8ad73243547ad27c3623545d | 10,262 | py | Python | BEC/Plugins/ScriptBan/__init__.py | Mateuus/A3UndeadBRLife | 2c4558c3cf66795bcfa126e3fe460a9b671d3ca9 | [
"OpenSSL"
] | null | null | null | BEC/Plugins/ScriptBan/__init__.py | Mateuus/A3UndeadBRLife | 2c4558c3cf66795bcfa126e3fe460a9b671d3ca9 | [
"OpenSSL"
] | null | null | null | BEC/Plugins/ScriptBan/__init__.py | Mateuus/A3UndeadBRLife | 2c4558c3cf66795bcfa126e3fe460a9b671d3ca9 | [
"OpenSSL"
] | null | null | null | # This plugin will ban a player that has triggered one of the filter rules.
# This plugin is only suited to one mission unless you use common filter files for all your missions.
# To set ban reasons for your rules you need to edit the file. ( reason.py )
# You also need to know what ID each rule will have.
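# Hedged illustration (added; not from the original plugin) of the assumed
# shape of reason.py: one dict per restriction type, keyed by the filter rule
# number as a string and mapping to the ban reason text, e.g.
#   script_reason = {
#       "0": "Script injection (filter rule #0)",
#   }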
import re
import os
import sys
sys.path.append(os.getcwd()+"\\Plugins\\ScriptBan")
import reason
# Define the servers this plugin is to be used on. Use config names. Leave it empty to start it on all servers, or remove the variable.
SERVERS = ["a2.cfg", "a3.cfg"]
class Restrictions(object):
def __init__(self, instance):
self.bec = instance
self.cfgname = self.bec.cfgval.options.filename
if self.cfgname in SERVERS:
self.org_func_be_prosess_02 = self.bec._be_prosess_02
self.bec._be_prosess_02 = self.extend_pros02
################################################################################################################
        ### Regular Expressions
################################################################################################################
self.addbackpackcargo_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: addbackpackcargo restriction #(\d+)', re.I)
self.addmagazinecargo_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: addmagazinecargo restriction #(\d+)', re.I)
self.attachto_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: attachto restriction #(\d+)', re.I)
self.addweaponcargo_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: addweaponcargo restriction #(\d+)', re.I)
self.createvehicle_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: createvehicle restriction #(\d+)', re.I)
self.deletevehicle_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: deletevehicle restriction #(\d+)', re.I)
self.mpeventhandler_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: mpeventhandler restriction #(\d+)', re.I)
self.publicvariable_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: publicVariable restriction #(\d+)', re.I)
self.publicvariableval_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: publicVariable value restriction #(\d+)', re.I)
self.remotecontrol_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: remoteControl restriction #(\d+)', re.I)
self.remoteexec_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: remoteExec restriction #(\d+)', re.I)
self.script_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: script restriction #(\d+)', re.I)
self.selectplayer_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: selectplayer restriction #(\d+)', re.I)
self.setdamage_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: setdamage restriction #(\d+)', re.I)
self.setvariable_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: setvariable restriction #(\d+)', re.I)
self.setvariableval_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: setvariable value restriction #(\d+)', re.I)
self.teamswitch_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: teamswitch restriction #(\d+)', re.I)
self.waypointcondition_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: waypointcondition restriction #(\d+)', re.I)
self.waypointstatement_regex = re.compile(r'Player #(\d+) (.+) \((.{32})\) has been kicked by BattlEye: waypointstatement restriction #(\d+)', re.I)
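        # Illustrative parse (added; the player values are made up): a log line like
        #   Player #3 JohnDoe (0123456789abcdef0123456789abcdef) has been kicked by BattlEye: script restriction #12
        # is captured by the patterns above as
        #   (beid, name, guid, rule id) = ('3', 'JohnDoe', '0123456789abcdef0123456789abcdef', '12')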
    def scriptban(func):
        def extended_data(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            finally:
                self = args[0]
                sdta = args[1]
                command = False
                # Map each restriction's kick-line pattern to its ban-reason
                # table in reason.py.
                restrictions = [
                    (self.addbackpackcargo_regex, reason.addbackpackcargo_reason),
                    (self.addmagazinecargo_regex, reason.addmagazinecargo_reason),
                    (self.addweaponcargo_regex, reason.addweaponcargo_reason),
                    (self.attachto_regex, reason.attachto_reason),
                    (self.createvehicle_regex, reason.createvehicle_reason),
                    (self.deletevehicle_regex, reason.deletevehicle_reason),
                    (self.mpeventhandler_regex, reason.mpeventhandler_reason),
                    (self.publicvariable_regex, reason.publicvariable_reason),
                    (self.publicvariableval_regex, reason.publicvariableval_reason),
                    (self.remotecontrol_regex, reason.remotecontrol_reason),
                    (self.remoteexec_regex, reason.remoteexec_reason),
                    (self.script_regex, reason.script_reason),
                    (self.selectplayer_regex, reason.selectplayer_reason),
                    (self.setdamage_regex, reason.setdamage_reason),
                    (self.setvariable_regex, reason.setvariable_reason),
                    (self.setvariableval_regex, reason.setvariableval_reason),
                    (self.teamswitch_regex, reason.teamswitch_reason),
                    (self.waypointcondition_regex, reason.waypointcondition_reason),
                    (self.waypointstatement_regex, reason.waypointstatement_reason),
                ]
                # The first pattern that matches the log line wins; a ban is
                # only issued when the triggered rule id has a reason entry.
                for regex, reasons in restrictions:
                    match = regex.search(sdta)
                    if match:
                        beid, name, guid, tid = match.groups()
                        if tid in reasons:
                            command = "addban " + guid + " 0 BEC : " + reasons[tid]
                        break
                # Send the ban command..
                if command:
                    self.bec._Bec_queuelist.append(command)
        return extended_data
@scriptban
def extend_pros02(self, data):
self.org_func_be_prosess_02(data)
# function bec will use to start the plugin
def start(x):
Restrictions(x)
| 45.608889 | 155 | 0.651238 | 1,281 | 10,262 | 5.10929 | 0.115535 | 0.063866 | 0.087089 | 0.043545 | 0.623377 | 0.421696 | 0.40932 | 0.40932 | 0.4055 | 0.397708 | 0 | 0.013517 | 0.185344 | 10,262 | 225 | 156 | 45.608889 | 0.769378 | 0.098226 | 0 | 0.243902 | 0 | 0 | 0.231863 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030488 | false | 0 | 0.02439 | 0 | 0.073171 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb5a4e3ba14bb2b0201367de323e0145937debfb | 4,154 | py | Python | main.py | ba-talibe/RGB | 2bf0d6fa95344b23f9860fbb4534c4f0635ddefd | [
"MIT"
] | null | null | null | main.py | ba-talibe/RGB | 2bf0d6fa95344b23f9860fbb4534c4f0635ddefd | [
"MIT"
] | null | null | null | main.py | ba-talibe/RGB | 2bf0d6fa95344b23f9860fbb4534c4f0635ddefd | [
"MIT"
] | null | null | null |
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QLabel, QSlider,
QFrame, QApplication)
from PyQt5.QtGui import QColor, QIcon, QPixmap, QFont
from PyQt5.QtCore import Qt, QSize
import sys
class FenPrincipal(QWidget):
def __init__(self):
QWidget.__init__(self)
        self.setWindowTitle("RGB")
self.resize(500, 300)
self.initFen()
self.show()
def initFen(self):
mainLayout = QVBoxLayout()
        # configuration of the color preview field
font = QFont("Times New Roman", 12)
frameLayout = QVBoxLayout()
self.couleur = QFrame()
self.couleur.setFixedSize(200, 150)
self.col = QColor(0, 0, 0)
self.couleur.setStyleSheet("QWidget { background-color: %s }" % self.col.name())
        self.staticStr = "Hexadecimal : "
        self.hexValue = QLabel(self.staticStr + self.getHex(0, 0, 0))
self.hexValue.setFont(font)
frameLayout.addWidget(self.couleur)
frameLayout.addWidget(self.hexValue)
frameLayout.setAlignment(Qt.AlignCenter)
        # configuration of the red color controls
iSize = QSize(10, 10)
redLayout = QHBoxLayout()
rIcon = QLabel()
rIcon.setPixmap(QPixmap("red.ico").scaled(iSize))
        rLabel = QLabel("Red")
self.rSlider = QSlider(Qt.Horizontal)
self.rSlider.setMaximum(255)
self.rSlider.valueChanged.connect(self.changeRed)
self.rValue = QLabel("0")
redLayout.addWidget(rIcon)
redLayout.addWidget(rLabel)
redLayout.addWidget(self.rSlider)
redLayout.addWidget(self.rValue)
        # configuration of the green color controls
greenLayout = QHBoxLayout()
gIcon = QLabel()
gIcon.setPixmap(QPixmap("green.ico").scaled(iSize))
        gLabel = QLabel("Green")
self.gSlider = QSlider(Qt.Horizontal)
self.gSlider.setMaximum(255)
self.gValue = QLabel("0")
self.gSlider.valueChanged.connect(self.changeGreen)
greenLayout.addWidget(gIcon)
greenLayout.addWidget(gLabel)
greenLayout.addWidget(self.gSlider)
greenLayout.addWidget(self.gValue)
        # configuration of the blue color controls
blueLayout = QHBoxLayout()
bIcon = QLabel()
bIcon.setPixmap(QPixmap("blue.ico").scaled(iSize))
bLabel = QLabel("Blue")
self.bSlider = QSlider(Qt.Horizontal)
self.bSlider.setMaximum(255)
self.bValue = QLabel("0")
self.bSlider.valueChanged.connect(self.changeBlue)
blueLayout.addWidget(bIcon)
blueLayout.addWidget(bLabel)
blueLayout.addWidget(self.bSlider)
blueLayout.addWidget(self.bValue)
mainLayout.addLayout(frameLayout)
mainLayout.addLayout(redLayout)
mainLayout.addLayout(greenLayout)
mainLayout.addLayout(blueLayout)
self.setLayout(mainLayout)
    def getHex(self, r, g, b):
        # zero-pad each channel so e.g. (255, 0, 16) gives "#ff0010"
        return "#{:02x}{:02x}{:02x}".format(r, g, b)
    def updateColor(self):
        # refresh the preview frame and the hexadecimal label from self.col
        self.couleur.setStyleSheet("QWidget { background-color: %s }" % self.col.name())
        r, g, b = self.col.red(), self.col.green(), self.col.blue()
        self.hexValue.setText(self.staticStr + self.getHex(r, g, b))
    def changeRed(self, val):
        self.rValue.setText(str(val))
        self.col.setRed(val)
        self.updateColor()
    def changeGreen(self, val):
        self.gValue.setText(str(val))
        self.col.setGreen(val)
        self.updateColor()
    def changeBlue(self, val):
        self.bValue.setText(str(val))
        self.col.setBlue(val)
        self.updateColor()
app = QApplication(sys.argv)
fen = FenPrincipal()
sys.exit(app.exec_())
| 36.438596 | 88 | 0.629273 | 463 | 4,154 | 5.62635 | 0.2527 | 0.045681 | 0.008061 | 0.047601 | 0.258349 | 0.235317 | 0.1881 | 0.1881 | 0.1881 | 0.1881 | 0 | 0.013287 | 0.239047 | 4,154 | 113 | 89 | 36.761062 | 0.810819 | 0.045017 | 0 | 0.10989 | 0 | 0 | 0.051994 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065934 | false | 0 | 0.043956 | 0.010989 | 0.131868 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb5cafdad3aa512173fe809f94fa852f4b853442 | 5,377 | py | Python | pylewm/modes/hint_window.py | ellet/PyleWM | cbe4453c7deb771647be55fc4b4afff2317b1938 | [
"MIT"
] | 25 | 2018-02-19T20:22:48.000Z | 2021-11-15T11:36:49.000Z | pylewm/modes/hint_window.py | ellet/PyleWM | cbe4453c7deb771647be55fc4b4afff2317b1938 | [
"MIT"
] | 1 | 2018-02-19T20:27:16.000Z | 2020-01-11T08:02:20.000Z | pylewm/modes/hint_window.py | ellet/PyleWM | cbe4453c7deb771647be55fc4b4afff2317b1938 | [
"MIT"
] | 2 | 2020-02-25T16:44:42.000Z | 2021-05-22T09:58:22.000Z | import pylewm.modes.overlay_mode
import pylewm.modes.hint_helpers
import pylewm
from pylewm.rects import Rect
import win32gui, win32api, win32con
class WindowItem:
pass
class HintWindowMode(pylewm.modes.overlay_mode.OverlayMode):
def __init__(self, hintkeys, hotkeys):
super(HintWindowMode, self).__init__(hotkeys)
self.hintkeys = hintkeys
self.item_list = []
self.overlay_global()
self.bg_color = (0,0,0)
self.hint_color = (255,255,0)
self.border_color = (0, 128, 255)
self.title_color = (128,128,128)
self.floating_border = (255,0,255)
self.border_width = 1
self.box_size = (200, 50)
self.selection_text = ""
self.max_hidden_row = 10
self.closed = False
self.dirty = True
hidden_windows = {}
for hwnd, window in pylewm.windows.Windows.items():
if win32gui.IsWindow(hwnd):
if window.is_dropdown:
continue
if window.minimized:
continue
if window.space and not window.space.visible:
if window.space.monitor not in hidden_windows:
hidden_windows[window.space.monitor] = []
hidden_windows[window.space.monitor].append(window)
continue
item = WindowItem()
item.window = window
item.rect = window.rect
item.is_hidden = False
self.item_list.append(item)
for monitor, window_list in hidden_windows.items():
show_count = len(window_list)
item_width = monitor.rect.width / min(self.max_hidden_row, show_count)
for i in range(0, show_count):
window = window_list[i]
item = WindowItem()
item.window = window
item.title = win32gui.GetWindowText(window.handle)
item.rect = Rect.from_pos_size(
(monitor.rect.left + (i%self.max_hidden_row) * item_width,
monitor.rect.bottom - 30*(int(i/self.max_hidden_row)+1)),
(item_width, 30))
item.is_hidden = True
self.item_list.append(item)
pylewm.modes.hint_helpers.create_hints(self.item_list, self.hintkeys)
def should_clear(self):
return True
def should_draw(self):
if self.dirty:
self.dirty = False
return True
return False
def close(self):
self.closed = True
pylewm.hotkeys.queue_command(pylewm.hotkeys.escape_mode)
def confirm_selection(self, item):
pylewm.focus.set_focus(item.window)
self.close()
def end_mode(self):
super(HintWindowMode, self).end_mode()
def update_selection(self):
self.dirty = True
any_hints = False
for item in self.item_list:
if item.hint == self.selection_text:
self.confirm_selection(item)
break
elif item.hint.startswith(self.selection_text):
any_hints = True
if not any_hints:
self.selection_text = ""
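    # Illustrative walkthrough (added; the labels are examples): create_hints
    # assigns each item a prefix-free label built from hintkeys, e.g. 'a', 's'
    # or 'df'. Each keypress grows selection_text; an exact label match
    # confirms the window, and a dead-end prefix resets the selection above.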
def handle_key(self, key, isMod):
if self.closed:
return True
if not isMod and key.down:
if len(key.key) == 1 and not key.alt.isSet and not key.app.isSet and not key.ctrl.isSet and not key.win.isSet:
self.selection_text += key.key
self.update_selection()
return True
elif key.key == 'backspace' and len(self.selection_text) >= 1:
self.selection_text = self.selection_text[:-1]
self.update_selection()
return True
return super(HintWindowMode, self).handle_key(key, isMod)
def draw(self, overlay):
if self.closed:
return
for item in self.item_list:
if not item.hint.startswith(self.selection_text):
continue
rect = self.abs_to_overlay(item.rect)
if item.is_hidden:
overlay.draw_box(rect, self.bg_color)
overlay.draw_border(rect, self.border_color, self.border_width)
overlay.draw_text(
item.hint,
self.hint_color,
Rect((rect.left + 5, rect.top, rect.left + 60, rect.bottom)),
(0.5, 0.5)
)
overlay.draw_text(
item.title,
self.title_color,
Rect((rect.left + 65, rect.top, rect.right - 5, rect.bottom)),
(0.0, 0.5),
font=overlay.font_small
)
else:
box_rect = Rect.centered_around(rect.center, self.box_size)
overlay.draw_box(box_rect, self.bg_color)
if item.window.floating:
overlay.draw_border(box_rect, self.floating_border, 3)
overlay.draw_text(
item.hint,
self.hint_color,
box_rect,
(0.5, 0.5)
)
@pylewm.commands.PyleCommand
def start_hint_window(hotkeys={}, hintkeys="asdfjkl;"):
HintWindowMode(hintkeys, hotkeys)() | 35.143791 | 122 | 0.547145 | 612 | 5,377 | 4.632353 | 0.20915 | 0.04127 | 0.053968 | 0.022575 | 0.186949 | 0.0903 | 0.041623 | 0.025397 | 0 | 0 | 0 | 0.023066 | 0.363028 | 5,377 | 153 | 123 | 35.143791 | 0.804672 | 0 | 0 | 0.257576 | 0 | 0 | 0.003161 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0.007576 | 0.037879 | 0.007576 | 0.189394 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb5d1ed62eb3a42c40f4cae39f7857a23081a54d | 3,045 | py | Python | usps/main.py | shumatepf/identity-give-usps | 1bb195f7828cd97f60f04e287d6bf7111d02c52c | [
"CC0-1.0"
] | null | null | null | usps/main.py | shumatepf/identity-give-usps | 1bb195f7828cd97f60f04e287d6bf7111d02c52c | [
"CC0-1.0"
] | 8 | 2021-07-14T23:12:36.000Z | 2022-03-30T23:22:37.000Z | usps/main.py | shumatepf/identity-give-usps | 1bb195f7828cd97f60f04e287d6bf7111d02c52c | [
"CC0-1.0"
] | 4 | 2020-09-02T17:05:00.000Z | 2021-01-12T10:11:41.000Z | """
USPS Microservice FastAPI Web App.
"""
import json
import logging
import ssl
import base64
from typing import Optional
from uuid import UUID
from http import HTTPStatus
from fastapi import FastAPI, Header
from fastapi.responses import JSONResponse
from aiohttp import ClientSession, ClientError
from pydantic import BaseModel
from google.oauth2 import service_account
from google.auth.transport.requests import Request
from starlette_prometheus import metrics, PrometheusMiddleware
from usps import settings
app = FastAPI()
app.add_middleware(PrometheusMiddleware)
app.add_route("/metrics/", metrics)
class AddressVerificationInfo(BaseModel):
"""
Request model for the USPS AII /confidence_indicator API.
"""
first_name: str
last_name: str
middle_name: Optional[str] = None
suffix: Optional[str] = None
delivery_address: str
address_city_state_zip: str
class VerifiedResponse(BaseModel):
"""
Response model for valid (2XX) responses from the USPS API.
"""
uid: UUID
confidence_indicator: str
class ErrorResponse(BaseModel):
"""
Response model for failed (non-2XX) responses from the USPS API.
"""
uid: UUID
error: str
USPS_UUID = "5738f577-d283-49ec-9695-32b106c049d8"
USPS_URL = "https://cat-aii.usps.gov/"
if not settings.DEBUG:
credentials = service_account.IDTokenCredentials.from_service_account_info(
json.loads(base64.b64decode(settings.USPS_SERVICE_INFO)),
target_audience=settings.USPS_TARGET_AUDIENCE,
)
credentials.refresh(Request())
sslcontext = ssl.create_default_context(cafile="cat-aii-root.cer")
@app.post(
"/confidence_indicator",
response_model=VerifiedResponse,
responses={400: {"model": ErrorResponse}, 500: {"model": ErrorResponse}},
)
async def confidence_indicator(
address_verification_info: AddressVerificationInfo,
http_x_consumer_custom_id: str = Header(None),
):
"""
Confidence Indicator function that proxies requests to the USPS API.
"""
if settings.DEBUG:
logging.debug("Skipping network requests while in debug mode")
return JSONResponse({"uid": USPS_UUID, "confidence_indicator": "50.00"})
if not credentials.valid:
credentials.refresh(Request())
logging.info("Refreshed credentials")
headers = {
"authorization": f"bearer {credentials.token}",
"content-type": "application/json",
}
try:
async with ClientSession(headers=headers) as session:
async with session.post(
                # serialize explicitly to JSON: a dict passed via `data=` would be
                # form-encoded, contradicting the content-type header set above
                USPS_URL, data=json.dumps(address_verification_info.dict()), ssl=sslcontext
) as response:
return JSONResponse(
                    status_code=response.status, content=await response.json()  # parse so JSONResponse can re-serialize
)
except ClientError as error:
logging.error("Aiohttp error: %s", error)
return JSONResponse(
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
content={"error": "ClientError while validating address"},
)
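# Illustrative request against this proxy (added; host/port and field values
# are assumptions, not from the service's docs):
#   curl -X POST http://localhost:8000/confidence_indicator \
#     -H 'content-type: application/json' \
#     -d '{"first_name": "Jane", "last_name": "Doe", "delivery_address": "123 Main St", "address_city_state_zip": "Anytown ST 00000"}'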
| 27.681818 | 80 | 0.702135 | 341 | 3,045 | 6.140762 | 0.419355 | 0.054441 | 0.014327 | 0.023878 | 0.031519 | 0.031519 | 0.031519 | 0.031519 | 0 | 0 | 0 | 0.018227 | 0.207225 | 3,045 | 109 | 81 | 27.93578 | 0.849213 | 0.071264 | 0 | 0.082192 | 0 | 0 | 0.124861 | 0.021182 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.205479 | 0 | 0.424658 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb5ddc7ef51d3b1adb906324f02b68fad2497346 | 1,860 | py | Python | src/test.py | ittybe/ChessNerdsDestroyer | 3fc6dcee3ae8b48b56b78221665f425e0cd158c9 | [
"MIT"
] | null | null | null | src/test.py | ittybe/ChessNerdsDestroyer | 3fc6dcee3ae8b48b56b78221665f425e0cd158c9 | [
"MIT"
] | null | null | null | src/test.py | ittybe/ChessNerdsDestroyer | 3fc6dcee3ae8b48b56b78221665f425e0cd158c9 | [
"MIT"
] | null | null | null | from image_slicer import slice
tiles = slice(r"C:\Users\Superuser\Documents\Lightshot\chess\board.png", 64, 8, 8)
for tile in tiles:
print(f"row: {tile.row}")
print(f"col: {tile.column}")
print()
filepath = r"C:\Users\Superuser\Documents\Lightshot\chess\tiles" + f"\somethnig"
filename = f"{tile.row}_{tile.column}.png"
tile.save(filepath + filename)
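# Hedged counterpart (added; assumes image_slicer's join() API): the in-memory
# tiles can be stitched back together; kept commented to avoid extra file IO.
# from image_slicer import join
# join(tiles).save(r"C:\Users\Superuser\Documents\Lightshot\chess\board_rejoined.png")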
#!/usr/bin/python
# -*- coding: utf-8 -*-
# chess board state: square -> (color, piece letter) or None for empty squares
board = {
'h1': ('white', 'R'),
'g1': None,
'f1': ('white', 'B'),
'e1': ('white', 'K'),
'd1': ('white', 'Q'),
'c1': ('white', 'R'),
'b1': None,
'a1': ('white', 'R'),
'h2': ('white', 'P'),
'g2': ('white', 'P'),
'f2': ('white', 'P'),
'e2': ('white', 'P'),
'd2': None,
'c2': ('white', 'P'),
'b2': ('white', 'P'),
'a2': ('white', 'P'),
'h3': None,
'g3': None,
'f3': ('white', 'N'),
'e3': None,
'd3': None,
'c3': ('white', 'N'),
'b3': ('white', 'P'),
'a3': None,
'h4': None,
'g4': None,
'f4': None,
'e4': None,
'd4': ('white', 'P'),
'c4': None,
'b4': None,
'a4': None,
'h5': None,
'g5': None,
'f5': None,
'e5': None,
'd5': ('black', 'P'),
'c5': None,
'b5': None,
'a5': None,
'h6': None,
'g6': None,
'f6': ('black', 'N'),
'e6': None,
'd6': None,
'c6': None,
'b6': None,
'a6': None,
'h7': ('black', 'P'),
'g7': ('black', 'P'),
'f7': ('black', 'P'),
'e7': ('black', 'P'),
'd7': None,
'c7': ('black', 'P'),
'b7': ('black', 'P'),
'a7': ('black', 'P'),
'h8': ('black', 'R'),
'g8': None,
'f8': ('black', 'B'),
'e8': ('black', 'K'),
'd8': ('black', 'Q'),
'c8': ('black', 'R'),
'b8': ('black', 'N'),
'a8': ('black', 'N'),
}
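# Hedged helper for the mapping above (added for illustration; the function
# name is this sketch's, not the original author's):
def piece_at(board, square):
    # e.g. piece_at(board, 'e1') -> ('white', 'K'); empty squares give None
    return board.get(square)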
| 23.25 | 85 | 0.392473 | 223 | 1,860 | 3.264574 | 0.475336 | 0.074176 | 0.019231 | 0.043956 | 0.107143 | 0.107143 | 0.107143 | 0 | 0 | 0 | 0 | 0.053241 | 0.303226 | 1,860 | 79 | 86 | 23.544304 | 0.508488 | 0.02043 | 0 | 0 | 0 | 0 | 0.287766 | 0.075819 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.013333 | 0 | 0.013333 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb5e7d6ff170cf0fe680d1002cde0bb9008c1381 | 4,231 | py | Python | monitoring/prober/scd/test_operation_special_cases.py | rpai1/dss | 79d8110c336851b155a6e5417692ec68b70c0c07 | [
"Apache-2.0"
] | null | null | null | monitoring/prober/scd/test_operation_special_cases.py | rpai1/dss | 79d8110c336851b155a6e5417692ec68b70c0c07 | [
"Apache-2.0"
] | null | null | null | monitoring/prober/scd/test_operation_special_cases.py | rpai1/dss | 79d8110c336851b155a6e5417692ec68b70c0c07 | [
"Apache-2.0"
] | null | null | null | """Basic Operation tests:
- make sure the Operation doesn't exist with get or query
- create the Operation with a 60 minute length
- get by ID
- search with earliest_time and latest_time
- mutate
- delete
"""
import datetime
import json
import uuid
from monitoring.monitorlib.infrastructure import default_scope
from monitoring.monitorlib.scd import SCOPE_SC
OP1_ID = '00000020-b6ee-4082-b6e7-75eb4f000000'
OP2_ID = '00000000-ee51-4700-873d-e10911000000'
def test_ensure_clean_workspace(scd_session):
for op_id in (OP1_ID, OP2_ID):
resp = scd_session.get('/operation_references/{}'.format(op_id), scope=SCOPE_SC)
if resp.status_code == 200:
resp = scd_session.delete('/operation_references/{}'.format(op_id), scope=SCOPE_SC)
assert resp.status_code == 200, resp.content
resp = scd_session.get('/operation_references/{}'.format(op_id), scope=SCOPE_SC)
assert resp.status_code == 404, resp.content
elif resp.status_code == 404:
# As expected.
pass
else:
assert False, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_op_request_1(scd_session):
with open('./scd/resources/op_request_1.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operation_references/{}'.format(OP1_ID), json=req)
assert resp.status_code == 200, resp.content
resp = scd_session.delete('/operation_references/{}'.format(OP1_ID))
assert resp.status_code == 200, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_op_request_2(scd_session):
with open('./scd/resources/op_request_2.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operation_references/{}'.format(OP2_ID), json=req)
assert resp.status_code == 400, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_op_query_degenerate_polygon(scd_session):
with open('./scd/resources/op_request_3.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operation_references/query', json=req)
assert resp.status_code == 200, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_op_query_not_area_too_large(scd_session):
with open('./scd/resources/op_request_4.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operation_references/query', json=req)
assert resp.status_code == 200, resp.content
# ID conversion bug exposure
# Reproduces issue #314
@default_scope(SCOPE_SC)
def test_id_conversion_bug(scd_session):
sub_uuid = uuid.uuid4()
time_ref = datetime.datetime.utcnow() + datetime.timedelta(days=1)
time_start = datetime.datetime(time_ref.year, time_ref.month, time_ref.day, 1, 30)
time_end = datetime.datetime(time_ref.year, time_ref.month, time_ref.day, 22, 15)
req = {
"extents": {
"volume": {
"outline_polygon": {
"vertices": [
{ "lng": -91.49723052978516, "lat": 41.70085834502109 },
{ "lng": -91.50341033935547, "lat": 41.6770148220322 },
{ "lng": -91.47989273071289, "lat": 41.67509157220958 },
{ "lng": -91.4663314819336, "lat": 41.69329603398001 },
{ "lng": -91.49723052978516, "lat": 41.70085834502109 }
]
},
"altitude_upper": {"units": "M", "reference": "W84", "value": 764.79037},
"altitude_lower": {"units": "M", "reference": "W84", "value": 23.24352}
},
"time_start": {"value": time_start.isoformat() + "Z", "format": "RFC3339"},
"time_end": {"value": time_end.isoformat() + "Z", "format": "RFC3339"}
},
"old_version": 0,
"uss_base_url": "http://localhost:12012/services/uss/public/uss/v1/",
"notify_for_constraints": True
}
resp = scd_session.put('/subscriptions/{}'.format(sub_uuid), json=req)
assert resp.status_code == 200, resp.content
req["extents"]["time_start"]["value"] = (time_start + datetime.timedelta(hours=1)).isoformat() + "Z"
req["old_version"] = 1
resp = scd_session.put('/subscriptions/{}'.format(sub_uuid), json=req)
assert resp.status_code == 200, resp.content
resp = scd_session.delete('/subscriptions/{}'.format(sub_uuid))
assert resp.status_code == 200, resp.content
| 35.258333 | 102 | 0.68849 | 576 | 4,231 | 4.84375 | 0.289931 | 0.060932 | 0.060215 | 0.071685 | 0.603943 | 0.562007 | 0.525448 | 0.486022 | 0.430108 | 0.430108 | 0 | 0.087116 | 0.161664 | 4,231 | 119 | 103 | 35.554622 | 0.699464 | 0.096431 | 0 | 0.283951 | 0 | 0 | 0.206169 | 0.111785 | 0 | 0 | 0 | 0 | 0.135802 | 1 | 0.074074 | false | 0.012346 | 0.061728 | 0 | 0.135802 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb619fc6d32bfbd7a36b34114ff26ee0e896ca26 | 579 | py | Python | lino_book/projects/20121124/settings.py | lino-framework/lino_book | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | [
"BSD-2-Clause"
] | 3 | 2016-08-25T05:58:09.000Z | 2019-12-05T11:13:45.000Z | lino_book/projects/20121124/settings.py | lino-framework/lino_book | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | [
"BSD-2-Clause"
] | 18 | 2016-11-12T21:38:58.000Z | 2019-12-03T17:54:38.000Z | lino_book/projects/20121124/settings.py | lino-framework/lino_book | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | [
"BSD-2-Clause"
] | 9 | 2016-10-15T11:12:33.000Z | 2021-09-22T04:37:37.000Z | from lino.projects.std.settings import *
# configure_plugin('countries', country_code='BE')
class Site(Site):
verbose_name = "20121124"
# demo_fixtures = ["few_countries", "few_cities", "demo"]
def get_installed_apps(self):
yield super(Site, self).get_installed_apps()
yield 'lino_book.projects.20121124'
SITE = Site(globals())
DEBUG = True
# INSTALLED_APPS = ['lino_book.projects.20121124']
#
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': ':memory:'
# }
# }
#SECRET_KEY = "123"
| 18.677419 | 61 | 0.633851 | 65 | 579 | 5.430769 | 0.661538 | 0.110482 | 0.090652 | 0.135977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06087 | 0.205527 | 579 | 30 | 62 | 19.3 | 0.706522 | 0.492228 | 0 | 0 | 0 | 0 | 0.124113 | 0.095745 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb649548166bd368f93e2aa06b4e56191bc71438 | 5,715 | py | Python | examples/fake_data/plot_basic_usage.py | abeelen/nikamap | 84867d977dc2f648dba137bf0d7a8c0ebe030419 | [
"MIT"
] | 1 | 2019-06-07T07:59:41.000Z | 2019-06-07T07:59:41.000Z | examples/fake_data/plot_basic_usage.py | abeelen/nikamap | 84867d977dc2f648dba137bf0d7a8c0ebe030419 | [
"MIT"
] | 1 | 2020-04-22T08:54:50.000Z | 2020-04-22T09:00:30.000Z | examples/fake_data/plot_basic_usage.py | abeelen/nikamap | 84867d977dc2f648dba137bf0d7a8c0ebe030419 | [
"MIT"
] | 1 | 2021-08-24T02:39:07.000Z | 2021-08-24T02:39:07.000Z | """
===========
Basic usage
===========
This example shows the basic operation on the :class:`nikamap.NikaMap` object
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.table import Table
from astropy.coordinates import SkyCoord, Angle
from nikamap import NikaMap
###############################################################################
# Read the data
# -------------
#
# By default the read routine will read the 1mm band, but any band can
# be read
#
# .. note:: This fake dataset has been generated by the :mod:`fake_map.py` script
data_path = os.getcwd()
nm = NikaMap.read(os.path.join(data_path, 'fake_map.fits'))
################################################################################
# NikaMap is derived from the `astropy.NDData` class and thus you can access and manipulate the data the same way
#
# * `nm.data` : an np.array containing the brightness
# * `nm.wcs` : a WCS object describing the astrometry of the image
# * `nm.uncertainty.array` : an np.array containing the uncertainty array
# * `nm.mask` : a boolean mask of the observations
# * `nm.meta` : a copy of the header of the original map
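# For instance (illustrative line, added here; assumes the usual NDData
# convention that ``mask == True`` flags unobserved pixels):
#
#   nm.data[~nm.mask].std()   # brightness scatter over the observed pixels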
print(nm)
###############################################################################
#
print(nm.wcs)
################################################################################
# NikaMap objects support slicing like numpy arrays, thus one can access
# part of the dataset
print(nm[96:128, 96:128])
################################################################################
# Basic Plotting
# --------------
# thus they can be plotted directly using matplotlib routines
plt.imshow(nm.data)
################################################################################
# or using the convenience routine of :class:`nikamap.NikaMap`
#
fig, axes = plt.subplots(ncols=2, subplot_kw={'projection': nm.wcs})
levels = np.logspace(np.log10(2 * 1e-3), np.log10(10e-3), 4)
nm[275:300, 270:295].plot(ax=axes[0], levels=levels)
nm[210:260, 260:310].plot(ax=axes[1], levels=levels)
################################################################################
#
nm.plot_SNR(cbar=True)
################################################################################
# or the power spectral density of the data :
fig, ax = plt.subplots()
powspec, bins = nm.plot_PSD(ax=ax)
islice = nm.get_square_slice()
_ = nm[islice, islice].plot_PSD(ax=ax)
################################################################################
# Beware that these PSDs are based on non-uniform noise, and are thus dominated
# by the noisiest parts of the map
################################################################################
# Match filtering
# ---------------
#
# A match filter algorithm can be applied to the data to improve
# the detectability of sources, here using the Gaussian beam as the filter
mf_nm = nm.match_filter(nm.beam)
mf_nm.plot_SNR()
################################################################################
# Source detection & photometry
# -----------------------------
#
# A peak finding algorithm can be applied to the SNR datasets
mf_nm.detect_sources(threshold=3)
################################################################################
# The resulting catalog is stored in the `sources` property of the :class:`nikamap.NikaMap` object
print(mf_nm.sources)
################################################################################
# and can be overplotted on the SNR map
mf_nm.plot_SNR(cat=True)
################################################################################
# There are two available photometries :
# * **peak_flux** : retrieve point source fluxes directly from the pixel values of the map, ideally the match-filtered one
# * **psf_flux** : perform PSF fitting on the pixels at the given positions
mf_nm.phot_sources(peak=True, psf=False)
################################################################################
# catalog which can be transferred to the un-filtered dataset, where PSF fitting can be performed
nm.phot_sources(sources=mf_nm.sources, peak=False, psf=True)
################################################################################
# the `sources` attribute now contains both photometries
print(nm.sources)
################################################################################
# which can be compared to the original fake source catalog
fake_sources = Table.read('fake_map.fits', 'FAKE_SOURCES')
fake_sources.meta['name'] = 'fake sources'
nm.plot_SNR(cat=[(fake_sources, '^'), (nm.sources, '+')])
################################################################################
# or in greater detail :
fake_coords = SkyCoord(fake_sources['ra'], fake_sources['dec'], unit="deg")
detected_coords = SkyCoord(nm.sources['ra'], nm.sources['dec'], unit="deg")
idx, sep2d, _ = fake_coords.match_to_catalog_sky(detected_coords)
good = sep2d < 10 * u.arcsec
idx = idx[good]
sep2d = sep2d[good]
ra_off = Angle(fake_sources[good]['ra'] - nm.sources[idx]['ra'], 'deg')
dec_off = Angle(fake_sources[good]['dec'] - nm.sources[idx]['dec'], 'deg')
fig, axes = plt.subplots(ncols=2)
for method in ['flux_psf', 'flux_peak']:
axes[0].errorbar(fake_sources[good]['amplitude'],
nm.sources[idx][method],
yerr=nm.sources[idx]['e{}'.format(method)],
fmt='o',
label=method)
axes[0].legend(loc='best')
axes[0].set_xlabel('input flux [mJy]')
axes[0].set_ylabel('detected flux [mJy]')
axes[1].scatter(ra_off.arcsecond, dec_off.arcsecond)
axes[1].set_xlabel('R.A. off [arcsec]')
axes[1].set_ylabel('Dec. off [arcsec]')
| 35.06135 | 127 | 0.52126 | 671 | 5,715 | 4.357675 | 0.368107 | 0.03078 | 0.012312 | 0.015048 | 0.069083 | 0.0342 | 0 | 0 | 0 | 0 | 0 | 0.012716 | 0.119335 | 5,715 | 162 | 128 | 35.277778 | 0.56825 | 0.349956 | 0 | 0 | 0 | 0 | 0.090337 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12963 | 0 | 0.12963 | 0.092593 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb64da90e43b8fc174324fa7f9eea0235f88fc2d | 670 | py | Python | 2016/aoc/day22/day22.solver.py | scottbilas/advent-of-code | 5181c2c0e5b0638389e923d8dd98d54063697f91 | [
"Unlicense"
] | 3 | 2018-12-13T15:50:53.000Z | 2020-12-02T20:59:56.000Z | 2016/aoc/day22/day22.solver.py | scottbilas/advent-of-code | 5181c2c0e5b0638389e923d8dd98d54063697f91 | [
"Unlicense"
] | 1 | 2021-12-04T11:40:02.000Z | 2021-12-04T11:40:02.000Z | 2016/aoc/day22/day22.solver.py | scottbilas/advent-of-code | 5181c2c0e5b0638389e923d8dd98d54063697f91 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
def getinput(): return open('day22.input.txt').read()
### PART 1
import re
def solve1():
    # each node row of the puzzle's `df` dump yields 6 integers:
    # x, y, size, used, avail, use% -- keep the (used, avail) pairs
    nums = [int(i) for i in re.findall(r'\d+', getinput())]
    avail = sorted(zip(nums[3::6], nums[4::6]), key=lambda v: (v[1], v[0]))
    used = [v for v in sorted(avail) if v[0] > 0]
    # two-pointer sweep: nodes in `used` come in ascending order of used space,
    # so the cursor `ia` into the avail-sorted list only ever moves forward
    pairs, ia = 0, 0
    for vu in used:
        while ia < len(avail) and vu[0] > avail[ia][1]:
            ia += 1
        pairs += len(avail) - ia
        if vu[0] <= vu[1]:
            pairs -= 1  # a node that fits on itself must not count as a pair
    return pairs
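# A hedged self-check of the counting technique above (added; not part of the
# original solver). count_pairs mirrors solve1's sweep on an explicit node list.
def count_pairs(nodes):
    # nodes: (used, avail) tuples; counts viable pairs A != B with A.used <= B.avail
    avail = sorted(nodes, key=lambda v: (v[1], v[0]))
    used = [v for v in sorted(avail) if v[0] > 0]
    pairs = ia = 0
    for vu in used:
        while ia < len(avail) and vu[0] > avail[ia][1]:
            ia += 1
        pairs += len(avail) - ia
        if vu[0] <= vu[1]:
            pairs -= 1
    return pairs
assert count_pairs([(0, 10), (8, 2), (6, 4)]) == 2  # (8,2)->(0,10) and (6,4)->(0,10)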
s1 = solve1()
print(s1)
assert s1 == 903
### PART 2
| 19.142857 | 75 | 0.535821 | 110 | 670 | 3.263636 | 0.5 | 0.011142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063136 | 0.267164 | 670 | 34 | 76 | 19.705882 | 0.668024 | 0.158209 | 0 | 0 | 0 | 0 | 0.032491 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.117647 | false | 0 | 0.058824 | 0.058824 | 0.235294 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb64ef1e3d9b673e65e1c75bc5be5ec57d8c51e0 | 1,245 | py | Python | tests/unit/models/reddit/test_widgets.py | nickatnight/praw | 6ba5c92e5d5210338c0a2a2755a5e5e226a002fa | [
"BSD-2-Clause"
] | 2,360 | 2015-01-03T18:27:44.000Z | 2022-03-26T23:24:49.000Z | tests/unit/models/reddit/test_widgets.py | nickatnight/praw | 6ba5c92e5d5210338c0a2a2755a5e5e226a002fa | [
"BSD-2-Clause"
] | 1,187 | 2015-01-04T18:42:10.000Z | 2022-03-28T13:46:33.000Z | tests/unit/models/reddit/test_widgets.py | nickatnight/praw | 6ba5c92e5d5210338c0a2a2755a5e5e226a002fa | [
"BSD-2-Clause"
] | 591 | 2015-01-04T17:33:34.000Z | 2022-03-27T20:28:26.000Z | from json import dumps
from pytest import raises
from praw.models import (
SubredditWidgets,
SubredditWidgetsModeration,
Widget,
WidgetModeration,
)
from praw.models.base import PRAWBase
from praw.models.reddit.widgets import WidgetEncoder
from ... import UnitTest
class TestWidgetEncoder(UnitTest):
def test_bad_encode(self):
data = [
1,
"two",
SubredditWidgetsModeration(self.reddit.subreddit("subreddit"), self.reddit),
]
with raises(TypeError):
dumps(data, cls=WidgetEncoder) # should throw TypeError
def test_good_encode(self):
data = [
1,
"two",
PRAWBase(self.reddit, _data={"_secret": "no", "3": 3}),
self.reddit.subreddit("four"),
]
assert '[1, "two", {"3": 3}, "four"]' == dumps(data, cls=WidgetEncoder)
class TestWidgets(UnitTest):
def test_subredditwidgets_mod(self):
sw = SubredditWidgets(self.reddit.subreddit("fake_subreddit"))
assert isinstance(sw.mod, SubredditWidgetsModeration)
def test_widget_mod(self):
w = Widget(self.reddit, {})
assert isinstance(w.mod, WidgetModeration)
assert w.mod.widget == w
| 27.065217 | 88 | 0.629719 | 129 | 1,245 | 5.992248 | 0.356589 | 0.07762 | 0.054334 | 0.03881 | 0.046572 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007576 | 0.257831 | 1,245 | 45 | 89 | 27.666667 | 0.829004 | 0.017671 | 0 | 0.166667 | 0 | 0 | 0.058149 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb64ef495f0155c0116838e0b3bd5976f793f941 | 5,036 | py | Python | src/game.py | CalebABG/iNvader-SpAcers | 8a4518b831cc35ecf4427c78151f1f0e83093629 | [
"MIT"
] | 1 | 2015-07-29T14:23:31.000Z | 2015-07-29T14:23:31.000Z | src/game.py | CalebABG/Invader-Spacers | 8a4518b831cc35ecf4427c78151f1f0e83093629 | [
"MIT"
] | null | null | null | src/game.py | CalebABG/Invader-Spacers | 8a4518b831cc35ecf4427c78151f1f0e83093629 | [
"MIT"
] | null | null | null | import rocket_attachment
import utilities
from player import Player
from shield import *
import pygame  # pygame is used directly throughout this module
# Note to self, fix player score system, collisions if needed
# Fix enemy kill system, and update system
class Game:
def __init__(self, width=750, height=650, frames=78, shield_mode=1):
utilities.GAME_FRAME_RATE = frames
utilities.GAME_WINDOW = pygame.display.set_mode((width, height), pygame.DOUBLEBUF, 32)
self.shield_level = shield_mode
""" Time counter """
self.time_played = 0
""" Amount of time game lasts """
self.game_time = 60 * 3 # In seconds
""" Creates an all sprites group """
self.all_sprites = pygame.sprite.Group()
""" Player group """
self.player_group = pygame.sprite.Group()
""" Bullet group """
self.bullet_group = pygame.sprite.Group()
""" Shield group """
self.shield_group = pygame.sprite.Group()
""" Aliens group """
self.alien_group = pygame.sprite.Group()
""" rocket objects group """ # Change to group which contains the AI or Aliens
self.rockets_group = pygame.sprite.Group()
""" Creates the player and gives it: a position on the screen, a
speed and adds to player group """
self.player = Player(width, height, utilities.GAME_DELTA)
self.player.set_position(width / 2, height - 24)
self.player_group.add(self.player)
def run(self):
display_width, display_height = pygame.display.get_window_size()
""" Updates/Refreshes the game fps_rate per second(s) """
self.time_played += utilities.GAME_DELTA / 1000.0
        remaining_time = self.game_time - self.time_played  # seconds left of the game_time budget
if len(self.player_group) == 0:
utilities.GAME_STATE = 3
return
""" Runs the particle graphics() method while the game runs """
utilities.draw_particles(display_width, display_height, utilities.GAME_COLORS["white"], self.all_sprites)
""" Runs the alien graphics() method and limits the aliens on the screen """
while len(self.alien_group) < 60:
utilities.draw_aliens(display_width, display_height, self.alien_group, self.player, self.player_group)
keys = pygame.key.get_pressed()
""" Handles player shoot event """
self.player.reg_shot(keys, self.alien_group, display_height, self.bullet_group)
""" Handles events for rocket creation """
if keys[pygame.K_DOWN]:
if len(self.rockets_group) < 8:
rockets_1 = rocket_attachment.RocketAttachment(display_width, display_height, self.alien_group)
rockets_1.set_position(self.player.rect.left - 1, self.player.rect.y)
self.rockets_group.add(rockets_1)
rockets_2 = rocket_attachment.RocketAttachment(display_width, display_height, self.alien_group)
rockets_2.set_position(self.player.rect.right - 3, self.player.rect.y)
self.rockets_group.add(rockets_2)
                gravity = -7  # Default value is -7
                if rockets_1.vy == 0 or rockets_2.vy == 0:  # fresh rockets start with no vertical speed
                    rockets_1.vy += gravity
                    rockets_2.vy += gravity
""" Handles events for shield creation on key press """
if keys[pygame.K_SPACE]:
if len(self.shield_group) > 0:
self.shield_group.empty()
else:
shield = Shield(self.player, self.alien_group, self.shield_level)
self.shield_group.add(shield)
""" Fills the screen with a color """
utilities.GAME_WINDOW.fill(utilities.GAME_COLORS["black"]) # Default color = Colors["majestic blue"]
""" Draws all of the sprites on the screen and updates them """
self.all_sprites.draw(utilities.GAME_WINDOW)
self.all_sprites.update()
""" Draws and updates the alien sprites """
self.alien_group.draw(utilities.GAME_WINDOW)
self.alien_group.update()
""" Draws and updates the bullets """
self.bullet_group.draw(utilities.GAME_WINDOW)
self.bullet_group.update()
""" Draws and updates the rockets """
self.rockets_group.draw(utilities.GAME_WINDOW)
self.rockets_group.update()
""" Draws and updates the shield """
self.shield_group.draw(utilities.GAME_WINDOW)
self.shield_group.update()
""" Draws the player on the screen and updates the player """
self.player_group.draw(utilities.GAME_WINDOW)
self.player_group.update(self.alien_group, utilities.GAME_DELTA)
""" Sets the title which of the window and displays info for multiple variables and objects """
pygame.display.set_caption(
"FPS: %.2d | all_sprites: %s | bullet_group: %s| Time played: %.2d | Remaining time: %.2d"
% (utilities.GAME_CLOCK.get_fps(), len(self.all_sprites), len(self.bullet_group), self.time_played,
remaining_time))
| 39.03876 | 114 | 0.636219 | 634 | 5,036 | 4.876972 | 0.241325 | 0.071475 | 0.045278 | 0.044631 | 0.215718 | 0.16947 | 0.089586 | 0.076973 | 0.076973 | 0.050453 | 0 | 0.012628 | 0.260921 | 5,036 | 128 | 115 | 39.34375 | 0.818109 | 0.043487 | 0 | 0 | 0 | 0.014925 | 0.026008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.059701 | 0 | 0.119403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb657c013b48fdca3534f14824ad9ccdf6b7d908 | 945 | py | Python | webpage/metadata.py | acdh-oeaw/4dpuzzle | 7856bbd82c7dfa8da1d5f1ad40593219a35b3cfe | [
"MIT"
] | null | null | null | webpage/metadata.py | acdh-oeaw/4dpuzzle | 7856bbd82c7dfa8da1d5f1ad40593219a35b3cfe | [
"MIT"
] | 6 | 2020-06-05T18:32:02.000Z | 2022-02-10T07:22:24.000Z | webpage/metadata.py | acdh-oeaw/4dpuzzle | 7856bbd82c7dfa8da1d5f1ad40593219a35b3cfe | [
"MIT"
] | 1 | 2020-06-30T13:52:41.000Z | 2020-06-30T13:52:41.000Z | # this files contains basic metadata about the project. This data will be used
# (by default) in the base.html and index.html
PROJECT_METADATA = {
'title': 'p4d',
'author': 'Peter Andorfer',
'subtitle': 'puzzle 4 d',
'description': 'The project A Puzzle in 4D aims to provide digital long-term preservation for\
the rich archaeological resources of the Austrian excavation project at Tell el-Daba in Egypt.',
'github': 'https://gitlab.com/acdh-oeaw/p4d/p4da-app',
'purpose_de': 'Webapplikation',
'purpose_en': 'Web App',
'version': '0.0.1',
'matomo_id': '160',
'matomo_url': '//matomo.acdh.oeaw.ac.at/',
'imprint': '/imprint',
'social_media': [
('fab fa-twitter fa-2x', 'https://twitter.com/ACDH_OeAW'),
('fab fa-youtube fa-2x', 'https://www.youtube.com/channel/UCgaEMaMbPkULYRI5u6gvG-w'),
],
'app_type': 'database', # database|website|service|tool|digital-edition
}
| 41.086957 | 100 | 0.660317 | 128 | 945 | 4.8125 | 0.695313 | 0.038961 | 0.035714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01938 | 0.180952 | 945 | 22 | 101 | 42.954545 | 0.776486 | 0.17672 | 0 | 0 | 0 | 0 | 0.48062 | 0.0323 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb65bc0685dbca59865f68cbae384bf3fe0d5dc4 | 1,559 | py | Python | setup.py | ivica-k/tfvars2markdown | 17d8ccee95d1877e4c433b75f5a734c9e126eb1f | [
"MIT"
] | 2 | 2020-01-10T11:01:49.000Z | 2021-02-16T22:10:58.000Z | setup.py | ivica-k/tfvars2markdown | 17d8ccee95d1877e4c433b75f5a734c9e126eb1f | [
"MIT"
] | null | null | null | setup.py | ivica-k/tfvars2markdown | 17d8ccee95d1877e4c433b75f5a734c9e126eb1f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
from setuptools import setup
from setuptools.command.install import install
VERSION = "0.0.4"
def get_install_reqs():
with open("requirements.txt") as reqs_file:
return reqs_file.read()
def get_long_description():
with open("README.md") as readme_file:
return readme_file.read()
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = 'verify that the git tag matches our version'
def run(self):
tag = os.getenv('CIRCLE_TAG')
if tag != VERSION:
info = f"Git tag '{tag}' does not match the version of this app '{VERSION}'"
sys.exit(info)
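# Hedged usage note (added; not part of the original file): CI can invoke the
# custom command registered below via setup.py with CIRCLE_TAG set, e.g.
#   CIRCLE_TAG=0.0.4 python setup.py verify
# which exits with an error message when the tag differs from VERSION above.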
setup(
name="tfvars2markdown",
version=VERSION,
description="Converts Terraform 0.12+ variables file into a Markdown table",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Ivica Kolenkaš",
author_email="ivica.kolenkas@gmail.com",
url="https://github.com/ivica-k/tfvars2markdown",
packages=["tfvars2markdown"],
package_dir={"tfvars2markdown": "src"},
entry_points={
"console_scripts": ["tfvars2markdown=src.main:cli"],
},
install_requires=get_install_reqs(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
cmdclass={
'verify': VerifyVersionCommand,
}
)
| 26.423729 | 88 | 0.664529 | 186 | 1,559 | 5.446237 | 0.553763 | 0.05923 | 0.027641 | 0.031589 | 0.071076 | 0.071076 | 0.071076 | 0.071076 | 0 | 0 | 0 | 0.011419 | 0.213598 | 1,559 | 58 | 89 | 26.87931 | 0.814845 | 0.052598 | 0 | 0 | 0 | 0 | 0.348063 | 0.03535 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.093023 | 0 | 0.255814 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb6b87f44a1ccb9a11483043b04a469eb2de00c7 | 565 | py | Python | experiments/experiments_2/bs3_exp10_runner.py | petrroll/msc-neuro | cdfb5f0ad1a6974fdde6ac9760364d5545d70690 | [
"MIT"
] | 1 | 2019-10-26T19:38:42.000Z | 2019-10-26T19:38:42.000Z | experiments/experiments_2/bs3_exp10_runner.py | petrroll/msc-neuro | cdfb5f0ad1a6974fdde6ac9760364d5545d70690 | [
"MIT"
] | null | null | null | experiments/experiments_2/bs3_exp10_runner.py | petrroll/msc-neuro | cdfb5f0ad1a6974fdde6ac9760364d5545d70690 | [
"MIT"
] | 1 | 2021-03-23T14:54:04.000Z | 2021-03-23T14:54:04.000Z | import os
import sys
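# Make repository-root imports (utils.runners) work when this file is run as a script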
sys.path.append(os.getcwd())
import utils.runners as urun
if __name__ == "__main__":
exp_folder = "experiments_2"
exp = "bs3_exp10"
runner = urun.get_runner(sys.argv[1])
run = 0
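    # Sweep the 2x3 grid of linearity and input-scaling settings; each combination gets its own run id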
for lin_scale in [True, False]:
for input_scale in ['normalize_mean_std', 'times_mil', 'identity']:
runner(
exp_folder, exp,
f"--exp_folder={exp_folder} --exp={exp} --run={run} --lin_scale={lin_scale} --input_scale={input_scale}",
run
)
run += 1
| 26.904762 | 122 | 0.566372 | 73 | 565 | 4.054795 | 0.534247 | 0.121622 | 0.121622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017677 | 0.299115 | 565 | 20 | 123 | 28.25 | 0.729798 | 0 | 0 | 0 | 0 | 0.058824 | 0.293805 | 0.132743 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb6b8969b4b54daaf465f0f47b60e23c402c8e62 | 2,845 | py | Python | src/tasca.py | jesuschm/tasca | 371460bdf9ea4306dad5d2241cc307ec2e64dee1 | [
"MIT"
] | null | null | null | src/tasca.py | jesuschm/tasca | 371460bdf9ea4306dad5d2241cc307ec2e64dee1 | [
"MIT"
] | null | null | null | src/tasca.py | jesuschm/tasca | 371460bdf9ea4306dad5d2241cc307ec2e64dee1 | [
"MIT"
] | null | null | null | import sys
import logging
from application.commands_service import post, read, follow, wall
from infra.databases.mongo import MongoRepository
_repo = MongoRepository()
_verbose = False
def main():
try:
command = None
if _repo.client:
logging.info("[+] Hello friend! Welcome to the Tasca. Get fun! ")
logging.info("[+] Control + C to exit.\n")
            while True:  # exit via Ctrl+C (KeyboardInterrupt, handled below)
try:
command = str(input("> "))
# Posting command
if '->' in command:
data = command.split(" -> ")
if len(data) == 2:
post(_repo, username = data[0], message = data[1])
else:
logging.error("[-] Bad post command. Correct format: [username] -> [message].")
elif 'follows' in command:
data = command.split(" follows ")
if len(data) == 2:
user = data[0]
follow_user = data[1]
rc = follow(_repo, username = user, follow_username = follow_user)
if rc:
logging.debug(f"[+] {user} is now following {follow_user}.")
else:
logging.error(f"[-] Error trying to follow {follow_user}")
else:
logging.error("[-] Bad follow command. Correct format: [username] -> [username].")
elif 'wall' in command:
data = command.split(" wall")
if len(data) == 2 and data[1] == '':
wall(_repo, username = data[0])
else:
logging.error("[-] Bad wall command. Correct format: [username] wall.")
else:
data = command.split(" ")
if len(data) == 1:
read(_repo, username = command)
else:
logging.error("[-] Bad username to read. Usernames don't contain spaces.")
except Exception as e:
logging.error(f"[-] Error: {e}.")
else:
raise("Database not connected.")
except KeyboardInterrupt:
logging.info(f"\n[+] Quitting.. Bye!")
sys.exit(0)
except Exception as e:
logging.error(f"[-] Error: {e}. Quitting.")
sys.exit(1)
if __name__ == "__main__":
"""Entry point
"""
main() | 41.231884 | 110 | 0.410193 | 242 | 2,845 | 4.735537 | 0.355372 | 0.073298 | 0.069808 | 0.066318 | 0.205061 | 0.108202 | 0.064572 | 0.064572 | 0.064572 | 0 | 0 | 0.008158 | 0.482953 | 2,845 | 69 | 111 | 41.231884 | 0.770904 | 0.005272 | 0 | 0.22807 | 0 | 0 | 0.18632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.070175 | 0 | 0.087719 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb6bc619a63b43d5e2ea5c67c93afe6234007a78 | 10,584 | py | Python | robosuite/controllers/hmfc.py | hermanjakobsen/robosuite | 9b4342df5bbf5158026259dd10ccf5bde53a1caf | [
"MIT"
] | null | null | null | robosuite/controllers/hmfc.py | hermanjakobsen/robosuite | 9b4342df5bbf5158026259dd10ccf5bde53a1caf | [
"MIT"
] | null | null | null | robosuite/controllers/hmfc.py | hermanjakobsen/robosuite | 9b4342df5bbf5158026259dd10ccf5bde53a1caf | [
"MIT"
] | null | null | null | from robosuite.controllers.base_controller import Controller
import robosuite.utils.transform_utils as T
import numpy as np
class HybridMotionForceController(Controller):
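    """Hybrid motion/force controller: force control along z, motion control in the
    remaining directions (equations from chapter 9 of The Handbook of Robotics)."""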
def __init__(self,
sim,
eef_name,
joint_indexes,
actuator_range,
input_max=1,
input_min=-1,
output_max=(0.05, 0.05, 0.05, 0.5, 0.5, 0.5),
output_min=(-0.05, -0.05, -0.05, -0.5, -0.5, -0.5),
policy_freq=20,
                 **kwargs  # unused; absorbs extra keys so older config dicts don't raise
):
super().__init__(
sim,
eef_name,
joint_indexes,
actuator_range,
)
# Control dimension
self.control_dim = 0 # action space dimension.
# input and output max and min (allow for either explicit lists or single numbers)
self.input_max = self.nums2array(input_max, self.control_dim)
self.input_min = self.nums2array(input_min, self.control_dim)
self.output_max = self.nums2array(output_max, self.control_dim)
self.output_min = self.nums2array(output_min, self.control_dim)
# control frequency
self.control_freq = policy_freq
self.control_timestep = 1 / self.control_freq
# subspace
self.S_f = np.array([[0, 0, 1, 0, 0, 0]]).reshape([6,1]) # force-control-subspace (only doing force control in z)
self.S_v = np.array([[1, 0, 0, 0, 0], # motion-control-subspace (x, y, ori_x, ori_y, ori_z)
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]])
# stiffness of the interaction [should be estimated (this one is chosen at random)]
self.K = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 100, 0, 0, 0],
[0, 0, 0, 5, 0, 0],
[0, 0, 0, 0, 5, 0],
[0, 0, 0, 0, 0, 1]])
self.C = np.linalg.inv(self.K)
# inverse subspaces
self.S_v_inv = self.get_S_inv(self.S_v)
self.S_f_inv = self.get_S_inv(self.S_f)
        # derivative of the interaction stiffness
self.K_dot = self.get_K_dot()
# force control dynamics
self.K_Plambda = 15 # force gain
        self.K_Dlambda = 2 * np.sqrt(self.K_Plambda)  # force damping (critically damped)
# position control dynamics
self.Pp = 150 # x and y pos gain
self.Dp = 2*np.sqrt(self.Pp) # x and y pos damping
# orientation control dynamics
self.Po = 100 # orientation gain
self.Do = 2*np.sqrt(self.Po) # orientation damping
self.K_Pr = np.array([[self.Pp, 0, 0, 0, 0], # Stiffness matrix
[0, self.Pp, 0, 0, 0],
[0, 0, self.Po, 0, 0],
[0, 0, 0, self.Po, 0],
[0, 0, 0, 0, self.Po]])
self.K_Dr = np.array([[self.Dp, 0, 0, 0, 0], # Damping matrix
[0, self.Dp, 0, 0, 0],
[0, 0, self.Do, 0, 0],
[0, 0, 0, self.Do, 0],
[0, 0, 0, 0, self.Do]])
# initialize robot
self.robot = None
self.probe_id = None
# initialize desired trajectories
self.p_d = np.zeros(2) # position trajectory
self.r_d_dot = np.zeros(5)
self.r_d_ddot = np.zeros(5)
self.f_d = 5 # force trajectory (N)
self.f_d_dot = 0
self.f_d_ddot = 0
self.goal_ori = np.array([-0.69192486, 0.72186726, -0.00514253, -0.01100909]) # (x, y, z, w) quaternion
# initialize trajectory point from environment
        self.traj_pos = None  # set by the environment
# initialize measurements
self.z_force = 0 # force in z-direction
self.prev_z_force = self.z_force # force in z-direction from previous timestep
self.z_force_running_mean = self.z_force
self.v = np.zeros(5) # angular and linear (excluding z) velocity
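    # Cache the probe body id and seed the force/velocity readings once a robot is attached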
def _initialize_measurements(self):
self.probe_id = self.sim.model.body_name2id(self.robot.gripper.root_body)
self.z_force = self.robot.ee_force[-1]
#self.z_force = self.sim.data.cfrc_ext[self.probe_id][-1]
self.prev_z_force = self.z_force
self.z_force_running_mean = 0
self.v = self.get_eef_velocity()
# Must be called in environment's reset function
def set_robot(self, robot):
self.robot = robot
self._initialize_measurements()
def quatdiff_in_euler_radians(self, quat_curr, quat_des):
quat_dist = T.quat_distance(quat_curr, quat_des)
return -T.mat2euler(T.quat2mat(quat_dist))
# Fetch linear (excluding z) and angular velocity of eef
def get_eef_velocity(self):
lin_v = self.robot._hand_vel[:-1]
ang_v = self.robot._hand_ang_vel
return np.append(lin_v, ang_v)
def get_lambda_dot(self, analytical=True):
if analytical:
# Fetch the derivative of the force as in equation (9.66) in chapter 9.4 of The Handbook of Robotics
return np.linalg.multi_dot([self.S_f_inv, self.K_dot, self.J_full, self.joint_vel])
return (self.z_force - self.prev_z_force) / self.control_timestep
# Fetch the psudoinverse of S_f or S_v as in equation (9.34) in chapter 9.3 of The Handbook of Robotics
def get_S_inv(self, S):
a = np.linalg.inv(np.linalg.multi_dot([S.T, self.C, S]))
return np.array(np.linalg.multi_dot([a, S.T, self.C]))
# Fetch K' as in equation (9.49) in chapter 9.3 of The Handbook of Robotics
def get_K_dot(self):
return np.linalg.multi_dot([self.S_f, self.S_f_inv, np.linalg.inv(self.C)])
# Calculate the error in position and orientation (in the subspace subject to motion control)
def get_delta_r(self, ori, goal_ori, p, p_d):
delta_pos = p_d - p[:2]
delta_ori = self.quatdiff_in_euler_radians(ori, goal_ori)
return np.append(delta_pos, delta_ori)
# Calculate f_lambda (part of equation 9.62) as in equation (9.65) in chapter 9.3 of The Handbook of Robotics
def calculate_f_lambda(self, f_d_ddot, f_d_dot, f_d):
lambda_dot = self.get_lambda_dot()
lambda_a = f_d_ddot
lambda_b = np.dot(self.K_Dlambda,(f_d_dot - lambda_dot))
lambda_c = np.dot(self.K_Plambda,(f_d - self.z_force))
return max(lambda_a + lambda_b + lambda_c, 0)
# Calculate alpha_v (part of equation 9.62) as on page 213 in chapter 9.3 of The Handbook of Robotics
def calculate_alpha_v(self, ori, r_d_ddot, r_d_dot, p, p_d):
delta_r = self.get_delta_r(ori, self.goal_ori, p, p_d)
return r_d_ddot + np.dot(self.K_Dr, r_d_dot - self.v) + np.dot(self.K_Pr, delta_r)
# Calculate alpha (part of equation 9.16) as in equation (9.62) in chapter 9.3 of The Handbook of Robotics
def calculate_alpha(self, alpha_v,f_lambda):
P_v = np.dot(self.S_v, self.S_v_inv)
C_dot = np.dot(np.identity(6) - P_v, self.C)
return np.dot(self.S_v, alpha_v) + f_lambda * np.dot(C_dot, self.S_f).flatten()
def set_goal(self, action):
# update position trajectory
prev_p_d = self.p_d
self.p_d = self.traj_pos[:-1]
prev_r_d_dot = self.r_d_dot
p_dot = np.subtract(self.p_d, prev_p_d) / self.control_timestep
ori_dot = np.array([0, 0, 0])
self.r_d_dot = np.append(p_dot, ori_dot)
self.r_d_ddot = np.subtract(self.r_d_dot, prev_r_d_dot) / self.control_timestep
# update force trajectory
        # f_d is held constant (set in __init__)
self.f_d_dot = 0
self.f_d_ddot = 0
def run_controller(self):
# Update state
self.update()
# eef measurements
#self.z_force = self.sim.data.cfrc_ext[self.probe_id][-1]
self.z_force = self.robot.ee_force[-1]
self.z_force_running_mean = 0.1 * self.z_force + (1 - 0.1) * self.z_force_running_mean
self.v = self.get_eef_velocity()
pos = self.ee_pos
ori = T.mat2quat(self.ee_ori_mat) # (x, y, z, w) quaternion
h_e = np.array([0, 0, self.z_force, 0, 0, 0])
# control law
alpha_v = self.calculate_alpha_v(ori, self.r_d_ddot, self.r_d_dot, pos, self.p_d)
f_lambda = self.calculate_f_lambda(self.f_d_ddot, self.f_d_dot, self.f_d)
alpha = self.calculate_alpha(alpha_v, -f_lambda)
cartesian_inertia = np.linalg.inv(np.linalg.multi_dot([self.J_full, np.linalg.inv(self.mass_matrix), self.J_full.T]))
# torque computations
self.external_torque = np.dot(self.J_full.T, h_e)
self.desired_torque = np.linalg.multi_dot([self.J_full.T ,cartesian_inertia, alpha])
self.torques = self.desired_torque + self.torque_compensation + self.external_torque
# update measurement
self.prev_z_force = self.z_force
# Always run superclass call for any cleanups at the end
super().run_controller()
return self.torques
def update_initial_joints(self, initial_joints):
# First, update from the superclass method
super().update_initial_joints(initial_joints)
# We also need to reset the goal in case the old goals were set to the initial configuration
self.reset_goal()
def reset_goal(self):
"""
Resets the goal to the current state of the robot
"""
self.p_d = np.array(self.ee_pos)[:-1]
@property
def control_limits(self):
"""
Returns the limits over this controller's action space, overrides the superclass property
2-tuple:
- (np.array) minimum action values
- (np.array) maximum action values
"""
low, high = self.input_min, self.input_max
return low, high
@property
def name(self):
return "HMFC" | 37.136842 | 125 | 0.572279 | 1,573 | 10,584 | 3.642085 | 0.17864 | 0.028975 | 0.031943 | 0.028626 | 0.259382 | 0.205446 | 0.16687 | 0.109618 | 0.099319 | 0.098621 | 0 | 0.039799 | 0.323413 | 10,584 | 285 | 126 | 37.136842 | 0.760229 | 0.238946 | 0 | 0.132075 | 0 | 0 | 0.000504 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0 | 0.018868 | 0.012579 | 0.220126 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb6dd4fa9e43ed5dfd1c00fb689d90505b9d1b0f | 1,556 | py | Python | test/e2e/test_router.py | uprush/cartpole-rl-remote | 4a6a8c30563c3db571939e5b4a99a9721de919bd | [
"MIT"
] | 24 | 2018-04-04T14:41:29.000Z | 2020-12-10T02:01:11.000Z | test/e2e/test_router.py | davsuacar/cartpole-rl-remote | 7111c9752cb663c1ef7a3815bd7c9f3c2d199ab9 | [
"MIT"
] | 8 | 2018-06-19T15:24:22.000Z | 2022-02-09T23:31:26.000Z | test/e2e/test_router.py | davsuacar/cartpole-rl-remote | 7111c9752cb663c1ef7a3815bd7c9f3c2d199ab9 | [
"MIT"
] | 2 | 2018-06-19T15:00:48.000Z | 2019-04-10T07:58:39.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import numpy as np
import json
import argparse
import sys
from cartpole.client.seldon.client import SeldonClient
def main():
    parser = argparse.ArgumentParser(description='Test the Seldon router by forcing traffic to one branch.')
parser.add_argument('--visdom-config', type=json.loads,
default=None,
help='The visdom server configuration to send metrics'
'. Example \'{"server": "http://localhost", "env": "run"}\''
'. By default there is not a visualizer.')
parser.add_argument('-api-server',
help='The seldon api server.')
parser.add_argument('-router-name',
help='Name of router.')
parser.add_argument('-pref-branch',
type=int,
help='The branch all traffic will be redirected to.')
parser.add_argument('--num-reqs', type=int,
default=100,
help='Number of requests.')
args = parser.parse_args()
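    # Fire num_reqs requests through the router, forcing all traffic onto pref_branch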
sclient = SeldonClient(args.api_server)
sclient.force_branch_router(
np.array([[0, 0, 1, 1]]),
pref_branch=args.pref_branch,
iters=args.num_reqs,
router_name=args.router_name,
vis_config=args.visdom_config
)
if __name__ == "__main__":
try:
main()
except Exception as ex:
print('Unexpected error: %s' % ex)
sys.exit(1)
sys.exit(0)
| 30.509804 | 89 | 0.56491 | 177 | 1,556 | 4.830508 | 0.514124 | 0.052632 | 0.099415 | 0.053801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013133 | 0.31491 | 1,556 | 50 | 90 | 31.12 | 0.788931 | 0.054627 | 0 | 0 | 0 | 0 | 0.226839 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.131579 | 0 | 0.157895 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb705bdb5ae0fcd0de35a0f02e003fcaf1ec2b63 | 2,857 | py | Python | habari/apps/crawl/tasks.py | ppolle/habari | 671b98c361ce593f708bc15f69dd3aa6fe72b128 | [
"MIT"
] | 3 | 2020-06-08T08:39:06.000Z | 2020-07-30T10:46:22.000Z | habari/apps/crawl/tasks.py | ppolle/habari | 671b98c361ce593f708bc15f69dd3aa6fe72b128 | [
"MIT"
] | 9 | 2021-03-19T11:18:58.000Z | 2022-02-10T15:48:35.000Z | habari/apps/crawl/tasks.py | ppolle/habari | 671b98c361ce593f708bc15f69dd3aa6fe72b128 | [
"MIT"
] | 1 | 2021-09-22T07:23:03.000Z | 2021-09-22T07:23:03.000Z | import re
from celery import shared_task
from django.utils import timezone
from habari.apps.crawl.models import NewsSource, Crawl, Article
from habari.apps.crawl.crawlers import (smcrawler, tscrawler, dncrawler2, dmcrawler,
eacrawler, bdcrawler, ctcrawler, secrawler)
@shared_task(autoretry_for=(Exception,))
def frequent_crawlers():
    crawlers = {
'DN': dncrawler2.DNCrawler,
'SM': smcrawler.SMCrawler,
'TS': tscrawler.TSCrawler,
'DM': dmcrawler.DMCrawler
}
    for crawler_class in crawlers.values():
        crawler = crawler_class()
        crawl = crawler.run()
@shared_task(autoretry_for=(Exception,))
def non_frequent_crawlers():
crawlers = {
'EA':eacrawler.EACrawler,
'CT':ctcrawler.CTCrawler,
'SE':secrawler.SECrawler
}
    for crawler_class in crawlers.values():
        crawler = crawler_class()
        crawl = crawler.run()
@shared_task(autoretry_for=(Exception,))
def bd_crawler():
bd = bdcrawler.BDCrawler
crawler = bd()
crawl = crawler.run()
@shared_task(autoretry_for=(Exception,))
def retry_failed_crawls():
'''
    Retry crawls that have an Error status or are stuck in a Crawling status
'''
crawler_classes = {
'DN': dncrawler2.DNCrawler,
'SM': smcrawler.SMCrawler,
'TS': tscrawler.TSCrawler,
'DM': dmcrawler.DMCrawler,
'EA': eacrawler.EACrawler,
'CT': ctcrawler.CTCrawler,
'BD': bdcrawler.BDCrawler,
'SE': secrawler.SECrawler
}
a_while_ago = timezone.now() - timezone.timedelta(minutes=45)
sources = NewsSource.objects.all()
for source in sources:
if source.crawl_set.last().status == Crawl.StatusType.Error:
crawl_class = crawler_classes.get(source.slug)
crawl = crawl_class().run()
if source.crawl_set.last().status != Crawl.StatusType.Good and source.crawl_set.last().crawl_time <= a_while_ago:
crawl_class = crawler_classes.get(source.slug)
crawl = crawl_class().run()
@shared_task(autoretry_for=(Exception,))
def sanitize_author_lists_with_empty_strings():
'''
    Fix author lists that were crawled with empty strings; these cause author URL errors on Standard Media pages
'''
crawler_classes = {
'DN': dncrawler2.DNCrawler,
'SM': smcrawler.SMCrawler,
'TS': tscrawler.TSCrawler,
'DM': dmcrawler.DMCrawler,
'EA': eacrawler.EACrawler,
'CT': ctcrawler.CTCrawler,
'BD': bdcrawler.BDCrawler,
'SE': secrawler.SECrawler
}
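    # These crawlers take a bare URL string; the rest expect a dict payload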
non_dictionary_classes = ['DN','TS','BD','SE']
articles = Article.objects.filter(author__contains=[''])
for article in articles:
slug = article.news_source.slug
article_url = {'article_url':article.article_url}
crawler_class = crawler_classes.get(slug)
if slug in non_dictionary_classes:
update_details = crawler_class().update_article_details(article_url['article_url'])
else:
update_details = crawler_class().update_article_details(article_url)
article.author = update_details['author']
article.save()
| 30.073684 | 133 | 0.729086 | 357 | 2,857 | 5.661064 | 0.285714 | 0.029688 | 0.047006 | 0.054429 | 0.533399 | 0.533399 | 0.496784 | 0.47996 | 0.439386 | 0.415141 | 0 | 0.002449 | 0.142457 | 2,857 | 94 | 134 | 30.393617 | 0.822449 | 0.070004 | 0 | 0.545455 | 0 | 0 | 0.031084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064935 | false | 0 | 0.064935 | 0 | 0.12987 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb723f976a879e3ec071aba0d2fd2ea33c25c14b | 1,390 | py | Python | learntools/ml_intermediate/ex1.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 359 | 2018-03-23T15:57:52.000Z | 2022-03-25T21:56:28.000Z | learntools/ml_intermediate/ex1.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 84 | 2018-06-14T00:06:52.000Z | 2022-02-08T17:25:54.000Z | learntools/ml_intermediate/ex1.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 213 | 2018-05-02T19:06:31.000Z | 2022-03-20T15:40:34.000Z | import os
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from learntools.core import *
class BestModel(CodingProblem):
_var = 'best_model'
_hint = ("Which model gets the lowest MAE score?")
_solution = CS(
"""best_model = model_3
""")
def check(self, best_model):
assert type(best_model) == RandomForestRegressor, \
("Set the value of `best_model` to one of `model_1`, `model_2`, "
"`model_3`, `model_4`, or `model_5`.")
params = best_model.get_params()
assert params['n_estimators'] == 100 and params['criterion'] == 'mae' \
and params['random_state'] == 0, \
("Set the value of `best_model` to one of `model_1`, `model_2`, "
"`model_3`, `model_4`, or `model_5`. Select the model that gets the lowest MAE.")
class Predictions(CodingProblem):
_var = 'my_model'
_hint = ("You need only set `my_model` to a random forest model. You are welcome (but "
"not required) to choose one of the five models above.")
_solution = CS(
"""# Define a model
my_model = best_model
""")
def check(self, my_model):
assert type(my_model) == RandomForestRegressor, \
"Please change `my_model` to a random forest model."
qvars = bind_exercises(globals(), [
BestModel,
Predictions,
],
var_format='step_{n}',
)
__all__ = list(qvars)
| 30.217391 | 92 | 0.640288 | 185 | 1,390 | 4.589189 | 0.437838 | 0.084806 | 0.030624 | 0.037691 | 0.207303 | 0.207303 | 0.207303 | 0.143698 | 0.143698 | 0.143698 | 0 | 0.014098 | 0.234532 | 1,390 | 45 | 93 | 30.888889 | 0.783835 | 0 | 0 | 0.117647 | 0 | 0 | 0.39302 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb72e6556a8c2f7a8910faae3d3ac84a2578fd73 | 3,834 | py | Python | ntou/kobe/views.py | asas1asas200/ntou2.0 | 7f5f7ae6d09c998081c5621c77b65a5c85939284 | [
"MIT"
] | 3 | 2020-03-20T02:25:46.000Z | 2020-07-19T06:18:27.000Z | ntou/kobe/views.py | asas1asas200/ntou2.0 | 7f5f7ae6d09c998081c5621c77b65a5c85939284 | [
"MIT"
] | 16 | 2020-03-16T08:10:25.000Z | 2022-01-13T02:21:50.000Z | ntou/kobe/views.py | asas1asas200/ntou2.0 | 7f5f7ae6d09c998081c5621c77b65a5c85939284 | [
"MIT"
] | 1 | 2020-03-15T14:05:00.000Z | 2020-03-15T14:05:00.000Z | from django.shortcuts import render, redirect, get_object_or_404
from .forms import postForm, registerForm, deleteForm
from .models import KobePost, registrationReviewer
from .autoCheck import checkPost
from .facebookApi import FbManger
from .postDeleteTokenCreater import createToken
from django.contrib import messages
def home(request):
return render(request, 'home.html', {})
def coc(request):
return render(request, 'coc.html', {})
def about(request):
return render(request, 'about.html', {})
def contribution(request):
return render(request, 'contribution.html', {})
def registrationSuccess(request):
return render(request, 'registrationSuccess.html', {})
def postlist(request):
post = KobePost.objects.all()
return render(request, 'postlist.html', {'post': post})
def registration(request):
if request.method == "POST":
form = registerForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.registerIpAddress = str(getIp(request))
if post.agreeCoc: # check coc policy
post.save()
return redirect('/registrationSuccess/')
else:
                messages.warning(request, '請先閱讀並同意行為準則')  # "Please read and agree to the code of conduct first"
else:
form = registerForm()
return render(request, 'registration.html', {'form': form})
def postSystem(request):
if request.method == "POST":
form = postForm(request.POST, request.FILES)
if form.is_valid():
post = form.save(commit=False)
post.ipAddress = str(getIp(request))
post.token = str(createToken.token())
if post.cocPolicy: # check coc policy
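                # Run auto-moderation; approved posts are published to Facebook immediately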
                if checkPost.check(post.content):  # auto-moderation passed
if post.photo:
post.check = 'True'
post.save()
poster = FbManger(post, post.content, post.postTime) #post to facebook
post.postId = poster.imgPoster(post.photo)
post.checkPosted = 'True'
post.save()
return redirect('/postsuccess/' + str(post.id) + '/')
else:
post.check = 'True'
post.save()
poster = FbManger(post, post.content, post.postTime) #post to facebook
post.postId = poster.poster()
post.checkPosted = 'True'
post.save()
return redirect('/postsuccess/' + str(post.id) + '/')
else:
post.save()
return redirect('/')
else:
                messages.warning(request, '請先閱讀並同意行為準則')  # "Please read and agree to the code of conduct first"
else:
form = postForm()
return render(request, 'post.html', {'form': form})
def postDelete(post_token):
    post = KobePost.objects.get(token=post_token)  # look up the post by its delete token
    poster = FbManger(post, post.content, post.postTime)
    poster.postDeleter(post.postId)  # remove the post from Facebook
def deletePostSystem(request):
if request.method == "POST":
form = deleteForm(request.POST)
if form.is_valid():
deleteToken = form.cleaned_data['deleteToken']
postDelete(deleteToken)
return redirect('/')
else:
form = deleteForm()
return render(request, 'deletepost.html', {'form': form})
def postSuccess(request, id):
post = KobePost.objects.get(id = id)
return render(request, 'postSuccess.html', {'post': post})
def getIp(request):
xForwardedFor = request.META.get('HTTP_X_FORWARDED_FOR')
if xForwardedFor:
ip = xForwardedFor.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
| 36.169811 | 94 | 0.584246 | 387 | 3,834 | 5.754522 | 0.248062 | 0.053884 | 0.085317 | 0.058374 | 0.293669 | 0.293669 | 0.237539 | 0.178716 | 0.178716 | 0.178716 | 0 | 0.001491 | 0.300469 | 3,834 | 105 | 95 | 36.514286 | 0.828859 | 0.029473 | 0 | 0.376344 | 0 | 0 | 0.081336 | 0.01212 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.086022 | 0.053763 | 0.387097 | 0.010753 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb74dadff1c9a23928431c6e94223737bbb5a3d3 | 3,741 | py | Python | cml/items.py | DrivePixels/django-cml | eba7b399f234037c33ee975f8fe7162b0b8e1b6f | [
"BSD-3-Clause"
] | 22 | 2016-02-25T08:09:58.000Z | 2022-02-02T07:25:57.000Z | cml/items.py | DrivePixels/django-cml | eba7b399f234037c33ee975f8fe7162b0b8e1b6f | [
"BSD-3-Clause"
] | 9 | 2016-03-02T21:47:14.000Z | 2020-02-24T05:33:02.000Z | cml/items.py | DrivePixels/django-cml | eba7b399f234037c33ee975f8fe7162b0b8e1b6f | [
"BSD-3-Clause"
] | 21 | 2016-03-12T10:12:00.000Z | 2022-01-31T18:54:06.000Z | # -*- coding: utf-8 -
from __future__ import absolute_import
from decimal import Decimal
from datetime import datetime
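# Element tags handled by the CommerceML import pipeline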
PROCESSED_ITEMS = ('Group', 'Property', 'PropertyVariant', 'Sku', 'Tax', 'Product', 'Offer', 'Order')
class BaseItem(object):
def __init__(self, xml_element=None):
self.xml_element = xml_element
class Group(BaseItem):
def __init__(self, *args, **kwargs):
super(Group, self).__init__(*args, **kwargs)
self.id = u''
self.name = u''
self.groups = []
class Property(BaseItem):
def __init__(self, *args, **kwargs):
super(Property, self).__init__(*args, **kwargs)
self.id = u''
self.name = u''
self.value_type = u''
self.for_products = False
class PropertyVariant(BaseItem):
def __init__(self, *args, **kwargs):
super(PropertyVariant, self).__init__(*args, **kwargs)
self.id = u''
self.value = u''
self.property_id = u''
class Sku(BaseItem):
def __init__(self, *args, **kwargs):
super(Sku, self).__init__(*args, **kwargs)
self.id = u''
self.name = u''
self.name_full = u''
self.international_abbr = u''
class Tax(BaseItem):
def __init__(self, *args, **kwargs):
super(Tax, self).__init__(*args, **kwargs)
self.name = u''
self.value = Decimal()
class AdditionalField(BaseItem):
def __init__(self, *args, **kwargs):
super(AdditionalField, self).__init__(*args, **kwargs)
self.name = u''
self.value = u''
class Product(BaseItem):
def __init__(self, *args, **kwargs):
super(Product, self).__init__(*args, **kwargs)
self.id = u''
self.name = u''
self.sku_id = u''
self.group_ids = []
self.properties = []
self.tax_name = u''
self.image_path = u''
self.additional_fields = []
class PriceType(BaseItem):
def __init__(self, *args, **kwargs):
super(PriceType, self).__init__(*args, **kwargs)
self.id = u''
self.name = u''
self.currency = u''
self.tax_name = u''
self.tax_in_sum = False
class Price(BaseItem):
def __init__(self, *args, **kwargs):
super(Price, self).__init__(*args, **kwargs)
self.representation = u''
self.price_type_id = u''
self.price_for_sku = Decimal()
self.currency_name = u''
self.sku_name = u''
self.sku_ratio = Decimal()
class Offer(BaseItem):
def __init__(self, *args, **kwargs):
super(Offer, self).__init__(*args, **kwargs)
self.id = u''
self.name = u''
self.sku_id = u''
self.prices = []
class Client(BaseItem):
def __init__(self):
self.id = u''
self.name = u''
        self.role = u'Покупатель'  # "Buyer" (CommerceML role name, kept in Russian for the exchange format)
self.full_name = u''
self.first_name = u''
self.last_name = u''
self.address = u''
class OrderItem(BaseItem):
def __init__(self):
self.id = u''
self.name = u''
self.sku = Sku(None)
self.price = Decimal()
self.quant = Decimal()
self.sum = Decimal()
class Order(BaseItem):
def __init__(self, *args, **kwargs):
super(Order, self).__init__(*args, **kwargs)
self.id = u''
self.number = u''
self.date = datetime.now().date()
self.currency_name = u''
self.currency_rate = Decimal()
        self.operation = u'Заказ товара'  # "Goods order"
        self.role = u'Продавец'  # "Seller"
self.sum = Decimal()
self.client = Client()
self.time = datetime.now().time()
self.comment = u''
self.items = []
self.additional_fields = []
| 23.980769 | 120 | 0.562416 | 437 | 3,741 | 4.503432 | 0.173913 | 0.099085 | 0.082317 | 0.125508 | 0.474594 | 0.423272 | 0.423272 | 0.233232 | 0.202236 | 0.16565 | 0 | 0.000375 | 0.287624 | 3,741 | 155 | 121 | 24.135484 | 0.738086 | 0.005079 | 0 | 0.405405 | 0 | 0 | 0.025806 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126126 | false | 0 | 0.027027 | 0 | 0.279279 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb75ae95c3e5134443dc193cf504095490baedbf | 1,216 | py | Python | 命名实体识别/hmm/model_utils.py | zhangdddong/beautifulNLP | 295987cc03c9afb52008917d9d141fdb2eb66ba5 | [
"Apache-2.0"
] | 10 | 2019-12-25T12:52:39.000Z | 2021-06-22T09:02:34.000Z | 命名实体识别/hmm/model_utils.py | zhangdddong/beautifulNLP | 295987cc03c9afb52008917d9d141fdb2eb66ba5 | [
"Apache-2.0"
] | null | null | null | 命名实体识别/hmm/model_utils.py | zhangdddong/beautifulNLP | 295987cc03c9afb52008917d9d141fdb2eb66ba5 | [
"Apache-2.0"
] | 4 | 2020-09-28T04:17:11.000Z | 2021-06-03T06:16:02.000Z | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
__author__ = 'zd'
import numpy as np
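# Smoothed log: avoid -inf when a probability is exactly zero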
def log(v):
if v == 0:
return np.log(v + 0.00000001)
return np.log(v)
def get_matrix(word2id, tag2id, id2tag, file_path):
"""
:param word2id:
:param tag2id:
:param id2tag:
:param file_path:
:return:
"""
    num_words = len(word2id)  # number of distinct words in the training set
    num_tags = len(tag2id)  # number of distinct tags in the training set
    pi = np.zeros(num_tags)  # initial state probability vector
    A = np.zeros((num_tags, num_words))  # emission probability matrix
    B = np.zeros((num_tags, num_tags))  # transition probability matrix
    # Count the occurrences that back each matrix entry
prev_tag = ''
for line in open(file_path, encoding='UTF-8'):
items = line.split(' ')
wordId, tagId = word2id[items[0]], tag2id[items[1].rstrip()]
if prev_tag == '':
pi[tagId] += 1
A[tagId][wordId] += 1
else:
A[tagId][wordId] += 1
B[tag2id[prev_tag]][tagId] += 1
if items[0] == '.':
prev_tag = ''
else:
prev_tag = id2tag[tagId]
    # normalize: turn the raw counts into probabilities
pi = pi / sum(pi)
for i in range(num_tags):
A[i] /= sum(A[i])
B[i] /= sum(B[i])
    # at this point all model parameters are computed: pi, A, B
return pi, A, B
| 23.384615 | 68 | 0.527961 | 164 | 1,216 | 3.786585 | 0.378049 | 0.067633 | 0.048309 | 0.067633 | 0.05475 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038554 | 0.317434 | 1,216 | 51 | 69 | 23.843137 | 0.709639 | 0.179276 | 0 | 0.193548 | 0 | 0 | 0.009404 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.032258 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb77aed594b4e453552d28f4c6e0beaa38475f69 | 5,463 | py | Python | timesheet.py | ZJAllen/timesheet | ebe847b0fc6ee6ba22a35233d52ce9f0b82695e9 | [
"MIT"
] | null | null | null | timesheet.py | ZJAllen/timesheet | ebe847b0fc6ee6ba22a35233d52ce9f0b82695e9 | [
"MIT"
] | null | null | null | timesheet.py | ZJAllen/timesheet | ebe847b0fc6ee6ba22a35233d52ce9f0b82695e9 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
import calendar
# import appex
# import ui
import os
class Filenames():
timesheet = 'timesheet.txt'
monday = 'monday.txt'
tuesday = 'tuesday.txt'
wednesday = 'wednesday.txt'
thursday = 'thursday.txt'
friday = 'friday.txt'
# Create instance of Filenames class
files = Filenames()
class Clock():
timeIn = None
timeOut = None
lunch = 0
currentWeekday = ''
currentDate = ''
monday = 0
tuesday = 0
wednesday = 0
thursday = 0
friday = 0
def setDayMinutes(self, day, mins):
if day == 'monday':
self.monday = mins
if day == 'tuesday':
self.tuesday = mins
if day == 'wednesday':
self.wednesday = mins
if day == 'thursday':
self.thursday = mins
if day == 'friday':
self.friday = mins
def getWeekTotal(self):
return self.monday + self.tuesday + self.wednesday + self.thursday + self.friday
# Create instance of Clock class
clock = Clock()
### Function Definitions ###
# Gets current total week time from all individual time sheets.
def getTotalWeekTime(today_int):
    for i in range(today_int, -1, -1):
        weekday = calendar.day_name[i].lower()
        try:
            with open(f'{weekday}.txt', 'r') as f:
                dayMinutes = int(f.readlines()[-1].strip().replace('Total: ', ''))
        except (FileNotFoundError, ValueError):
            dayMinutes = 0  # no completed entry for that day
        clock.setDayMinutes(weekday, dayMinutes)
return clock.getWeekTotal()
# Convert clock in and out times into hours and minutes worked
def parseWorkTime():
workDuration = clock.timeOut - clock.timeIn - timedelta(minutes=clock.lunch)
workHours = int(workDuration.seconds/3600)
workMinutes = int((workDuration.seconds/60) - (workHours*60))
workDurationMinutes = (workHours * 60) + workMinutes
return workDurationMinutes, workHours, workMinutes
# Reset the clock in/out time at the end of the day.
# This is used for pseudo-error checking
def resetTime():
clock.timeIn = None
clock.timeOut = None
def resetWeek():
clock.monday = 0
clock.tuesday = 0
clock.wednesday = 0
clock.thursday = 0
clock.friday = 0
    for i in range(5):  # remove all five daily files, Monday through Friday
weekday = calendar.day_name[i].lower()
os.remove(f'{weekday}.txt')
def writeToFile(filename, msg):
with open(filename, 'a+') as f:
f.write(msg)
f.write('\n')
def getFileName(day_int):
return f'{calendar.day_name[day_int].lower()}.txt'
def getLastLine(filename):
with open(filename, 'r') as f:
lastLine = f.readlines()[-1].strip()
return lastLine
def existClockIn():
try:
weekday = datetime.now().weekday()
# weekday = datetime(2019, 12, 6, 7, 30).weekday()
with open(f'{getFileName(weekday)}', 'r') as f:
f.readlines()[0]
return True
    except (FileNotFoundError, IndexError):
        return False
def clockIn():
# clock.timeIn = datetime(2019, 12, 6, 7, 30)
clock.timeIn = datetime.now()
clock.currentWeekday = calendar.day_name[clock.timeIn.weekday()]
clock.currentDate = f'{clock.timeIn.month}/{clock.timeIn.day}/{clock.timeIn.year}'
writeToFile(files.timesheet, f'{clock.currentWeekday} {clock.currentDate}')
    clockString = f'Clock In: {clock.timeIn.hour}:{clock.timeIn.minute:02d}'
writeToFile(files.timesheet, clockString)
writeToFile(getFileName(clock.timeIn.weekday()), clockString)
return clockString
def getClockInTime():
weekday = datetime.now().weekday()
# weekday = datetime(2019, 12, 6, 7, 30).weekday()
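    # Rebuild today's clock-in datetime from the saved "Clock In: H:MM" line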
clockTime = getLastLine(getFileName(weekday)).replace('Clock In: ', '').split(':')
year = datetime.now().year
# year = 2019
month = datetime.now().month
# month = 12
day = datetime.now().day
# day = 2
hour = int(clockTime[0])
minute = int(clockTime[1])
clock.timeIn = datetime(year, month, day, hour, minute)
clock.currentWeekday = calendar.day_name[clock.timeIn.weekday()]
def clockOut():
clock.timeOut = datetime.now()
# clock.timeOut = datetime(2019, 12, 6, 16, 15)
    clockString = f'Clock Out: {clock.timeOut.hour}:{clock.timeOut.minute:02d}'
writeToFile(files.timesheet, clockString)
writeToFile(getFileName(clock.timeOut.weekday()), clockString)
(totalMinutes, workHours, workMinutes) = parseWorkTime()
    writeToFile(files.timesheet, f'Total: {totalMinutes // 60}:{totalMinutes % 60:02d}')
writeToFile(getFileName(clock.timeOut.weekday()), f'Total: {totalMinutes}')
#clock.setDayMinutes(clock.currentWeekday.lower(), totalMinutes)
weekTotal = getTotalWeekTime(clock.timeOut.weekday())
    writeToFile(files.timesheet, f'Total Week Time: {weekTotal // 60}:{weekTotal % 60:02d}\n')
if clock.timeOut.weekday() == 4:
resetWeek()
resetTime()
return clockString
def processClock(msg):
if msg == 'in':
if not existClockIn():
clockString = clockIn()
else:
clockString = 'You are already clocked in!'
if msg == 'out':
if (clock.timeIn is not None) and (clock.timeOut is None):
clockString = clockOut()
elif existClockIn():
getClockInTime()
clockString = clockOut()
else:
clockString = 'You are not yet clocked in!'
return clockString
def main():
print(processClock('in'))
print(processClock('out'))
if __name__ == '__main__':
main() | 26.014286 | 95 | 0.631887 | 629 | 5,463 | 5.461049 | 0.228935 | 0.048035 | 0.0131 | 0.017467 | 0.159825 | 0.125764 | 0.120524 | 0.103639 | 0.033188 | 0.033188 | 0 | 0.021128 | 0.237598 | 5,463 | 210 | 96 | 26.014286 | 0.803601 | 0.110562 | 0 | 0.113636 | 0 | 0 | 0.129626 | 0.06471 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.022727 | 0.015152 | 0.348485 | 0.015152 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb78e07f4d675ee939bd0e8ed2ba068efa19f7f3 | 814 | py | Python | 4_Eventos/1_actionChains.py | Gabriel-limadev/selenium_study_with_python | 593a4df74cda624b8548d6e7eca4fe2685fd93f2 | [
"MIT"
] | null | null | null | 4_Eventos/1_actionChains.py | Gabriel-limadev/selenium_study_with_python | 593a4df74cda624b8548d6e7eca4fe2685fd93f2 | [
"MIT"
] | null | null | null | 4_Eventos/1_actionChains.py | Gabriel-limadev/selenium_study_with_python | 593a4df74cda624b8548d6e7eca4fe2685fd93f2 | [
"MIT"
] | null | null | null | from time import sleep
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
# Usando ActionChains
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
# Configurando a página
url = 'https://selenium.dunossauro.live/aula_08_a'
b = Chrome()
b.get(url)
b.maximize_window()
text = 'Gabriel'
# Hi-level
element = b.find_element(By.NAME, 'texto')
# text.send_keys(text)
# Low-level
# Configurando ActionChains
ac = ActionChains(b)
ac.move_to_element(element)
ac.click(element)
def digita_com(key):
for l in text:
ac.key_down(key)
ac.key_down(l)
ac.key_up(l)
ac.key_up(key)
digita_com(Keys.SHIFT)
digita_com((Keys.UP))
# perform é o executador de tudo que envolve actionChains
ac.perform() | 23.257143 | 64 | 0.744472 | 125 | 814 | 4.728 | 0.472 | 0.081218 | 0.142132 | 0.137056 | 0.13198 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002886 | 0.148649 | 814 | 35 | 65 | 23.257143 | 0.849928 | 0.201474 | 0 | 0 | 0 | 0 | 0.083981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.217391 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |