hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fcff3d85fe99542b6a26eb81efd2192cd0645652 | 622 | py | Python | src/mono_test.py | hb-jones/rp1-ros | afc2888f5da7a2b4cc6d9e07ce017f26996e58c3 | [
"Apache-2.0"
] | null | null | null | src/mono_test.py | hb-jones/rp1-ros | afc2888f5da7a2b4cc6d9e07ce017f26996e58c3 | [
"Apache-2.0"
] | null | null | null | src/mono_test.py | hb-jones/rp1-ros | afc2888f5da7a2b4cc6d9e07ce017f26996e58c3 | [
"Apache-2.0"
] | null | null | null | import time, cv2, json
from vision.monocular import Monocular, test_publisher_pixel_coordinate


def test_func(var):
    return


def main():
    cam = Monocular(test_publisher_pixel_coordinate)
    #cam = Monocular(test_func)
    cam.start_loop()
    cam.debug_mode = True
    cam.debug_type = "cont"
    time.sleep(3)
    while True:
        cv2.imshow("Colour Image", cam.debug_frame_output)
        # Press q if you want to end the loop
        time.sleep(0.1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.loop_running = False


if __name__ == "__main__":
    main()
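# Editor's sketch (not part of the original file): cam.debug_frame_output is
# presumably filled asynchronously by the loop started in start_loop(), so a
# slightly more defensive variant of the display step would guard against the
# frame still being None before the first capture:
#
#     frame = cam.debug_frame_output
#     if frame is not None:
#         cv2.imshow("Colour Image", frame)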
| 23.923077 | 72 | 0.622186 | 83 | 622 | 4.39759 | 0.590361 | 0.106849 | 0.120548 | 0.147945 | 0.20274 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017817 | 0.278135 | 622 | 25 | 73 | 24.88 | 0.7951 | 0.099678 | 0 | 0 | 0 | 0 | 0.046904 | 0 | 0 | 0 | 0.007505 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0.055556 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcffe22efcddd524aff30bbd278fd51ef10be4d7 | 1,301 | py | Python | modules/input_output.py | hhandika/journal-unlocker | b35133565c991688c726643949a102beefdedfc8 | [
"MIT"
] | 1 | 2020-11-29T08:10:16.000Z | 2020-11-29T08:10:16.000Z | modules/input_output.py | hhandika/journal-unlocker | b35133565c991688c726643949a102beefdedfc8 | [
"MIT"
] | null | null | null | modules/input_output.py | hhandika/journal-unlocker | b35133565c991688c726643949a102beefdedfc8 | [
"MIT"
] | null | null | null | import webbrowser as wb

from modules import converter


class UserInput(converter.LinkConverter):
    def __init__(self, libs, url):
        self.libs = libs
        self.url = url

    def _invalid_input(self):
        print(f"\x1b[0;31mERROR: \x1b[0m{self.url} is invalid URL\n")
        print("Please, enter a valid url or doi name.")
        print("The url should start with 'http'")
        print("If it is a doi name, the prefix should start with '10.'")

    def _convert_doi(self):
        doi = 'https://doi.org/'
        link = doi + self.url
        access_link = converter.LinkConverter(link, self.libs)
        wb.open(access_link.convert_url())

    def _convert_http(self):
        access_link = converter.LinkConverter(self.url, self.libs)
        wb.open(access_link.convert_url())

    def _convert_url(self):
        if self.url.startswith('10'):
            self._convert_doi()
        elif self.url.startswith('http'):
            self._convert_http()
        else:
            self._invalid_input()
            exit()

    def _is_empty_input(self):
        self.url = input("Enter url/doi:")
        self._convert_url()

    def check_user_input(self):
        if self.url is None:
            self._is_empty_input()
        else:
            self._convert_url()
| 30.255814 | 72 | 0.597233 | 169 | 1,301 | 4.384615 | 0.325444 | 0.075574 | 0.052632 | 0.08637 | 0.118758 | 0.118758 | 0.118758 | 0.118758 | 0.118758 | 0.118758 | 0 | 0.010823 | 0.289777 | 1,301 | 43 | 73 | 30.255814 | 0.791126 | 0 | 0 | 0.171429 | 0 | 0.028571 | 0.162826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.057143 | 0 | 0.285714 | 0.114286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e011925c6302c4ad1c6ff5f9211ec00aafbb0c5 | 959 | py | Python | tests/file_iterators/test_trillian_xml.py | mtlynch/chat_unifier | 7449ca4f2dd48d8b76fc29e150643076dd0b3334 | [
"MIT"
] | 2 | 2018-10-16T18:39:06.000Z | 2019-01-22T01:38:09.000Z | tests/file_iterators/test_trillian_xml.py | mtlynch/chat_unifier | 7449ca4f2dd48d8b76fc29e150643076dd0b3334 | [
"MIT"
] | 2 | 2018-10-19T00:00:21.000Z | 2018-10-19T17:15:25.000Z | tests/file_iterators/test_trillian_xml.py | mtlynch/chat_unifier | 7449ca4f2dd48d8b76fc29e150643076dd0b3334 | [
"MIT"
] | null | null | null | import os
import unittest

import mock

from chat_unifier.file_iterators import trillian_xml


class TrillianXmlFileIteratorTest(unittest.TestCase):

    def setUp(self):
        self.maxDiff = None

    def test_picks_correct_trillian_log_files(self):
        with mock.patch.object(os, 'walk') as mock_walk:
            mock_walk.return_value = [
                ('/logs', ('AIM', 'junk'), ('README.txt',)),
                ('/logs/AIM', ('Query',), ()),
                ('/logs/AIM/Query', (),
                 ('DummyBuddy123.xml', 'DummyBuddy123-assets.xml',
                  'DummyBuddy123.log', 'DummyBuddy234.xml',
                  'DummyBuddy234-assets.xml', 'DummyBuddy234.log')),
                ('/logs/junk', (), ('junk.png',)),
            ]
            self.assertEqual([
                '/logs/AIM/Query/DummyBuddy123.xml',
                '/logs/AIM/Query/DummyBuddy234.xml'
            ], [f for f in trillian_xml.iterate_files('/logs')])
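# Editor's note (not part of the original file): the case above can presumably
# be run with the standard library runner, e.g.:
#
#     python -m unittest tests.file_iterators.test_trillian_xml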
| 33.068966 | 68 | 0.552659 | 95 | 959 | 5.442105 | 0.484211 | 0.067698 | 0.092843 | 0.096712 | 0.108317 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035451 | 0.294056 | 959 | 28 | 69 | 34.25 | 0.728213 | 0 | 0 | 0 | 0 | 0 | 0.271116 | 0.118874 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e037702ba339edd20a07331a6e5b456dd7810a0 | 952 | py | Python | SpriteSheetPacker/packer.py | ithilelda/ModLoaderLite | 14899f8d1a406563600c2abe76ac1a2c58c6135d | [
"MIT"
] | 1 | 2022-01-19T05:16:02.000Z | 2022-01-19T05:16:02.000Z | SpriteSheetPacker/packer.py | ithilelda/ModLoaderLite | 14899f8d1a406563600c2abe76ac1a2c58c6135d | [
"MIT"
] | null | null | null | SpriteSheetPacker/packer.py | ithilelda/ModLoaderLite | 14899f8d1a406563600c2abe76ac1a2c58c6135d | [
"MIT"
] | null | null | null | import os
import math

from PIL import Image


def pack_directory(path):
    def name_comparer(entry):
        return int(entry.name.strip('image .png'))

    files = [e for e in os.scandir(path) if e.name.endswith('png') and not e.name.startswith('output')]
    files.reverse()
    images = [Image.open(e.path) for e in files]
    x = max([i.size[0] for i in images])
    y = max([i.size[1] for i in images])
    column = 32
    row = math.ceil(len(images) / column)
    output = Image.new('RGBA', (x * column, y * row))
    count = 0
    for i in images:
        c = count % column
        r = count // column
        x_center = c * x + x // 2
        y_center = r * y + y // 2
        width = i.size[0]
        height = i.size[1]
        output.paste(i, (x_center - width // 2, y_center - height // 2))
        count += 1
    output_name = os.path.join(path, 'output.png')
    output.save(output_name)


path = r'E:\export\fuwen'
pack_directory(path)
| 28.848485 | 103 | 0.582983 | 152 | 952 | 3.592105 | 0.375 | 0.03663 | 0.032967 | 0.065934 | 0.047619 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017291 | 0.271008 | 952 | 33 | 104 | 28.848485 | 0.769452 | 0 | 0 | 0 | 0 | 0 | 0.050367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.107143 | 0.035714 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e047aa179f50c2fad3a719fb925ab7747594f6a | 9,184 | py | Python | basedir/__init__.py | fenhl/python-xdg-basedir | 17032de6c9728ddba5c40a983c50088d69dc631e | [
"MIT"
] | null | null | null | basedir/__init__.py | fenhl/python-xdg-basedir | 17032de6c9728ddba5c40a983c50088d69dc631e | [
"MIT"
] | null | null | null | basedir/__init__.py | fenhl/python-xdg-basedir | 17032de6c9728ddba5c40a983c50088d69dc631e | [
"MIT"
] | null | null | null | import collections.abc
import json
import os
import os.path
import pathlib
import subprocess


def parse_version_string():
    path = pathlib.Path(__file__).resolve().parent # go up one level, from repo/basedir.py to repo, where README.md is located
    try:
        version = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], cwd=str(path)).decode('utf-8').strip('\n')
        if version == 'master':
            try:
                with (path / 'README.md').open() as readme:
                    for line in readme.read().splitlines():
                        if line.startswith('This is `python-xdg-basedir` version '):
                            return line.split(' ')[4]
            except:
                pass
        return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=str(path)).decode('utf-8').strip('\n')
    except:
        pass

__version__ = parse_version_string()


class BaseDirFile(collections.abc.Sequence):
    def __init__(self, paths, filename, flags='r'):
        self.paths = [pathlib.Path(p) for p in paths]
        self.filename = filename
        self.flags = 'r'

    def __enter__(self):
        self.fobj = (self.path / self.filename).open(self.flags)
        return self.fobj

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            self.fobj.close()
            self.fobj = None
        else:
            try:
                self.fobj.close()
            finally:
                pass
        return False

    def __getitem__(self, value):
        if isinstance(value, slice):
            return [path / self.filename for path in self.paths[value]]
        else:
            return self.paths[value] / self.filename

    def __iter__(self):
        for path in self.paths:
            yield path / self.filename

    def __len__(self):
        return len(self.paths)

    def __str__(self):
        return ':'.join(str(path / self.filename) for path in self.paths)

    def lazy_json(self, existing_only=False, readable_only=False, writeable_only=False, default=None, *, init=False):
        """Return a lazyjson object representing the file(s). Requires the lazyjson module.

        Optional arguments:
        existing_only -- If true, exclude files from the multifile which don't exist at the time of the call. Defaults to False.
        readable_only -- If true, exclude files from the multifile for which opening in read mode fails at the time of the call. Defaults to False.
        writeable_only -- If true, exclude files from the multifile for which opening in write mode fails at the time of the call. Defaults to False.
        default -- A JSON-encodable Python object which is appended to the end of the multifile as a lazyjson.PythonFile, and can be used to provide default values for config files. Defaults to None.

        Keyword-only arguments:
        init -- If true, create the file on the first path if none of the files exists, and write the “default” argument to it. Defaults to False.

        Returns:
        A lazyjson.MultiFile created from the paths of this file.

        Raises:
        ImportError for lazyjson.
        """
        import lazyjson
        paths = []
        for path in self.paths:
            if existing_only and not (path / self.filename).exists():
                continue
            if readable_only:
                try:
                    (path / self.filename).open().close()
                except IOError:
                    continue
            if writeable_only:
                try:
                    (path / self.filename).open('a').close()
                except IOError:
                    continue
            paths.append(path / self.filename)
        if init and not any((path / self.filename).exists() for path in self.paths):
            for path in self.paths:
                try:
                    path.mkdir(parents=True, exist_ok=True)
                    with (path / self.filename).open('w') as f:
                        json.dump(default, f, indent=4, sort_keys=True)
                        print(file=f)
                except IOError:
                    continue
                else:
                    break
        paths.append(lazyjson.PythonFile(default))
        return lazyjson.MultiFile(*paths)

    def json(self, base=None):
        def patch_json(base, new):
            new_json = json.load(new)
            if type(new_json) is dict:
                if type(base) is not dict:
                    return new_json
                base.update(new_json)
                return base
            elif type(new_json) is list:
                if type(base) is not list:
                    return new_json
                return base + new_json
            else:
                return new_json

        return self.read(patch=patch_json, base=base)

    async def json_async(self, base=None):
        async def patch_json_async(base, new):
            new_json = json.loads(await new.read())
            if type(new_json) is dict:
                if type(base) is not dict:
                    return new_json
                base.update(new_json)
                return base
            elif type(new_json) is list:
                if type(base) is not list:
                    return new_json
                return base + new_json
            else:
                return new_json

        return await self.read_async(patch=patch_json_async, base=base)

    @property
    def path(self):
        for iter_path in self.paths:
            if (iter_path / self.filename).exists():
                return iter_path

    def read(self, patch=None, base=None):
        """If patch is None (the default), this returns the contents of the first found file.

        If patch is not None, it must be a function of the form patch(base, new). This function will then read all existing files in reverse order, and call the patch function with the results of the last call as the first argument, and a file object representing the current file as the second argument. The end result is returned.
        """
        if patch is None:
            for path in self.paths:
                if (path / self.filename).exists():
                    with (path / self.filename).open() as f:
                        return f.read()
        else:
            for path in reversed(self.paths):
                if (path / self.filename).exists():
                    with (path / self.filename).open() as new:
                        base = patch(base, new)
            return base

    async def read_async(self, patch=None, base=None):
        """If patch is None (the default), this returns the contents of the first found file.

        If patch is not None, it must be a coroutine of the form patch(base, new). This coroutine will then read all existing files in reverse order, and call the patch coroutine with the results of the last call as the first argument, and an aiofiles async file object representing the current file as the second argument. The end result is returned.
        """
        import aiofiles
        if patch is None:
            for path in self.paths:
                if (path / self.filename).exists():
                    async with aiofiles.open(path / self.filename) as f:
                        return await f.read()
        else:
            for path in reversed(self.paths):
                if (path / self.filename).exists():
                    async with aiofiles.open(path / self.filename) as new:
                        base = await patch(base, new)
            return base


class BaseDir:
    def __call__(self, filename, flags='r'):
        return BaseDirFile([self.path], filename, flags=flags)

    def __init__(self, envar, default):
        self.path = pathlib.Path(os.environ.get(envar) or default)

    def __str__(self):
        return str(self.path)

    def config(self, filename):
        return Config(self(filename))


class BaseDirs:
    def __call__(self, filename, flags='r'):
        return BaseDirFile([self.home] + self.paths, filename, flags=flags)

    def __init__(self, envar, default, home):
        if isinstance(home, BaseDir):
            self.home = home.path
        else:
            self.home = pathlib.Path(home)
        self.paths = os.environ.get(envar) or default
        if isinstance(self.paths, str):
            self.paths = [pathlib.Path(p) for p in self.paths.split(':')]

    def __iter__(self):
        yield self.home
        for path in self.paths:
            yield path

    def __str__(self, include_home=False):
        paths = ([self.home] if include_home else []) + list(self.paths)
        return ':'.join(str(p) for p in paths)

    def config(self, filename):
        return Config(self(filename))


data_home = BaseDir('XDG_DATA_HOME', pathlib.Path.home() / '.local' / 'share')
config_home = BaseDir('XDG_CONFIG_HOME', pathlib.Path.home() / '.config')
data_dirs = BaseDirs('XDG_DATA_DIRS', ['/usr/local/share', '/usr/share'], data_home)
config_dirs = BaseDirs('XDG_CONFIG_DIRS', ['/etc/xdg'], config_home)
cache_home = BaseDir('XDG_CACHE_HOME', pathlib.Path.home() / '.cache')
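# Editor's usage sketch (not part of the original module; 'myapp/config.json'
# is a made-up path). It uses only names defined above:
#
#     import basedir
#     # Merge JSON config from XDG_CONFIG_HOME and every XDG_CONFIG_DIRS entry,
#     # starting from an empty dict.
#     settings = basedir.config_dirs('myapp/config.json').json(base={})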
| 39.416309 | 351 | 0.582753 | 1,171 | 9,184 | 4.457728 | 0.179334 | 0.062069 | 0.058238 | 0.028736 | 0.472797 | 0.44387 | 0.402107 | 0.382184 | 0.318582 | 0.283142 | 0 | 0.000643 | 0.323062 | 9,184 | 232 | 352 | 39.586207 | 0.83899 | 0.157557 | 0 | 0.420455 | 0 | 0 | 0.035605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.017045 | 0.045455 | 0.039773 | 0.369318 | 0.005682 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e054a3c6b84d4385ce18db1a386851ca484d0f1 | 2,861 | py | Python | convert_to_tf_records.py | andreweskeclarke/toxic_kaggle | e761f4aad0688423bdbe4098fe7efddddea4b0d9 | [
"MIT"
] | null | null | null | convert_to_tf_records.py | andreweskeclarke/toxic_kaggle | e761f4aad0688423bdbe4098fe7efddddea4b0d9 | [
"MIT"
] | null | null | null | convert_to_tf_records.py | andreweskeclarke/toxic_kaggle | e761f4aad0688423bdbe4098fe7efddddea4b0d9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3

import argparse
import collections
import csv

import tensorflow as tf
import numpy as np

from google_research.bert.tokenizer import FullTokenizer


def main():
    MAX_SEQUENCE_LENGTH = 512
    parser = argparse.ArgumentParser()
    parser.add_argument('--do_lower_case', action='store_true')
    parser.add_argument('--input_file', help='A file containing many lines for tokenization',
                        type=str)
    parser.add_argument('--output_training_file', help='The output TF Record file',
                        type=str)
    parser.add_argument('--output_validation_file', help='The output TF Record file',
                        type=str)
    parser.add_argument('--validation_ratio', help='The output TF Record file',
                        type=float)
    parser.add_argument('--vocab_file', help='A file containing the dictionary for tokenization',
                        type=str, default='models/vocab.txt')
    args = parser.parse_args()

    tokenizer = FullTokenizer(args.vocab_file, args.do_lower_case)
    training_writer = tf.python_io.TFRecordWriter(args.output_training_file)
    validation_writer = tf.python_io.TFRecordWriter(args.output_validation_file)
    with open(args.input_file) as f:
        for i, row in enumerate(csv.reader(f)):
            if i == 0: continue
            tokens = []
            tokens.append("[CLS]")
            tokens.extend(tokenizer.tokenize(row[1])[0:(MAX_SEQUENCE_LENGTH-2)])
            tokens.append("[SEP]")
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)
            while len(input_ids) < MAX_SEQUENCE_LENGTH:
                input_ids.append(0)
                mask.append(0)
                segment_ids.append(0)
            targets = list([int(i) for i in row[2:]])
            features = collections.OrderedDict()
            features["input_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(input_ids)))
            features["input_mask"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(mask)))
            features["segment_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(segment_ids)))
            features["label_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(targets)))
            features["is_real_example"] = tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))
            tf_example = tf.train.Example(features=tf.train.Features(feature=features))
            if np.random.random() > args.validation_ratio:
                training_writer.write(tf_example.SerializeToString())
            else:
                validation_writer.write(tf_example.SerializeToString())
    training_writer.close()
    validation_writer.close()


if __name__ == '__main__':
    main()
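# Editor's sketch (not part of the original script): the records above are
# written with the TF1-era API, so they can be inspected with the matching
# iterator; 'train.tfrecord' is a placeholder filename:
#
#     for record in tf.python_io.tf_record_iterator('train.tfrecord'):
#         example = tf.train.Example.FromString(record)
#         print(example.features.feature['label_ids'].int64_list.value)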
| 47.683333 | 110 | 0.645928 | 350 | 2,861 | 5.065714 | 0.314286 | 0.047377 | 0.05753 | 0.053582 | 0.347434 | 0.279752 | 0.259447 | 0.19797 | 0.19797 | 0.173153 | 0 | 0.016026 | 0.236631 | 2,861 | 59 | 111 | 48.491525 | 0.795788 | 0.00734 | 0 | 0.055556 | 0 | 0 | 0.130328 | 0.016203 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.111111 | 0 | 0.12963 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e05fc418915f277ab4a685fd9b838701e4c7fe8 | 14,246 | py | Python | fedot/core/operations/evaluation/operation_implementations/data_operations/sklearn_transformations.py | vkirilenko/FEDOT | d287d899e47fe5aebf52c12733892e103b75842b | [
"BSD-3-Clause"
] | 1 | 2021-11-09T10:24:38.000Z | 2021-11-09T10:24:38.000Z | fedot/core/operations/evaluation/operation_implementations/data_operations/sklearn_transformations.py | vkirilenko/FEDOT | d287d899e47fe5aebf52c12733892e103b75842b | [
"BSD-3-Clause"
] | null | null | null | fedot/core/operations/evaluation/operation_implementations/data_operations/sklearn_transformations.py | vkirilenko/FEDOT | d287d899e47fe5aebf52c12733892e103b75842b | [
"BSD-3-Clause"
] | null | null | null | from typing import Optional

import numpy as np
from sklearn.decomposition import KernelPCA, PCA
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, PolynomialFeatures, StandardScaler

from fedot.core.data.data import InputData
from fedot.core.data.data import data_has_categorical_features, divide_data_categorical_numerical, str_columns_check
from fedot.core.operations.evaluation.operation_implementations. \
    implementation_interfaces import DataOperationImplementation, EncodedInvariantImplementation


class ComponentAnalysisImplementation(DataOperationImplementation):
    """ Class for applying PCA and kernel PCA models from sklearn

    :param params: optional, dictionary with the arguments
    """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        self.pca = None
        self.params = None
        self.amount_of_features = None

    def fit(self, input_data):
        """
        The method trains the PCA model

        :param input_data: data with features, target and ids for PCA training
        :return pca: trained PCA model (optional output)
        """
        self.amount_of_features = np.array(input_data.features).shape[1]

        if self.amount_of_features > 1:
            self.check_and_correct_params()
            self.pca.fit(input_data.features)

        return self.pca

    def transform(self, input_data, is_fit_pipeline_stage: Optional[bool]):
        """
        Method for transformation tabular data using PCA

        :param input_data: data with features, target and ids for PCA applying
        :param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return input_data: data with transformed features attribute
        """
        if self.amount_of_features > 1:
            transformed_features = self.pca.transform(input_data.features)
        else:
            transformed_features = input_data.features

        # Update features
        output_data = self._convert_to_output(input_data,
                                              transformed_features)
        return output_data

    def check_and_correct_params(self) -> None:
        """ Method checks whether the amount of features in the data is enough
        for the n_components parameter in PCA and, if not, fixes it
        """
        current_parameters = self.pca.get_params()

        if type(current_parameters['n_components']) == int:
            if current_parameters['n_components'] > self.amount_of_features:
                current_parameters['n_components'] = self.amount_of_features

        self.pca.set_params(**current_parameters)
        self.params = current_parameters

    def get_params(self):
        return self.pca.get_params()


class PCAImplementation(ComponentAnalysisImplementation):
    """ Class for applying PCA from sklearn

    :param params: optional, dictionary with the hyperparameters
    """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        if not params:
            # Default parameters
            self.pca = PCA(svd_solver='full', n_components='mle')
        else:
            self.pca = PCA(**params)
        self.params = params
        self.amount_of_features = None


class KernelPCAImplementation(ComponentAnalysisImplementation):
    """ Class for applying kernel PCA from sklearn

    :param params: optional, dictionary with the hyperparameters
    """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        if not params:
            # Default parameters
            self.pca = KernelPCA()
        else:
            self.pca = KernelPCA(**params)
        self.params = params


class OneHotEncodingImplementation(DataOperationImplementation):
    """ Class for automatic categorical data detection and one hot encoding """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        default_params = {
            'drop': 'if_binary'
        }
        if not params:
            # Default parameters
            self.encoder = OneHotEncoder(**default_params)
        else:
            self.encoder = OneHotEncoder(**{**params, **default_params})
        self.categorical_ids = None
        self.non_categorical_ids = None

    def fit(self, input_data: InputData):
        """ Method for fit encoder with automatic determination of categorical features

        :param input_data: data with features, target and ids for encoder training
        :return encoder: trained encoder (optional output)
        """
        features = input_data.features
        categorical_ids, non_categorical_ids = str_columns_check(features)

        # Indices of columns with categorical and non-categorical features
        self.categorical_ids = categorical_ids
        self.non_categorical_ids = non_categorical_ids

        if len(categorical_ids) == 0:
            pass
        else:
            categorical_features = np.array(features[:, categorical_ids])
            self.encoder.fit(categorical_features)

    def transform(self, input_data, is_fit_pipeline_stage: Optional[bool]):
        """
        The method that transforms the categorical features in the original
        dataset, but does not affect the rest features

        :param input_data: data with features, target and ids for transformation
        :param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return output_data: output data with transformed features table
        """
        features = input_data.features
        if len(self.categorical_ids) == 0:
            # If there are no categorical features in the table
            transformed_features = features
        else:
            # If categorical features exist
            transformed_features = self._make_new_table(features)

        # Update features
        output_data = self._convert_to_output(input_data,
                                              transformed_features)
        return output_data

    def _make_new_table(self, features):
        """
        The method creates a table based on categorical and real features

        :param features: tabular data for processing
        :return transformed_features: transformed features table
        """
        categorical_features = np.array(features[:, self.categorical_ids])
        self._check_same_categories(categorical_features)
        transformed_categorical = self.encoder.transform(categorical_features).toarray()

        # If there are non-categorical features in the data
        if len(self.non_categorical_ids) == 0:
            transformed_features = transformed_categorical
        else:
            # Stack transformed categorical and non-categorical data
            non_categorical_features = np.array(features[:, self.non_categorical_ids])
            frames = (non_categorical_features, transformed_categorical)
            transformed_features = np.hstack(frames)

        return transformed_features

    def _check_same_categories(self, categorical_features):
        encoder_unique_categories = sorted(list(np.hstack(self.encoder.categories_)))
        features_unique_categories = sorted(np.unique(np.array(categorical_features)))
        if encoder_unique_categories != features_unique_categories:
            raise ValueError('Category in test data did not exist in train.')

    def get_params(self):
        return self.encoder.get_params()


class PolyFeaturesImplementation(EncodedInvariantImplementation):
    """ Class for application of PolynomialFeatures operation on data,
    where only not encoded features (were not converted from categorical using
    OneHot encoding) are used

    :param params: optional, dictionary with the arguments
    """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        if not params:
            # Default parameters
            self.operation = PolynomialFeatures(include_bias=False)
        else:
            # Checking whether the appropriate params are used or not
            poly_params = {k: params[k] for k in
                           ['degree', 'interaction_only']}
            self.operation = PolynomialFeatures(include_bias=False,
                                                **poly_params)
        self.params = params

    def get_params(self):
        return self.operation.get_params()


class ScalingImplementation(EncodedInvariantImplementation):
    """ Class for application of Scaling operation on data,
    where only not encoded features (were not converted from categorical using
    OneHot encoding) are used

    :param params: optional, dictionary with the arguments
    """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        if not params:
            # Default parameters
            self.operation = StandardScaler()
        else:
            self.operation = StandardScaler(**params)
        self.params = params

    def get_params(self):
        return self.operation.get_params()


class NormalizationImplementation(EncodedInvariantImplementation):
    """ Class for application of MinMax normalization operation on data,
    where only not encoded features (were not converted from categorical using
    OneHot encoding) are used

    :param params: optional, dictionary with the arguments
    """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        if not params:
            # Default parameters
            self.operation = MinMaxScaler()
        else:
            self.operation = MinMaxScaler(**params)
        self.params = params

    def get_params(self):
        return self.operation.get_params()


class ImputationImplementation(DataOperationImplementation):
    """ Class for applying imputation on tabular data

    :param params: optional, dictionary with the arguments
    """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        default_params_categorical = {'strategy': 'most_frequent'}
        self.params_cat = {**params, **default_params_categorical}
        self.params_num = params

        if not params:
            # Default parameters
            self.imputer_cat = SimpleImputer(**default_params_categorical)
            self.imputer_num = SimpleImputer()
        else:
            self.imputer_cat = SimpleImputer(**self.params_cat)
            self.imputer_num = SimpleImputer(**self.params_num)

    def fit(self, input_data: InputData):
        """
        The method trains SimpleImputer

        :param input_data: data with features
        :return imputer: trained SimpleImputer model
        """
        features_with_replaced_inf = np.where(np.isin(input_data.features,
                                                      [np.inf, -np.inf]),
                                              np.nan,
                                              input_data.features)
        input_data.features = features_with_replaced_inf

        if data_has_categorical_features(input_data):
            numerical, categorical = divide_data_categorical_numerical(input_data)

            if len(categorical.features.shape) == 1:
                self.imputer_cat.fit(categorical.features.reshape(-1, 1))
            else:
                self.imputer_cat.fit(categorical.features)

            if len(numerical.features.shape) == 1:
                self.imputer_num.fit(numerical.features.reshape(-1, 1))
            else:
                self.imputer_num.fit(numerical.features)
        else:
            if len(input_data.features.shape) == 1:
                self.imputer_num.fit(input_data.features.reshape(-1, 1))
            else:
                self.imputer_num.fit(input_data.features)

    def transform(self, input_data, is_fit_pipeline_stage: Optional[bool] = None):
        """
        Method for transformation tabular data using SimpleImputer

        :param input_data: data with features
        :param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return input_data: data with transformed features attribute
        """
        features_with_replaced_inf = np.where(np.isin(input_data.features,
                                                      [np.inf, -np.inf]),
                                              np.nan,
                                              input_data.features)
        input_data.features = features_with_replaced_inf

        if data_has_categorical_features(input_data):
            numerical, categorical = divide_data_categorical_numerical(input_data)

            if len(categorical.features.shape) == 1:
                categorical_features = self.imputer_cat.transform(categorical.features.reshape(-1, 1))
            else:
                categorical_features = self.imputer_cat.transform(categorical.features)

            if len(numerical.features.shape) == 1:
                numerical_features = self.imputer_num.transform(numerical.features.reshape(-1, 1))
            else:
                numerical_features = self.imputer_num.transform(numerical.features)

            transformed_features = np.hstack((categorical_features, numerical_features))
        else:
            if len(input_data.features.shape) == 1:
                transformed_features = self.imputer_num.transform(input_data.features.reshape(-1, 1))
            else:
                transformed_features = self.imputer_num.transform(input_data.features)

        output_data = self._convert_to_output(input_data, transformed_features, data_type=input_data.data_type)
        return output_data

    def fit_transform(self, input_data, is_fit_pipeline_stage: Optional[bool] = None):
        """
        Method for training and transformation tabular data using SimpleImputer

        :param input_data: data with features
        :param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return input_data: data with transformed features attribute
        """
        self.fit(input_data)
        output_data = self.transform(input_data)
        return output_data

    def get_params(self) -> dict:
        dictionary = {'imputer_categorical': self.params_cat, 'imputer_numerical': self.params_num}
        return dictionary
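# Editor's note (not part of the original module): check_and_correct_params()
# clamps n_components to the column count, which plain sklearn would otherwise
# reject at fit time. A minimal sketch, assuming DataOperationImplementation's
# constructor takes no required arguments:
#
#     pca_op = PCAImplementation(n_components=10)
#     pca_op.amount_of_features = 3
#     pca_op.check_and_correct_params()
#     pca_op.pca.get_params()['n_components']   # -> 3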
| 38.711957 | 116 | 0.653727 | 1,536 | 14,246 | 5.835286 | 0.132161 | 0.045186 | 0.03414 | 0.018967 | 0.548142 | 0.507308 | 0.444382 | 0.427982 | 0.382573 | 0.361821 | 0 | 0.002312 | 0.271374 | 14,246 | 367 | 117 | 38.817439 | 0.861175 | 0.240418 | 0 | 0.462687 | 0 | 0 | 0.017469 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119403 | false | 0.004975 | 0.039801 | 0.024876 | 0.258706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e08575a33b1bf77c5ac73909e8b814796a5d007 | 421 | py | Python | json_extract.py | Puratinamu/Application-Document-Generator | 6e6068fc8f360f8b2ccd15ca66d028ad37055593 | [
"Apache-2.0"
] | 1 | 2019-03-01T18:10:56.000Z | 2019-03-01T18:10:56.000Z | json_extract.py | Puratinamu/Application-Document-Generator | 6e6068fc8f360f8b2ccd15ca66d028ad37055593 | [
"Apache-2.0"
] | null | null | null | json_extract.py | Puratinamu/Application-Document-Generator | 6e6068fc8f360f8b2ccd15ca66d028ad37055593 | [
"Apache-2.0"
] | null | null | null | import json


def extract(filename):
    ''' (str) -> list of object

    Given the filename of a JSON file in the current directory, extract and return
    its data.

    :param filename: name of the JSON file:
    :return data: data extracted from the JSON file:
    '''
    file = open(filename, 'r')
    json_decode = json.load(file)
    data = []
    for item in json_decode:
        data.append(item)
    return data
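# Editor's usage sketch (not part of the original file; 'records.json' is a
# placeholder filename):
#
#     rows = extract('records.json')
#     print(len(rows))
#
# Note that the handle opened in extract() is never closed; wrapping the
# open() call in a `with` block would be the idiomatic fix.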
| 24.764706 | 82 | 0.64133 | 60 | 421 | 4.466667 | 0.516667 | 0.089552 | 0.08209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.268409 | 421 | 16 | 83 | 26.3125 | 0.87013 | 0.477435 | 0 | 0 | 0 | 0 | 0.005208 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e0a45dc70c943024981f87b2475e50be7bce1eb | 1,990 | py | Python | src/DataExploration/Python/PHE_PracticeProfileIndicators_Analysis.py | shashidarette/healthcare_dataviz | f638a60b3e3f31e68704fe73cce700e01bf25dfe | [
"MIT"
] | null | null | null | src/DataExploration/Python/PHE_PracticeProfileIndicators_Analysis.py | shashidarette/healthcare_dataviz | f638a60b3e3f31e68704fe73cce700e01bf25dfe | [
"MIT"
] | null | null | null | src/DataExploration/Python/PHE_PracticeProfileIndicators_Analysis.py | shashidarette/healthcare_dataviz | f638a60b3e3f31e68704fe73cce700e01bf25dfe | [
"MIT"
] | null | null | null |
# coding: utf-8
# ## This notebook is to analyse the practice indicators available from PHE dataset
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[2]:
# General Practice Indicators from PHE (It includes England, CCG level indicators as well)
GPIndicators = pd.read_csv(r'..\..\..\..\MAIN_PROJECT\Data\PHE\PracticeProfileIndicators.csv', sep=',', encoding="ISO-8859-1")
GPIndicators.head()
# In[3]:
GPIndicators.columns
# In[4]:
# types of areas
GPIndicators['Area Type'].unique()
# In[9]:
# types of indicators data available
GPIndicators['Indicator Name'].unique()
# In[10]:
# types of indicators data available
GPIndicators['Indicator ID'].unique()
# In[7]:
# Time periods available
GPIndicators['Time period'].unique()
# In[26]:
# AreaTypeFilters
GPFilter = GPIndicators['Area Type'] == 'GP'
CCGFilter = GPIndicators['Area Type'] == 'CCGs (since 4/2017)'
EnglandFilter = GPIndicators['Area Type'] == 'Country'
# In[27]:
# Data by AreaType
GP_Data = GPIndicators[GPFilter]
CCG_Data = GPIndicators[CCGFilter]
England_Data = GPIndicators[EnglandFilter]
# In[28]:
# Save to CSV
GP_Data.to_csv('PHE_GP_Indicators.csv', sep=',')
# In[29]:
# Save to CSV
CCG_Data.to_csv('PHE_CCG_Indicators.csv', sep=',')
# In[30]:
# Save to CSV
England_Data.to_csv('PHE_ENG_Indicators.csv', sep=',')
# In[36]:
# Extract IMD 2015 data from GP Data
GP_IMD_2k15_Filter = GP_Data['Indicator Name'] == 'Deprivation score (IMD 2015)'
GP_IMD_2k15_Data = GP_Data[GP_IMD_2k15_Filter]
GP_IMD_2k15_Data.to_csv('PHE_GP_IMD_2k15_Indicators.csv', sep=',')
# In[49]:
# Scatter plot of IMD 2015 deprivation scores across GP practices
IMDColumnFilter = GP_IMD_2k15_Data['Indicator Name'] == 'Deprivation score (IMD 2015)'
IMDData = GP_IMD_2k15_Data[IMDColumnFilter]
x = np.arange(0, IMDData['Indicator Name'].count(), 1)
y = IMDData['Value']
plt.scatter(x, y)
plt.xlabel("Practice Index")
plt.ylabel("IMD Score")
plt.show()
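# In[ ]:


# Editor's sketch (not an original notebook cell): summary statistics for the
# same IMD scores, using only objects already defined above:
# print(IMDData['Value'].describe())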
| 16.722689 | 127 | 0.711055 | 288 | 1,990 | 4.767361 | 0.395833 | 0.025492 | 0.045885 | 0.03496 | 0.186453 | 0.16606 | 0.16606 | 0 | 0 | 0 | 0 | 0.039883 | 0.143216 | 1,990 | 118 | 128 | 16.864407 | 0.765396 | 0.273869 | 0 | 0 | 0 | 0 | 0.283286 | 0.111898 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.129032 | 0 | 0.129032 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e0a8cd86646fb6782dcda7592bb726bc8b672cd | 656 | py | Python | python/agenciesModule.py | leomaurodesenv/travel-dataset-generator | 6d97012ea50ab94d8513cd4d4a5b17d36fa94469 | [
"CC-BY-4.0"
] | 3 | 2019-09-24T20:53:21.000Z | 2021-07-06T17:39:01.000Z | python/agenciesModule.py | leomaurodesenv/travel-dataset-generator | 6d97012ea50ab94d8513cd4d4a5b17d36fa94469 | [
"CC-BY-4.0"
] | null | null | null | python/agenciesModule.py | leomaurodesenv/travel-dataset-generator | 6d97012ea50ab94d8513cd4d4a5b17d36fa94469 | [
"CC-BY-4.0"
] | 2 | 2019-09-24T20:57:11.000Z | 2020-02-28T15:07:55.000Z | #- Import packages
import random

'''
@module: Travel Dataset Generator
@authors: Leonardo Mauro <leomaurodesenv>
@link: https://github.com/Argo-Solutions/travel-dataset-generator GitHub
@license: Creative Commons BY License
@copyright: 2019 Argo Solutions
@access: public
'''

#- Functions
def funcAgencyGenerator(flightTypes):
    '''
    Generate random agency services, based on predefinitions.
    - flightTypes: types of flight
    '''
    agency = dict()
    types = list(flightTypes.copy().keys())
    random.shuffle(types)
    typesMany = random.randint(1, len(types))
    agency['types'] = [types[i] for i in range(typesMany)]
    return agency
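# Editor's usage sketch (not part of the original module; the flightTypes
# dictionary below is made up for illustration):
#
#     flight_types = {'economy': {}, 'business': {}}
#     print(funcAgencyGenerator(flight_types))
#     # e.g. {'types': ['business', 'economy']}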
| 26.24 | 72 | 0.71189 | 75 | 656 | 6.226667 | 0.693333 | 0.055675 | 0.094218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009158 | 0.167683 | 656 | 24 | 73 | 27.333333 | 0.846154 | 0.178354 | 0 | 0 | 0 | 0 | 0.018182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e18059d12840f8a0185aebcabdddc4603c7b343 | 9,891 | py | Python | test/graph/test_graph.py | openeuler-mirror/pkgship | 5aaa4953023fde8ff03892fe5608f0711a26a942 | [
"MulanPSL-1.0"
] | null | null | null | test/graph/test_graph.py | openeuler-mirror/pkgship | 5aaa4953023fde8ff03892fe5608f0711a26a942 | [
"MulanPSL-1.0"
] | null | null | null | test/graph/test_graph.py | openeuler-mirror/pkgship | 5aaa4953023fde8ff03892fe5608f0711a26a942 | [
"MulanPSL-1.0"
] | 1 | 2021-11-20T00:10:53.000Z | 2021-11-20T00:10:53.000Z | #!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#     http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
# -*- coding:utf-8 -*-

import os
import operator
from test.cli.depend_commands import DependTestBase
from packageship.application.core.depend import DispatchDepend
from packageship.application.serialize.validate import validate
from packageship.application.serialize.dependinfo import DependSchema

FILES = {
    "level-0-binary-installdep": "judy-judy-level-0-binary-installdep.json",
    "level-0-source-builddep": "judy-judy-level-0-source-builddep.json",
    "level-0-binary-builddep": "judy-gcc-level-0-binary-builddep.json",
    "selfdep-info": "judy-info-selfdep.json",
    "selfdep-info-binary": "judy-info-selfdep-1.json",
    "selfdep-info-self-build": "judy-judy-selfdep-2.json",
    "selfdep-info-subpack": "judy-judy-selfdep-3.json",
    "selfdep-info-subpack-binary": "judy-judy-selfdep-4.json",
    "selfdep-info-self-build-packtype": "judy-judy-selfdep-5.json",
    "selfdep-info-self-build-subpack": "judy-judy-selfdep-6.json",
    "bedep-packtype": "judy-judy-bedep-1.json",
    "bedep-subpack": "judy-judy-bedep-2.json",
    "bedep-search-type": "judy-judy-bedep-3.json",
    "bedep-search-type-packtype": "judy-judy-bedep-4.json",
    "bedep-search-type-subpack": "judy-judy-bedep-5.json",
    "bedep-search-type-supack-packtype": "judy-judy-bedep-6.json"
}


class BaseGraph:
    """
    Graph dependent base class methods
    """
    data_folder = os.path.join(os.path.dirname(__file__), "data")

    def _extract_edges_paramter(self, data):
        """
        Extract request parameters and compare result values
        """
        return data.get("request"), data.get("edges")

    def get_depend_result(self, path):
        """
        Obtain comparative data
        """
        _data = self.read_file_content(path=os.path.join(
            self.data_folder, path))
        request_param, edges = self._extract_edges_paramter(data=_data)
        _param, _ = validate(DependSchema, request_param, load=True)
        return _param, edges

    def _get_graph_data(self, request_param):
        node_name = request_param.pop('node_name')
        node_type = request_param.pop('node_type')
        depend = DispatchDepend.execute(**request_param)
        _graph = depend.depend_info_graph(
            source=node_name, package_type=node_type)
        return _graph["edges"]

    def _order_by(self, graph, key="sourceID"):
        _graph = sorted(graph, key=operator.itemgetter(key))
        return sorted(_graph, key=operator.itemgetter("targetID"))

    def _comparison_results(self, edges, request_param):
        _graph = self._get_graph_data(request_param=request_param)
        self.assertListEqual(self._order_by(_graph), self._order_by(edges))


class TestInstalldepGraph(DependTestBase, BaseGraph):
    """
    The installation depends on the graph test
    """
    binary_file = "os-version-binary.json"
    component_file = "os-version-binary-component.json"

    def setUp(self):
        super(TestInstalldepGraph, self).setUp()

    def test_level_0_binary_installdep(self):
        """
        Install dependent graph tests
        """
        request_param, edges = self.get_depend_result(
            path=FILES["level-0-binary-installdep"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_level_0_source_installdep(self):
        """
        Install dependent graph tests
        """
        request_param, _ = validate(DependSchema, {
            "packagename": [
                "Judy"
            ],
            "depend_type": "installdep",
            "node_name": "Judy",
            "node_type": "source",
            "parameter": {
                "db_priority": [
                    "os-version"
                ]
            }
        }, load=True)
        self._comparison_results(edges=[], request_param=request_param)


class TestBuilddepGraph(DependTestBase, BaseGraph):
    """
    Compile the dependency graph test
    """
    binary_file = "os-version-binary.json"
    component_file = "os-version-binary-component.json"
    source_file = "os-version-source.json"

    def setUp(self):
        super(TestBuilddepGraph, self).setUp()

    def test_level_0_binary_builddep(self):
        """
        Compile dependent graph tests
        """
        request_param, edges = self.get_depend_result(
            path=FILES["level-0-binary-builddep"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_level_0_source_builddep(self):
        """
        Compile dependent graph tests
        """
        request_param, edges = self.get_depend_result(
            path=FILES["level-0-source-builddep"])
        self._comparison_results(edges=edges, request_param=request_param)


class TestSelfdepGraph(DependTestBase, BaseGraph):
    """
    Self dependent graph testing
    """
    binary_file = "os-version-binary.json"
    component_file = "os-version-binary-component.json"
    source_file = "os-version-source.json"
    package_source_file = "os-version-source-package.json"

    def setUp(self):
        super(TestSelfdepGraph, self).setUp()

    def test_selfdep_info(self):
        """
        Self dependent data testing
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_binary(self):
        """
        Self dependent binary packet data testing
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-binary"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_self_build(self):
        """
        Self dependent selfbuild to true data test
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-self-build"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_subpack(self):
        """
        Self dependent subpack to true data test
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-subpack"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_subpack_binary(self):
        """
        Self dependent binary package data test with selfbuild being true
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-subpack-binary"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_self_build_packtype(self):
        """
        Self dependent binary package data test with selfbuild being true
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-self-build-packtype"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_self_build_subpack(self):
        """
        Self dependent binary package data test with selfbuild being true
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-self-build-subpack"])
        self._comparison_results(edges=edges, request_param=request_param)


class TestBedepGraph(DependTestBase, BaseGraph):
    """
    The dependent base class method
    """
    binary_file = "os-version-bedepend.json"
    source_file = "os-version-source-bedepend.json"

    def setUp(self):
        super(TestBedepGraph, self).setUp()

    def test_bedep_packtype(self):
        """
        Dependent graph tests
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-packtype"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_subpack(self):
        """
        Dependent graph tests
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-subpack"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_search_type(self):
        """
        Dependent graph tests
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-search-type"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_search_type_packtype(self):
        """
        Dependent graph tests
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-search-type-packtype"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_search_type_subpack(self):
        """
        Dependent graph tests
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-search-type-subpack"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_search_type_supack_packtype(self):
        """
        Dependent graph tests
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-search-type-supack-packtype"])
        self._comparison_results(edges=edges, request_param=request_param)
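# Editor's note (not part of the original file): these suites build on the
# fixtures from DependTestBase, so they are presumably run through the
# repository's normal test discovery, e.g.:
#
#     python -m unittest test.graph.test_graph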
| 35.967273 | 98 | 0.652512 | 1,151 | 9,891 | 5.384883 | 0.159861 | 0.116167 | 0.049371 | 0.0697 | 0.583898 | 0.516618 | 0.502097 | 0.493062 | 0.481284 | 0.481284 | 0 | 0.005202 | 0.222526 | 9,891 | 274 | 99 | 36.09854 | 0.80078 | 0.15873 | 0 | 0.295302 | 0 | 0 | 0.20254 | 0.159056 | 0 | 0 | 0 | 0 | 0.006711 | 1 | 0.174497 | false | 0 | 0.040268 | 0 | 0.355705 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e1b73599dd7ea98dc2b2062486c37f98c1e9a64 | 15,070 | py | Python | gym_electric_motor/physical_systems/physical_systems.py | zizai/gym-electric-motor | 48a0232edf3474e441453126df0f52dc391aed11 | [
"MIT"
] | null | null | null | gym_electric_motor/physical_systems/physical_systems.py | zizai/gym-electric-motor | 48a0232edf3474e441453126df0f52dc391aed11 | [
"MIT"
] | null | null | null | gym_electric_motor/physical_systems/physical_systems.py | zizai/gym-electric-motor | 48a0232edf3474e441453126df0f52dc391aed11 | [
"MIT"
] | null | null | null | import numpy as np
from gym.spaces import Box
from ..core import PhysicalSystem
from ..physical_systems import electric_motors as em, mechanical_loads as ml, converters as cv, \
voltage_supplies as vs, noise_generators as ng, solvers as sv
from ..utils import instantiate, set_state_array
class SCMLSystem(PhysicalSystem):
"""
The SCML(Supply-Converter-Motor-Load)-System is used for the simulation of a technical setting consisting of these
components as well as a noise generator and a solver for the electrical ODE of the motor and mechanical ODE of the
load.
"""
OMEGA_IDX = 0
TORQUE_IDX = 1
CURRENTS_IDX = []
VOLTAGES_IDX = []
U_SUP_IDX = -1
@property
def limits(self):
return self._limits
@property
def nominal_state(self):
return self._nominal_state
@property
def supply(self):
"""
The voltage supply instance in the physical system
"""
return self._supply
@property
def converter(self):
"""
The power electronic converter instance in the system
"""
return self._converter
@property
def electrical_motor(self):
"""
The electrical motor instance of the system
"""
return self._electrical_motor
@property
def mechanical_load(self):
"""
The mechanical load instance in the system
"""
return self._mechanical_load
def __init__(self,
converter,
motor,
load=None,
supply='IdealVoltageSupply',
ode_solver='euler', solver_kwargs=None,
noise_generator=None,
tau=1e-4, **kwargs):
"""
Args:
converter(PowerElectronicConverter): Converter for the physical system
motor(ElectricMotor): Motor of the system
load(MechanicalLoad): Mechanical Load of the System
supply(VoltageSupply): Voltage Supply
ode_solver(OdeSolver): Ode Solver to use in this setting
solver_kwargs(dict): Special keyword arguments to be passed to the solver
noise_generator(NoiseGenerator): Noise generator
tau(float): discrete time step of the system
kwargs(dict): Further arguments to pass to the modules while instantiation
"""
self._converter = instantiate(cv.PowerElectronicConverter, converter, tau=tau, **kwargs)
self._electrical_motor = instantiate(em.ElectricMotor, motor, tau=tau, **kwargs)
load = load or ml.PolynomialStaticLoad(tau=tau, **kwargs)
self._mechanical_load = instantiate(ml.MechanicalLoad, load, tau=tau, **kwargs)
if 'u_sup' in kwargs.keys():
u_sup = kwargs['u_sup']
else:
u_sup = self._electrical_motor.limits['u']
self._supply = instantiate(vs.VoltageSupply, supply, u_nominal=u_sup, tau=tau, **kwargs)
self._noise_generator = noise_generator or ng.GaussianWhiteNoiseGenerator(tau=tau, **kwargs)
state_names = self._build_state_names()
self._noise_generator.set_state_names(state_names)
solver_kwargs = solver_kwargs or {}
self._ode_solver = instantiate(sv.OdeSolver, ode_solver, **solver_kwargs)
self._ode_solver.set_system_equation(self._system_equation)
self._mechanical_load.set_j_rotor(self._electrical_motor.motor_parameter['j_rotor'])
self._t = 0
self._set_indices()
state_space = self._build_state_space(state_names)
super().__init__(self._converter.action_space, state_space, state_names, tau)
self._limits = np.zeros_like(state_names, dtype=float)
self._nominal_state = np.zeros_like(state_names, dtype=float)
self._set_limits()
self._set_nominal_state()
self._noise_generator.set_signal_power_level(self._nominal_state)
def _set_limits(self):
"""
Method to set the physical limits from the modules.
"""
for ind, state in enumerate(self._state_names):
motor_lim = self._electrical_motor.limits.get(state, np.inf)
mechanical_lim = self._mechanical_load.limits.get(state, np.inf)
self._limits[ind] = min(motor_lim, mechanical_lim)
self._limits[self._state_positions['u_sup']] = self.supply.u_nominal
def _set_nominal_state(self):
"""
Method to set the nominal values from the modules.
"""
for ind, state in enumerate(self._state_names):
motor_nom = self._electrical_motor.nominal_values.get(state, np.inf)
mechanical_nom = self._mechanical_load.nominal_values.get(state, np.inf)
self._nominal_state[ind] = min(motor_nom, mechanical_nom)
self._nominal_state[self._state_positions['u_sup']] = self.supply.u_nominal
def _build_state_space(self, state_names):
"""
Method to build the normalized state space (i.e. the maximum and minimum possible values for each state variable
normalized by the limits).
Args:
state_names(list(str)): list of the names of each state.
"""
raise NotImplementedError
def _build_state_names(self):
"""
Setting of the state names in the physical system.
"""
raise NotImplementedError
def _set_indices(self):
"""
        Sets indices for faster access to the arrays during integration.
"""
self._omega_ode_idx = self._mechanical_load.OMEGA_IDX
self._load_ode_idx = list(range(len(self._mechanical_load.state_names)))
self._ode_currents_idx = list(range(
self._load_ode_idx[-1] + 1, self._load_ode_idx[-1] + 1 + len(self._electrical_motor.CURRENTS)
))
self._motor_ode_idx = self._ode_currents_idx
def simulate(self, action, *_, **__):
# Docstring of superclass
transformed_action = self._action_transformation(action)
state = self._ode_solver.y
i_in = self._backward_transform(self._electrical_motor.i_in(state[self._ode_currents_idx]), state)
switching_times = self._converter.set_action(transformed_action, self._t)
i_sup = self._converter.i_sup(i_in)
u_sup = self._supply.get_voltage(self._t, i_sup)
for t in switching_times[:-1]:
u_in = self._converter.convert(i_in, self._ode_solver.t)
u_in = [u * u_sup for u in u_in]
u_transformed = self._forward_transform(u_in, state)
self._ode_solver.set_f_params(u_transformed)
state = self._ode_solver.integrate(t)
i_in = self._backward_transform(self._electrical_motor.i_in(state[self._ode_currents_idx]), state)
u_in = self._converter.convert(i_in, self._ode_solver.t)
u_in = [u * u_sup for u in u_in]
u_transformed = self._forward_transform(u_in, state)
self._ode_solver.set_f_params(u_transformed)
ode_state = self._ode_solver.integrate(self._t + self._tau)
self._t = self._ode_solver.t
self._k += 1
torque = self._electrical_motor.torque(ode_state[self._motor_ode_idx])
noise = self._noise_generator.noise()
return (self._build_state(ode_state, torque, u_in, u_sup) + noise) / self._limits
def _system_equation(self, t, state, u_in, **__):
"""
        The system's differential equation.
        It is the concatenation of the motor's electrical ODE system and the mechanical ODE system.
        Args:
            t(float): Current system time
            state(ndarray(float)): Current ODE state of the system
            u_in(list(float)): Input voltages from the converter
        Returns:
            ndarray(float): The derivatives of the ODE state. Based on these, the ODE solver calculates the next state.
"""
motor_derivative = self._electrical_motor.electrical_ode(
state[self._motor_ode_idx], u_in, state[self._omega_ode_idx]
)
torque = self._electrical_motor.torque(state[self._motor_ode_idx])
load_derivative = self._mechanical_load.mechanical_ode(t, state[self._load_ode_idx], torque)
return np.concatenate((load_derivative, motor_derivative))
def reset(self, *_):
"""
        Resets all the system's modules to an initial state.
Returns:
The new state of the system.
"""
motor_state = self._electrical_motor.reset()
load_state = self._mechanical_load.reset()
state = np.concatenate((load_state, motor_state))
u_sup = self.supply.reset()
u_in = self.converter.reset()
u_in = [u * u_sup for u in u_in]
torque = self.electrical_motor.torque(motor_state)
noise = self._noise_generator.reset()
self._t = 0
self._k = 0
self._ode_solver.set_initial_value(state, self._t)
return (self._build_state(state, torque, u_in, u_sup) + noise) / self._limits
def _forward_transform(self, quantities, motor_state):
"""
        Transformation from the physical system's state to the ODE state.
"""
return quantities
def _backward_transform(self, quantities, motor_state):
"""
        Transformation from the ODE state back to the system's state.
"""
return quantities
def _build_state(self, motor_state, torque, u_in, u_sup):
"""
        Builds the new system state from the ODE state and the further input quantities.
"""
raise NotImplementedError
@staticmethod
def _action_transformation(action):
"""
Placeholder for the option to use different representations for the synchronous motor in the future.
"""
return action
class DcMotorSystem(SCMLSystem):
"""
SCML-System that can be used for all DC Motors.
"""
def _set_indices(self):
# Docstring of superclass
super()._set_indices()
self.OMEGA_IDX = self.mechanical_load.OMEGA_IDX
self.TORQUE_IDX = len(self.mechanical_load.state_names)
currents_lower = self.TORQUE_IDX + 1
currents_upper = currents_lower + len(self._electrical_motor.CURRENTS)
self.CURRENTS_IDX = list(range(currents_lower, currents_upper))
voltages_lower = currents_upper
voltages_upper = voltages_lower + len(self._electrical_motor.VOLTAGES)
self.VOLTAGES_IDX = list(range(voltages_lower, voltages_upper))
self.U_SUP_IDX = voltages_upper
def _build_state_names(self):
# Docstring of superclass
return (
self._mechanical_load.state_names
+ ['torque']
+ self._electrical_motor.CURRENTS
+ self._electrical_motor.VOLTAGES
+ ['u_sup']
)
def _build_state_space(self, state_names):
# Docstring of superclass
low, high = self._electrical_motor.get_state_space(self._converter.currents, self._converter.voltages)
low_mechanical, high_mechanical = self._mechanical_load.get_state_space((low['omega'], high['omega']))
low.update(low_mechanical)
high.update(high_mechanical)
high['u_sup'] = self._supply.supply_range[1] / self._supply.u_nominal
if self._supply.supply_range[0] != self._supply.supply_range[1]:
low['u_sup'] = self._supply.supply_range[0] / self._supply.u_nominal
else:
low['u_sup'] = 0
low = set_state_array(low, state_names)
high = set_state_array(high, state_names)
return Box(low, high)
def _build_state(self, motor_state, torque, u_in, u_sup):
# Docstring of superclass
state = np.zeros_like(self.state_names, dtype=float)
state[:len(self._mechanical_load.state_names)] = motor_state[:len(self._mechanical_load.state_names)]
state[self.TORQUE_IDX] = torque
state[self.CURRENTS_IDX] = motor_state[self._electrical_motor.CURRENTS_IDX]
state[self.VOLTAGES_IDX] = u_in
state[self.U_SUP_IDX] = u_sup
return state
class SynchronousMotorSystem(SCMLSystem):
"""
SCML-System that can be used with all Synchronous Motors
"""
def _build_state_space(self, state_names):
# Docstring of superclass
low = -1 * np.ones_like(state_names, dtype=float)
low[self.U_SUP_IDX] = 0.0
high = np.ones_like(state_names, dtype=float)
return Box(low, high)
def _build_state_names(self):
# Docstring of superclass
return (
self._mechanical_load.state_names
+ ['torque']
            + ['i_a', 'i_b', 'i_c']
            + ['u_a', 'u_b', 'u_c']
+ ['epsilon']
+ ['u_sup']
)
def _set_indices(self):
# Docstring of superclass
super()._set_indices()
self._motor_ode_idx += [self._motor_ode_idx[-1] + 1]
self._ode_currents_idx = self._motor_ode_idx[:-1]
self.OMEGA_IDX = self.mechanical_load.OMEGA_IDX
self.TORQUE_IDX = len(self.mechanical_load.state_names)
currents_lower = self.TORQUE_IDX + 1
currents_upper = currents_lower + 3
self.CURRENTS_IDX = list(range(currents_lower, currents_upper))
voltages_lower = currents_upper
voltages_upper = voltages_lower + 3
self.VOLTAGES_IDX = list(range(voltages_lower, voltages_upper))
self.EPSILON_IDX = voltages_upper
self.U_SUP_IDX = self.EPSILON_IDX + 1
self._ode_epsilon_idx = self._motor_ode_idx[-1]
def _forward_transform(self, quantities, motor_state):
# Docstring of superclass
motor_quantity = self._electrical_motor.q_inv(
self._electrical_motor.t_23(quantities), motor_state[self._ode_epsilon_idx]
)
return motor_quantity[::-1]
def _backward_transform(self, quantities, motor_state):
# Docstring of superclass
return list(self._electrical_motor.t_32(
self._electrical_motor.q(quantities[::-1], motor_state[self._ode_epsilon_idx])
))
def _build_state(self, motor_state, torque, u_in, u_sup):
# Docstring of superclass
mechanical_state = motor_state[self._load_ode_idx]
currents = list(
self._backward_transform(motor_state[self._ode_currents_idx], motor_state)
)
epsilon = motor_state[self._ode_epsilon_idx] % (2 * np.pi)
if epsilon > np.pi:
epsilon -= 2 * np.pi
return np.array(
list(mechanical_state)
+ [torque]
+ currents
+ u_in
+ [epsilon]
+ [u_sup]
)
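# A minimal standalone sketch (not part of the original classes) of the angle
# wrapping used in SynchronousMotorSystem._build_state above: the rotor angle
# epsilon is mapped into the interval (-pi, pi].
def _wrap_angle(epsilon):
    epsilon = epsilon % (2 * np.pi)
    if epsilon > np.pi:
        epsilon -= 2 * np.pi
    return epsilon
# e.g. _wrap_angle(3 * np.pi / 2) == -np.pi / 2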
| 40.951087 | 121 | 0.633842 | 1,845 | 15,070 | 4.853659 | 0.125203 | 0.032161 | 0.050921 | 0.0134 | 0.395757 | 0.33646 | 0.282747 | 0.238638 | 0.21876 | 0.21876 | 0 | 0.003581 | 0.277372 | 15,070 | 367 | 122 | 41.06267 | 0.818733 | 0.172661 | 0 | 0.297872 | 0 | 0 | 0.010695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.123404 | false | 0 | 0.021277 | 0.021277 | 0.26383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e1f1f99e22282cac2d668c68acaa5cc5f58efad | 961 | py | Python | start_trouper.py | meltaxa/troupe | fbec6cec09ced6c29695a0741420cb61893134d3 | [
"MIT"
] | 28 | 2020-10-03T02:20:13.000Z | 2021-11-12T11:26:23.000Z | start_trouper.py | meltaxa/troupe | fbec6cec09ced6c29695a0741420cb61893134d3 | [
"MIT"
] | null | null | null | start_trouper.py | meltaxa/troupe | fbec6cec09ced6c29695a0741420cb61893134d3 | [
"MIT"
] | 7 | 2020-10-03T09:11:43.000Z | 2021-01-29T06:45:46.000Z | #!/usr/bin/env python3
from slackbot.bot import Bot
import logging
import socket
import uuid
import os
# If you are running multiple bots on the same server, enable
# unique device names.
unique_device_names = False
if unique_device_names:
os.environ['DEVICE_NAME'] = '{}-{}'.format(socket.gethostname(),
uuid.uuid4())
else:
os.environ['DEVICE_NAME'] = socket.gethostname()
# Enabling debug will reveal which Troupe device is responding to a request
os.environ['DEBUG'] = 'False'
os.environ['TARGET_DEVICE'] = 'all'
os.environ['BASE_DIR'] = os.path.join(
os.path.dirname(os.path.realpath(__file__)))
os.environ['ONLAUNCH'] = os.environ['BASE_DIR'] + '/.onlaunch'
os.environ['NEXTLAUNCH'] = os.environ['BASE_DIR'] + '/.nextlaunch'
os.environ['API_TOKEN'] = ''  # Slack API token placeholder; left blank in the repo
logging.basicConfig()
def main():
bot = Bot()
bot.run()
if __name__ == "__main__":
main()
| 25.289474 | 75 | 0.651405 | 123 | 961 | 4.902439 | 0.504065 | 0.149254 | 0.084577 | 0.079602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002618 | 0.204995 | 961 | 37 | 76 | 25.972973 | 0.786649 | 0.190427 | 0 | 0 | 0 | 0 | 0.173127 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.208333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e1fceddc66231ee1517373a6448d3a3f572702e | 713 | py | Python | test/jit/test_ivalue.py | YifanShenSZ/pytorch | b4232f7cbe407909f9d95b91304c73fdc4c66a50 | [
"Intel"
] | null | null | null | test/jit/test_ivalue.py | YifanShenSZ/pytorch | b4232f7cbe407909f9d95b91304c73fdc4c66a50 | [
"Intel"
] | null | null | null | test/jit/test_ivalue.py | YifanShenSZ/pytorch | b4232f7cbe407909f9d95b91304c73fdc4c66a50 | [
"Intel"
] | null | null | null | # Owner(s): ["oncall: jit"]
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
# Tests for torch.jit.isinstance
class TestIValue(JitTestCase):
def test_qscheme_ivalue(self):
def qscheme(x: torch.Tensor):
return x.qscheme()
x = torch.rand(2, 2)
self.checkScript(qscheme, (x,))
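    def test_device_ivalue(self):
        # A minimal additional sketch (assumed, not from the original test file):
        # torch.device is another IValue type; check that it round-trips through
        # TorchScript the same way qscheme does above.
        def get_device(x: torch.Tensor):
            return x.device
        x = torch.rand(2, 2)
        self.checkScript(get_device, (x,))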
| 25.464286 | 79 | 0.680224 | 101 | 713 | 4.594059 | 0.60396 | 0.038793 | 0.060345 | 0.064655 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003521 | 0.203366 | 713 | 27 | 80 | 26.407407 | 0.81338 | 0.137447 | 0 | 0 | 0 | 0 | 0.180033 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0.055556 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e2079ace258f371ce2dd6d8234af7c77fb15387 | 3,457 | py | Python | 12/common.py | KorotkiyEugene/dsp_sdr_basic | 2aa4b22427b638f09c85948ccf442faba573a29f | [
"MIT"
] | 7 | 2021-05-16T22:34:15.000Z | 2022-02-19T17:19:32.000Z | 12/common.py | KorotkiyEugene/dsp_sdr_basic | 78a77736d0c33951d782f6889884633e4a42c5bd | [
"MIT"
] | null | null | null | 12/common.py | KorotkiyEugene/dsp_sdr_basic | 78a77736d0c33951d782f6889884633e4a42c5bd | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.io.wavfile import read as read_wav
from scipy.io.wavfile import write as write_wav
from scipy.fftpack import fft, fftshift
from scipy.signal import lfilter, firwin
def filt(sig, Fc=0.5, NFIR=101):
    fir_taps = firwin(NFIR, Fc, window='blackmanharris')
filtered_signal = lfilter(fir_taps, 1.0, sig)
return filtered_signal
def interpolate(sig, INTERP_ORDER=10, NFIR=101):
sig_with_zeros = np.zeros(int(len(sig)*INTERP_ORDER))
sig_with_zeros[::INTERP_ORDER] = sig
interp_sig = INTERP_ORDER*filt(sig_with_zeros, 1/INTERP_ORDER, NFIR)
return interp_sig
def decimate(sig, DECIM_ORDER=10, NFIR=101):
filt_sig = filt(sig, 1/DECIM_ORDER, NFIR)
decim_sig = filt_sig[::DECIM_ORDER]
return decim_sig
def plot_spectrum(sig, Fs=100e3, NFFT=8192, title='Spectrum'):
f = np.linspace(-int(NFFT/2), int(NFFT/2)-1, int(NFFT))*Fs/NFFT
sigFFT = fft(sig, NFFT)/NFFT
sigFFT = fftshift(sigFFT)
spectrum = 20*np.log10(np.abs(sigFFT))
plt.figure()
plt.plot(f, spectrum)
plt.ylabel('Power (dBm)')
plt.xlabel('Frequency (Hz)')
plt.title(title)
plt.ylim([-150, 0])
plt.grid(True)
def fm_demod(sig, Fs=100e3, FM_dev = 2e3):
sig_len = len(sig)
fm_demod_sig = np.zeros(sig_len)
last_angle = 0.0
last_demod_data = 0.0
for ii in range(0, sig_len):
i = np.real(sig[ii])
q = np.imag(sig[ii])
angle = math.atan2(q, i)
angle_change = angle - last_angle
if angle_change > math.pi:
angle_change -= 2 * math.pi
elif angle_change < -math.pi:
angle_change += 2 * math.pi
last_angle = angle
demod_data = angle_change * Fs / (2 * math.pi * FM_dev)
if abs(demod_data) >= 1:
# some unexpectedly big angle change happened
demod_data = last_demod_data
last_demod_data = demod_data
fm_demod_sig[ii] = demod_data
return fm_demod_sig
def fm_mod(sig, Fc=5e3, Fs=100e3, FM_dev = 2e3):
sig_len = len(sig)
sig = np.array(sig)
sig_max_val = np.amax(np.absolute(sig))
sig = sig/sig_max_val #normalizing signal (max val = 1, min val = -1)
dF = sig*FM_dev
F = Fc + dF
phase = 0
fm_mod_sig = np.zeros(sig_len)
for ii in range(0, sig_len):
phase = phase + 2*np.pi*F[ii]/Fs
fm_mod_sig[ii] = np.cos(phase)
return fm_mod_sig
def create_harmonic(Fc=1e3, Fs=20e3, Amp=1, N=2e1):
time_indexes = np.arange(N)
time_values = time_indexes/Fs
phase_values = (2*np.pi*Fc/Fs)*time_indexes
sig = Amp*np.cos(phase_values)
return Fs, time_values, sig
def create_complex_exponent(Fc=1e3, Fs=20e3, Amp=1, N=2e1):
time_indexes = np.arange(N)
time_values = time_indexes/Fs
phase_values = (2*np.pi*Fc/Fs)*time_indexes
sig_real_part = Amp*np.cos(phase_values)
sig_imag_part = Amp*np.sin(phase_values)
sig_complex = sig_real_part + 1j*sig_imag_part
return sig_complex
def create_from_wav(file_name, N=float('inf')):
Fs, sig = read_wav(file_name)
N = min(N, len(sig))
if len(sig.shape) > 1:
sig = sig[0:int(N),0] # Selecting one audio channel
else:
sig = sig[0:int(N)]
time_indexes = np.arange(N)
time_values = time_indexes/Fs
return Fs, time_values, sig
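# Minimal usage sketch (parameter values are illustrative, not from the original
# module): a constant-frequency complex exponent demodulates to a constant close
# to Fc / FM_dev, and interpolation followed by decimation by the same factor
# preserves the signal length.
if __name__ == "__main__":
    iq = create_complex_exponent(Fc=1e3, Fs=100e3, N=1000)
    demod = fm_demod(iq, Fs=100e3, FM_dev=2e3)
    print(np.mean(demod[1:]))  # expected ~0.5 (= 1e3 / 2e3)
    _, _, tone = create_harmonic(Fc=1e3, Fs=100e3, N=1000)
    round_trip = decimate(interpolate(tone, 10), 10)
    print(len(round_trip) == len(tone))  # True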
| 30.06087 | 73 | 0.633497 | 552 | 3,457 | 3.766304 | 0.240942 | 0.034632 | 0.01924 | 0.027417 | 0.295334 | 0.190957 | 0.190957 | 0.172679 | 0.172679 | 0.113035 | 0 | 0.034642 | 0.248481 | 3,457 | 115 | 74 | 30.06087 | 0.765589 | 0.033844 | 0 | 0.155556 | 0 | 0 | 0.014984 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.077778 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e233744186d29b648450453c95858ec334e1c71 | 3,769 | py | Python | automix/config.py | MZehren/Automix | dfaa00a9e7c5c0938c0a9d275c07f3a3e5f87e43 | [
"MIT"
] | 18 | 2020-07-20T01:51:40.000Z | 2022-02-25T07:32:11.000Z | automix/config.py | MZehren/Automix | dfaa00a9e7c5c0938c0a9d275c07f3a3e5f87e43 | [
"MIT"
] | 2 | 2021-03-23T03:26:02.000Z | 2021-07-19T12:51:25.000Z | automix/config.py | MZehren/Automix | dfaa00a9e7c5c0938c0a9d275c07f3a3e5f87e43 | [
"MIT"
] | 5 | 2021-01-03T15:34:28.000Z | 2022-02-22T06:07:06.000Z | """
TODO: Move the functions to the correct location
"""
import logging as log
import os
DATASET_LOCATION = "/home/mickael/Documents/programming/dj-tracks-switch-points/"
CACHE_LOCATION = "../annotations/"
CACHE_LEVEL = 0
LOG_LEVEL = log.DEBUG
log.getLogger().setLevel(LOG_LEVEL)
def k_fold_split(X, Y, k=10, shuffleDataset=True):
"""
    Split both lists X and Y into k folds.
    shuffleDataset shuffles the data first, so two calls will not return the same folds.
ex: print(k_fold_split(["A", "B", "C", "D", "E", "F", "G"], ["a", "b", "c", "d", "e", "f", "g"], k=3, shuffleDataset=0))
[[('A', 'a'), ('B', 'b')], [('C', 'c'), ('D', 'd')], [('E', 'e'), ('F', 'f'), ('G', 'g')]]
"""
from random import shuffle
assert len(X) == len(Y) and k <= len(X)
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
indexes = list(range(len(X)))
if shuffleDataset:
shuffle(indexes)
foldsIndexes = chunkIt(indexes, k)
folds = [[(X[i], Y[i]) for i in foldIndexes] for foldIndexes in foldsIndexes]
return folds
def _getFilename(path):
file, ext = os.path.splitext(os.path.basename(path))
if ext != ".mp3" and ext != ".jams": # in case that we give a file without ext but still contain a "." in the name
return file + ext
else:
return file
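# e.g. (illustrative paths, not from the original file):
#   _getFilename("/data/My Track feat. X.mp3") -> "My Track feat. X"
#   _getFilename("/data/My Track feat. X")     -> "My Track feat. X"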
def _getFileType(path):
"""
return the extension of the file based on the path
i.e.: 'MP3' or 'WAVE'
"""
ext = path.split("/")[-1].split(".")[-1]
if ext == "mp3":
return 'MP3'
if ext == "wav":
return "WAVE"
if ext == "jams":
return "JAMS"
else:
return ext
def getFolderFiles(directory):
"""
returns the paths located in this folder
"""
paths = sorted(os.listdir(directory))
knownTypes = ["MP3", "WAVE", "mp4", "m4a", "JAMS"]
return [os.path.join(directory, path) for path in paths if _getFileType(path) in knownTypes]
def GET_PAOLO_FULL(checkCompletude=True, sets=["paolo1", "paolo2", "paolo3", "paolo4", "paolo5", "paolo6", "paolo7"]):
"""
    return the paths of the audio files (.mp3) and the annotation files (.jams)
    if checkCompletude is True, drop tracks without annotations and annotations without tracks
"""
tracksPaths = []
for set in sets:
tracksPaths += getFolderFiles(DATASET_LOCATION + str(set) + "/audio/")
gtTrackPaths = getFolderFiles(DATASET_LOCATION + "clean/annotations/")
if checkCompletude:
tracksPaths, gtTrackPaths = CHECK_COMPLETUDE(tracksPaths, gtTrackPaths)
return tracksPaths, gtTrackPaths
def CHECK_COMPLETUDE(tracksPaths, gtTrackPaths):
"""
Check if all the files are annotated and each annotation has a file
"""
tracksPaths = sorted(tracksPaths, key=lambda x: _getFilename(x))
gtTrackPaths = sorted(gtTrackPaths, key=lambda x: _getFilename(x))
newTracksPaths = [track for track in tracksPaths if _getFilename(track) in [_getFilename(t) for t in gtTrackPaths]]
newgtTrackPaths = [track for track in gtTrackPaths if _getFilename(track) in [_getFilename(t) for t in tracksPaths]]
if len(newTracksPaths) != len(tracksPaths):
log.info(("Becareful all the tracks are not annotated", len(newTracksPaths), len(tracksPaths)))
log.debug("\n".join(
[track for track in tracksPaths if _getFilename(track) not in [_getFilename(t) for t in gtTrackPaths]]))
log.debug("\n".join(
[track for track in gtTrackPaths if _getFilename(track) not in [_getFilename(t) for t in tracksPaths]]))
return newTracksPaths, newgtTrackPaths
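# Illustrative pipeline (not part of the original file): load the matched
# track/annotation pairs and split them into folds for cross-validation.
# tracks, annotations = GET_PAOLO_FULL()
# folds = k_fold_split(tracks, annotations, k=10)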
| 33.353982 | 124 | 0.633059 | 495 | 3,769 | 4.763636 | 0.331313 | 0.017812 | 0.022053 | 0.025445 | 0.194656 | 0.148431 | 0.148431 | 0.122986 | 0.067006 | 0.034775 | 0 | 0.008219 | 0.225259 | 3,769 | 112 | 125 | 33.651786 | 0.799315 | 0.215972 | 0 | 0.0625 | 0 | 0 | 0.083129 | 0.021045 | 0 | 0 | 0 | 0.008929 | 0.015625 | 1 | 0.109375 | false | 0 | 0.046875 | 0 | 0.328125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e25a796cccb23405dd95548e2edaa9a7d0b72fc | 2,342 | py | Python | saleor/graphql/rider/mutations.py | WarHawk007/erocerytest | 0f481e08012eaf2f9c987fc33d641deb8e335ba0 | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/rider/mutations.py | WarHawk007/erocerytest | 0f481e08012eaf2f9c987fc33d641deb8e335ba0 | [
"CC-BY-4.0"
] | 10 | 2021-02-02T22:55:05.000Z | 2022-02-10T23:01:13.000Z | saleor/graphql/rider/mutations.py | WarHawk007/erocerytest | 0f481e08012eaf2f9c987fc33d641deb8e335ba0 | [
"CC-BY-4.0"
] | null | null | null | import graphene
from django.conf import settings
from django.core.exceptions import ValidationError
from ...rider import models as rider_models
from ...subshop import models as subshop_models
from ...account import models as account_models
from ..account.validation import isPhoneNumber, isCnic
from ..core.mutations import ModelMutation
from ..core.types.common import RiderError
from ..shop.types import SubShop
from .types import Rider
from .utils import riderGroupAdd
class RiderCreateInput(graphene.InputObjectType):
name = graphene.String(description="Rider Name", required=True)
city = graphene.String(description="City of Rider", required=True)
cnic = graphene.String(description="Rider cnic", required=True)
shopid = graphene.String(description="Enter shop id", required=True)
phone = graphene.String(description="Rider Phone Number", required=True)
password = graphene.String(description="Rider Password", required=True)
class RiderCreate(ModelMutation):
class Arguments:
input = RiderCreateInput(
description="Fields required to create a rider.", required=True
)
class Meta:
description = "Create A New Rider"
permissions = ("rider.manage_riders",)
model = rider_models.Rider
error_type_class = RiderError
error_type_field = "rider_errors"
@classmethod
def perform_mutation(cls, root, info, **data):
data = data["input"]
isPhoneNumber(data["phone"])
isCnic(data["cnic"])
user = account_models.User.objects.filter(phone=data["phone"])
if len(user) > 0 and user[0].riderid:
raise ValidationError({"phone": "User with phone already is a rider"})
shopid = graphene.Node.get_node_from_global_id(
info, data["shopid"], SubShop)
rider = rider_models.Rider.objects.create(
name=data["name"], city=data["city"], cnic=data["cnic"], shopid=shopid)
if len(user) > 0:
user.update(riderid=rider, is_rider=True)
user = user[0]
else:
user = account_models.User.objects.create_user(
password=data["password"], phone=data["phone"], is_active=True, phone_verified=True, riderid=rider, is_rider=True)
riderGroupAdd(user)
return cls.success_response(rider)
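# Illustrative GraphQL call for this mutation (field casing follows graphene's
# default camelCase conversion; the exact schema names are assumed, not taken
# from the original file):
#
# mutation {
#   riderCreate(input: {name: "...", city: "...", cnic: "...",
#                       shopid: "...", phone: "...", password: "..."}) {
#     riderErrors { field message }
#   }
# }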
| 40.37931 | 130 | 0.687874 | 278 | 2,342 | 5.708633 | 0.320144 | 0.05293 | 0.094518 | 0.075614 | 0.064272 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002151 | 0.205807 | 2,342 | 57 | 131 | 41.087719 | 0.851075 | 0 | 0 | 0 | 0 | 0 | 0.106746 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0.04 | 0.24 | 0 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e2739d879c1c382a8deb97538a12818514a90c7 | 953 | py | Python | car_composite/utils.py | kerry-t-johnson/car-composite | ba39bed0883517a3884a1e68c8ab6522119eab4f | [
"Apache-2.0"
] | null | null | null | car_composite/utils.py | kerry-t-johnson/car-composite | ba39bed0883517a3884a1e68c8ab6522119eab4f | [
"Apache-2.0"
] | null | null | null | car_composite/utils.py | kerry-t-johnson/car-composite | ba39bed0883517a3884a1e68c8ab6522119eab4f | [
"Apache-2.0"
] | null | null | null | '''
Copyright 2021 Kerry Johnson
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import subprocess
def ip(remove='10.'):
    all_ips = subprocess.check_output(['hostname', '-I'])
    all_ips = [addr.decode('utf-8') for addr in all_ips.split()]
if remove is not None:
# Remove (e.g.) Docker/Resin IPs
        all_ips = [addr for addr in all_ips if not addr.startswith(remove)]
    return all_ips[0] if len(all_ips) > 0 else None
| 36.653846 | 75 | 0.698846 | 150 | 953 | 4.386667 | 0.593333 | 0.06383 | 0.039514 | 0.048632 | 0.039514 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01745 | 0.218258 | 953 | 26 | 76 | 36.653846 | 0.865772 | 0.613851 | 0 | 0 | 0 | 0 | 0.054381 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
1e27f18ab5ef993c02caa01656b29f1ecbe96024 | 11,458 | py | Python | yegsecbot.py | jershmagersh/yegsecbot | 147c1ad4e0bbf7688f55ee0075b9c03fddf4223c | [
"Apache-2.0"
] | 1 | 2019-02-20T23:24:02.000Z | 2019-02-20T23:24:02.000Z | yegsecbot.py | jershmagersh/yegsecbot | 147c1ad4e0bbf7688f55ee0075b9c03fddf4223c | [
"Apache-2.0"
] | null | null | null | yegsecbot.py | jershmagersh/yegsecbot | 147c1ad4e0bbf7688f55ee0075b9c03fddf4223c | [
"Apache-2.0"
] | 3 | 2019-02-20T01:43:10.000Z | 2019-06-19T00:08:29.000Z | #!/usr/bin/python
"""
I wrote this in a couple afternoons while watching Netflix, so it can probably be better.
-jmag
"""
from slackclient import SlackClient
import sys, json, sqlite3, time, re, datetime
MENTION_REGEX = "^<@(|[WU][A-Z0-9]+?)>(.*)"
class ConfigException(Exception):
pass
class ConnectionException(Exception):
pass
class YegsecDatabase:
def __init__(self, db_path):
self.path = db_path
self.conn = sqlite3.connect(db_path)
self.cursor = self.conn.cursor()
def confirm_user(self, user, month, year, pref):
self.cursor.execute("SELECT * FROM users WHERE user_id = ?", (user,))
result = self.cursor.fetchone()
if not result:
self.cursor.execute("INSERT INTO users (user_id) VALUES (?)", (user,))
self.cursor.execute("SELECT meetup_id FROM meetups WHERE month_id = ? and year_id = ?", (month, year))
meeting_id_a = self.cursor.fetchone()
if meeting_id_a:
meeting_id = meeting_id_a[0]
            veg_bool = 1 if pref else 0
self.cursor.execute("SELECT * FROM confirmations WHERE meetup_id = ? AND user_id = ?", (meeting_id, user))
if(self.cursor.fetchone()):
return False
else:
self.cursor.execute("INSERT INTO confirmations (user_id, meetup_id, pizza_pref) VALUES (?, ?, ?)", (user, meeting_id, veg_bool))
self.yegsec_commit()
return True
else:
return False
def remove_confirm_user(self, user, month, year):
self.cursor.execute("SELECT * FROM users WHERE user_id = ?", (user,))
result = self.cursor.fetchone()
#A user cannot remove a confirmation if they don't exist in the database already.
if not result:
return False
else:
self.cursor.execute("SELECT meetup_id FROM meetups WHERE month_id = ? and year_id = ?", (month, year))
meeting_id_a = self.cursor.fetchone()
if meeting_id_a:
meeting_id = meeting_id_a[0]
self.cursor.execute("DELETE FROM confirmations WHERE user_id = ? AND meetup_id = ?", (user, meeting_id))
                self.yegsec_commit()
                return True
else:
return False
def yegsec_commit(self):
self.conn.commit()
#self.conn.close()
def get_summary(self):
result = self.cursor.execute("SELECT meetup_id FROM meetups")
results = {}
meetup_ids = []
meetup_id = self.cursor.fetchone()
while(meetup_id):
meetup_ids.append(meetup_id)
meetup_id = self.cursor.fetchone()
for meetup_id_a in meetup_ids:
meetup_id = meetup_id_a[0]
self.cursor.execute("SELECT count(*) FROM confirmations WHERE meetup_id = ? AND pizza_pref = 1", (meetup_id,))
veg_count = self.cursor.fetchone()
self.cursor.execute("SELECT count(*) FROM confirmations WHERE meetup_id = ? AND pizza_pref = 0", (meetup_id,))
other_count = self.cursor.fetchone()
self.cursor.execute("SELECT day_id, month_id, year_id FROM meetups WHERE meetup_id = ?", (meetup_id,))
date_result = self.cursor.fetchone()
results[meetup_id] = { "veg": veg_count[0],
"other": other_count[0],
"day": date_result[0],
"month": date_result[1],
"year": date_result[2]
}
return results
class YegsecBot:
def __init__(self, config):
db, token, rtm_delay = self.read_config(config)
self.db = YegsecDatabase(db)
self.bot = SlackClient(token)
self.rtm_delay = rtm_delay
if self.bot.rtm_connect(with_team_state=False):
self.bot_id = self.bot.api_call("auth.test")["user_id"]
try:
self.start()
except KeyboardInterrupt:
self.db.yegsec_commit()
else:
raise ConnectionException("Connection to Slack failed.")
def read_config(self, config_path):
f = open(config_path)
try:
frj = json.loads(f.read())
except:
raise ConfigException("Unable to read provided configuration: {}".format(config_path))
return frj['database'], frj['token'], frj['rtm_delay']
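    # An example config.json shape this expects (values are placeholders):
    # {
    #   "database": "yegsecbot.db",
    #   "token": "xoxb-...",
    #   "rtm_delay": 1
    # }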
#Source: https://www.fullstackpython.com/blog/build-first-slack-bot-python.html
def parse_bot_commands(self, slack_events):
"""
Parses a list of events coming from the Slack RTM API to find bot commands.
        If a bot command is found, this function returns a tuple of command, channel, and user.
        If it's not found, this function returns None, None, None.
"""
for event in slack_events:
if event["type"] == "message" and not "subtype" in event:
user_id, message = self.parse_direct_mention(event["text"])
if user_id == self.bot_id:
#print(event)
return message, event["channel"], event["user"]
return None, None, None
def parse_direct_mention(self, message_text):
"""
Finds a direct mention (a mention that is at the beginning) in message text
and returns the user ID which was mentioned. If there is no direct mention, returns None
"""
matches = re.search(MENTION_REGEX, message_text)
# the first group contains the username, the second group contains the remaining message
return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
    def get_next_meet(self):
        # Stub: hard-coded (month, year) of the next meetup.
        return 3, 2019
def add_user(self, command, channel, user):
"""
Main function of the bot. We use this command for adding user numbers and their preferred vegetarian options
to the database.
"""
rs = re.findall("add me for ([0-9]{1,2}), ?([0-9]{4}) (vegetarian|any)", command, re.IGNORECASE)
rsm = re.findall("add me next (vegetarian|any)", command, re.IGNORECASE)
if(len(rs) == 1 or len(rsm) == 1):
try:
if len(rs) == 1:
month = int(rs[0][0])
year = int(rs[0][1])
elif len(rsm) == 1:
month, year = self.get_next_meet()
rs = rsm
month_str = datetime.datetime(year, month, 1).strftime("%B")
                # "add me for" yields 3-tuples; "add me next" has a single
                # capture group, so findall returns plain strings.
                pref = rs[0][2] if isinstance(rs[0], tuple) else rs[0]
                if "VEG" in pref.upper():
                    vegetarian = True
                    resp_veg = "vegetarian"
                else:
                    vegetarian = False
                    resp_veg = "non-vegetarian"
result = self.db.confirm_user(user, month, year, vegetarian)
if result:
return(":pizza::pizza::pizza:Thank you <@{}>, I will add you to the pizza numbers for the month {} for the year {} as a {} option:pizza::pizza::pizza:".format(user, month_str, year, resp_veg))
else:
return(":pizza::pizza::pizza:Sorry, <@{}> it looks like you've already been added for that month.:pizza::pizza::pizza:".format(user))
except:
return("Sorry, I tried to add you with that command, but I couldn't quite understand it. Please try again.")
def remove_user(self, command, channel, user):
"""
Another main function of the bot. We use this command for removing user numbers and their preferred vegetarian options
from the database.
"""
rs = re.findall("remove me for ([0-9]{1,2}), ?([0-9]{4})", command, re.IGNORECASE)
rsm = re.findall("remove me next", command, re.IGNORECASE)
if(len(rs) == 1 or len(rsm) == 1):
try:
if len(rs) == 1:
month = int(rs[0][0])
year = int(rs[0][1])
elif len(rsm) == 1:
month, year = self.get_next_meet()
rs = rsm
month_str = datetime.datetime(year, month, 1).strftime("%B")
self.db.remove_confirm_user(user, month, year)
return(":pizza::pizza::pizza:Thank you <@{}>, I will remove you to the pizza numbers for the month {} for the year {}:pizza::pizza::pizza:".format(user, month_str, year))
except:
return("Sorry, I tried to remove you with that command, but I couldn't quite understand it. Please try again.")
def get_summary(self):
result = self.db.get_summary()
response = ""
for meetup_id, meetup in result.items():
total_pizza_count = meetup['other'] + meetup['veg']
response += "*Summary*\nMeetup Date: `{}/{}/{}`\nTotal Pizza Count: `{}`\nNon-Vegetarian: `{}`\nVegetarian: `{}`\n\n".format(meetup['day'], meetup['month'], meetup['year'], total_pizza_count, meetup['other'], meetup['veg'])
return response
def get_help(self):
return "You can send me the following commands:\n\
To get added to the next meetup's pizza count do: `add me next [any|vegetarian]`\n\
To get added to a future meetup's pizza count do: `add me for [month],[year]`\n\
To get removed from the next meetup's pizza count do: `remove me next`\n\
To be removed from a future meetup's pizza count do: `remove me [month],[year]`"
def handle_command(self, command, channel, user):
"""
Executes bot command if the command is known
"""
print("Received command: {}".format(command))
# Default response is help text for the user
default_response = "Not sure what you mean. Try `{}`".format("help")
# Finds and executes the given command, filling in response
response = None
print("Command received: {}".format(command))
if command.startswith("add me for") or command.startswith("add me next"):
response = self.add_user(command, channel, user)
if command.startswith("remove me for") or command.startswith("remove me next"):
response = self.remove_user(command, channel, user)
if command.startswith("summary"):
response = self.get_summary()
if command.startswith("help"):
response = self.get_help()
# Sends the response back to the channel
# That only requested user can see
self.bot.api_call(
"chat.postEphemeral",
channel=channel,
user=user,
text=response or default_response,
as_user=True,
)
def start(self):
"""
self.bot.api_call(
"chat.postMessage",
channel="general",
text="I'm alive!",
as_user=True
)
"""
while True:
command, channel, user = self.parse_bot_commands(self.bot.rtm_read())
if command:
self.handle_command(command, channel, user)
time.sleep(self.rtm_delay)
if __name__ == "__main__":
bot = YegsecBot("config.json")
| 42.913858 | 239 | 0.56598 | 1,399 | 11,458 | 4.514653 | 0.197284 | 0.036415 | 0.032299 | 0.032774 | 0.399145 | 0.332964 | 0.282932 | 0.224034 | 0.183977 | 0.180177 | 0 | 0.007467 | 0.322046 | 11,458 | 266 | 240 | 43.075188 | 0.805613 | 0.118258 | 0 | 0.328125 | 0 | 0.052083 | 0.194705 | 0.02057 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.010417 | 0.010417 | 0.010417 | 0.182292 | 0.010417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e29069e2cd68e343ce7e90b98d21551b24dc656 | 5,072 | py | Python | workon/contrib/tracker/utils.py | devittek/django-workon | c39ddecac2649406a7a58922646478c5615d4cfd | [
"BSD-3-Clause"
] | 1 | 2018-01-19T16:08:54.000Z | 2018-01-19T16:08:54.000Z | workon/contrib/tracker/utils.py | devittek/django-workon | c39ddecac2649406a7a58922646478c5615d4cfd | [
"BSD-3-Clause"
] | 1 | 2020-07-06T08:35:18.000Z | 2020-07-06T08:35:18.000Z | workon/contrib/tracker/utils.py | devittek/django-workon | c39ddecac2649406a7a58922646478c5615d4cfd | [
"BSD-3-Clause"
] | 4 | 2020-04-08T06:14:46.000Z | 2020-12-11T14:28:06.000Z | from django.conf import settings
__all__ = ['track']
if 'workon.contrib.tracker' not in settings.INSTALLED_APPS:
def track(*args, **kwargs):
raise Exception('workon.contrib.tracker missing from settings.INSTALLED_APPS')
else:
import time
import datetime
from operator import itemgetter
def track(*fields, save=True):
"""
Tracks property changes on a model instance.
The changed list of properties is refreshed on model initialization
and save.
>>> @track('name')
>>> class Post(models.Model):
>>> name = models.CharField(...)
>>>
>>> post.name = "new name"
>>> post.save()
>>> post.track_changes()
>>>
>>> post.get_tracked_events()
"""
UNSAVED = dict()
from django.db.models.signals import post_init, post_save, m2m_changed
from django.db.models import Model, ManyToManyField
from django.utils import timezone
from django.contrib.contenttypes.fields import GenericRelation
from workon.contrib.tracker.models import TrackEvent
def _store(self):
"Updates a local copy of attributes values"
if self.id:
self.__initial_data = dict((f, getattr(self, f)) for f in fields)
else:
self.__initial_data = UNSAVED
def inner(cls):
def get_tracked_events(self, group_by=None, **kwargs):
# content_type = ContentType.objects.get_for_model(self)
# queryset = TrackEvent.objects.filter(content_type__pk=content_type.id, object_id=self.pk).filter(**kwargs)
queryset = self.generic_tracked_events.filter(**kwargs)
if group_by == 'timestamp':
events = {}
for event in queryset.order_by('-tracked_at'):
events.setdefault(event.timestamp, []).append(event)
return events
else:
return queryset.order_by('-tracked_at')
cls.get_tracked_events = get_tracked_events
cls.add_to_class('generic_tracked_events', GenericRelation(
TrackEvent,
content_type_field='object_content_type',
object_id_field='object_id',
))
cls.__initial_data = {}
def track_changes(self, user=None):
ts = time.time()
changes = self.get_tracked_changes()
if changes:
if save:
for field, change in changes.items():
change.user = user
change.save()
_store(self)
return changes
cls.track_changes = track_changes
def get_tracked_changes(self):
return getattr(self, '__tracked_changes', dict())
cls.get_tracked_changes = get_tracked_changes
def _post_init(sender, instance, **kwargs):
_store(instance)
post_init.connect(_post_init, sender=cls, weak=False)
def _post_save(sender, instance, **kwargs):
ts = time.time()
changes = instance.get_tracked_changes()
for field_name, old_value in getattr(instance, '__initial_data', dict()).items():
if old_value != getattr(instance, field_name):
changes[field_name] = TrackEvent(
object=instance,
field_name=field_name,
action='field_post_save',
old_value=old_value,
new_value=getattr(instance, field_name)
)
setattr(instance, '__tracked_changes', changes)
post_save.connect(_post_save, sender=cls, weak=False)
for f in fields:
if isinstance(cls._meta.get_field(f), ManyToManyField):
def get_m2m_changed(field_name):
def _m2m_changed(sender, instance, action, **kwargs):
ts = time.time()
changes = instance.get_tracked_changes()
if action.startswith('post_'):
changes[field_name] = TrackEvent(
object=instance,
field_name=field_name,
action=f'm2m_{action}',
                                    m2m_pk_set=list(kwargs.get('pk_set') or []),  # pk_set is None for post_clear
m2m_model=kwargs.get('model')
)
setattr(instance, '__tracked_changes', changes)
return _m2m_changed
m2m_changed.connect(get_m2m_changed(f), sender=getattr(cls, f).through, weak=False)
return cls
return inner | 38.424242 | 124 | 0.522082 | 490 | 5,072 | 5.142857 | 0.244898 | 0.039683 | 0.040476 | 0.020238 | 0.159524 | 0.088889 | 0.088889 | 0.088889 | 0.088889 | 0.050794 | 0 | 0.002917 | 0.391759 | 5,072 | 132 | 125 | 38.424242 | 0.813938 | 0.102918 | 0 | 0.179775 | 0 | 0 | 0.070789 | 0.019937 | 0 | 0 | 0 | 0 | 0 | 1 | 0.123596 | false | 0 | 0.101124 | 0.011236 | 0.303371 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e2b1e90e65d462ea33a73daa9aaad6d0c9d572b | 46,103 | py | Python | python/sandbox/core.py | geometer/sandbox | 373ec96e69df76744a19b51f7caa865cbc6b58cd | [
"Apache-2.0"
] | 6 | 2020-04-19T11:26:18.000Z | 2021-06-21T18:42:51.000Z | python/sandbox/core.py | geometer/sandbox | 373ec96e69df76744a19b51f7caa865cbc6b58cd | [
"Apache-2.0"
] | 31 | 2020-04-21T17:24:39.000Z | 2020-08-27T15:59:12.000Z | python/sandbox/core.py | geometer/sandbox | 373ec96e69df76744a19b51f7caa865cbc6b58cd | [
"Apache-2.0"
] | null | null | null | """
Core module.
Normally, do not add new construction methods here, do this in scene.py instead.
"""
from enum import Enum, auto, unique
import itertools
import re
import sympy as sp
from typing import List
from .figure import Figure
from .reason import Reason
from .util import LazyComment, Comment, divide
class CoreScene:
layers = ('user', 'auxiliary', 'invisible')
@staticmethod
def layers_by(max_layer):
return CoreScene.layers[0:CoreScene.layers.index(max_layer) + 1]
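    # e.g. CoreScene.layers_by('auxiliary') == ('user', 'auxiliary')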
class Object:
"""
Common ancestor for all geometric objects like point, line, circle
"""
def __init__(self, scene, **kwargs):
assert isinstance(scene, CoreScene)
label = kwargs.get('label')
if label:
assert scene.get(label) is None, 'Object with label `%s` already exists' % label
else:
pattern = self.__class__.prefix + '%d'
for index in itertools.count():
label = pattern % index
if scene.get(label) is None:
self.label = label
self.auto_label = True
break
self.layer = kwargs.get('layer', 'user')
assert self.layer in CoreScene.layers
self.extra_labels = set()
self.scene = scene
self.__dict__.update(kwargs)
scene.add(self)
def with_extra_args(self, **kwargs):
if self.scene.is_frozen:
return self
layer = kwargs.get('layer', 'user')
if self.layer not in CoreScene.layers_by(layer):
self.layer = layer
for key in kwargs:
if key == 'layer':
continue
value = kwargs[key]
if key == 'label' and value and value != self.label:
if hasattr(self, 'auto_label'):
self.label = value
delattr(self, 'auto_label')
else:
self.extra_labels.add(value)
elif not hasattr(self, key):
self.__dict__[key] = value
return self
@property
def name(self):
return self.label
def __str__(self):
return self.name
@property
def description(self):
dct = {}
for key in ('layer', 'extra_labels', 'all_points', 'comment'):
value = self.__dict__.get(key)
if value is None:
continue
if isinstance(value, Enum):
dct[key] = value.name
elif isinstance(value, CoreScene.Object):
dct[key] = value.label
elif isinstance(value, (list, tuple, set)):
if value:
dct[key] = [elt.label if isinstance(elt, CoreScene.Object) else str(elt) for elt in value]
else:
dct[key] = str(value)
if self.name == self.label:
return '%s %s %s' % (self.__class__.__name__, self, dct)
else:
return '%s %s %s %s' % (self.__class__.__name__, self.label, self.name, dct)
class Point(Object, Figure):
prefix = 'Pt_'
class Origin(Enum):
free = auto()
translated = auto()
perp = auto()
line = auto()
circle = auto()
line_x_line = auto()
circle_x_line = auto()
circle_x_circle = auto()
def __init__(self, scene, origin, **kwargs):
assert isinstance(origin, CoreScene.Point.Origin), 'origin must be a Point.Origin, not %s' % type(origin)
CoreScene.Object.__init__(self, scene, origin=origin, **kwargs)
self.__vectors = {}
self.__perpendiculars = {}
def translated_point(self, vector, coef=1, **kwargs):
self.scene.assert_vector(vector)
if coef == 0:
return self
if coef == 1 and vector.start == self:
return vector.end
if coef == -1 and vector.end == self:
return vector.start
for pt in self.scene.points():
if pt.origin == CoreScene.Point.Origin.translated and pt.base == self and pt.delta == vector and pt.coef == coef:
return pt
if 'comment' not in kwargs:
kwargs = dict(kwargs)
if coef == 1:
pattern = 'translation of $%{point:pt}$ by vector $%{vector:vector}$'
else:
pattern = 'translation of $%{point:pt}$ by vector $%{multiplier:coef} %{vector:vector}$'
kwargs['comment'] = Comment(
pattern,
{'pt': self, 'coef': coef, 'vector': vector}
)
new_point = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.translated,
base=self, delta=vector, coef=coef, **kwargs
)
if self in {vector.start, vector.end}:
new_point.collinear_constraint(vector.start, vector.end)
if coef > 0:
self.vector(new_point).parallel_constraint(vector, guaranteed=True)
else:
new_point.vector(self).parallel_constraint(vector, guaranteed=True)
self.segment(new_point).ratio_constraint(vector.as_segment, sp.Abs(coef), guaranteed=True)
return new_point
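        # e.g. (illustrative): A.translated_point(A.vector(B), coef=2) returns a
        # point C with vector AC = 2 * AB; A, B, and C are collinear.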
def symmetric_point(self, centre, **kwargs):
symmetric = CoreScene.Point(
self.scene, CoreScene.Point.Origin.translated,
base=centre, delta=self.vector(centre), coef=1, **kwargs
)
symmetric.collinear_constraint(self, centre, guaranteed=True)
from .property import MiddleOfSegmentProperty
self.scene.add_property(MiddleOfSegmentProperty(centre, self.segment(symmetric)))
return symmetric
def perpendicular_line(self, line, **kwargs):
"""
Constructs a line through the point, perpendicular to the given line.
"""
self.scene.assert_line(line)
existing = self.__perpendiculars.get(line)
if existing:
return existing.with_extra_args(**kwargs)
new_point = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.perp,
point=self, line=line,
layer='invisible'
)
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = Comment(
'perpendicular from $%{point:pt}$ to $%{line:line}$',
{'pt': self, 'line': line}
)
new_line = self.line_through(new_point, **kwargs)
if self not in line:
crossing = new_line.intersection_point(line, layer='auxiliary', comment=Comment(
'foot of the perpendicular from $%{point:pt}$ to $%{line:line}$',
{'pt': self, 'line': line}
))
line.perpendicular_constraint(new_line, guaranteed=True)
self.__perpendiculars[line] = new_line
return new_line
def line_through(self, point, **kwargs):
self.scene.assert_point(point)
assert self != point, 'Cannot create a line by a single point'
self.not_equal_constraint(point)
for existing in self.scene.lines():
if self in existing and point in existing:
return existing.with_extra_args(**kwargs)
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = Comment(
'Line through $%{point:pt0}$ and $%{point:pt1}$',
{'pt0': self, 'pt1': point}
)
line = CoreScene.Line(self.scene, point0=self, point1=point, **kwargs)
if not self.scene.is_frozen:
for cnstr in self.scene.constraints(Constraint.Kind.collinear):
if len([pt for pt in line.all_points if pt in cnstr.params]) == 2:
for pt in cnstr.params:
if pt not in line.all_points:
line.all_points.append(pt)
return line
def circle_through(self, point, **kwargs):
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = Comment(
'Circle with centre $%{point:centre}$ through $%{point:pt}$',
{'centre': self, 'pt': point}
)
return self.circle_with_radius(self.segment(point), **kwargs)
def circle_with_radius(self, radius, **kwargs):
self.scene.assert_segment(radius)
assert radius.points[0] != radius.points[1], 'Cannot create a circle of zero radius'
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = Comment(
'Circle with centre $%{point:centre}$ with radius $%{segment:radius}$',
{'centre': self, 'radius': radius}
)
return CoreScene.Circle(
self.scene, centre=self, radius=radius, **kwargs
)
def vector(self, point):
assert self != point, 'Cannot create vector from a single point'
vec = self.__vectors.get(point)
if vec is None:
vec = CoreScene.Vector(self, point)
self.__vectors[point] = vec
return vec
def segment(self, point):
assert self != point, 'Cannot create segment from a single point'
return self.scene._get_segment(self, point)
def angle(self, point0, point1):
assert point0 != point1, 'Angle endpoints should be different'
return self.scene._get_angle(self.vector(point0), self.vector(point1))
def belongs_to(self, line_or_circle):
self.scene.assert_line_or_circle(line_or_circle)
if not self.scene.is_frozen and self not in line_or_circle.all_points:
line_or_circle.all_points.append(self)
def not_equal_constraint(self, A, **kwargs):
"""
The current point does not coincide with A.
"""
if self.scene.is_frozen:
return
for cnstr in self.scene.constraints(Constraint.Kind.not_equal):
if set(cnstr.params) == {self, A}:
cnstr.update(kwargs)
return
self.scene.constraint(Constraint.Kind.not_equal, self, A, **kwargs)
def not_collinear_constraint(self, A, B, **kwargs):
"""
The current point is not collinear with A and B.
"""
for cnstr in self.scene.constraints(Constraint.Kind.not_collinear):
if set(cnstr.params) == {self, A, B}:
cnstr.update(kwargs)
return
self.scene.constraint(Constraint.Kind.not_collinear, self, A, B, **kwargs)
self.not_equal_constraint(A, guaranteed=True, **kwargs)
self.not_equal_constraint(B, guaranteed=True, **kwargs)
A.not_equal_constraint(B, guaranteed=True, **kwargs)
def collinear_constraint(self, A, B, **kwargs):
"""
The current point is collinear with A and B.
"""
cnstr = self.scene.constraint(Constraint.Kind.collinear, self, A, B, **kwargs)
if not self.scene.is_frozen:
for line in self.scene.lines():
if len([pt for pt in line.all_points if pt in cnstr.params]) == 2:
for pt in cnstr.params:
if pt not in line.all_points:
line.all_points.append(pt)
return cnstr
def distance_constraint(self, A, distance, **kwargs):
"""
Distance to the point A equals to the given distance.
The given distance must be a non-negative number
"""
if isinstance(A, str):
A = self.scene.get(A)
return self.segment(A).length_constraint(distance, **kwargs)
def opposite_side_constraint(self, point, line, **kwargs):
"""
            The current point lies on the opposite side of the line from the given point.
"""
if isinstance(point, CoreScene.Line) and isinstance(line, CoreScene.Point):
point, line = line, point
for cnstr in self.scene.constraints(Constraint.Kind.opposite_side):
if line == cnstr.params[2] and set(cnstr.params[0:2]) == {self, point}:
cnstr.update(kwargs)
return
#self.not_collinear_constraint(line.point0, line.point1, **kwargs)
#point.not_collinear_constraint(line.point0, line.point1, **kwargs)
self.scene.constraint(Constraint.Kind.opposite_side, self, point, line, **kwargs)
def same_side_constraint(self, point, line, **kwargs):
"""
            The point lies on the same side of the line as the given point.
"""
if isinstance(point, CoreScene.Line) and isinstance(line, CoreScene.Point):
point, line = line, point
for cnstr in self.scene.constraints(Constraint.Kind.same_side):
if line == cnstr.params[2] and set(cnstr.params[0:2]) == {self, point}:
cnstr.update(kwargs)
return
self.not_collinear_constraint(line.point0, line.point1, **kwargs)
point.not_collinear_constraint(line.point0, line.point1, **kwargs)
self.scene.constraint(Constraint.Kind.same_side, self, point, line, **kwargs)
def same_direction_constraint(self, A, B, **kwargs):
"""
Vectors (self, A) and (self, B) have the same direction
"""
for cnstr in self.scene.constraints(Constraint.Kind.same_direction):
if self == cnstr.params[0] and set(cnstr.params[1:3]) == {A, B}:
cnstr.update(kwargs)
return
self.not_equal_constraint(A)
self.not_equal_constraint(B)
A.belongs_to(self.line_through(B, layer='auxiliary'))
self.scene.constraint(Constraint.Kind.same_direction, self, A, B, **kwargs)
def inside_constraint(self, obj, **kwargs):
"""
The point is inside the object (angle or segment)
"""
if isinstance(obj, CoreScene.Segment):
self.collinear_constraint(*obj.points, **kwargs)
self.scene.constraint(Constraint.Kind.inside_segment, self, obj, **kwargs)
elif isinstance(obj, CoreScene.Angle) and obj.vertex:
self.scene.constraint(Constraint.Kind.inside_angle, self, obj, **kwargs)
else:
assert False, 'Cannot declare point lying inside %s' % obj
def inside_triangle_constraint(self, triangle, **kwargs):
"""
The point is inside the triangle
"""
triangle.points[0].not_collinear_constraint(*triangle.points[1:])
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = Comment(
'point $%{point:pt}$ is inside $%{triangle:triangle}$',
{'pt': self, 'triangle': triangle}
)
for angle in triangle.angles:
self.inside_constraint(angle, **kwargs)
from .property import SameOrOppositeSideProperty
for vertex, side in zip(triangle.points, triangle.sides):
self.scene.add_property(SameOrOppositeSideProperty(side, vertex, self, True))
class Line(Object):
prefix = 'Ln_'
def __init__(self, scene, **kwargs):
CoreScene.Object.__init__(self, scene, **kwargs)
self.all_points = [self.point0, self.point1]
@property
def name(self):
if hasattr(self, 'auto_label') and self.auto_label:
for points in itertools.combinations(self.all_points, 2):
if points[0].layer == 'user' and points[1].layer == 'user':
return '(%s %s)' % (points[0].name, points[1].name)
return super().name
def free_point(self, **kwargs):
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = Comment('point on line $%{line:line}$', {'line': self})
point = CoreScene.Point(self.scene, CoreScene.Point.Origin.line, line=self, **kwargs)
point.belongs_to(self)
return point
def intersection_point(self, obj, **kwargs):
"""
            Creates an intersection point of the line and the given object (line or circle).
            Requires a constraint for determinate placement if the object is a circle.
"""
self.scene.assert_line_or_circle(obj)
assert self != obj, 'The line does not cross itself'
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = LazyComment('crossing point of %s and %s', self.label, obj.label)
if isinstance(obj, CoreScene.Circle):
crossing = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.circle_x_line,
circle=obj, line=self, **kwargs
)
else:
existing = next((pt for pt in self.all_points if pt in obj), None)
if existing:
return existing.with_extra_args(**kwargs)
crossing = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.line_x_line,
line0=self, line1=obj, **kwargs
)
crossing.belongs_to(self)
crossing.belongs_to(obj)
return crossing
def perpendicular_constraint(self, other, **kwargs):
"""
self ⟂ other
"""
self.point0.segment(self.point1).perpendicular_constraint(other.point0.segment(other.point1), **kwargs)
def __contains__(self, obj):
if obj is None:
return False
if isinstance(obj, CoreScene.Point):
return obj in self.all_points
if isinstance(obj, CoreScene.Vector):
return obj.start in self.all_points and obj.end in self.all_points
assert False, 'Operator not defined for %s and Line' % type(obj)
class Circle(Object):
prefix = 'Circ_'
def __init__(self, scene, **kwargs):
CoreScene.Object.__init__(self, scene, **kwargs)
self.all_points = []
if not scene.is_frozen:
if self.centre == self.radius.points[0]:
self.all_points.append(self.radius.points[1])
elif self.centre == self.radius.points[1]:
self.all_points.append(self.radius.points[0])
def centre_point(self, **kwargs):
return self.centre.with_extra_args(**kwargs)
def free_point(self, **kwargs):
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = LazyComment('point on circle %s', self.label)
point = CoreScene.Point(self.scene, CoreScene.Point.Origin.circle, circle=self, **kwargs)
point.belongs_to(self)
return point
def intersection_point(self, obj, **kwargs):
"""
Creates an intersection point of the circle and given object (line or circle).
Requires a constraint for determinate placement
"""
self.scene.assert_line_or_circle(obj)
assert self != obj, 'The circle does not cross itself'
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = LazyComment('crossing point of %s and %s', self.label, obj.label)
if isinstance(obj, CoreScene.Circle):
crossing = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.circle_x_circle,
circle0=self, circle1=obj, **kwargs
)
else:
crossing = CoreScene.Point(
self.scene,
CoreScene.Point.Origin.circle_x_line,
circle=self, line=obj, **kwargs
)
crossing.belongs_to(self)
crossing.belongs_to(obj)
return crossing
def __contains__(self, obj):
if obj is None:
return False
if isinstance(obj, CoreScene.Point):
return obj in self.all_points
assert False, 'Operator not defined for %s and Circle' % type(obj)
class Vector(Figure):
def __init__(self, start, end):
assert isinstance(start, CoreScene.Point)
assert isinstance(end, CoreScene.Point)
assert start.scene == end.scene
self.start = start
self.end = end
self.points = (start, end)
self.__segment = None
@property
def as_segment(self):
if self.__segment is None:
self.__segment = self.start.segment(self.end)
return self.__segment
def angle(self, other):
angle = self.scene._get_angle(self, other)
if not self.scene.is_frozen:
for vec in (self, other):
for cnstr in vec.scene.constraints(Constraint.Kind.not_equal):
if set(cnstr.params) == set(vec.points):
break
else:
vec.as_segment.non_zero_length_constraint(comment=Comment(
'$%{vector:side}$ is side of $%{angle:angle}$',
{'side': vec, 'angle': angle}
))
return angle
@property
def scene(self):
return self.start.scene
@property
def reversed(self):
return self.end.vector(self.start)
def parallel_constraint(self, vector, **kwargs):
"""
Self and vector have the same direction.
            This constraint is also fulfilled if at least one of the vectors has zero length.
"""
assert isinstance(vector, CoreScene.Vector)
assert self.scene == vector.scene
return self.scene.constraint(Constraint.Kind.parallel_vectors, self, vector, **kwargs)
def __str__(self):
return '%s %s' % (self.start, self.end)
def _get_segment(self, point0, point1):
assert isinstance(point0, CoreScene.Point)
assert isinstance(point1, CoreScene.Point)
assert point0.scene == self
assert point1.scene == self
key = frozenset([point0, point1])
#key = (point0, point1)
segment = self.__segments.get(key)
if segment is None:
segment = CoreScene.Segment(point0, point1)
self.__segments[key] = segment
return segment
class Segment(Figure):
def __init__(self, pt0, pt1):
self.points = (pt0, pt1)
self.point_set = frozenset(self.points)
self.__middle_point = None
@property
def scene(self):
return self.points[0].scene
def middle_point(self, **kwargs):
"""
Constructs middle point of the segment
"""
if self.__middle_point:
return self.__middle_point.with_extra_args(**kwargs)
delta = self.points[0].vector(self.points[1])
coef = divide(1, 2)
for pt in self.scene.points():
if pt.origin == CoreScene.Point.Origin.translated:
if pt.base == self.points[0] and pt.delta == delta and pt.coef == coef:
middle = pt
break
if pt.base == self.points[1] and pt.delta == delta.reversed and pt.coef == coef:
middle = pt
break
else:
middle = CoreScene.Point(
self.scene, CoreScene.Point.Origin.translated,
base=self.points[0], delta=delta, coef=coef, **kwargs
)
middle.collinear_constraint(*self.points, guaranteed=True)
from .property import MiddleOfSegmentProperty
self.scene.add_property(MiddleOfSegmentProperty(middle, self))
self.__middle_point = middle
return middle
def free_point(self, **kwargs):
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = Comment('point on segment $%{segment:seg}$', {'seg': self})
point = self.line_through(layer='auxiliary').free_point(**kwargs)
point.inside_constraint(self)
return point
def line_through(self, **kwargs):
return self.points[0].line_through(self.points[1], **kwargs)
def perpendicular_bisector_line(self, **kwargs):
"""
Perpendicular bisector
"""
middle = self.middle_point(layer='auxiliary')
line = self.line_through(layer='auxiliary')
if kwargs.get('comment') is None:
kwargs = dict(kwargs)
kwargs['comment'] = Comment(
'perpendicular bisector of $%{segment:seg}$',
{'seg': self}
)
bisector = middle.perpendicular_line(line, **kwargs)
comment=Comment(
'$%{line:bisector}$ is the perpendicular bisector of $%{segment:seg}$',
{'bisector': bisector, 'seg': self}
)
bisector.perpendicular_constraint(line, comment=comment)
return bisector
def perpendicular_constraint(self, other, **kwargs):
"""
self ⟂ other
"""
for cnstr in self.scene.constraints(Constraint.Kind.perpendicular):
if set(cnstr.params) == {self, other}:
cnstr.update(kwargs)
return
self.scene.constraint(Constraint.Kind.perpendicular, self, other, **kwargs)
def ratio_constraint(self, segment, coef, **kwargs):
"""
|self| == |segment| * coef
coef is a non-zero number
"""
assert isinstance(segment, CoreScene.Segment)
assert self.scene == segment.scene
assert coef != 0
for cnstr in self.scene.constraints(Constraint.Kind.length_ratio):
if set(cnstr.params) == {self, segment, coef}:
cnstr.update(kwargs)
return
comment = kwargs.get('comment')
if not comment:
kwargs = dict(kwargs)
if coef == 1:
pattern = '$|%{segment:seg0}| = |%{segment:seg1}|$'
else:
pattern = '$|%{segment:seg0}| = %{multiplier:coef} |%{segment:seg1}|$'
kwargs['comment'] = Comment(
pattern, {'seg0': self, 'seg1': segment, 'coef': coef}
)
return self.scene.constraint(Constraint.Kind.length_ratio, self, segment, coef, **kwargs)
def congruent_constraint(self, segment, **kwargs):
"""
            |self| == |segment|
"""
self.ratio_constraint(segment, 1, **kwargs)
def non_zero_length_constraint(self, **kwargs):
"""
|self| > 0
"""
self.points[0].not_equal_constraint(self.points[1], **kwargs)
def length_constraint(self, length, **kwargs):
"""
|self| == length
"""
if length > 0:
self.non_zero_length_constraint(**kwargs)
#TODO: equal_constraint otherwise?
self.scene.constraint(Constraint.Kind.distance, self, length, **kwargs)
def __str__(self):
return '%s %s' % self.points
def _get_angle(self, vector0, vector1):
assert isinstance(vector0, CoreScene.Vector)
assert isinstance(vector1, CoreScene.Vector)
assert vector0.scene == self
assert vector1.scene == self
key = frozenset([vector0, vector1])
angle = self.__angles.get(key)
if angle is None:
angle = CoreScene.Angle(vector0, vector1)
if angle.vertex is None and angle.pseudo_vertex:
if angle.vectors[0].end == angle.vectors[1].start:
from .property import SumOfTwoAnglesProperty
#TODO add comment
self.add_property(SumOfTwoAnglesProperty(
angle, angle.vectors[0].reversed.angle(angle.vectors[1]), 180
))
elif angle.vectors[0].start == angle.vectors[1].end:
from .property import SumOfTwoAnglesProperty
#TODO add comment
self.add_property(SumOfTwoAnglesProperty(
angle, angle.vectors[0].angle(angle.vectors[1].reversed), 180
))
elif angle.vectors[0].end == angle.vectors[1].end:
#TODO vertical angles
pass
self.__angles[key] = angle
return angle
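    # A note on the bookkeeping above (editor's paraphrase, not the original
    # author's comment): for vectors v0 = A->B and v1 = B->C we have
    # v0.end == v1.start, so the scene records the supplementary-angle identity
    #     angle(A->B, B->C) + angle(B->A, B->C) = 180
    # i.e. an angle and its adjacent angle on a straight line sum to 180 degrees;
    # the mirror case v0.start == v1.end is handled symmetrically.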
class Angle(Figure):
def __init__(self, vector0, vector1):
assert vector0 != vector1 and vector0 != vector1.reversed
self.vectors = (vector0, vector1)
self.vertex = vector0.start if vector0.start == vector1.start else None
if self.vertex:
self.pseudo_vertex = self.vertex
else:
self.pseudo_vertex = next((p for p in vector0.points if p in vector1.points), None)
self.point_set = frozenset([*vector0.points, *vector1.points])
self.__bisector = None
@property
def scene(self):
return self.vectors[0].scene
@property
def endpoints(self):
assert self.vertex, 'Cannot locate endpoints of angle with no vertex'
return (self.vectors[0].end, self.vectors[1].end)
def bisector_line(self, **kwargs):
assert self.pseudo_vertex, 'Cannot construct bisector of angle %s with no vertex' % self
if self.__bisector:
return self.__bisector.with_extra_args(**kwargs)
v = self.pseudo_vertex
vec0 = self.vectors[0]
e0 = vec0.end if v == vec0.start else v.translated_point(vec0, layer='invisible')
vec1 = self.vectors[1]
e1 = vec1.end if v == vec1.start else v.translated_point(vec1, layer='invisible')
circle = v.circle_through(e0, layer='invisible')
line = v.line_through(e1, layer='invisible')
X = circle.intersection_point(line, layer='invisible')
v.same_direction_constraint(X, e1)
Y = X.translated_point(v.vector(e0), layer='invisible')
self.point_on_bisector_constraint(Y, guaranteed=True)
if kwargs.get('comment') is None:
kwargs = dict(kwargs)
kwargs['comment'] = Comment('bisector of $%{angle:angle}$', {'angle': self})
self.__bisector = v.line_through(Y, **kwargs)
return self.__bisector
def point_on_bisector_constraint(self, point, **kwargs):
bisector = self.pseudo_vertex.vector(point)
if kwargs.get('comment') is None:
kwargs = dict(kwargs)
kwargs['comment'] = Comment(
'$%{ray:bisector}$ is the bisector of $%{angle:angle}$',
{'bisector': bisector, 'angle': self}
)
angle0 = self.vectors[0].angle(bisector)
angle1 = self.vectors[1].angle(bisector)
if self.vertex:
point.inside_constraint(self, **kwargs)
self.ratio_constraint(angle0, 2, **kwargs)
self.ratio_constraint(angle1, 2, **kwargs)
angle0.ratio_constraint(angle1, 1, **kwargs)
def ratio_constraint(self, angle, ratio, **kwargs):
# self = angle * ratio
self.scene.assert_angle(angle)
self.scene.constraint(Constraint.Kind.angles_ratio, self, angle, ratio, **kwargs)
def value_constraint(self, degree, **kwargs):
if kwargs.get('comment') is None:
kwargs = dict(kwargs)
kwargs['comment'] = Comment(
'$%{anglemeasure:angle} = %{degree:degree}$',
{'angle': self, 'degree': degree}
)
self.scene.constraint(Constraint.Kind.angle_value, self, degree, **kwargs)
def is_acute_constraint(self, **kwargs):
self.scene.constraint(Constraint.Kind.acute_angle, self, **kwargs)
def is_obtuse_constraint(self, **kwargs):
self.scene.constraint(Constraint.Kind.obtuse_angle, self, **kwargs)
def is_right_constraint(self, **kwargs):
self.vectors[0].as_segment.line_through().perpendicular_constraint(
self.vectors[1].as_segment.line_through(),
**kwargs
)
def __str__(self):
if self.vertex:
return '\\angle %s %s %s' % (self.vectors[0].end, self.vertex, self.vectors[1].end)
return '\\angle(%s, %s)' % self.vectors
class Triangle(Figure):
def __init__(self, pt0, pt1, pt2):
self.points = (pt0, pt1, pt2)
self.__sides = None
self.__angles = None
self.__permutations = None
@property
def scene(self):
return self.points[0].scene
@property
def is_equilateral(self):
for cnstr in self.scene.constraints(Constraint.Kind.equilateral):
if set(cnstr.params[0].points) == set(self.points):
return True
# TODO: check implicit equilateral constraints, e.g. congruency of sides
return False
@property
def sides(self):
if self.__sides is None:
self.__sides = (
self.points[1].segment(self.points[2]),
self.points[0].segment(self.points[2]),
self.points[0].segment(self.points[1])
)
return self.__sides
@property
def angles(self):
if self.__angles is None:
self.__angles = (
self.points[0].angle(self.points[1], self.points[2]),
self.points[1].angle(self.points[0], self.points[2]),
self.points[2].angle(self.points[0], self.points[1])
)
return self.__angles
@property
def permutations(self):
if self.__permutations is None:
self.__permutations = (
(self.points[0], self.points[1], self.points[2]),
(self.points[0], self.points[2], self.points[1]),
(self.points[1], self.points[0], self.points[2]),
(self.points[1], self.points[2], self.points[0]),
(self.points[2], self.points[0], self.points[1]),
(self.points[2], self.points[1], self.points[0])
)
return self.__permutations
def __str__(self):
return '\\bigtriangleup %s %s %s' % self.points
class Polygon(Figure):
def __init__(self, *points):
self.points = tuple(points)
self.__sides = None
self.__angles = None
def __str__(self):
return ' '.join(['%s'] * len(self.points)) % self.points
@property
def scene(self):
return self.points[0].scene
@property
def sides(self):
if self.__sides is None:
pts = self.points
self.__sides = tuple(p0.segment(p1) for (p0, p1) in zip(pts, pts[1:] + (pts[0], )))
return self.__sides
@property
def angles(self):
if self.__angles is None:
pts = self.points + self.points[:2]
self.__angles = tuple(pts[i + 1].angle(pts[i], pts[i + 2]) for i in range(0, len(self.points)))
return self.__angles
def __init__(self):
self.__objects = []
self.validation_constraints = []
self.adjustment_constraints = []
self.__properties = set()
self.__frozen = False
self.__angles = {} # {vector, vector} => angle
        self.__segments = {} # {point, point} => segment
def add_property(self, prop):
if prop not in self.__properties:
self.__properties.add(prop)
@property
def properties(self):
return list(self.__properties)
def constraint(self, kind, *args, **kwargs):
cns = Constraint(kind, self, *args, **kwargs)
if not self.__frozen:
if kind.stage == Stage.validation:
self.validation_constraints.append(cns)
else:
self.adjustment_constraints.append(cns)
return cns
def equilateral_constraint(self, triangle, **kwargs):
if 'comment' not in kwargs:
kwargs = dict(kwargs)
kwargs['comment'] = Comment(
'$%{triangle:equilateral}$ is equilateral',
{'equilateral': triangle}
)
self.constraint(Constraint.Kind.equilateral, triangle, **kwargs)
from .property import EquilateralTriangleProperty
self.add_property(EquilateralTriangleProperty(triangle))
def quadrilateral_constraint(self, A, B, C, D, **kwargs):
"""
ABDC is a quadrilateral.
I.e., the polygonal chain ABCD does not cross itself and contains no 180º angles.
"""
self.constraint(Constraint.Kind.quadrilateral, A, B, C, D, **kwargs)
def convex_polygon_constraint(self, *points, **kwargs):
"""
*points (in given order) is a convex polygon.
"""
assert len(points) > 3
self.constraint(Constraint.Kind.convex_polygon, points, **kwargs)
def points(self, max_layer='invisible'):
return [p for p in self.__objects if isinstance(p, CoreScene.Point) and p.layer in CoreScene.layers_by(max_layer)]
def lines(self, max_layer='invisible'):
return [l for l in self.__objects if isinstance(l, CoreScene.Line) and l.layer in CoreScene.layers_by(max_layer)]
def circles(self, max_layer='invisible'):
return [c for c in self.__objects if isinstance(c, CoreScene.Circle) and c.layer in CoreScene.layers_by(max_layer)]
def constraints(self, kind):
if kind.stage == Stage.validation:
return [cnstr for cnstr in self.validation_constraints if cnstr.kind == kind]
else:
return [cnstr for cnstr in self.adjustment_constraints if cnstr.kind == kind]
def assert_type(self, obj, *args):
assert isinstance(obj, args), 'Unexpected type %s' % type(obj)
assert obj.scene == self
def assert_point(self, obj):
self.assert_type(obj, CoreScene.Point)
def assert_line(self, obj):
self.assert_type(obj, CoreScene.Line)
def assert_line_or_circle(self, obj):
self.assert_type(obj, CoreScene.Line, CoreScene.Circle)
def assert_vector(self, obj):
self.assert_type(obj, CoreScene.Vector)
def assert_segment(self, obj):
self.assert_type(obj, CoreScene.Segment)
def assert_angle(self, obj):
self.assert_type(obj, CoreScene.Angle)
def free_point(self, **kwargs):
return CoreScene.Point(self, origin=CoreScene.Point.Origin.free, **kwargs)
def existing_line(self, point0, point1):
for cnstr in self.constraints(Constraint.Kind.not_equal):
if {point0, point1} == {*cnstr.params}:
break
else:
return None
for line in self.lines():
if point0 in line and point1 in line:
return line
return None
def add(self, obj: Object):
if not self.__frozen:
self.__objects.append(obj)
def get(self, label: str):
for obj in self.__objects:
if obj.label == label or label in obj.extra_labels:
return obj
return None
def freeze(self):
self.__frozen = True
def unfreeze(self):
self.__frozen = False
@property
def is_frozen(self):
return self.__frozen
def dump(self, include_constraints=False, max_layer='auxiliary'):
print('Objects:')
print('\n'.join(['\t' + obj.description for obj in self.__objects if obj.layer in CoreScene.layers_by(max_layer)]))
counts = [len([o for o in self.__objects if o.layer == layer]) for layer in ('user', 'auxiliary', 'invisible')]
print('Total: %s objects (+ %s auxiliary, %s invisible)' % tuple(counts))
if include_constraints:
if self.validation_constraints:
print('\nValidation constraints:')
print('\n'.join(['\t' + str(cnstr) for cnstr in self.validation_constraints]))
if self.adjustment_constraints:
print('\nAdjustment constraints:')
print('\n'.join(['\t' + str(cnstr) for cnstr in self.adjustment_constraints]))
class Stage(Enum):
validation = auto()
adjustment = auto()
class Constraint:
@unique
class Kind(Enum):
not_equal = ('not_equal', Stage.validation, CoreScene.Point, CoreScene.Point)
not_collinear = ('not_collinear', Stage.validation, CoreScene.Point, CoreScene.Point, CoreScene.Point)
collinear = ('collinear', Stage.adjustment, CoreScene.Point, CoreScene.Point, CoreScene.Point)
opposite_side = ('opposite_side', Stage.validation, CoreScene.Point, CoreScene.Point, CoreScene.Line)
same_side = ('same_side', Stage.validation, CoreScene.Point, CoreScene.Point, CoreScene.Line)
same_direction = ('same_direction', Stage.validation, CoreScene.Point, CoreScene.Point, CoreScene.Point)
inside_segment = ('inside_segment', Stage.validation, CoreScene.Point, CoreScene.Segment)
inside_angle = ('inside_angle', Stage.validation, CoreScene.Point, CoreScene.Angle)
quadrilateral = ('quadrilateral', Stage.validation, CoreScene.Point, CoreScene.Point, CoreScene.Point, CoreScene.Point)
equilateral = ('equilateral', Stage.adjustment, CoreScene.Triangle)
convex_polygon = ('convex_polygon', Stage.validation, List[CoreScene.Point])
        distance = ('distance', Stage.adjustment, CoreScene.Vector, int) # declared as Vector, but Segment.length_constraint passes a Segment
length_ratio = ('length_ratio', Stage.adjustment, CoreScene.Segment, CoreScene.Segment, int)
parallel_vectors = ('parallel_vectors', Stage.adjustment, CoreScene.Vector, CoreScene.Vector)
angles_ratio = ('angles_ratio', Stage.adjustment, CoreScene.Angle, CoreScene.Angle, int)
perpendicular = ('perpendicular', Stage.adjustment, CoreScene.Segment, CoreScene.Segment)
acute_angle = ('acute_angle', Stage.validation, CoreScene.Angle)
obtuse_angle = ('obtuse_angle', Stage.validation, CoreScene.Angle)
angle_value = ('angle_value', Stage.adjustment, CoreScene.Angle, int)
def __init__(self, name, stage, *params):
self.stage = stage
self.params = params
def __init__(self, kind, scene, *args, **kwargs):
assert isinstance(kind, Constraint.Kind)
assert len(args) == len(kind.params)
self.params = []
for (arg, knd) in zip(args, kind.params):
if knd == List[CoreScene.Point]:
knd = knd.__origin__
if issubclass(knd, CoreScene.Object):
if isinstance(arg, str):
arg = scene.get(arg)
scene.assert_type(arg, knd)
elif issubclass(knd, List):
# TODO: check element types
assert isinstance(arg, (list, tuple))
# TODO: restore other parameters type check
#else:
# assert isinstance(arg, knd)
self.params.append(arg)
self.kind = kind
self.comment = None
self.update(kwargs)
def update(self, kwargs):
self.__dict__.update(kwargs)
def __str__(self):
params = [para.label if isinstance(para, CoreScene.Object) else str(para) for para in self.params]
extras = dict(self.__dict__)
del extras['kind']
del extras['params']
del extras['comment']
if self.comment:
return 'Constraint(%s) %s %s (%s)' % (self.kind.name, params, self.comment, extras)
else:
return 'Constraint(%s) %s (%s)' % (self.kind.name, params, extras)
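# A minimal usage sketch (hedged: label handling lives in Object.__init__,
# defined earlier in this file, and concrete scenes in this project may
# subclass CoreScene; the snippet only illustrates the API surface above):
#
#     scene = CoreScene()
#     A, B, C = (scene.free_point(label=name) for name in 'ABC')
#     scene.equilateral_constraint(CoreScene.Triangle(A, B, C))
#     scene.dump(include_constraints=True)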
| 42.064781 | 139 | 0.549899 | 4,943 | 46,103 | 5 | 0.064536 | 0.026219 | 0.01845 | 0.018774 | 0.41934 | 0.351892 | 0.316407 | 0.271859 | 0.225855 | 0.206393 | 0 | 0.007902 | 0.343969 | 46,103 | 1,095 | 140 | 42.103196 | 0.809192 | 0.042795 | 0 | 0.294118 | 0 | 0 | 0.063666 | 0.001593 | 0 | 0 | 0 | 0.001826 | 0.070358 | 1 | 0.128028 | false | 0.001153 | 0.016148 | 0.024221 | 0.272203 | 0.008074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e2b6f738581b2742bd1ca8b2f9482692264a225 | 345 | py | Python | while_example.py | ISE2012/ch5 | cce470d418419bff0c9d37233755ee39e1362a27 | [
"MIT"
] | null | null | null | while_example.py | ISE2012/ch5 | cce470d418419bff0c9d37233755ee39e1362a27 | [
"MIT"
] | null | null | null | while_example.py | ISE2012/ch5 | cce470d418419bff0c9d37233755ee39e1362a27 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 10:08:03 2020
@author: ucobiz
"""
num = 0 # we have to initialize num to zero
while num <= 0: # so that we can use it here
num = int(input("Enter a positive number: "))
# the while loop has exited b/c num is positive
print("Thank you. The number you chose is:", num)
| 21.5625 | 51 | 0.611594 | 59 | 345 | 3.576271 | 0.762712 | 0.037915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059289 | 0.266667 | 345 | 15 | 52 | 23 | 0.774704 | 0.527536 | 0 | 0 | 0 | 0 | 0.448529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e2d130d98704094b0d5ff31b8c5866899ab9c1f | 12,633 | py | Python | entry/light_menu.py | way864/BattleTracker | 7204d613165b1c461ee301e5078cd4e2b7a072c4 | [
"MIT"
] | null | null | null | entry/light_menu.py | way864/BattleTracker | 7204d613165b1c461ee301e5078cd4e2b7a072c4 | [
"MIT"
] | null | null | null | entry/light_menu.py | way864/BattleTracker | 7204d613165b1c461ee301e5078cd4e2b7a072c4 | [
"MIT"
] | null | null | null | import math
import tkinter as tk
from tkinter import ttk, messagebox
from tkinter.constants import X, Y
from ttkthemes import ThemedStyle
class PickLight():
def __init__(self, root, widg):
self.root = root
self.widg = widg
self.x = self.root.winfo_pointerx()
self.y = self.root.winfo_pointery()
def open_light_win(self):
x_offset = -340
y_offset = 20
self.light_win = tk.Toplevel(self.widg)
style = ThemedStyle(self.root)
style.theme_use("equilux")
self.light_win.configure(bg=style.lookup('TLabel', 'background'))
self.light_win.wm_overrideredirect(1)
self.light_win.wm_geometry(f"+{self.x + x_offset}+{self.y + y_offset}")
self.pfont = ('Papyrus', '14')
self.light_win.focus_set()
self.light_win.focus_force()
self.light_win.attributes('-topmost', True)
border_frame = ttk.Frame(master=self.light_win, borderwidth=2, relief='sunken')
border_frame.grid(row=0, column=0, padx=20, pady=20)#, ipadx=10, ipady=10)
btn_close = tk.Button(master=border_frame, text="X", command=self.light_win.destroy, bg='gray18', fg='gray70', activebackground='red3', bd=0, relief='sunken', font=('Papyrus', '8'), width=2, height=1, anchor='center')
btn_close.grid(row=0, column=1, sticky='e', padx=5)
lbl_light_shape = ttk.Label(master=border_frame, text="Shape", font=self.pfont)
lbl_light_shape.grid(row=1, column=0, sticky='w')
shape_list = [
'Square',
'Circle',
'Cone',
'Line',
'Ring'
]
self.cbx_light_shape = ttk.Combobox(master=border_frame, values=shape_list, width=18, state='readonly')
self.cbx_light_shape.grid(row=1, column=1, sticky='w', padx=5)
self.cbx_light_shape.bind("<<ComboboxSelected>>", self.shape_select)
lbl_size = ttk.Label(master=border_frame, text="Size", font=self.pfont)
lbl_size.grid(row=2, column=0, sticky='w')
self.cbx_light_size = ttk.Combobox(master=border_frame, width=18, state='readonly')
self.cbx_light_size.grid(row=2, column=1, sticky='w', padx=5)
lbl_angle = ttk.Label(master=border_frame, text="Angle from North", font=self.pfont)
lbl_angle.grid(row=3, column=0, sticky='w')
self.cbx_light_angle = ttk.Combobox(master=border_frame, width=18, state='readonly')
self.cbx_light_angle.grid(row=3, column=1, sticky='w', padx=5)
self.btn_confirm = ttk.Button(master=border_frame, text="Confirm")
self.btn_confirm.grid(row=4, column=0, columnspan=2, pady=5)
def shape_select(self, event):
shape = self.cbx_light_shape.get()
len_list = []
angle_list = []
if shape == 'Square':
for i in range(5, 125, 5):
len_list.append(i)
elif shape == 'Circle' or shape == 'Ring':
len_list = [
10,
15,
20,
30,
40,
50,
60,
90,
100,
120
]
elif shape == 'Cone':
for i in range(5, 105, 5):
len_list.append(i)
angle_list = [
'N',
'NE',
'E',
'SE',
'S',
'SW',
'W',
'NW'
]
elif shape == 'Line':
for i in range(5, 305, 5):
len_list.append(i)
for i in range(0, 361, 15):
angle_list.append(i)
self.cbx_light_size.config(values=len_list)
self.cbx_light_size.set('')
self.cbx_light_angle.config(values=angle_list)
        self.cbx_light_angle.set('')
def collect(self):
shape = self.cbx_light_shape.get()
size = self.cbx_light_size.get()
angle = self.cbx_light_angle.get()
offset_array = []
if shape == '':
return
if size == '':
return
size = int(int(size) / 5)
if shape == 'Square':
offset_array = self.fill_square(size)
elif shape == 'Circle':
points = self.fill_circle(size)
offset_array = self.points_to_offsets(points)
elif shape == 'Ring':
#points = self.get_ring_8th(size)
#points = self.brute_ring(size)
points = self.no_fill_circle(size)
offset_array = self.points_to_offsets(points)
        elif shape == 'Line':
            # guard: find_endpoint returns None for an empty angle, which would
            # break the tuple unpacking below
            if angle == '':
                return
            end_x, end_y = self.find_endpoint(size, angle)
            points = self.draw_line(0, 0, end_x, end_y)
            offset_array = self.points_to_offsets(points)
        elif shape == 'Cone':
            if angle == '':
                return
            points = self.draw_cone(size, angle)
            offset_array = self.points_to_offsets(points)
else:
return
return offset_array, shape
def find_endpoint(self, size, angle):
angle_diff = 0
octant = 1
if angle == '':
return
angle = int(angle)
if angle == 0 or angle == 360:
octant = 0
end_x = size
end_y = 0
elif angle == 180:
octant = 0
end_x = -size
end_y = 0
if octant > 0:
if angle > 45 and angle < 90:
angle_diff = 45
octant = 2
elif angle >= 90 and angle < 135:
angle_diff = 90
octant = 3
elif angle >= 135 and angle < 180:
angle_diff = 135
octant = 4
elif angle >= 180 and angle < 225:
angle_diff = 180
octant = 5
elif angle >= 225 and angle < 270:
angle_diff = 225
octant = 6
elif angle >= 270 and angle < 315:
angle_diff = 270
octant = 7
elif angle >= 315 and angle < 360:
angle_diff = 315
octant = 8
angle -= angle_diff
        # Flip even-octant angles so the same interpolation formula works in every octant
if octant % 2 == 0:
angle = abs(angle - 45)
short_leg = int((angle * size) / 45)
if octant == 1:
end_x = size
end_y = short_leg
elif octant == 2:
end_x = short_leg
end_y = size
elif octant == 3:
end_x = -short_leg
end_y = size
elif octant == 4:
end_x = -size
end_y = short_leg
elif octant == 5:
end_x = -size
end_y = -short_leg
elif octant == 6:
end_x = -short_leg
end_y = -size
elif octant == 7:
end_x = short_leg
end_y = -size
elif octant == 8:
end_x = size
end_y = -short_leg
return end_x, end_y
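    # Worked example (traced by hand): find_endpoint(4, '30') stays in octant 1
    # with no flip, short_leg = int(30 * 4 / 45) = 2, so the endpoint is (4, 2);
    # a 90-degree angle falls in octant 3 and maps to (0, 4).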
def points_to_offsets(self, points):
pos = [0,0]
offsets = []
for point in points:
dist = [point[0]-pos[0], point[1]-pos[1]]
offsets.append(dist)
pos = point
return offsets
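    # Worked example: points_to_offsets([[2, 0], [2, 1]]) walks from [0, 0] and
    # yields the step-by-step moves [[2, 0], [0, 1]].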
def fill_square(self, size):
points = []
col = 1
area = int(size**2)
for i in range(1, area):
if col < size:
points.append((1,0))
col += 1
elif col == size:
points.append((-1 * (col-1), 1))
col = 1
return points
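    # Worked example: fill_square(2) returns [(1, 0), (-1, 1), (1, 0)] -- the
    # moves that visit the remaining three cells of a 2x2 square in row-major
    # order, starting from the current cell.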
def fill_circle(self, r, center=[0.5, 0.5]):
top = int(center[1] - r)
bottom = int(center[1] + r)
points = []
for y in range(top, bottom+1):
dy = y - center[1]
dx = math.sqrt(r*r - dy*dy)
left = math.ceil(center[0] - dx)
right = math.floor(center[0] + dx)
for x in range(left, right+1):
points.append([x,y])
return points
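    # Worked example: fill_circle(1) scans rows y = 0..1 and returns the 2x2
    # block [[0, 0], [1, 0], [0, 1], [1, 1]] around the half-integer centre
    # (0.5, 0.5).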
def no_fill_circle(self, r):
points = []
y = 1
x = r
while x > y:
dy = y - 0.5
dx = math.sqrt(r*r - dy*dy)
left = math.ceil(0.5 - dx)
right = math.floor(0.5 + dx)
points.extend(self.transform_no_fill(left, y))
points.extend(self.transform_no_fill(right, y))
y += 1
return points
def transform_no_fill(self, x, y):
x = int(x)
y = int(y)
return [
( x, y),
(1-y, x),
(1-x, 1-y),
( y, 1-x)
]
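    # Worked example: transform_no_fill(3, 1) returns
    # [(3, 1), (0, 3), (-2, 0), (1, -2)] -- the same boundary cell reflected
    # into all four quadrants around the half-integer centre.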
def draw_line(self, x1, y1, x2, y2):
points = []
# undef is for a vertical line
undef = False
small_slope = True
m_error = 0
if x1 > x2:
start_x = x2
start_y = y2
end_x = x1
end_y = y1
x1 = start_x
x2 = end_x
y1 = start_y
y2 = end_y
elif x1 == x2:
undef = True
if y1 > y2:
start_x = x2
start_y = y2
end_x = x1
end_y = y1
x1 = start_x
x2 = end_x
y1 = start_y
y2 = end_y
if not undef:
dx = x2 - x1
dy = y2 - y1
m = dy / dx
if m > 1 or m < -1:
small_slope = False
if m < -1:
start_x = x2
start_y = y2
end_x = x1
end_y = y1
x1 = start_x
x2 = end_x
y1 = start_y
y2 = end_y
if small_slope:
y = y1
if m >= 0:
for x in range(x1, x2+1):
points.append([x, y])
m_error += dy
if (m_error * 2) >= dx:
y += 1
m_error -= dx
else:
for x in range(x1, x2+1):
points.append([x, y])
if (m_error + m) > -0.5:
m_error += m
else:
y -= 1
m_error = m_error + m + 1
else:
x = x1
if m > 0:
for y in range(y1, y2+1):
points.append([x, y])
m_error += dx
if (m_error * 2) >= dy:
x += 1
m_error -= dy
else:
m = 1/m
for y in range(y1, y2+1):
points.append([x, y])
if (m_error + m) > -0.5:
m_error += m
else:
x -= 1
m_error = m_error + m + 1
else:
x = x1
for y in range(y1, y2+1):
points.append([x, y])
return points
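    # Worked example (Bresenham-style accumulation of the slope error):
    # draw_line(0, 0, 3, 1) returns [[0, 0], [1, 0], [2, 1], [3, 1]].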
def draw_cone(self, size, dir):
points = []
if len(dir) == 1:
xl1 = 0
xl2 = 1
for y in range(1, size+1):
if y % 2 == 1 and y > 1:
xl1 -= 1
xl2 += 1
for x in range(xl1, xl2):
if dir == 'N':
points.append([x, -y])
elif dir == 'S':
points.append([x, y])
elif dir == 'W':
points.append([-y, x])
elif dir == 'E':
points.append([y, x])
else:
height = size
for x in range(1, size+1):
for y in range(1, height+1):
if dir == 'NE':
points.append([x, -y])
elif dir == 'SE':
points.append([x, y])
elif dir == 'SW':
points.append([-x, y])
elif dir == 'NW':
points.append([-x, -y])
height -= 1
return points
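    # Worked example: draw_cone(3, 'N') returns
    # [[0, -1], [0, -2], [-1, -3], [0, -3], [1, -3]] -- a column that widens by
    # one cell on each side every other row. Diagonal directions ('NE' etc.)
    # instead fill a shrinking staircase of size + (size - 1) + ... + 1 cells.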
def escape(self):
self.light_win.destroy()
def GenLightWin(root, widg):
light_win = PickLight(root, widg)
return light_win | 32.392308 | 225 | 0.432676 | 1,490 | 12,633 | 3.520134 | 0.14698 | 0.01449 | 0.034318 | 0.032031 | 0.361868 | 0.310391 | 0.236797 | 0.199047 | 0.182841 | 0.134223 | 0 | 0.048634 | 0.461252 | 12,633 | 390 | 226 | 32.392308 | 0.72201 | 0.012032 | 0 | 0.314607 | 0 | 0 | 0.024205 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039326 | false | 0 | 0.014045 | 0 | 0.092697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e2ed5d13acdd2a38212da944c819fc33a4a6530 | 11,151 | py | Python | tests/ozpcenter_api/test_api_library.py | emosher/ozp-backend | d31d00bb8a28a8d0c999813f616b398f41516244 | [
"Apache-2.0"
] | 1 | 2018-10-05T17:03:01.000Z | 2018-10-05T17:03:01.000Z | tests/ozpcenter_api/test_api_library.py | emosher/ozp-backend | d31d00bb8a28a8d0c999813f616b398f41516244 | [
"Apache-2.0"
] | 1 | 2017-01-06T19:20:32.000Z | 2017-01-06T19:20:32.000Z | tests/ozpcenter_api/test_api_library.py | emosher/ozp-backend | d31d00bb8a28a8d0c999813f616b398f41516244 | [
"Apache-2.0"
] | 7 | 2016-12-16T15:42:05.000Z | 2020-09-05T01:11:27.000Z | import datetime
import pytz
from django.test import override_settings
from rest_framework import status
from ozpcenter import model_access as generic_model_access
from ozpcenter.scripts import sample_data_generator as data_gen
from tests.ozp.cases import APITestCase
from tests.ozpcenter.helper import APITestHelper
@override_settings(ES_ENABLED=False)
class LibraryApiTest(APITestCase):
@classmethod
def setUpTestData(cls):
data_gen.run()
def setUp(self):
pass
def test_get_library(self):
url = '/api/library/'
response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
self.assertIsNotNone(response.data)
def test_create_library(self):
# Listing is Enabled
response = APITestHelper.create_bookmark(self, 'wsmith', 1, folder_name='', status_code=201)
self.assertEqual(response.data['listing']['id'], 1)
# Disable Listing
APITestHelper.edit_listing(self, 1, {'is_enabled': False}, 'wsmith')
# POST to /self/library after listing disabled
response = APITestHelper.create_bookmark(self, 'wsmith', 1, folder_name='', status_code=400)
        # Enable Listing
        APITestHelper.edit_listing(self, 1, {'is_enabled': True}, 'wsmith')
        # POST to /self/library after listing re-enabled
response = APITestHelper.create_bookmark(self, 'wsmith', 1, folder_name='', status_code=201)
self.assertEqual(response.data['listing']['id'], 1)
def test_get_library_list(self):
url = '/api/self/library/'
response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
# import json; print(json.dumps(shorthand_types(response.data), indent=2))
self.assertEqual(10, len(response.data))
self.assertIn('listing', response.data[0])
self.assertIn('id', response.data[0]['listing'])
self.assertIn('title', response.data[0]['listing'])
self.assertIn('unique_name', response.data[0]['listing'])
self.assertIn('folder', response.data[0])
def test_get_library_self_when_listing_disabled_enabled(self):
url = '/api/self/library/'
response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
listing_ids = [record['listing']['id'] for record in response.data]
first_listing_id = listing_ids[0] # Should be 2
self.assertEqual([2, 23, 44, 63, 10, 77, 81, 101, 9, 147], listing_ids, 'Comparing Ids #1')
# Disable Listing
APITestHelper.edit_listing(self, first_listing_id, {'is_enabled': False})
# Get Library for current user after listing was disabled
url = '/api/self/library/'
response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
listing_ids = [record['listing']['id'] for record in response.data]
self.assertEqual([23, 44, 63, 10, 77, 81, 101, 9, 147], listing_ids, 'Comparing Ids #2')
# Enable Listing
APITestHelper.edit_listing(self, first_listing_id, {'is_enabled': True})
# Get Library for current user after listing was Enable
url = '/api/self/library/'
response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
listing_ids = [record['listing']['id'] for record in response.data]
        self.assertEqual([2, 23, 44, 63, 10, 77, 81, 101, 9, 147], listing_ids, 'Comparing Ids #3')
def test_get_library_list_listing_type(self):
url = '/api/self/library/?type=Web Application'
response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
self.assertEqual(4, len(response.data))
self.assertIn('listing', response.data[0])
self.assertIn('id', response.data[0]['listing'])
self.assertIn('title', response.data[0]['listing'])
self.assertIn('unique_name', response.data[0]['listing'])
self.assertIn('folder', response.data[0])
def test_get_library_list_listing_type_empty(self):
url = '/api/self/library/?type=widget'
response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
self.assertEqual([], response.data)
def test_get_library_pk(self):
url = '/api/self/library/2/'
response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
self.assertIn('listing', response.data)
self.assertIn('id', response.data['listing'])
self.assertIn('title', response.data['listing'])
self.assertIn('unique_name', response.data['listing'])
self.assertIn('folder', response.data)
def test_library_update_all(self):
url = '/api/self/library/'
response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
put_data = []
position_count = 0
for i in response.data:
position_count = position_count + 1
data = {'id': i['id'],
'folder': 'test',
'listing': {'id': i['listing']['id']},
'position': position_count
}
put_data.append(data)
url = '/api/self/library/update_all/'
response = APITestHelper.request(self, url, 'PUT', data=put_data, username='wsmith', status_code=200)
self.assertIsNotNone(response)
def _compare_library(self, usernames_list):
usernames_list_actual = {}
for username, ids_list in usernames_list.items():
url = '/api/self/library/'
response = APITestHelper.request(self, url, 'GET', username=username, status_code=200)
before_notification_ids = ['{}-{}'.format(entry['listing']['title'], entry['folder']) for entry in response.data]
usernames_list_actual[username] = before_notification_ids
for username, ids_list in usernames_list.items():
before_notification_ids = usernames_list_actual[username]
self.assertEqual(sorted(ids_list), sorted(before_notification_ids), 'Checking for {}'.format(username))
def test_import_bookmarks(self):
        # Create a notification to share the Weather folder from bigbrother to julia
now = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=5)
data = {'expires_date': str(now),
'message': 'A Simple Peer to Peer Notification',
'peer': {
'user': {
'username': 'julia',
},
'folder_name': 'Weather'
}}
url = '/api/notification/'
user = generic_model_access.get_profile('bigbrother').user
self.client.force_authenticate(user=user)
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
bookmark_notification1_id = response.data['id']
# Import Bookmarks
results = APITestHelper._import_bookmarks(self, 'julia', bookmark_notification1_id, status_code=201)
# Compare Library for users
user_library_data = {'julia': ['Tornado-Weather',
'Lightning-Weather',
'Snow-Weather']}
self._compare_library(user_library_data)
# Modify Bigbrother's library to add another listing to the weather library
url_lib = '/api/self/library/'
response = APITestHelper.request(self, url_lib, 'GET', username='bigbrother', status_code=200)
put_data = []
position_count = 0
for i in response.data:
            if i['id'] == 12:
data = {'id': i['id'],
'folder': "Weather",
'listing': {'id': i['listing']['id']},
'position': position_count
}
put_data.append(data)
else:
data = {'id': i['id'],
'folder': i['folder'],
'listing': {'id': i['listing']['id']},
'position': position_count
}
put_data.append(data)
url_update = '/api/self/library/update_all/'
response = APITestHelper.request(self, url_update, 'PUT', data=put_data, username='bigbrother', status_code=200)
self.assertIsNotNone(response)
# Recreate the notification to send to Julia to share the folder
now = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=5)
data = {'expires_date': str(now),
'message': 'A Simple Peer to Peer Notification',
'peer': {
'user': {
'username': 'julia',
},
'folder_name': 'Weather'
}}
url = '/api/notification/'
user = generic_model_access.get_profile('bigbrother').user
self.client.force_authenticate(user=user)
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
bookmark_notification1_id = response.data['id']
# Import Bookmarks
results = APITestHelper._import_bookmarks(self, 'julia', bookmark_notification1_id, status_code=201)
# Compare Library for users
user_library_data = {'bigbrother': ['Tornado-Weather',
'Lightning-Weather',
'Snow-Weather',
'Wolf Finder-Animals',
'Killer Whale-Animals',
'Lion Finder-Animals',
'Monkey Finder-Animals',
'Parrotlet-Animals',
'White Horse-Animals',
'Electric Guitar-Instruments',
'Acoustic Guitar-Instruments',
'Sound Mixer-Instruments',
'Electric Piano-Instruments',
'Piano-Instruments',
'Violin-Instruments',
'Bread Basket-Weather',
'Informational Book-None',
'Stop sign-None',
'Chain boat navigation-None',
'Gallery of Maps-None',
'Chart Course-None'],
'julia': ['Tornado-Weather',
'Lightning-Weather',
'Snow-Weather',
'Bread Basket-Weather']}
self._compare_library(user_library_data)
| 44.426295 | 125 | 0.565958 | 1,147 | 11,151 | 5.347864 | 0.178727 | 0.058689 | 0.059341 | 0.067819 | 0.72628 | 0.688947 | 0.65569 | 0.611999 | 0.551679 | 0.551679 | 0 | 0.020609 | 0.316833 | 11,151 | 250 | 126 | 44.604 | 0.784589 | 0.058829 | 0 | 0.494565 | 0 | 0 | 0.161989 | 0.010977 | 0 | 0 | 0 | 0 | 0.157609 | 1 | 0.065217 | false | 0.005435 | 0.059783 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e31aafbd6d3baac6fea5ff1008384c247a90db7 | 568 | py | Python | ABC/ABC126/abc126_d.py | yatabis/AtCoder-in-Python3 | cc2948853b549a6b8f39df5685c9e84cda81499d | [
"MIT"
] | null | null | null | ABC/ABC126/abc126_d.py | yatabis/AtCoder-in-Python3 | cc2948853b549a6b8f39df5685c9e84cda81499d | [
"MIT"
] | null | null | null | ABC/ABC126/abc126_d.py | yatabis/AtCoder-in-Python3 | cc2948853b549a6b8f39df5685c9e84cda81499d | [
"MIT"
] | null | null | null | # Problem URL: https://atcoder.jp/contests/abc126/tasks/abc126_d
# Solution URL: https://atcoder.jp/contests/abc126/submissions/14638903
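# Approach (editor's note): colour each vertex by the parity of its distance
# from vertex 0. Tree distances satisfy
#     dist(u, v) = dist(0, u) + dist(0, v) - 2 * dist(0, lca(u, v)),
# so dist(u, v) is even exactly when dist(0, u) and dist(0, v) have the same
# parity -- which is what the problem requires of same-coloured vertices.
# The heap-based traversal below computes dist(0, v) for every vertex.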
from heapq import heappop, heappush
n = int(input())
a = [[] for _ in range(n)]
for _ in range(n - 1):
u, v, w = map(int, input().split())
a[u - 1].append((v - 1, w))
a[v - 1].append((u - 1, w))
d = [-1] * n
todo = [(0, 0)]
seen = set()
while todo:
e, p = heappop(todo)
if p in seen:
continue
seen.add(p)
d[p] = e
for pi, ei in a[p]:
heappush(todo, (e + ei, pi))
for di in d:
print(di % 2)
| 22.72 | 64 | 0.556338 | 99 | 568 | 3.161616 | 0.454545 | 0.076677 | 0.089457 | 0.140575 | 0.178914 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061176 | 0.251761 | 568 | 24 | 65 | 23.666667 | 0.675294 | 0.209507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e334b9a938fd3c9df3467c8d839369988bb30b6 | 10,029 | py | Python | Codes/utility/load_data.py | CRIPAC-DIG/K-GHRM | 5d73ed701b7753ee402ecfc1dbc4b20c578a4656 | [
"MIT"
] | 3 | 2021-07-19T14:34:07.000Z | 2022-03-31T06:24:38.000Z | Codes/utility/load_data.py | CRIPAC-DIG/K-GHRM | 5d73ed701b7753ee402ecfc1dbc4b20c578a4656 | [
"MIT"
] | null | null | null | Codes/utility/load_data.py | CRIPAC-DIG/K-GHRM | 5d73ed701b7753ee402ecfc1dbc4b20c578a4656 | [
"MIT"
] | 1 | 2021-08-22T07:23:49.000Z | 2021-08-22T07:23:49.000Z | import numpy as np
import random as rd
from time import time
from math import log
import gc
import heapq
import copy
from utility.parser import parse_args
args = parse_args()
class Data(object):
def __init__(self, path, batch_size):
self.batch_size = batch_size
doc_file = path + '/map.documents.txt' # before deduplication
qrl_file = path + '/map.queries.txt'
doc_dict_file = path + '/doc_dict.txt'
qrl_dict_file = path + '/qrl_dict.txt'
# train_file = path + '/train_pairs/f{}.train.pairs'.format(str(args.fold))
# test_file = path + '/valid_run/f{}.valid.run'.format(str(args.fold))
# test_file = path + '/test_run/f{}.test.run'.format(str(args.fold))
fold = [1, 2, 3, 4, 5]
test_file = path + '/%d.run' % fold[args.fold - 1]
valid_file = path + '/%d.run' % fold[args.fold - 2]
train_files = [path + '/%d.run' % fold[i] for i in [args.fold - 3, args.fold - 4, args.fold - 5]]
match_file = path + '/qrels'
doc_unique_words = path + '/doc_word_list_unique.txt' # after deduplication
doc_unique_entities = path + '/doc_ent_list_unique.txt' # after deduplication
qrl_unique_entities = path + '/que_ent_list_unique.txt'
self.n_docs = 0
self.n_qrls = 0
self.n_words = 0
self.n_ents = 0
self.n_train = 0
self.n_test = 0
self.pos_pools = {}
self.neg_pools = {}
        # map the original id to a numerical id, e.g. 'LA03421' -> 0
self.doc_dict = {}
self.qrl_dict = {}
self.doc_dict_rev = {}
self.qrl_dict_rev = {}
self.qrl_doc_match = {}
self.doc_word_list = {}
self.qrl_word_list = {}
self.qrl_ent_list = {}
self.doc_unique_word_list = {}
self.doc_unique_ent_list = {}
'''
self.word_freq = {}
self.word_doc_freq = {}
self.word_window_freq = {}
self.doc_word_freq = {}
self.qrl_word_freq = {}
self.word_pair_count = {}
self.num_window = 0
'''
# window_size = 5
# self.windows = []
self.all_neg = []
print('loading entities...', end='', flush=True)
with open(doc_unique_entities) as f:
for l in f.readlines():
if len(l) > 0:
l = l.strip().split('\t')
did = int(l[0])
if len(l) > 1:
ents = [int(i) for i in l[1].split()]
self.doc_unique_ent_list[did] = ents
self.n_ents = max(self.n_ents, max(ents))
with open(qrl_unique_entities) as f:
for l in f.readlines():
if len(l) > 0:
l = l.strip().split('\t')
did = int(l[0])
if len(l) > 1:
ents = [int(i) for i in l[1].split()]
self.qrl_ent_list[did] = ents
self.n_ents = max(self.n_ents, max(ents))
print('done')
print('loading documents...', end='', flush=True)
with open(doc_file) as f, open(doc_unique_words) as f2:
for l in f.readlines():
if len(l) > 0:
l = l.strip().split('\t')
did = int(l[0])
words = [int(i) for i in l[1].split()]
self.doc_word_list[did] = words
self.n_words = max(self.n_words, max(words))
self.n_docs = max(self.n_docs, did)
for l in f2.readlines():
if len(l)>0:
l = l.strip().split('\t')
did = int(l[0])
words = [int(i) for i in l[1].split()]
self.doc_unique_word_list[did] = words
print('done')
print('loading queries...', end='', flush=True)
with open(qrl_file) as f:
for l in f.readlines():
if len(l) > 0:
l = l.strip().split('\t')
qid = int(l[0])
words = [int(i) for i in l[1].split()]
self.qrl_word_list[qid] = words
self.n_words = max(self.n_words, max(words))
self.n_qrls = max(self.n_qrls, qid)
print('done')
self.n_docs += 1
self.n_qrls += 1
print('query:{} \t document:{} \t word:{} \t entity:{}'.format(self.n_qrls, self.n_docs, self.n_words, self.n_ents))
self.n_words += 1
self.n_ents += 1
print('loading dict...', end='', flush=True)
with open(doc_dict_file) as f1, open(qrl_dict_file) as f2:
for l in f1.readlines():
key, item = l.strip().split('\t')
self.doc_dict[key] = int(item)
self.doc_dict_rev[int(item)] = key
for l in f2.readlines():
key, item = l.strip().split('\t')
                self.qrl_dict[key] = int(item) # key is the original id; the value is the mapped id starting from 0
self.qrl_dict_rev[int(item)] = key
with open(match_file) as f:
for l in f.readlines():
qrl_key, _, doc_key, score = l.strip().split()
if qrl_key not in self.qrl_dict:
continue
if doc_key not in self.doc_dict:
continue
qrl = self.qrl_dict[qrl_key]
doc = self.doc_dict[doc_key]
if int(score) > 0:
score = '1'
if (qrl, score) in self.qrl_doc_match:
self.qrl_doc_match[(qrl, score)].append(doc)
else:
self.qrl_doc_match[(qrl, score)] = [doc]
print('done')
print('loading train&test set...', end='', flush=True)
self.train_items, self.test_set = {}, {}
for train_file in train_files:
with open(train_file) as f_train:
for l in f_train.readlines():
if len(l) == 0:
break
l = l.strip()
qrl_key, _, doc_key, _, _, _ = l.split()
qrl = self.qrl_dict[qrl_key]
doc = self.doc_dict[doc_key]
if qrl in self.train_items:
self.train_items[qrl].append(doc)
else:
self.train_items[qrl] = [doc]
self.n_train += 1
with open(test_file) as f_test:
for l in f_test.readlines():
if len(l) == 0:
break
l = l.strip()
qrl_key, _, doc_key, _, _, _ = l.split()
qrl = self.qrl_dict[qrl_key]
doc = self.doc_dict[doc_key]
if qrl in self.test_set:
self.test_set[qrl].append(doc)
else:
self.test_set[qrl] = [doc]
self.n_test += 1
print('done')
for qrl in self.train_items.keys():
for doc in self.train_items[qrl]:
if not (qrl, '0') in self.qrl_doc_match:
self.qrl_doc_match[(qrl, '0')] = []
if not (qrl, '1') in self.qrl_doc_match:
self.qrl_doc_match[(qrl, '1')] = []
self.all_neg.append(qrl)
if doc in self.qrl_doc_match[(qrl, '0')] or doc in self.qrl_doc_match[(qrl, '1')]:
continue
else:
self.qrl_doc_match[(qrl, '0')].append(doc)
for q in self.all_neg:
self.train_items.pop(q)
self.positive_pool()
self.negative_pool()
print('init finish!')
def positive_pool(self):
t1 = time()
for q in self.train_items.keys():
self.pos_pools[q] = self.qrl_doc_match[(q, '1')]
print('refresh positive pools', time() - t1)
def negative_pool(self):
t1 = time()
for q in self.train_items.keys():
self.neg_pools[q] = self.qrl_doc_match[(q, '0')]
print('refresh negative pools', time() - t1)
def sample(self):
key_pool = list(self.train_items.keys())
rd.shuffle(key_pool)
if self.batch_size <= len(key_pool):
qrls = rd.sample(key_pool, self.batch_size)
else:
qrls = [rd.choice(key_pool) for _ in range(self.batch_size)]
def sample_pos_docs_for_q_from_pools(q, num):
pos_docs = self.pos_pools[q]
return rd.sample(pos_docs, num)
def sample_neg_docs_for_q_from_pools(q, num):
neg_docs = self.neg_pools[q]
return rd.sample(neg_docs, num)
pos_docs, neg_docs = [], []
for q in qrls:
pos_docs += sample_pos_docs_for_q_from_pools(q, 1)
neg_docs += sample_neg_docs_for_q_from_pools(q, 1)
return qrls, pos_docs, neg_docs
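    # Hedged usage sketch (the path and batch size below are illustrative):
    #     data = Data('data/robust04', 32)
    #     qrls, pos_docs, neg_docs = data.sample()
    # The three lists are aligned: pos_docs[i] is a relevant document and
    # neg_docs[i] a non-relevant one for query qrls[i].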
def print_statistics(self):
print('n_docs=%d, n_qrls=%d, n_words=%d' % (self.n_docs, self.n_qrls, self.n_words))
print('n_interactions=%d' % (self.n_train + self.n_test))
print('n_train=%d, n_test=%d, sparsity=%.5f' % (self.n_train, self.n_test, (self.n_train + self.n_test)/(self.n_docs * self.n_qrls)))
| 41.271605 | 444 | 0.466746 | 1,265 | 10,029 | 3.475099 | 0.115415 | 0.044359 | 0.029572 | 0.044359 | 0.457006 | 0.371019 | 0.330528 | 0.284122 | 0.229072 | 0.229072 | 0 | 0.012598 | 0.414299 | 10,029 | 242 | 445 | 41.442149 | 0.735785 | 0.042676 | 0 | 0.325 | 0 | 0 | 0.053666 | 0.008011 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035 | false | 0 | 0.04 | 0 | 0.095 | 0.09 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e33b7d46ae6ef1f9eed0f3bad1e2a967744885c | 4,379 | py | Python | popseg/models/panopticdeeplab.py | sithu31296/panoptic-segmentation | bd3f5bf51729dcf5698959b3a6b53a1da8e2db65 | [
"MIT"
] | 1 | 2021-11-06T09:00:11.000Z | 2021-11-06T09:00:11.000Z | popseg/models/panopticdeeplab.py | sithu31296/panoptic-segmentation | bd3f5bf51729dcf5698959b3a6b53a1da8e2db65 | [
"MIT"
] | 1 | 2021-11-07T11:59:57.000Z | 2021-11-08T14:29:10.000Z | popseg/models/panopticdeeplab.py | sithu31296/panoptic-segmentation | bd3f5bf51729dcf5698959b3a6b53a1da8e2db65 | [
"MIT"
] | null | null | null | import torch
from torch import nn, Tensor
from torch.nn import functional as F
from .modules import Conv, SeparableConv
from .backbones import ResNet
class ASPP(nn.Module):
def __init__(self, c1, c2, drop_rate=0.1):
super().__init__()
ratios = [1, 6, 12, 18]
self.blocks = nn.ModuleList([
Conv(c1, c2, 1 if ratio==1 else 3, 1, 0 if ratio==1 else ratio, ratio)
for ratio in ratios])
self.blocks.append(nn.Sequential(
nn.AdaptiveAvgPool2d(1),
Conv(c1, c2, 1)
))
self.conv = Conv(c2 * (len(ratios) + 1), c2, 1)
self.dropout = nn.Dropout(drop_rate)
def forward(self, x: Tensor) -> Tensor:
contexts = []
for blk in self.blocks:
contexts.append(F.interpolate(blk(x), x.shape[2:], mode='bilinear', align_corners=False))
x = self.conv(torch.cat(contexts, dim=1))
x = self.dropout(x)
return x
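# Shape sketch (hedged): for input (N, c1, H, W) each of the five branches --
# four dilated convs plus the pooled branch -- yields (N, c2, H, W) after
# interpolation, so the concatenation is (N, 5*c2, H, W) and the final 1x1
# conv returns (N, c2, H, W). For example (illustrative sizes):
#     ASPP(1024, 256)(torch.randn(2, 1024, 28, 28)).shape == (2, 256, 28, 28)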
class Decoder(nn.Module):
def __init__(self, backbone_channels, aspp_out_channel=256, decoder_channel=256, low_level_channels=[64, 32]):
super().__init__()
self.aspp = ASPP(backbone_channels[-1], aspp_out_channel)
self.conv = Conv(aspp_out_channel, aspp_out_channel, 1)
self.project8 = Conv(backbone_channels[1], low_level_channels[0], 1)
self.fuse8 = SeparableConv(aspp_out_channel + low_level_channels[0], decoder_channel, 5, 1, 2)
self.project4 = Conv(backbone_channels[0], low_level_channels[1], 1)
self.fuse4 = SeparableConv(decoder_channel + low_level_channels[1], decoder_channel, 5, 1, 2)
def forward(self, features: list) -> Tensor:
x = self.aspp(features[-1])
x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
feat8 = self.project8(features[1])
x = self.fuse8(torch.cat([x, feat8], dim=1))
x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
feat4 = self.project4(features[0])
x = self.fuse4(torch.cat([x, feat4], dim=1))
return x
class SemanticHead(nn.Module):
def __init__(self, decoder_channel, head_channel, num_classes):
super().__init__()
self.conv = nn.Sequential(
SeparableConv(decoder_channel, head_channel, 5, 1, 2),
nn.Conv2d(head_channel, num_classes, 1)
)
def forward(self, x: Tensor) -> Tensor:
return self.conv(x)
class InstanceHead(nn.Module):
def __init__(self, decoder_channel, head_channel):
super().__init__()
self.center_conv = nn.Sequential(
SeparableConv(decoder_channel, head_channel, 5, 1, 2),
nn.Conv2d(head_channel, 1, 1)
)
self.offset_conv = nn.Sequential(
SeparableConv(decoder_channel, head_channel, 5, 1, 2),
nn.Conv2d(head_channel, 2, 1)
)
def forward(self, x: Tensor) -> Tensor:
return self.center_conv(x), self.offset_conv(x)
class PanopticDeepLab(nn.Module):
def __init__(self, variant: str = '50', num_classes: int = 19):
super().__init__()
self.backbone = ResNet(variant)
backbone_channels = [256, 512, 1024]
self.semantic_decoder = Decoder(backbone_channels, 256, 256, [64, 32])
self.instance_decoder = Decoder(backbone_channels, 256, 128, [32, 16])
self.semantic_head = SemanticHead(256, 256, num_classes)
self.instance_head = InstanceHead(128, 32)
def forward(self, x: Tensor) -> Tensor:
features = self.backbone(x)[:-1]
semantic = self.semantic_decoder(features)
instance = self.instance_decoder(features)
semantic = self.semantic_head(semantic)
center, offset = self.instance_head(instance)
semantic = F.interpolate(semantic, x.shape[-2:], mode='bilinear', align_corners=False)
center = F.interpolate(center, x.shape[-2:], mode='bilinear', align_corners=False)
scale = x.shape[-2] // offset.shape[-2]
offset = F.interpolate(offset, x.shape[-2:], mode='bilinear', align_corners=False)
offset *= scale
return semantic, center, offset
if __name__ == '__main__':
model = PanopticDeepLab('50')
x = torch.randn(2, 3, 224, 224)
semantic, center, offset = model(x)
print(semantic.shape)
print(center.shape)
print(offset.shape) | 35.893443 | 114 | 0.631879 | 570 | 4,379 | 4.647368 | 0.184211 | 0.02718 | 0.029445 | 0.04077 | 0.324273 | 0.265006 | 0.244621 | 0.244621 | 0.19026 | 0.12835 | 0 | 0.045632 | 0.239324 | 4,379 | 122 | 115 | 35.893443 | 0.749625 | 0 | 0 | 0.172043 | 0 | 0 | 0.013699 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107527 | false | 0 | 0.053763 | 0.021505 | 0.268817 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e35b131f0e0e93445407cb308ded03d523d37ec | 474 | py | Python | heap/easy/qheap.py | YANG007SUN/data_structure | 481e76eb713dc305409eafe4118121f499357ec6 | [
"Apache-2.0"
] | null | null | null | heap/easy/qheap.py | YANG007SUN/data_structure | 481e76eb713dc305409eafe4118121f499357ec6 | [
"Apache-2.0"
] | null | null | null | heap/easy/qheap.py | YANG007SUN/data_structure | 481e76eb713dc305409eafe4118121f499357ec6 | [
"Apache-2.0"
] | null | null | null | # https://www.hackerrank.com/challenges/qheap1/problem?isFullScreen=true
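# Query format (from the problem statement): "1 v" inserts v, "2 v" deletes one
# occurrence of v, "3" prints the current minimum. Example: after "1 4", "1 9",
# a "3" prints 4; after "2 4", a "3" prints 9.
# Note: despite the heapq import, this solution keeps the elements in a plain
# list and tracks the minimum in `minv`, recomputing it only when the current
# minimum is deleted.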
Q = int(input())
hp = []
for _ in range(Q):
ls = list(map(int, input().split()))
if ls[0]==1:
if len(hp)==0:
hp.append(ls[1])
minv = hp[0]
else:
minv = min(ls[1], minv)
hp.append(ls[1])
elif ls[0]==2:
hp.remove(ls[1])
if ls[1] == minv and hp:
minv = min(hp)
else:
print(minv)
| 23.7 | 72 | 0.466245 | 68 | 474 | 3.235294 | 0.485294 | 0.068182 | 0.095455 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039344 | 0.35654 | 474 | 20 | 73 | 23.7 | 0.681967 | 0.147679 | 0 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e369a1097ab91da0411c8e0085e71ff5b62e1cb | 3,087 | py | Python | pygcn/layers.py | linzhongping/fm-gcn | d403eff40c19c7cf7998c894e03b2c45da556ac7 | [
"MIT"
] | 1 | 2020-01-11T12:53:28.000Z | 2020-01-11T12:53:28.000Z | pygcn/layers.py | linzhongping/fm-gcn | d403eff40c19c7cf7998c894e03b2c45da556ac7 | [
"MIT"
] | 11 | 2019-08-06T02:23:32.000Z | 2022-03-11T23:51:20.000Z | pygcn/layers.py | linzhongping/fm-gcn | d403eff40c19c7cf7998c894e03b2c45da556ac7 | [
"MIT"
] | 1 | 2020-01-11T12:53:29.000Z | 2020-01-11T12:53:29.000Z | import math
from torch.nn import Embedding
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from torch.nn import init
from torch.autograd import Variable
torch.random.manual_seed(2070)
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
class BI_Intereaction(Module):
def __init__(self, in_features, k_embedding, train_idx):
'''
:param in_features: 输入特征维数
:param k: 单一特征embedding
:param bias:
'''
super(BI_Intereaction, self).__init__()
self.in_features = in_features
self.k_embedding = k_embedding
self.train_idx = train_idx
self.embedding = Embedding(in_features, k_embedding)
# self.weight = Parameter(torch.FloatTensor(in_features,k_embedding))
# self.reset_parameters()
self.init_embedding()
def init_embedding(self):
init.xavier_uniform_(self.embedding.weight)
# print('embedding_init',self.embedding.weight)
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def bi_pooling(self, input, embeddings):
output = torch.zeros(input.shape[0], self.k_embedding)
rows, cols = input.shape[0], input.shape[1]
# print(rows,cols)
for _ in self.train_idx:
left = torch.zeros(self.k_embedding)
right = torch.zeros(self.k_embedding)
nonzero_index = torch.nonzero(input[_])
# print(nonzero_index.squeeze(1))
for i in nonzero_index.squeeze(1):
left += torch.mul(embeddings.weight[i] , input[_][i])
right += torch.mul(embeddings.weight[i] , input[_][i]) ** 2
vec = 0.5 * (left ** 2 - right)
del left, right
output[_] = vec
return output
def forward(self, input):
return self.bi_pooling(input,self.embedding)
| 31.5 | 77 | 0.6184 | 374 | 3,087 | 4.882353 | 0.248663 | 0.060241 | 0.038335 | 0.03943 | 0.31161 | 0.230011 | 0.2092 | 0.125958 | 0.086528 | 0.086528 | 0 | 0.01154 | 0.270165 | 3,087 | 97 | 78 | 31.824742 | 0.798935 | 0.101717 | 0 | 0.21875 | 0 | 0 | 0.004056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140625 | false | 0 | 0.109375 | 0.03125 | 0.359375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e3792ff3e2c12900e4deb81ec079584de749ca2 | 1,623 | py | Python | digest/test/test_digest_tools.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | 1 | 2020-10-16T19:30:41.000Z | 2020-10-16T19:30:41.000Z | digest/test/test_digest_tools.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | 15 | 2020-06-18T15:32:06.000Z | 2022-03-03T23:06:24.000Z | digest/test/test_digest_tools.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | import pytest
from digest.digest_tools import Digest, content_path, InvalidDigestException
@pytest.mark.parametrize(
"digest, output_args",
[
("tarsum.v123123+sha1:123deadbeef", ("tarsum.v123123+sha1", "123deadbeef")),
("tarsum.v1+sha256:123123", ("tarsum.v1+sha256", "123123")),
("tarsum.v0+md5:abc", ("tarsum.v0+md5", "abc")),
("tarsum+sha1:abc", ("tarsum+sha1", "abc")),
("sha1:123deadbeef", ("sha1", "123deadbeef")),
("sha256:123123", ("sha256", "123123")),
("md5:abc", ("md5", "abc")),
],
)
def test_parse_good(digest, output_args):
assert Digest.parse_digest(digest) == Digest(*output_args)
assert str(Digest.parse_digest(digest)) == digest
@pytest.mark.parametrize(
"bad_digest",
[
"tarsum.v+md5:abc:",
"sha1:123deadbeefzxczxv",
"sha256123123",
"tarsum.v1+",
"tarsum.v1123+sha1:",
],
)
def test_parse_fail(bad_digest):
with pytest.raises(InvalidDigestException):
Digest.parse_digest(bad_digest)
@pytest.mark.parametrize(
"digest, path",
[
("tarsum.v123123+sha1:123deadbeef", "tarsum/v123123/sha1/12/123deadbeef"),
("tarsum.v1+sha256:123123", "tarsum/v1/sha256/12/123123"),
("tarsum.v0+md5:abc", "tarsum/v0/md5/ab/abc"),
("sha1:123deadbeef", "sha1/12/123deadbeef"),
("sha256:123123", "sha256/12/123123"),
("md5:abc", "md5/ab/abc"),
("md5:1", "md5/01/1"),
("md5.....+++:1", "md5/01/1"),
(".md5.:1", "md5/01/1"),
],
)
def test_paths(digest, path):
assert content_path(digest) == path
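# Observed from the parametrized cases above: content_path shards blob paths
# on the first two characters of the hex digest, left-padding with '0' when
# the digest is shorter than two characters, e.g.:
#   content_path("sha256:123123")  # -> "sha256/12/123123"
#   content_path("md5:1")          # -> "md5/01/1"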
| 30.055556 | 84 | 0.593346 | 185 | 1,623 | 5.113514 | 0.221622 | 0.044397 | 0.071882 | 0.088795 | 0.358351 | 0.287526 | 0.287526 | 0.192389 | 0.031712 | 0.031712 | 0 | 0.148148 | 0.201479 | 1,623 | 53 | 85 | 30.622642 | 0.58179 | 0 | 0 | 0.130435 | 0 | 0 | 0.393099 | 0.117067 | 0 | 0 | 0 | 0 | 0.065217 | 1 | 0.065217 | false | 0 | 0.043478 | 0 | 0.108696 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e3b2896c4c71de1f2f4c4021609291572737a53 | 14,014 | py | Python | index.py | jansusea/video_rent_demo | 17e3d2f239a433fc313c7f2c45fbb46b7573a6a6 | [
"Apache-2.0"
] | 2 | 2019-12-13T06:10:52.000Z | 2019-12-15T14:58:07.000Z | index.py | jansusea/video_rent_demo | 17e3d2f239a433fc313c7f2c45fbb46b7573a6a6 | [
"Apache-2.0"
] | null | null | null | index.py | jansusea/video_rent_demo | 17e3d2f239a433fc313c7f2c45fbb46b7573a6a6 | [
"Apache-2.0"
] | null | null | null | from flask_jsglue import JSGlue
from flask import Flask, render_template, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_restful import reqparse, Api
app = Flask(__name__)
api = Api(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:root@localhost:3306/videorent?charset=utf8mb4'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
js_glue = JSGlue()
js_glue.init_app(app)  # makes url_for available inside JS files
parser = reqparse.RequestParser()
class Customer(db.Model):
id = db.Column('id', db.Integer, primary_key=True)
name = db.Column(db.String(30), unique=True)
phone = db.Column(db.String(20), unique=True)
deposit = db.Column(db.String(10))
comment = db.Column(db.String(50))
def __init__(self, customer_id=None, customer_name=None, customer_phone=None, customer_deposit="0", comment=""):
self.id = customer_id
self.name = customer_name
self.phone = customer_phone
self.deposit = customer_deposit
self.comment = comment
def get_id(self):
return str(self.id)
    # readable representation of the object
def __repr__(self):
return '<Customer %r,%r,%r,%r,%r >' % (self.id, self.name, self.phone, self.deposit, self.comment)
def to_json(self):
return {
'id': self.id,
'name': self.name,
'phone': self.phone,
'deposit': self.deposit,
'comment': self.comment,
}
class Video(db.Model):
id = db.Column('id', db.Integer, primary_key=True)
format = db.Column(db.String(10), unique=True)
name = db.Column(db.String(50), unique=True)
description = db.Column(db.String(256))
comment = db.Column(db.String(256))
def __init__(self, video_id=None, video_format=None, video_name=None, video_description="", comment=""):
self.id = video_id
self.format = video_format
self.name = video_name
self.description = video_description
self.comment = comment
def get_id(self):
return str(self.id)
    # readable representation of the object
def __repr__(self):
return '<Video %r,%r,%r,%r,%r >' % (self.id, self.name, self.format, self.description, self.comment)
def to_json(self):
return {
'id': self.id,
'name': self.name,
'format': self.format,
'description': self.description,
'comment': self.comment,
}
class Rental(db.Model):
id = db.Column('id', db.Integer, primary_key=True)
rental_time = db.Column(db.TIMESTAMP, unique=True)
return_time = db.Column(db.TIMESTAMP, unique=True)
customer_id = db.Column(db.String(20))
video_id = db.Column(db.String(20))
status = db.Column(db.String(10))
comment = db.Column(db.String(256))
def __init__(self, rental_id=None, rental_time=None, return_time=None, customer_id="", video_id="",
status="", comment=""):
self.id = rental_id
self.rental_time = rental_time
self.return_time = return_time
self.customer_id = customer_id
self.video_id = video_id
self.status = status
self.comment = comment
def get_id(self):
return str(self.id)
    # readable representation of the object
    def __repr__(self):
        return '<Rental %r,%r,%r,%r,%r,%r,%r>' % (self.id, self.rental_time, self.return_time, self.customer_id,
                                                  self.video_id, self.status, self.comment)
def to_json(self):
return {
'id': self.id,
'rental_time': self.rental_time,
'return_time': self.return_time,
'customer_id': self.customer_id,
'video_id': self.video_id,
'status': self.status,
'comment': self.comment,
}
class RentalRelation:
def __init__(self, rental_id, video_id, video_name, video_description, customer_id, customer_name,
customer_phone, status,
rental_time, return_time, comment=None):
self.id = rental_id
self.video_id = video_id
self.video_name = video_name
self.video_description = video_description
self.customer_id = customer_id
self.customer_name = customer_name
self.customer_phone = customer_phone
self.status = status
self.rental_time = rental_time
        self.return_time = return_time
        self.comment = comment
    # readable representation of the object
    def __repr__(self):
        return '<RentalRelation %r,%r,%r,%r,%r,%r,%r,%r,%r,%r>' % (self.id, self.video_id, self.video_name,
                                                                   self.video_description, self.customer_id,
                                                                   self.customer_name, self.customer_phone,
                                                                   self.status, self.rental_time, self.return_time)
def to_json(self):
return {
'id': self.id,
'video_id': self.video_id,
'video_name': self.video_name,
'video_description': self.video_description,
'customer_id': self.customer_id,
'customer_name': self.customer_name,
'customer_phone': self.customer_phone,
'status': self.status,
'rental_time': self.rental_time,
'return_time': self.return_time,
}
@app.route('/')
def index():
return render_template('index.html')
@app.route('/get_data')
def get_base_data():
data = db.session.query(Video, Customer, Rental).filter(Rental.video_id == Video.id, Rental.customer_id ==
Customer.id).all()
dict1 = []
for item in data:
format_rental_time = item.Rental.rental_time.strftime("%Y.%m.%d-%H:%M:%S")
format_return_time = item.Rental.return_time.strftime("%Y.%m.%d-%H:%M:%S")
rental_relation_item = RentalRelation(item.Rental.id, item.Video.id, item.Video.name, item.Video.description,
item.Customer.id, item.Customer.name, item.Customer.phone,
item.Rental.status, format_rental_time, format_return_time)
dict1.append(rental_relation_item.to_json())
return jsonify({'results': dict1})
@app.route('/get_video_data')
def get_video_data():
parser.add_argument("id", type=str)
args = parser.parse_args()
item_id = args.get('id')
if item_id is None:
data = db.session.query(Video).all()
else:
data = db.session.query(Video).filter(Video.id == item_id).all()
dict1 = []
for item in data:
video_item = Video(item.id, "", item.name, item.description, "")
dict1.append(video_item.to_json())
return jsonify({'results': dict1})
@app.route('/get_customer_data')
def get_customer_data():
parser.add_argument("id", type=str)
args = parser.parse_args()
item_id = args.get('id')
if item_id is None:
data = db.session.query(Customer).all()
else:
data = db.session.query(Customer).filter(Customer.id == item_id).all()
dict1 = []
for item in data:
customer_item = Customer(item.id, item.name, item.phone, item.deposit,
item.comment)
dict1.append(customer_item.to_json())
return jsonify({'results': dict1})
@app.route('/add_video', methods=['POST'])
def add_video():
parser.add_argument("name")
parser.add_argument("description")
parser.add_argument("comment")
args = parser.parse_args()
video_name = args['name']
video_description = args['description']
comment = args['comment']
    db.create_all()  # in case the tables don't exist yet; otherwise this can be removed
video_data = db.session.query(Video).filter(Video.name == video_name).all()
if len(video_data) == 0:
video = Video(video_name=video_name, video_description=video_description, comment=comment)
db.session.add(video)
db.session.commit()
        return jsonify({'message': 'Added successfully!'}), 200
    else:
        return jsonify({'message': 'Add failed! This video already exists'}), 400
@app.route('/add_customer', methods=['POST'])
def add_customer():
parser.add_argument("phone")
parser.add_argument("name")
parser.add_argument("deposit")
parser.add_argument("comment")
args = parser.parse_args()
customer_phone = args['phone']
customer_name = args['name']
customer_deposit = args['deposit']
comment = args['comment']
    db.create_all()  # in case the tables don't exist yet; otherwise this can be removed
customer_data = db.session.query(Customer).filter(Customer.name == customer_name,
Customer.phone == customer_phone).all()
if len(customer_data) == 0:
customer = Customer(customer_phone=customer_phone, customer_name=customer_name,
customer_deposit=customer_deposit, comment=comment)
db.session.add(customer)
db.session.commit()
        return jsonify({'message': 'Added successfully!'}), 200
    else:
        return jsonify({'message': 'Add failed! This member already exists'}), 400
@app.route('/add_rental', methods=['POST'])
def add_rental():
parser.add_argument("video_name")
parser.add_argument("video_description")
parser.add_argument("customer_phone")
parser.add_argument("customer_name")
parser.add_argument("rental_time")
parser.add_argument("return_time")
parser.add_argument("comment")
args = parser.parse_args()
video_name = args['video_name']
customer_phone = args['customer_phone']
customer_name = args['customer_name']
rental_time = args['rental_time']
return_time = args['return_time']
comment = args['comment']
    db.create_all()  # in case the tables don't exist yet; otherwise this can be removed
video_data = db.session.query(Video).filter(Video.name == video_name).first()
customer_data = db.session.query(Customer).filter(Customer.name == customer_name, Customer.phone ==
customer_phone).first()
if video_data is not None and customer_data is not None:
rental = Rental(rental_time=rental_time, return_time=return_time, customer_id=customer_data.id,
video_id=video_data.id, status=str(1))
db.session.add(rental)
db.session.commit()
        return jsonify({'message': 'Added successfully!'}), 200
    else:
        if video_data is None:
            return jsonify({'message': 'Add failed! No such video in the store'}), 400
        else:
            return jsonify({'message': 'Add failed! No such member'}), 400
@app.route('/update_rental', methods=['PUT'])
def update_rental():
parser.add_argument("id", type=int)
parser.add_argument("video_name", type=str)
parser.add_argument("customer_name", type=str)
parser.add_argument("customer_phone", type=str)
parser.add_argument("rental_time", type=str)
parser.add_argument("return_time", type=str)
parser.add_argument("status", type=str)
parser.add_argument("comment", type=str)
args = parser.parse_args()
item_id = args.get('id')
update_video_name = args.get('video_name')
update_customer_name = args.get('customer_name')
update_customer_phone = args.get('customer_phone')
update_rental_time = args.get('rental_time')
update_return_time = args.get('return_time')
update_status = args.get('status')
update_comment = args.get('comment')
video = db.session.query(Video).filter_by(name=update_video_name).first()
customer = db.session.query(Customer).filter_by(name=update_customer_name, phone=update_customer_phone).first()
rental = db.session.query(Rental).filter_by(id=item_id).first()
    # assign the updated values to the rental record
if rental is not None and video is not None and customer is not None:
rental.customer_id = customer.id
rental.video_id = video.id
rental.return_time = update_return_time
rental.rental_time = update_rental_time
rental.status = update_status
rental.comment = update_comment
        db.session.commit()
        return jsonify({'message': 'Updated successfully!'})
    else:
        print("the rental is None, update error")
        return jsonify({'message': 'Update failed! Rental, video or member not found'}), 400
@app.route('/delete_rental', methods=['DELETE'])
def delete_rental():
parser.add_argument("id", type=str, location='args')
args = parser.parse_args()
raw_id = args.get('id')
rental = db.session.query(Rental).filter_by(id=raw_id).first()
    if rental is not None:
        db.session.delete(rental)
        db.session.commit()
        return jsonify({'message': 'Deleted successfully!'})
    else:
        print("the rental is None, delete error")
        return jsonify({'message': 'Delete failed! Rental not found'}), 404
@app.route('/delete_video', methods=['DELETE'])
def delete_video():
parser.add_argument("id", type=str, location='args')
args = parser.parse_args()
raw_id = args.get('id')
video = db.session.query(Video).filter_by(id=raw_id).first()
rental = db.session.query(Rental).filter_by(video_id=raw_id, status=1).first()
if video is not None and rental is None:
db.session.delete(video)
db.session.commit()
        return jsonify({'message': 'Deleted successfully!'}), 200
    else:
        print("the video is None, delete error")
        return jsonify({'message': 'Video does not exist, or it is still rented out and not yet returned'}), 403
@app.route('/delete_customer', methods=['DELETE'])
def delete_customer():
parser.add_argument("id", type=str, location='args')
args = parser.parse_args()
raw_id = args.get('id')
customer = db.session.query(Customer).filter_by(id=raw_id).first()
rental = db.session.query(Rental).filter_by(customer_id=raw_id, status=1).first()
if customer is not None and rental is None:
db.session.delete(customer)
db.session.commit()
        return jsonify({'message': 'Deleted successfully!'})
    else:
        print("the customer is None, delete error")
        return jsonify({'message': 'Member does not exist, or the member still has unreturned videos!'}), 403
@app.errorhandler(404)
def page_not_found(e):
# note that we set the 404 status explicitly
return render_template('404.html'), 404
if __name__ == '__main__':
app.run(debug=True)
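# Hypothetical smoke tests for the endpoints above (host, port and field
# values are illustrative assumptions, not part of the original app):
#
#   curl http://localhost:5000/get_video_data
#   curl -X POST http://localhost:5000/add_video \
#        -d "name=Casablanca" -d "description=classic" -d "comment="
#   curl -X DELETE "http://localhost:5000/delete_video?id=1"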
| 35.659033 | 125 | 0.630512 | 1,786 | 14,014 | 4.739642 | 0.089586 | 0.031896 | 0.054223 | 0.006143 | 0.572711 | 0.46013 | 0.41394 | 0.327112 | 0.305493 | 0.272888 | 0 | 0.00842 | 0.237263 | 14,014 | 392 | 126 | 35.75 | 0.783516 | 0.020337 | 0 | 0.380503 | 0 | 0 | 0.105919 | 0.011372 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084906 | false | 0 | 0.012579 | 0.040881 | 0.254717 | 0.012579 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e3bf3816d2775c53ca40f975a39eda86b48bb10 | 11,285 | py | Python | everest/repositories/rdb/querying.py | helixyte/everest | 70c9b93c3061db5cb62428349d18b8fb8566411b | [
"MIT"
] | 3 | 2015-03-10T17:38:25.000Z | 2017-04-29T03:47:06.000Z | everest/repositories/rdb/querying.py | helixyte/everest | 70c9b93c3061db5cb62428349d18b8fb8566411b | [
"MIT"
] | 1 | 2015-03-02T16:02:41.000Z | 2015-03-02T16:02:41.000Z | everest/repositories/rdb/querying.py | cenix/everest | 70c9b93c3061db5cb62428349d18b8fb8566411b | [
"MIT"
] | 1 | 2020-07-12T22:46:59.000Z | 2020-07-12T22:46:59.000Z | """
Querying functionality for the rdb repository.
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jan 7, 2013.
"""
from everest.constants import RESOURCE_ATTRIBUTE_KINDS
from everest.entities.base import Entity
from everest.exceptions import MultipleResultsException
from everest.exceptions import NoResultsException
from everest.querying.base import EXPRESSION_KINDS
from everest.querying.filtering import RepositoryFilterSpecificationVisitor
from everest.querying.interfaces import IFilterSpecificationVisitor
from everest.querying.interfaces import IOrderSpecificationVisitor
from everest.querying.ordering import OrderSpecificationVisitor
from everest.querying.ordering import RepositoryOrderSpecificationVisitor
from everest.querying.specifications import order
from everest.repositories.rdb.utils import OrmAttributeInspector
from everest.resources.interfaces import ICollectionResource
from everest.resources.interfaces import IResource
from everest.utils import get_order_specification_visitor
from functools import reduce as func_reduce
from sqlalchemy import and_ as sqlalchemy_and
from sqlalchemy import not_ as sqlalchemy_not
from sqlalchemy import or_ as sqlalchemy_or
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.query import Query as SaQuery
from sqlalchemy.sql.expression import ClauseList
from sqlalchemy.sql.expression import func
from sqlalchemy.sql.expression import over
from zope.interface import implementer # pylint: disable=E0611,F0401
__docformat__ = 'reStructuredText en'
__all__ = ['OptimizedCountingRdbQuery',
'OrderClauseList',
'RdbQuery',
'SqlFilterSpecificationVisitor',
'SqlOrderSpecificationVisitor',
]
@implementer(IFilterSpecificationVisitor)
class SqlFilterSpecificationVisitor(RepositoryFilterSpecificationVisitor):
"""
Filter specification visitor implementation for the RDB repository
(builds a SQL expression).
"""
def __init__(self, entity_class, custom_clause_factories=None):
"""
        Constructs a SqlFilterSpecificationVisitor.
:param entity_class: an entity class that is mapped with SQLAlchemy
:param custom_clause_factories: a map containing custom clause factory
functions for selected (attribute name, operator) combinations.
"""
RepositoryFilterSpecificationVisitor.__init__(self, entity_class)
if custom_clause_factories is None:
custom_clause_factories = {}
self.__custom_clause_factories = custom_clause_factories
def visit_nullary(self, spec):
key = (spec.attr_name, spec.operator.name)
if key in self.__custom_clause_factories:
self._push(self.__custom_clause_factories[key](spec.attr_value))
else:
RepositoryFilterSpecificationVisitor.visit_nullary(self, spec)
def _starts_with_op(self, spec):
return self.__build(spec.attr_name, 'startswith', spec.attr_value)
def _ends_with_op(self, spec):
return self.__build(spec.attr_name, 'endswith', spec.attr_value)
def _contains_op(self, spec):
return self.__build(spec.attr_name, 'contains', spec.attr_value)
def _contained_op(self, spec):
if ICollectionResource.providedBy(spec.attr_value): # pylint:disable=E1101
# FIXME: This is a hack that allows us to query for containment
# of a member in an arbitrary collection (not supported
# by SQLAlchemy yet).
spec.attr_name = spec.attr_name + '.id'
spec.attr_value = [rc.id for rc in spec.attr_value]
return self.__build(spec.attr_name, 'in_', spec.attr_value)
def _equal_to_op(self, spec):
return self.__build(spec.attr_name, '__eq__', spec.attr_value)
def _less_than_op(self, spec):
return self.__build(spec.attr_name, '__lt__', spec.attr_value)
def _less_than_or_equal_to_op(self, spec):
return self.__build(spec.attr_name, '__le__', spec.attr_value)
def _greater_than_op(self, spec):
return self.__build(spec.attr_name, '__gt__', spec.attr_value)
def _greater_than_or_equal_to_op(self, spec):
return self.__build(spec.attr_name, '__ge__', spec.attr_value)
def _in_range_op(self, spec):
from_value, to_value = spec.attr_value
return self.__build(spec.attr_name, 'between', from_value, to_value)
def _conjunction_op(self, spec, *expressions):
return sqlalchemy_and(*expressions)
def _disjunction_op(self, spec, *expressions):
return sqlalchemy_or(*expressions)
def _negation_op(self, spec, expression):
return sqlalchemy_not(expression)
def __build(self, attribute_name, sql_op, *values):
# Builds an SQL expression from the given (possibly dotted)
# attribute name, SQL operation name, and values.
exprs = []
infos = OrmAttributeInspector.inspect(self._entity_class,
attribute_name)
count = len(infos)
for idx, info in enumerate(infos):
kind, entity_attr = info
if idx == count - 1:
                # last attribute in the (possibly dotted) chain: apply the SQL
                # operation, unwrapping resources to their underlying entities
                args = [val.get_entity() if IResource.providedBy(val) else val # pylint: disable=E1101
                        for val in values]
expr = getattr(entity_attr, sql_op)(*args)
elif kind == RESOURCE_ATTRIBUTE_KINDS.MEMBER:
expr = entity_attr.has
exprs.insert(0, expr)
elif kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION:
expr = entity_attr.any
exprs.insert(0, expr)
return func_reduce(lambda g, h: h(g), exprs, expr)
class OrderClauseList(ClauseList):
"""
Custom clause list for ORDER BY clauses.
Suppresses the grouping parentheses which would trigger a syntax error.
"""
def self_group(self, against=None):
return self
@implementer(IOrderSpecificationVisitor)
class SqlOrderSpecificationVisitor(RepositoryOrderSpecificationVisitor):
"""
Order specification visitor implementation for the rdb repository
(builds a SQL expression).
"""
def __init__(self, entity_class, custom_join_clauses=None):
"""
Constructs a SqlOrderSpecificationVisitor
:param klass: a class that is mapped to a selectable using SQLAlchemy
"""
RepositoryOrderSpecificationVisitor.__init__(self, entity_class)
if custom_join_clauses is None:
custom_join_clauses = {}
self.__custom_join_clauses = custom_join_clauses
self.__joins = set()
def visit_nullary(self, spec):
OrderSpecificationVisitor.visit_nullary(self, spec)
if spec.attr_name in self.__custom_join_clauses:
self.__joins = set(self.__custom_join_clauses[spec.attr_name])
def order_query(self, query):
for join_expr in self.__joins:
# FIXME: only join when needed here.
query = query.outerjoin(join_expr)
return query.order(self.expression)
def _conjunction_op(self, spec, *expressions):
clauses = []
for expr in expressions:
if isinstance(expr, ClauseList):
clauses.extend(expr.clauses)
else:
clauses.append(expr)
return OrderClauseList(*clauses)
def _asc_op(self, spec):
return self.__build(spec.attr_name, 'asc')
def _desc_op(self, spec):
return self.__build(spec.attr_name, 'desc')
def __build(self, attribute_name, sql_op):
expr = None
infos = OrmAttributeInspector.inspect(self._entity_class,
attribute_name)
count = len(infos)
for idx, info in enumerate(infos):
kind, entity_attr = info
if idx == count - 1:
expr = getattr(entity_attr, sql_op)()
elif kind != RESOURCE_ATTRIBUTE_KINDS.TERMINAL:
# FIXME: Avoid adding multiple attrs with the same target here.
self.__joins.add(entity_attr)
return expr
class Query(SaQuery):
def __init__(self, entities, session, **kw):
SaQuery.__init__(self, entities, session, **kw)
ent_cls = entities[0]
if isinstance(ent_cls, type) and issubclass(ent_cls, Entity):
self._entity_class = ent_cls
else: # just for compatibility pragma: no cover
self._entity_class = None
def order(self, order_expression):
return SaQuery.order_by(self, order_expression)
def order_by(self, *args):
spec = order(*args)
vst_cls = get_order_specification_visitor(EXPRESSION_KINDS.SQL)
vst = vst_cls(self._entity_class)
spec.accept(vst)
return vst.order_query(self)
class RdbQuery(Query):
"""
Query class for the RDB backend.
"""
def one(self):
# Overwritten so we can translate exceptions.
try:
return Query.one(self)
except NoResultFound:
raise NoResultsException('No results found when exactly one '
'was expected.')
except MultipleResultsFound:
raise MultipleResultsException('More than one result found '
'where exactly one was expected.')
class SimpleCountingRdbQuery(RdbQuery):
"""
Simple counting query for the RDB backend.
We want the count to reflect the true size of the aggregate, without
slicing.
"""
def count(self):
count_query = self.limit(None).offset(None)
        # Avoid circular calls by "downcasting" the new query.
count_query.__class__ = Query
return count_query.count()
class OptimizedCountingRdbQuery(RdbQuery): # pragma: no cover
"""
Optimized counting query for the RDB backend.
The optimization uses the OVER windowing SQL statement to retrieve the
collection data ''and'' count in ''one'' database roundtrip. Note that
this query object will always return the same count and data once the
:method:`__iter__` or :method:`count` method has been called.
"""
def __init__(self, entities, session, **kw):
RdbQuery.__init__(self, entities, session, **kw)
self.__count = None
self.__data = None
def _clone(self):
clone = RdbQuery._clone(self)
# pylint: disable=W0212
clone.__count = None
clone.__data = None
# pylint: enable=W0212
return clone
def __iter__(self):
if self.__data is None:
self.__count, self.__data = self.__load()
return iter(self.__data)
def count(self):
if self.__count is None:
self.__count, self.__data = self.__load()
return self.__count
    def __load(self):
        query = self.add_columns(over(func.count(1)).label('_count'))
        rows = list(Query.__iter__(query))
        res = [tup[0] for tup in rows]
        if len(res) > 0:
            # Every row carries the same window count; read it from the first.
            count = rows[0]._count # pylint:disable-msg=W0212
        else:
            count = 0
        return count, res
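# A minimal sketch of how the order visitor above is driven, mirroring
# RdbQuery.order_by; "MyEntity" and a session configured with this module's
# query class (query_cls=RdbQuery) are assumptions for illustration only.
#
#   vst_cls = get_order_specification_visitor(EXPRESSION_KINDS.SQL)
#   vst = vst_cls(MyEntity)
#   order('name').accept(vst)
#   ordered_query = vst.order_query(session.query(MyEntity))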
| 37.616667 | 99 | 0.672751 | 1,304 | 11,285 | 5.552147 | 0.233129 | 0.034254 | 0.028177 | 0.031492 | 0.293508 | 0.218232 | 0.150967 | 0.14268 | 0.14268 | 0.106077 | 0 | 0.005434 | 0.249889 | 11,285 | 299 | 100 | 37.742475 | 0.849852 | 0.175986 | 0 | 0.14359 | 0 | 0 | 0.034289 | 0.009041 | 0 | 0 | 0 | 0.006689 | 0 | 1 | 0.174359 | false | 0 | 0.133333 | 0.076923 | 0.487179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e3c4d4cb0dbc4f7cda5f5d0400be26747fb6df0 | 1,259 | py | Python | myLogger.py | DavideFrr/ibmqx_experiments | f9e3c2e85d3fd8ea3f9a69b7bee26c135eabc755 | [
"Apache-2.0"
] | 6 | 2017-11-07T12:01:49.000Z | 2018-04-23T06:45:05.000Z | myLogger.py | DavideFrr/ibmqx_envariance | f9e3c2e85d3fd8ea3f9a69b7bee26c135eabc755 | [
"Apache-2.0"
] | null | null | null | myLogger.py | DavideFrr/ibmqx_envariance | f9e3c2e85d3fd8ea3f9a69b7bee26c135eabc755 | [
"Apache-2.0"
] | 1 | 2018-05-07T06:34:42.000Z | 2018-05-07T06:34:42.000Z | # Copyright 2017 Quantum Information Science, University of Parma, Italy. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
__author__ = "Davide Ferrari"
__copyright__ = "Copyright 2017, Quantum Information Science, University of Parma, Italy"
__license__ = "Apache"
__version__ = "2.0"
__email__ = "davide.ferrari8@studenti.unipr.it"
import logging
logging.VERBOSE = 5
class MyHandler(logging.StreamHandler):
def __init__(self):
logging.StreamHandler.__init__(self)
logging.addLevelName(logging.VERBOSE, "VERBOSE")
formatter = logging.Formatter('%(filename)s - %(levelname)s - %(message)s')
self.setFormatter(formatter)
| 38.151515 | 94 | 0.70135 | 155 | 1,259 | 5.516129 | 0.612903 | 0.070175 | 0.046784 | 0.072515 | 0.140351 | 0.140351 | 0.140351 | 0.140351 | 0.140351 | 0 | 0 | 0.015023 | 0.154091 | 1,259 | 32 | 95 | 39.34375 | 0.787793 | 0.549643 | 0 | 0 | 0 | 0 | 0.318841 | 0.059783 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e40bb23a7df34b4097b777f22663a9bcc159f40 | 3,742 | py | Python | predicting solar enegy generation/solar.py | imsanjoykb/Working-Analysis | 12267ccbe833ea0daedb34990ba1757f3888e447 | [
"Apache-2.0"
] | 3 | 2020-12-31T19:23:45.000Z | 2022-02-07T16:09:14.000Z | predicting solar enegy generation/solar.py | imsanjoykb/Working-Analysis | 12267ccbe833ea0daedb34990ba1757f3888e447 | [
"Apache-2.0"
] | null | null | null | predicting solar enegy generation/solar.py | imsanjoykb/Working-Analysis | 12267ccbe833ea0daedb34990ba1757f3888e447 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 17:57:02 2020
@author: Atique Akhtar
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV  # GridSearchCV is used in the SVR section below
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn import neighbors
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
import pickle
weather_df=pd.read_csv("C:\\Users\\EE1303227\\Desktop\\flask app\\pavagada_nasa_dataset.csv")
weather_df.info()
weather_desc=pd.DataFrame(weather_df.describe())
weather_df['GENERATED_ENERGY'] = weather_df.apply(lambda row: row.ALLSKY_SFC_LW_DWN*1.6*15.6*0.75 , axis = 1)
weather_df.columns
df=weather_df[['PRECTOT', 'QV2M', 'RH2M', 'PS', 'TS','T2MDEW', 'T2MWET', 'T2M_MAX',
'T2M_MIN', 'T2M', 'WS10M', 'WS50M','WS10M_MAX', 'WS50M_MAX', 'WS50M_MIN',
'WS10M_MIN', 'GENERATED_ENERGY']]
df_corr=pd.DataFrame(df.corr())
X=df[['PRECTOT', 'QV2M', 'PS', 'T2M_MIN', 'T2M','WS10M_MAX']]
y=df['GENERATED_ENERGY']
X_corr=pd.DataFrame(X.corr())
Xtrain,Xtest,ytrain,ytest=train_test_split(X, y, test_size=0.3, random_state=100)
# LINEAR REGRESSION
lm=LinearRegression()
lm.fit(Xtrain,ytrain)
print(lm.intercept_)
print(lm.coef_)
X.columns
cdf=pd.DataFrame(lm.coef_,Xtrain.columns,columns=['coeff'])
predictions = lm.predict(Xtest)
plt.scatter(ytest,predictions)
sns.distplot(ytest - predictions) # if the residuals are normally distributed, the linear model is a reasonable choice
metrics.mean_absolute_error(ytest,predictions)
metrics.mean_squared_error(ytest,predictions)
np.sqrt(metrics.mean_squared_error(ytest,predictions))
# KNN
scaler=StandardScaler()
scaler.fit(X)
scaled_features=scaler.transform(X)
X_feat=pd.DataFrame(scaled_features,columns=X.columns)
Xtrain,Xtest,ytrain,ytest=train_test_split(X_feat, y, test_size=0.3, random_state=0)
rmse_val = [] #to store rmse values for different k
for K in range(40):
K = K+1
model = neighbors.KNeighborsRegressor(n_neighbors = K)
model.fit(Xtrain, ytrain) #fit the model
pred=model.predict(Xtest) #make prediction on test set
error = np.sqrt(metrics.mean_squared_error(ytest,pred)) #calculate rmse
rmse_val.append(error) #store rmse values
print('RMSE value for k= ' , K , 'is:', error)
#plotting the rmse values against k values
curve = pd.DataFrame(rmse_val) #elbow curve
curve.plot()
knn_model = neighbors.KNeighborsRegressor(n_neighbors = 25)
knn_model.fit(Xtrain, ytrain) #fit the model
pred=knn_model.predict(Xtest) #make prediction on test set
np.sqrt(metrics.mean_squared_error(ytest,pred)) #calculate rmse
# RANDOM FOREST
rf_model=RandomForestRegressor(n_estimators=300)
rf_model.fit(Xtrain,ytrain)
pred_rf=rf_model.predict(Xtest)
plt.scatter(ytest,pred_rf)
sns.distplot((ytest-pred_rf))
metrics.mean_absolute_error(ytest,pred_rf)
metrics.mean_squared_error(ytest,pred_rf)
np.sqrt(metrics.mean_squared_error(ytest,pred_rf))
# SVR
param_grid={'C':[0.1,1,10,100,1000], 'gamma':[1,0.1,0.01,0.001,0.0001]}
grid=GridSearchCV(SVR(),param_grid,verbose=3)
grid.fit(Xtrain,ytrain)
grid.best_params_
#{'C': 1000, 'gamma': 0.01}
grid.best_estimator_
#SVR(C=1000, cache_size=200, coef0=0.0, degree=3, epsilon=0.1, gamma=0.01,
# kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
pred_grid=grid.predict(Xtest)
metrics.mean_absolute_error(ytest,pred_grid)
metrics.mean_squared_error(ytest,pred_grid)
np.sqrt(metrics.mean_squared_error(ytest,pred_grid))
#6.0520296890047245
# PICKLE FILE
pickle.dump(lm, open('model.pkl', 'wb'))
model=pickle.load(open('model.pkl','rb'))
print(model.predict([[12.17,0.017,94.42,21.47,23.3,6.67]]))
| 29.464567 | 110 | 0.756547 | 593 | 3,742 | 4.608769 | 0.340641 | 0.044274 | 0.052689 | 0.067325 | 0.312477 | 0.245884 | 0.203439 | 0.147091 | 0.037322 | 0.037322 | 0 | 0.048694 | 0.099947 | 3,742 | 126 | 111 | 29.698413 | 0.762767 | 0.158739 | 0 | 0 | 0 | 0 | 0.094551 | 0.021154 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.155844 | 0 | 0.155844 | 0.051948 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e42f7faf93d8ca30408b7f1bf98b2c4582082bf | 7,047 | py | Python | scoring_functions.py | juius/GB-GA | 4b49f1822c5190e8b2bbb8b7403eed30af9e50fd | [
"MIT"
] | 42 | 2018-10-24T09:31:07.000Z | 2021-04-10T04:04:51.000Z | scoring_functions.py | juius/GB-GA | 4b49f1822c5190e8b2bbb8b7403eed30af9e50fd | [
"MIT"
] | null | null | null | scoring_functions.py | juius/GB-GA | 4b49f1822c5190e8b2bbb8b7403eed30af9e50fd | [
"MIT"
] | 25 | 2018-10-24T09:01:59.000Z | 2021-04-24T10:51:22.000Z | '''
Written by Jan H. Jensen 2018
'''
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Descriptors
from rdkit.DataStructs.cDataStructs import TanimotoSimilarity
from rdkit.Chem import rdFMCS
from rdkit import rdBase
rdBase.DisableLog('rdApp.error')
import numpy as np
import sys
from multiprocessing import Pool
import subprocess
import os
import shutil
import string
import random
import sascorer
logP_values = np.loadtxt('logP_values.txt')
SA_scores = np.loadtxt('SA_scores.txt')
cycle_scores = np.loadtxt('cycle_scores.txt')
SA_mean = np.mean(SA_scores)
SA_std=np.std(SA_scores)
logP_mean = np.mean(logP_values)
logP_std= np.std(logP_values)
cycle_mean = np.mean(cycle_scores)
cycle_std=np.std(cycle_scores)
def calculate_score(args):
'''Parallelize at the score level (not currently in use)'''
gene, function, scoring_args = args
score = function(gene,scoring_args)
return score
def calculate_scores_parallel(population,function,scoring_args, n_cpus):
'''Parallelize at the score level (not currently in use)'''
args_list = []
args = [function, scoring_args]
for gene in population:
args_list.append([gene]+args)
with Pool(n_cpus) as pool:
scores = pool.map(calculate_score, args_list)
return scores
def calculate_scores(population,function,scoring_args):
if 'pop' in function.__name__:
scores = function(population,scoring_args)
else:
scores = [function(gene,scoring_args) for gene in population]
return scores
def logP_max(m, dummy):
score = logP_score(m)
return max(0.0, score)
def logP_target(m,args):
target, sigma = args
score = logP_score(m)
score = GaussianModifier(score, target, sigma)
return score
def logP_score(m):
try:
logp = Descriptors.MolLogP(m)
except:
        print(m, Chem.MolToSmiles(m))
sys.exit('failed to make a molecule')
SA_score = -sascorer.calculateScore(m)
#cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(m)))
cycle_list = m.GetRingInfo().AtomRings() #remove networkx dependence
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([ len(j) for j in cycle_list ])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
cycle_score = -cycle_length
SA_score_norm=(SA_score-SA_mean)/SA_std
logp_norm=(logp-logP_mean)/logP_std
cycle_score_norm=(cycle_score-cycle_mean)/cycle_std
score_one = SA_score_norm + logp_norm + cycle_score_norm
return score_one
def shell(cmd, shell=False):
if shell:
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
cmd = cmd.split()
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
return output
def write_xtb_input_file(fragment, fragment_name):
number_of_atoms = fragment.GetNumAtoms()
charge = Chem.GetFormalCharge(fragment)
symbols = [a.GetSymbol() for a in fragment.GetAtoms()]
for i,conf in enumerate(fragment.GetConformers()):
file_name = fragment_name+"+"+str(i)+".xyz"
with open(file_name, "w") as file:
file.write(str(number_of_atoms)+"\n")
file.write("title\n")
for atom,symbol in enumerate(symbols):
p = conf.GetAtomPosition(atom)
line = " ".join((symbol,str(p.x),str(p.y),str(p.z),"\n"))
file.write(line)
if charge !=0:
file.write("$set\n")
file.write("chrg "+str(charge)+"\n")
file.write("$end")
def get_structure(mol,n_confs):
mol = Chem.AddHs(mol)
new_mol = Chem.Mol(mol)
AllChem.EmbedMultipleConfs(mol,numConfs=n_confs,useExpTorsionAnglePrefs=True,useBasicKnowledge=True)
energies = AllChem.MMFFOptimizeMoleculeConfs(mol,maxIters=2000, nonBondedThresh=100.0)
energies_list = [e[1] for e in energies]
min_e_index = energies_list.index(min(energies_list))
new_mol.AddConformer(mol.GetConformer(min_e_index))
return new_mol
def compute_absorbance(mol,n_confs,path):
mol = get_structure(mol,n_confs)
dir = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
os.mkdir(dir)
os.chdir(dir)
write_xtb_input_file(mol, 'test')
shell(path+'/xtb4stda test+0.xyz',shell=False)
out = shell(path+'/stda_v1.6.1 -xtb -e 10',shell=False)
#data = str(out).split('Rv(corr)\\n')[1].split('alpha')[0].split('\\n') # this gets all the lines
data = str(out).split('Rv(corr)\\n')[1].split('(')[0]
wavelength, osc_strength = float(data.split()[2]), float(data.split()[3])
os.chdir('..')
shutil.rmtree(dir)
return wavelength, osc_strength
def absorbance_target(mol,args):
n_confs, path, target, sigma, threshold = args
try:
wavelength, osc_strength = compute_absorbance(mol,n_confs,path)
except:
return 0.0
score = GaussianModifier(wavelength, target, sigma)
score += ThresholdedLinearModifier(osc_strength,threshold)
return score
# GuacaMol article https://arxiv.org/abs/1811.09621
# adapted from https://github.com/BenevolentAI/guacamol/blob/master/guacamol/utils/fingerprints.py
def get_ECFP4(mol):
return AllChem.GetMorganFingerprint(mol, 2)
def get_ECFP6(mol):
return AllChem.GetMorganFingerprint(mol, 3)
def get_FCFP4(mol):
return AllChem.GetMorganFingerprint(mol, 2, useFeatures=True)
def get_FCFP6(mol):
return AllChem.GetMorganFingerprint(mol, 3, useFeatures=True)
def rediscovery(mol,args):
target = args[0]
try:
fp_mol = get_ECFP4(mol)
fp_target = get_ECFP4(target)
score = TanimotoSimilarity(fp_mol, fp_target)
return score
except:
print('Failed ',Chem.MolToSmiles(mol))
return None
def MCS(mol,args):
target = args[0]
try:
mcs = rdFMCS.FindMCS([mol, target], bondCompare=rdFMCS.BondCompare.CompareOrderExact,ringMatchesRingOnly=True,completeRingsOnly=True)
score = mcs.numAtoms/target.GetNumAtoms()
return score
except:
print('Failed ',Chem.MolToSmiles(mol))
return None
def similarity(mol,target,threshold):
score = rediscovery(mol,target)
if score:
return ThresholdedLinearModifier(score,threshold)
else:
return None
# adapted from https://github.com/BenevolentAI/guacamol/blob/master/guacamol/score_modifier.py
def ThresholdedLinearModifier(score,threshold):
return min(score,threshold)/threshold
def GaussianModifier(score, target, sigma):
try:
score = np.exp(-0.5 * np.power((score - target) / sigma, 2.))
except:
score = 0.0
return score
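# Worked example for GaussianModifier (illustrative values): with target=200
# and sigma=50, a score of 250 lies exactly one sigma away, so
#   GaussianModifier(250., 200., 50.)  # -> exp(-0.5) ~ 0.607
# while a score exactly on target returns 1.0.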
if __name__ == "__main__":
n_confs = 20
xtb_path = '/home/jhjensen/stda'
target = 200.
sigma = 50.
threshold = 0.3
smiles = 'Cc1occn1' # Tsuda I
mol = Chem.MolFromSmiles(smiles)
wavelength, osc_strength = compute_absorbance(mol,n_confs,xtb_path)
print(wavelength, osc_strength)
score = absorbance_target(mol,[n_confs, xtb_path, target, sigma, threshold])
print(score)
| 28.188 | 137 | 0.711225 | 976 | 7,047 | 4.981557 | 0.26332 | 0.011107 | 0.011107 | 0.029617 | 0.20835 | 0.194981 | 0.122995 | 0.122995 | 0.103661 | 0.050185 | 0 | 0.012615 | 0.167589 | 7,047 | 249 | 138 | 28.301205 | 0.816229 | 0.087555 | 0 | 0.186813 | 0 | 0 | 0.035619 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10989 | false | 0 | 0.082418 | 0.027473 | 0.318681 | 0.049451 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e4e63042228f66cff5a912d41b87d37722d2937 | 3,298 | py | Python | Task2/main.py | KKowalewski24/ADZ | 8a04570a1f6f08506572386b2312a259a8308f56 | [
"MIT"
] | null | null | null | Task2/main.py | KKowalewski24/ADZ | 8a04570a1f6f08506572386b2312a259a8308f56 | [
"MIT"
] | null | null | null | Task2/main.py | KKowalewski24/ADZ | 8a04570a1f6f08506572386b2312a259a8308f56 | [
"MIT"
] | null | null | null | from argparse import ArgumentParser, Namespace
from typing import Any, Dict, Tuple
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.metrics import precision_score, recall_score
from sklearn.neighbors import LocalOutlierFactor
from module.LatexGenerator import LatexGenerator
from module.OutlierAgglomerativeClustering import OutlierAgglomerativeClustering
from module.OutlierKMeans import OutlierKMeans
from module.plot import draw_plots
from module.reader import read_http_dataset, read_mammography_dataset, read_synthetic_dataset
from module.utils import create_directory, display_finish, run_main
"""
How to run:
python main.py -s -c lof
"""
# VAR ------------------------------------------------------------------------ #
RESULTS_DIR = "results/"
latex_generator: LatexGenerator = LatexGenerator(RESULTS_DIR)
clusterizers: Dict[str, Any] = {
"kmeans": (OutlierKMeans, int, float),
"agglomerative": (OutlierAgglomerativeClustering, float, float),
"db_scan": (DBSCAN, float),
"lof": (LocalOutlierFactor, int)
}
datasets: Dict[str, Tuple[np.ndarray, np.ndarray]] = {
"http": read_http_dataset(),
"mammography": read_mammography_dataset(),
"synthetic": read_synthetic_dataset(),
}
# MAIN ----------------------------------------------------------------------- #
def main() -> None:
args = prepare_args()
chosen_clusterizer_name = args.clusterizer
chosen_dataset_name = args.dataset
algorithm_params = args.algorithm_params
save_stats = args.save
create_directory(RESULTS_DIR)
X, y = datasets[chosen_dataset_name]
    params = [
        param_type(param)
        for param, param_type in zip(algorithm_params, clusterizers[chosen_clusterizer_name][1:])
    ]
y_pred = clusterizers[chosen_clusterizer_name][0](*params).fit_predict(X)
recall = np.round(recall_score(y, y_pred, average=None, zero_division=0)[0], 2)
precision = np.round(precision_score(y, y_pred, average=None, zero_division=0)[0], 2)
print(f"Recall {recall} & Precision {precision}")
print(f"{chosen_clusterizer_name} ({algorithm_params}) - {chosen_dataset_name}")
name = (f"{chosen_clusterizer_name}_{chosen_dataset_name}_"
f"{'_'.join([str(param).replace('.', ',') for param in algorithm_params])}_")
title = name + f"Rcl={recall}_Prec={precision}"
draw_plots(X, y_pred, name, title, RESULTS_DIR, save_stats)
display_finish()
# DEF ------------------------------------------------------------------------ #
def prepare_args() -> Namespace:
arg_parser = ArgumentParser()
arg_parser.add_argument(
"-c", "--clusterizer", type=str, choices=clusterizers.keys(),
help="Name of clusterizer"
)
arg_parser.add_argument(
"-ds", "--dataset", type=str, choices=datasets.keys(),
help="Name of dataset"
)
arg_parser.add_argument(
"-ap", "--algorithm_params", nargs="+", required=True, type=str,
help="List of arguments for certain algorithm"
)
arg_parser.add_argument(
"-s", "--save", default=False, action="store_true", help="Save charts to files"
)
return arg_parser.parse_args()
# __MAIN__ ------------------------------------------------------------------- #
if __name__ == "__main__":
run_main(main)
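# Additional assumed invocations (parameter values are illustrative and follow
# the per-clusterizer parameter types declared in the table above):
#
#   python main.py -c db_scan -ds synthetic -ap 0.5
#   python main.py -c kmeans -ds http -ap 8 0.1 -s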
| 35.462366 | 93 | 0.647665 | 366 | 3,298 | 5.579235 | 0.338798 | 0.029383 | 0.05142 | 0.039177 | 0.036239 | 0.036239 | 0.036239 | 0.036239 | 0.036239 | 0.036239 | 0 | 0.002876 | 0.156458 | 3,298 | 92 | 94 | 35.847826 | 0.731129 | 0.093996 | 0 | 0.059701 | 0 | 0 | 0.167009 | 0.05373 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.179104 | 0 | 0.223881 | 0.029851 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e4f198f80a2b90957f00d6e9a760ddb7d4d2c00 | 1,534 | py | Python | test/test_matching_with_dont_cares.py | matkaczmarek/string-algorithms | df1367e633ee0b3377cbdd67a77982f373fe2459 | [
"MIT"
] | 1 | 2020-06-27T01:33:43.000Z | 2020-06-27T01:33:43.000Z | test/test_matching_with_dont_cares.py | TenGumis/string-algorithms | e57a9dc6150e92ab65cad4a5c1e68533b7166eb7 | [
"MIT"
] | null | null | null | test/test_matching_with_dont_cares.py | TenGumis/string-algorithms | e57a9dc6150e92ab65cad4a5c1e68533b7166eb7 | [
"MIT"
] | null | null | null | import itertools
import os
import unittest
from generator import rand
from approximate_string_matching import dont_care, matching_with_dont_cares
class TestExactMatchingWithDontCaresCase(unittest.TestCase):
run_large = unittest.skipUnless(
os.environ.get('LARGE', False), 'Skip test in small runs')
def make_test(self, text, pattern, result):
matches = matching_with_dont_cares.exact_matching_with_dont_cares(
text, pattern, len(text), len(pattern))
self.assertEqual(result, list(matches))
def test_given_input_with_no_wildcards_returns_matches(self):
self.make_test('#abbabaaa', '#ab', [1, 4])
def test_given_input_with_wildcards_returns_matches(self):
self.make_test('#abbabaaa', '#??a', [2, 4, 5, 6])
def test_simple(self):
self.make_test('#aa', '#a', [1, 2])
@run_large
def test_random_exact_string_matching(self):
T, n, m, A = 100, 500, 10, ['a', 'b']
for _ in range(T):
t, w = rand.random_word(n, A), rand.random_word(m, A + ['?'])
reference = list(dont_care.basic_fft(t, w, n, m))
self.make_test(t, w, reference)
@run_large
def test_all_exact_string_matching(self):
N, M, A = 7, 3, ['a', 'b']
for n in range(2, N + 1):
for m in range(1, M + 1):
for t in itertools.product(A, repeat = n):
t = '#' + ''.join(t)
for w in itertools.product(A + ['?'], repeat = m):
w = '#' + ''.join(w)
reference = list(dont_care.basic_fft(t, w, n, m))
self.make_test(t, w, reference)
| 34.088889 | 75 | 0.643416 | 231 | 1,534 | 4.051948 | 0.324675 | 0.051282 | 0.064103 | 0.067308 | 0.318376 | 0.220085 | 0.220085 | 0.220085 | 0.119658 | 0.119658 | 0 | 0.018122 | 0.208605 | 1,534 | 44 | 76 | 34.863636 | 0.752883 | 0 | 0 | 0.166667 | 0 | 0 | 0.043025 | 0 | 0 | 0 | 0 | 0 | 0.027778 | 1 | 0.166667 | false | 0 | 0.138889 | 0 | 0.361111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e4f435f06f8b0f3c610c16275c5fe53209837e7 | 1,902 | py | Python | nengo/utils/network.py | HugoChateauLaurent/nengo | 749893186ee09aa6c621a40da3ffd3878114db9c | [
"BSD-2-Clause"
] | null | null | null | nengo/utils/network.py | HugoChateauLaurent/nengo | 749893186ee09aa6c621a40da3ffd3878114db9c | [
"BSD-2-Clause"
] | null | null | null | nengo/utils/network.py | HugoChateauLaurent/nengo | 749893186ee09aa6c621a40da3ffd3878114db9c | [
"BSD-2-Clause"
] | null | null | null | import nengo
from .magic import decorator
@decorator
def with_self(method, network, args, kwargs):
"""Wraps a method with ``with network:``.
This makes it easy to add methods to a network that create new
Nengo objects. Instead of writing ``with self`` at the top of the method
and indenting everything over, you can instead use this decorator.
Example
-------
The two methods in the following class do the same thing::
class MyNetwork(nengo.Network):
def add_one_1(self):
with self:
node = nengo.Node(output=1)
@with_self
def add_one_2(self):
node = nengo.Node(output=1)
"""
with network:
return method(*args, **kwargs)
def activate_direct_mode(network):
"""Activates direct mode for a network.
This sets the neuron type of all ensembles to a `nengo.Direct`
instance unless:
- there is a connection to or from the ensemble's neurons
- there is a probe on an ensemble's neurons
- the ensemble has a connection with a learning rule attached.
Parameters
----------
network : Network
Network to activate direct mode for.
"""
requires_neurons = set()
for c in network.all_connections:
if isinstance(c.pre_obj, nengo.ensemble.Neurons):
requires_neurons.add(c.pre_obj.ensemble)
if isinstance(c.post_obj, nengo.ensemble.Neurons):
requires_neurons.add(c.post_obj.ensemble)
if c.learning_rule_type is not None:
requires_neurons.add(c.pre_obj)
requires_neurons.add(c.post_obj)
for p in network.all_probes:
if isinstance(p.obj, nengo.ensemble.Neurons):
requires_neurons.add(p.obj.ensemble)
for e in network.all_ensembles:
if e not in requires_neurons:
e.neuron_type = nengo.Direct()
| 29.71875 | 76 | 0.640904 | 261 | 1,902 | 4.56705 | 0.360153 | 0.088087 | 0.075503 | 0.063758 | 0.205537 | 0.205537 | 0.151846 | 0.07047 | 0 | 0 | 0 | 0.002905 | 0.276025 | 1,902 | 63 | 77 | 30.190476 | 0.862745 | 0.476866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e509497962389e6ed71b35b5ec1d4dff24d9090 | 779 | py | Python | acme/resources/DVI.py | reinaortega/ACME-oneM2M-CSE | ee45a6a2bcbb0fd0397cfffc0522fa252d7e98be | [
"BSD-3-Clause"
] | null | null | null | acme/resources/DVI.py | reinaortega/ACME-oneM2M-CSE | ee45a6a2bcbb0fd0397cfffc0522fa252d7e98be | [
"BSD-3-Clause"
] | null | null | null | acme/resources/DVI.py | reinaortega/ACME-oneM2M-CSE | ee45a6a2bcbb0fd0397cfffc0522fa252d7e98be | [
"BSD-3-Clause"
] | null | null | null | #
# DVI.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# ResourceType: mgmtObj:DeviceInfo
#
from .MgmtObj import *
from Constants import Constants as C
import Utils
defaultDeviceType = 'unknown'
defaultModel = "unknown"
defaultManufacturer = "unknown"
defaultDeviceLabel = "unknown serial id"
class DVI(MgmtObj):
def __init__(self, jsn=None, pi=None, create=False):
super().__init__(jsn, pi, C.tsDVI, C.mgdDVI, create=create)
if self.json is not None:
self.setAttribute('dty', defaultDeviceType, overwrite=False)
self.setAttribute('mod', defaultModel, overwrite=False)
self.setAttribute('man', defaultManufacturer, overwrite=False)
self.setAttribute('dlb', defaultDeviceLabel, overwrite=False)
| 26.862069 | 74 | 0.752246 | 95 | 779 | 6.084211 | 0.568421 | 0.110727 | 0.093426 | 0.155709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007407 | 0.133504 | 779 | 28 | 75 | 27.821429 | 0.848889 | 0.17715 | 0 | 0 | 0 | 0 | 0.079114 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e520e590c8df2d66276d19de77a152e7ca6c316 | 1,272 | py | Python | augmentation.py | chaets/cogniac | aefb4355d3f7b56cbc42e1039e88d709b40051dc | [
"Apache-2.0"
] | 1 | 2021-04-26T18:15:35.000Z | 2021-04-26T18:15:35.000Z | augmentation.py | chaets/cogniac | aefb4355d3f7b56cbc42e1039e88d709b40051dc | [
"Apache-2.0"
] | null | null | null | augmentation.py | chaets/cogniac | aefb4355d3f7b56cbc42e1039e88d709b40051dc | [
"Apache-2.0"
] | 1 | 2021-03-19T21:40:48.000Z | 2021-03-19T21:40:48.000Z | import cv2
import numpy as np
class Augmentation:
    """Simple OpenCV-based image augmentations: resize, crop and rotate."""
    @staticmethod
    def get_resize(image, lengthScale, breadthScale):
        src = cv2.imread(image, cv2.IMREAD_UNCHANGED)
        # compute the new dimensions as a percentage of the original size
        width = int(src.shape[1] * breadthScale / 100)
        length = int(src.shape[0] * lengthScale / 100)
        # cv2.resize expects dsize as (width, height)
        dsize = (width, length)
        output = cv2.resize(src, dsize)
        return output
    @staticmethod
    def get_crop(image, dim):
        ima = cv2.imread(image)
        if dim is None:
            # default: crop the top-left quadrant
            x = int(ima.shape[0]/2)
            y = int(ima.shape[1]/2)
            cropped = ima[0:x, 0:y]
        else:
            cropped = ima[dim[0]:dim[1], dim[2]:dim[3]]
        return cropped
    @staticmethod
    def get_rotate(image, angle):
        src = cv2.imread(image, cv2.IMREAD_UNCHANGED)
        # rotate around the image centre, keeping the original canvas size
        image_center = tuple(np.array(src.shape[1::-1]) / 2)
        rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
        result = cv2.warpAffine(src, rot_mat, src.shape[1::-1], flags=cv2.INTER_LINEAR)
        return result
1e529806b916a14d473506fae40b14ec1ce0965a | 557 | py | Python | pages/themes/beginners/unicodeTopics/examples/fromWin1251_toUTF8.py | ProgressBG-Python-Course/ProgressBG-VC2-Python | 03b892a42ee1fad3d4f97e328e06a4b1573fd356 | [
"MIT"
] | null | null | null | pages/themes/beginners/unicodeTopics/examples/fromWin1251_toUTF8.py | ProgressBG-Python-Course/ProgressBG-VC2-Python | 03b892a42ee1fad3d4f97e328e06a4b1573fd356 | [
"MIT"
] | null | null | null | pages/themes/beginners/unicodeTopics/examples/fromWin1251_toUTF8.py | ProgressBG-Python-Course/ProgressBG-VC2-Python | 03b892a42ee1fad3d4f97e328e06a4b1573fd356 | [
"MIT"
] | null | null | null | input_file_name = "windows-1251_encoded_file.txt"
output_file_name = "utf8_encoded_file.txt"
def read_from(enc, file_name):
    # open the file for reading in text mode with the given encoding:
    with open(file_name, "r", encoding=enc) as f:
        decoded_content = f.read()
    return decoded_content
def write_to(enc, content, file_name):
    # open the file for writing in text mode with the given encoding:
    with open(file_name, "w+", encoding=enc) as f:
        f.write(content)
decoded_content = read_from("cp1251", input_file_name)
write_to("utf8", decoded_content, output_file_name)
| 25.318182 | 54 | 0.746858 | 88 | 557 | 4.454545 | 0.329545 | 0.163265 | 0.066327 | 0.081633 | 0.290816 | 0.193878 | 0.193878 | 0.193878 | 0 | 0 | 0 | 0.029412 | 0.145422 | 557 | 21 | 55 | 26.52381 | 0.794118 | 0.165171 | 0 | 0 | 0 | 0 | 0.15 | 0.108696 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e572a42c6bc0c9afac50afe4337fe89f5d90d51 | 7,795 | py | Python | PyObjCTest/test_cfdictionary.py | linuxfood/pyobjc-framework-Cocoa-test | 3475890f165ab26a740f13d5afe4c62b4423a140 | [
"MIT"
] | null | null | null | PyObjCTest/test_cfdictionary.py | linuxfood/pyobjc-framework-Cocoa-test | 3475890f165ab26a740f13d5afe4c62b4423a140 | [
"MIT"
] | null | null | null | PyObjCTest/test_cfdictionary.py | linuxfood/pyobjc-framework-Cocoa-test | 3475890f165ab26a740f13d5afe4c62b4423a140 | [
"MIT"
] | null | null | null | import CoreFoundation
import objc
from PyObjCTools.TestSupport import TestCase
class TestCFDictionary(TestCase):
def testCreation(self):
dictionary = CoreFoundation.CFDictionaryCreate(
None,
("aap", "noot", "mies", "wim"),
("monkey", "nut", "missy", "john"),
4,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
self.assertIsInstance(dictionary, CoreFoundation.CFDictionaryRef)
self.assertEqual(
dictionary, {"aap": "monkey", "noot": "nut", "mies": "missy", "wim": "john"}
)
dictionary = CoreFoundation.CFDictionaryCreateMutable(
None,
0,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
self.assertIsInstance(dictionary, CoreFoundation.CFMutableDictionaryRef)
CoreFoundation.CFDictionarySetValue(dictionary, "hello", "world")
self.assertEqual(dictionary, {"hello": "world"})
def testApplyFunction(self):
dictionary = CoreFoundation.CFDictionaryCreate(
None,
("aap", "noot", "mies", "wim"),
("monkey", "nut", "missy", "john"),
4,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
context = []
def function(key, value, context):
context.append((key, value))
self.assertArgIsFunction(
CoreFoundation.CFDictionaryApplyFunction, 1, b"v@@@", False
)
self.assertArgHasType(CoreFoundation.CFDictionaryApplyFunction, 2, b"@")
CoreFoundation.CFDictionaryApplyFunction(dictionary, function, context)
context.sort()
self.assertEqual(len(context), 4)
self.assertEqual(
context,
[
(b"aap".decode("ascii"), b"monkey".decode("ascii")),
(b"mies".decode("ascii"), b"missy".decode("ascii")),
(b"noot".decode("ascii"), b"nut".decode("ascii")),
(b"wim".decode("ascii"), b"john".decode("ascii")),
],
)
def testTypeID(self):
self.assertIsInstance(CoreFoundation.CFDictionaryGetTypeID(), int)
def testCreation2(self): # XXX
dct = CoreFoundation.CFDictionaryCreate(
None,
[b"key1".decode("ascii"), b"key2".decode("ascii")],
[42, 43],
2,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
self.assertIsInstance(dct, CoreFoundation.CFDictionaryRef)
dct = CoreFoundation.CFDictionaryCreateCopy(None, dct)
self.assertIsInstance(dct, CoreFoundation.CFDictionaryRef)
dct = CoreFoundation.CFDictionaryCreateMutable(
None,
0,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
self.assertIsInstance(dct, CoreFoundation.CFDictionaryRef)
dct = CoreFoundation.CFDictionaryCreateMutableCopy(None, 0, dct)
self.assertIsInstance(dct, CoreFoundation.CFDictionaryRef)
def testInspection(self):
dct = CoreFoundation.CFDictionaryCreate(
None,
[b"key1".decode("ascii"), b"key2".decode("ascii")],
[42, 42],
2,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
self.assertIsInstance(dct, CoreFoundation.CFDictionaryRef)
self.assertEqual(CoreFoundation.CFDictionaryGetCount(dct), 2)
self.assertEqual(
CoreFoundation.CFDictionaryGetCountOfKey(dct, b"key1".decode("ascii")), 1
)
self.assertEqual(
CoreFoundation.CFDictionaryGetCountOfKey(dct, b"key3".decode("ascii")), 0
)
self.assertEqual(CoreFoundation.CFDictionaryGetCountOfValue(dct, 42), 2)
self.assertEqual(CoreFoundation.CFDictionaryGetCountOfValue(dct, 44), 0)
self.assertResultHasType(CoreFoundation.CFDictionaryContainsKey, objc._C_NSBOOL)
self.assertTrue(
CoreFoundation.CFDictionaryContainsKey(dct, b"key1".decode("ascii"))
)
self.assertFalse(
CoreFoundation.CFDictionaryContainsKey(dct, b"key3".decode("ascii"))
)
self.assertResultHasType(
CoreFoundation.CFDictionaryContainsValue, objc._C_NSBOOL
)
self.assertTrue(CoreFoundation.CFDictionaryContainsValue(dct, 42))
self.assertFalse(
CoreFoundation.CFDictionaryContainsValue(dct, b"key3".decode("ascii"))
)
self.assertEqual(CoreFoundation.CFDictionaryGetValue(dct, "key2"), 42)
self.assertIs(CoreFoundation.CFDictionaryGetValue(dct, "key3"), None)
self.assertResultHasType(
CoreFoundation.CFDictionaryGetValueIfPresent, objc._C_NSBOOL
)
self.assertArgIsOut(CoreFoundation.CFDictionaryGetValueIfPresent, 2)
ok, value = CoreFoundation.CFDictionaryGetValueIfPresent(dct, "key2", None)
self.assertTrue(ok)
self.assertEqual(value, 42)
ok, value = CoreFoundation.CFDictionaryGetValueIfPresent(dct, "key3", None)
self.assertFalse(ok)
self.assertIs(value, None)
keys, values = CoreFoundation.CFDictionaryGetKeysAndValues(dct, None, None)
self.assertEqual(values, (42, 42))
keys = list(keys)
keys.sort()
self.assertEqual(keys, ["key1", "key2"])
def testMutation(self):
dct = CoreFoundation.CFDictionaryCreateMutable(
None,
0,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
self.assertEqual(CoreFoundation.CFDictionaryGetCount(dct), 0)
CoreFoundation.CFDictionaryAddValue(
dct, b"key1".decode("ascii"), b"value1".decode("ascii")
)
self.assertEqual(CoreFoundation.CFDictionaryGetCount(dct), 1)
self.assertTrue(
CoreFoundation.CFDictionaryContainsKey(dct, b"key1".decode("ascii"))
)
CoreFoundation.CFDictionarySetValue(
dct, b"key2".decode("ascii"), b"value2".decode("ascii")
)
self.assertEqual(CoreFoundation.CFDictionaryGetCount(dct), 2)
self.assertTrue(
CoreFoundation.CFDictionaryContainsKey(dct, b"key2".decode("ascii"))
)
CoreFoundation.CFDictionaryReplaceValue(
dct, b"key2".decode("ascii"), b"value2b".decode("ascii")
)
self.assertEqual(CoreFoundation.CFDictionaryGetCount(dct), 2)
self.assertTrue(
CoreFoundation.CFDictionaryContainsKey(dct, b"key2".decode("ascii"))
)
self.assertEqual(
CoreFoundation.CFDictionaryGetValue(dct, "key2"), b"value2b".decode("ascii")
)
CoreFoundation.CFDictionaryReplaceValue(
dct, b"key3".decode("ascii"), b"value2b".decode("ascii")
)
self.assertEqual(CoreFoundation.CFDictionaryGetCount(dct), 2)
self.assertFalse(
CoreFoundation.CFDictionaryContainsKey(dct, b"key3".decode("ascii"))
)
CoreFoundation.CFDictionaryRemoveValue(dct, b"key1".decode("ascii"))
self.assertFalse(
CoreFoundation.CFDictionaryContainsKey(dct, b"key1".decode("ascii"))
)
CoreFoundation.CFDictionaryRemoveAllValues(dct)
self.assertFalse(
CoreFoundation.CFDictionaryContainsKey(dct, b"key2".decode("ascii"))
)
self.assertEqual(CoreFoundation.CFDictionaryGetCount(dct), 0)
| 40.180412 | 88 | 0.636947 | 590 | 7,795 | 8.405085 | 0.161017 | 0.0732 | 0.031458 | 0.025812 | 0.64872 | 0.59266 | 0.508772 | 0.469449 | 0.428111 | 0.387175 | 0 | 0.013381 | 0.252213 | 7,795 | 193 | 89 | 40.388601 | 0.837365 | 0.000385 | 0 | 0.44 | 0 | 0 | 0.058922 | 0 | 0 | 0 | 0 | 0 | 0.274286 | 1 | 0.04 | false | 0 | 0.017143 | 0 | 0.062857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e57a3b1f5bc69858ee77a65eb9d305c058566c9 | 3,910 | py | Python | melior_transformers/encoding/encoding_model.py | MeliorAI/meliorTransformers | b2936e1aac23e63e0b737d03975124c31a960812 | [
"Apache-2.0"
] | 1 | 2020-08-06T10:48:49.000Z | 2020-08-06T10:48:49.000Z | melior_transformers/encoding/encoding_model.py | MeliorAI/meliorTransformers | b2936e1aac23e63e0b737d03975124c31a960812 | [
"Apache-2.0"
] | 2 | 2020-02-13T12:45:57.000Z | 2020-04-14T11:30:33.000Z | melior_transformers/encoding/encoding_model.py | MeliorAI/meliorTransformers | b2936e1aac23e63e0b737d03975124c31a960812 | [
"Apache-2.0"
] | 2 | 2020-07-21T12:43:51.000Z | 2021-08-13T15:21:22.000Z | import logging
from typing import Dict, List
import numpy as np
import torch
from numpy import ndarray
from sentence_transformers import SentenceTransformer, models
from melior_transformers.config.global_args import global_args
from melior_transformers.encoding.constants import MODEL_CLASSES
logger = logging.getLogger(__name__)
class SentenceEncoder:
""" Simple wrapper class around 'sentence-transformers':
(https://github.com/UKPLab/sentence-transformers/) that
        allows us to easily extract embeddings from pre-trained models.
You can find the full list of models here:
https://huggingface.co/transformers/pretrained_models.html
"""
def __init__(
self,
model_type: str = "bert",
model_name: str = "bert-base-uncased",
args: Dict = None,
use_cuda: bool = False,
random_seed: int = None,
):
"""
Initializes a pre-trained Transformer model for Sentence Encoding.
Args:
model_type (optional): The type of model.
            model_name (optional): The model name.
            args (optional): Additional arguments to configure embeddings extraction.
use_cuda (optional): Use GPU if available. Setting to False will
force model to use CPU only.
Returns:
None
"""
if random_seed is not None:
np.random.seed(random_seed)
torch.manual_seed(random_seed)
if use_cuda:
device = "cuda"
else:
device = "cpu"
self.args = {
# Model config
"max_seq_length": 128,
"do_lower_case": False,
            # Pooling config
"pooling_mode_mean_tokens": True,
"pooling_mode_cls_token": False,
"pooling_mode_max_tokens": False,
"pooling_mode_mean_sqrt_len_tokens": False,
}
if args is not None:
self.args.update(args)
if model_type not in MODEL_CLASSES:
raise ValueError(
f"Model type {model_type} doesn't exist."
f"\nPlease select one of the follwing: {MODEL_CLASSES.keys()}"
)
try:
logger.info(f"Loading model '{model_name}'")
word_embedding_model = MODEL_CLASSES[model_type](
model_name_or_path=model_name,
max_seq_length=self.args["max_seq_length"],
do_lower_case=self.args["do_lower_case"],
)
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=self.args["pooling_mode_mean_tokens"],
pooling_mode_cls_token=self.args["pooling_mode_cls_token"],
pooling_mode_max_tokens=self.args["pooling_mode_max_tokens"],
pooling_mode_mean_sqrt_len_tokens=self.args[
"pooling_mode_mean_sqrt_len_tokens"
],
)
self.encoder_model = SentenceTransformer(
modules=[word_embedding_model, pooling_model], device=device
)
except Exception as e:
raise ValueError(f"Error loading model: {e}")
def encode(
self, sentences: List[str], batch_size: int = 8, show_progress_bar: bool = False
) -> List[np.ndarray]:
"""
Extract sentence embeddings from the selected model.
Args:
sentences: List of sentences to extract embeddings.
batch_size (optional): Batch size used for the computation
show_progress_bar (optional): Output a progress bar when encode sentences
Returns:
List with ndarrays of the embeddings for each sentence.
"""
return self.encoder_model.encode(
sentences, batch_size=batch_size, show_progress_bar=show_progress_bar
)
| 33.418803 | 88 | 0.615601 | 449 | 3,910 | 5.100223 | 0.347439 | 0.057642 | 0.039301 | 0.033188 | 0.065502 | 0.054585 | 0.027948 | 0 | 0 | 0 | 0 | 0.001482 | 0.309719 | 3,910 | 116 | 89 | 33.706897 | 0.84698 | 0.251918 | 0 | 0 | 0 | 0 | 0.159108 | 0.082663 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.119403 | 0 | 0.179104 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e58286e282abcb985fa69b50248bb70d6390ed9 | 3,138 | py | Python | main.py | Qumeric/flappypython | 40f4ab00870f6e5f02ef7de74190c6a277feb034 | [
"MIT"
] | null | null | null | main.py | Qumeric/flappypython | 40f4ab00870f6e5f02ef7de74190c6a277feb034 | [
"MIT"
] | 1 | 2015-04-07T15:17:30.000Z | 2015-04-07T17:34:01.000Z | main.py | Qumeric/flappypython | 40f4ab00870f6e5f02ef7de74190c6a277feb034 | [
"MIT"
] | null | null | null | from pygame import display, font, image, init, time, event
from pygame.locals import *
from random import randrange
from GameObject import Bird, Pipe
SCORE_COLOR = (255, 255, 0)
HIGHSCORE_COLOR = (255, 165, 0)
DISPLAY_WIDTH = 320
DISPLAY_HEIGHT = 480
FONT = None
FONT_SIZE = 22
HOLE_SIZE = 50
PIPE_FREQUENCY = 50
PIPE_MAXLIFETIME = 100
score = 1
highscore = 0
def save():
global score, highscore
if score > highscore:
highscore = score
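    # 'a+' creates the save file if it does not exist; seek(0) rewinds so the
    # previously stored high score can be read before deciding to overwrite it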
with open('save', 'a+') as f:
f.seek(0)
save = f.read()
        if highscore > (int(save) if save.isdigit() else 0):
f.seek(0)
f.truncate()
f.write(str(highscore))
else:
highscore = int(save)
score = 0
def pause(display):
screen = display.get_surface()
hsfont = font.Font(FONT, 100)
hs = hsfont.render(str(highscore), True, HIGHSCORE_COLOR)
screen.blit(image.load('pause.png').convert_alpha(), (0, 0))
screen.blit(hs, (77, 110))
display.flip()
while True:
for i in event.get():
if i.type == MOUSEBUTTONDOWN or i.type == KEYDOWN:
return
def main():
global score, highscore
init()
display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))
display.set_caption('Flappy bird')
myfont = font.Font(FONT, FONT_SIZE)
screen = display.get_surface()
bird = Bird(150, 1)
bg = image.load('background.png').convert_alpha()
pipes = []
save()
running = True
while running:
lScore = myfont.render(str(score), True, SCORE_COLOR)
time.Clock().tick(30) # Set FPS to 30
screen.blit(bg, (0, 0))
score += 1
# Create new pipes
if score % PIPE_FREQUENCY == 0:
hole = randrange(HOLE_SIZE, DISPLAY_HEIGHT - HOLE_SIZE)
pipe1 = Pipe(DISPLAY_WIDTH, hole + HOLE_SIZE)
pipe2 = Pipe(DISPLAY_WIDTH, -DISPLAY_HEIGHT + hole - HOLE_SIZE)
pipes.extend((pipe1, pipe2))
# Move pipes
for pipe in pipes:
screen.blit(pipe.img, pipe.rect)
pipe.fly()
# Remove old pipes
for pipe in pipes:
if pipe.lifetime > PIPE_MAXLIFETIME:
pipes.remove(pipe)
# Move the bird on the y-axis
bird.fly()
# Handle the input
for i in event.get():
if i.type == MOUSEBUTTONDOWN or i.type == KEYDOWN:
bird.speedY = -10
elif i.type == QUIT:
running = False
# Check collisions with pipes and bottom
# If the bird is too low or touches a pipe
if bird.rect.y >= DISPLAY_HEIGHT - bird.img.get_height() or \
bird.checkCollisions(pipes):
bird.die()
pipes.clear()
save()
pause(display)
elif bird.rect.y < -HOLE_SIZE: # The bird is too high
bird.speedY = 1
# Draw the bird and score info
screen.blit(bird.img, bird.rect)
screen.blit(lScore, (0, 0))
display.flip()
if __name__ == '__main__':
main()
| 25.512195 | 75 | 0.564691 | 396 | 3,138 | 4.373737 | 0.330808 | 0.027714 | 0.020785 | 0.026559 | 0.079677 | 0.057737 | 0.057737 | 0.057737 | 0.057737 | 0.057737 | 0 | 0.031339 | 0.328872 | 3,138 | 122 | 76 | 25.721311 | 0.791073 | 0.074251 | 0 | 0.183908 | 0 | 0 | 0.016932 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.045977 | 0 | 0.091954 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e589b21bf05fda4a1eedf746c2f50a2e1830a1e | 17,283 | py | Python | queryDB.py | ElsevierSoftwareX/SOFTX-D-20-00075 | 825efe6a95011d055eaa19fabed42e9dad73b164 | [
"BSD-3-Clause"
] | 3 | 2021-09-15T03:51:14.000Z | 2022-03-05T15:36:47.000Z | queryDB.py | ElsevierSoftwareX/SOFTX-D-20-00075- | 06de316e3973b65e28ca7e047b5b1674640e7e5c | [
"BSD-3-Clause"
] | null | null | null | queryDB.py | ElsevierSoftwareX/SOFTX-D-20-00075- | 06de316e3973b65e28ca7e047b5b1674640e7e5c | [
"BSD-3-Clause"
] | 3 | 2021-07-21T10:25:59.000Z | 2022-03-19T21:16:33.000Z | #!/usr/bin/env python3
from astroquery.simbad import Simbad
from astroquery.vizier import Vizier
from cat_setup import src_localDB, src_onlineDB
from buildDB import addData, check_ldb
import sys, os
import csv
from more_itertools import locate
import argparse
import subprocess
from pathlib import Path
# for the spectrum search:
from astropy import units as u
import astropy.coordinates as coord
from getSpect import queryCASSIS, queryISO
import warnings
warnings.filterwarnings('ignore', category=UserWarning)
# Describe the script:
description = \
"""
description:
script to pull photometry from set catalogs in VizieR
(specified in cat_setup.py) and from local database of
data tables not presently in VizieR. If optional
argument --getSpect is set equal to True (boolean),
the script will also pull flux calibrated infrared
spectra from the CASSIS low resolution Spitzer
Atlas and Gregory C Sloan's ISO/SWS Atlas.
"""
epilog = \
"""
examples:
queryDB.py --obj=HD_283571 --rad=10s --getSpect=True
"""
parser = argparse.ArgumentParser(description=description,epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--obj",dest="obj",default='',type=str,
help='Object name')
parser.add_argument("--rad",dest="rad",default='10s',type=str,
help='Search radius for VizieR catalog query')
parser.add_argument("--ldb",dest='ldb',default='',type=str,
                    help='Local database directory trunk')
parser.add_argument("--getSpect",dest="getSpect",default=False,type=bool,
help='Choose whether to query CASSIS for IRS spectra (default False)')
parser.add_argument("--closest",dest="closest",default=False,type=bool,
                    help='Retrieve closest entry from VizieR catalogs (default False)')
parser.add_argument("--queryAll",dest="query",default='True',type=str,
help='Choose whether to query full database ("all") or specific catalog')
argopt = parser.parse_args()
obj = argopt.obj.replace('_', ' ')
searchR = argopt.rad
# Check that the local database can be found:
localDB_trunk = check_ldb(argopt.ldb) # returns a pathlib.Path object
qu = argopt.query
# Read in the details of the VizieR catalogs to be queried:
if qu == 'True':
catN, catR, catW, catA, catM, catE, catU, catB = src_onlineDB('simbad')
else:
# Expect to be given one catalog to query
try:
catN,catR,catW,catA,catM,catE,catU,catB = [{qu:item[qu]} for item in src_onlineDB('simbad')]
except KeyError:
print('No online catalog matching keyword ',qu)
catN,catR,catW,catA,catM,catE,catU,catB = [[]]*8
# Read in the details of the local catalogs to be queried:
if qu == 'True':
try:
ldbN, ldbR, ldbW, ldbA, ldbM, ldbE, ldbU, ldbB = src_localDB(localDB_trunk)
except TypeError:
print('Error: local database files not found!')
print('Please check local database directory trunk before continuing.')
print('')
sys.exit()
else:
try:
ldbN,ldbR,ldbW,ldbA,ldbM,ldbE,ldbU,ldbB = [{qu:item[qu]} for item in src_localDB(localDB_trunk)]
except KeyError:
print('No local catalog matching keyword ',qu)
if catN == []:
print('Exiting...')
sys.exit()
ldbN,ldbR,ldbW,ldbA,ldbM,ldbE,ldbU,ldbB = [[]]*8
##########
# Initialise outputs:
##########
wvlen, band, mag, emag, units = ['m'], ['--'], ['--'], ['--'], ['--']
beam, odate, ref = ['arcsec'], ['--'], ['--']
##########
# Collect SIMBAD names and VizieR catalog matches
##########
# Create custom SIMBAD (cS) query to retrieve 2MASS flux
cS = Simbad()
cS.add_votable_fields('flux(J)', 'flux(H)', 'flux(K)')
cS.add_votable_fields('flux_error(J)', 'flux_error(H)', 'flux_error(K)')
cS.add_votable_fields('flux_bibcode(J)', 'flux_bibcode(H)', 'flux_bibcode(K)')
cS.remove_votable_fields('coordinates')
objsim = cS.query_object(obj)
if not objsim:
print('')
print('Warning: object name '+obj+' not recognised by SIMBAD!')
    # Try treating it as photometry of a binary component (expect e.g. A or A+B label)
print(' - blindly assuming multiplicity: checking "'+' '.join(obj.split(' ')[:-1])+'"')
try:
objB = [a[0] for a in Simbad.query_objectids(' '.join(obj.split(' ')[:-1]))]
# If we get to here, the object is a component of a multiple system
print(' - Success! '+' '.join(obj.split(' ')[:-1])+' recognised by SIMBAD!')
print('Info: photometry search will be limited to the local database')
print('--------------------------------------------')
print(' CAUTION: ')
print(' Individual component identifiers can vary ')
print(' according to wavelength or between studies.')
print(' You are advised to check the collated ')
print(' references to ensure consistent naming. ')
print('--------------------------------------------')
print('')
if ' '.join(obj.split(' ')[:-1]) not in [' '.join(o.split()) for o in objB]:
for o in objB:
# Retrieve full name of parent star from SIMBAD (in case e.g. XZ Tau
# parsed instead of V* XZ Tau):
if ' '.join(obj.split(' ')[:-1]) in o:
obj2 = o+' '+obj.split(' ')[-1]
else:
# Parsed name matches required format of full simbad name of parent star plus
# component flag (e.g. A).
print('')
obj2 = obj
altIDs = [obj2]
except TypeError:
print('Error: not multiple. Object name not registered in SIMBAD!')
print('Please provide a valid object identifier.')
print('')
sys.exit()
else:
# Only get here if the object identifier is simbad-compatible
# Retrieve data from online catalogs:
for o in catN:
resM, resE = [], []
found = ''
print('Retrieving photometry from '+o+' ('+catR[o]+') ...')
if o == '2MASS':
for t in range(0, 3):
if catR[o] in str(objsim[catN[o][t]][0]):
addData(objsim[catM[o][t]][0], objsim[catE[o][t]][0], catB[o][t],
catW[o][t], catA[o][t], catU[o][t], 'unknown', catR[o],
m=mag, em=emag, b1=band, u=units, b2=beam, d=odate, r=ref,
w=wvlen)
else:
print('No match')
else:
res = Vizier(columns=['**', '+_r'], catalog=catN[o])
result = res.query_region(obj, radius=searchR)
try:
l_tmp = result[catN[o]]
except TypeError:
found = 'No match'
if result.keys() and found != 'No match':
if len(result[catN[o]]) > 1 and argopt.closest == False:
# Get the user to specify the matching catalog entry for the object:
print('Multiple results returned by Vizier within search radius')
print(result[catN[o]])
print('')
obj_r = input('Enter "_r" value for required target: ')
# Retrieve row number:
for r in range(0, len(result[catN[o]])):
if (result[catN[o]][r]['_r'] == float(obj_r)):
row = r
elif len(result[catN[o]]) > 1 and argopt.closest == True:
# Retrieve the entry with smallest _r
print('Multiple results returned by Vizier within search radius')
print(result[catN[o]])
print('')
q_r = min([r['_r'] for r in result[catN[o]]])
# Retrieve row number:
print('Closest entry has _r =',q_r)
row = None
for r in range(0, len(result[catN[o]])):
                    if row is None and result[catN[o]][r]['_r'] == q_r:
row = r
else:
row = 0
# Retrieve mag/flux and its error from the catalog, given the row number
#for mm in catM[o]:
for m in range(0, len(catM[o])):
# Retrieve each of the mag/flux measurements...
try:
if '--' not in str(result[catN[o]][row][catM[o][m]]):
resM = result[catN[o]][row][catM[o][m]]
else:
resM = '--'
except KeyError:
print('Warning: potential flux column name change in VizieR!')
print(result[catN[o]][row])
print (catM[o][m])
raise KeyError
# ... and their errors...
if o == 'IRAS':
t_resM = result[catN[o]][row][catE[o][m]]
resE = result[catN[o]][row][catM[o][m]]*0.01*t_resM
elif isinstance(catE[o][m], str):
if '--' not in str(result[catN[o]][row][catE[o][m]]):
resE = result[catN[o]][row][catE[o][m]]
else:
resE = '--'
else:
resE = catE[o][m] * result[catN[o]][row][catM[o][m]]
# And add it to the data to be written to file:
addData(resM, resE, catB[o][m], catW[o][m], catA[o][m], catU[o][m],
'unknown', catR[o], m=mag, em=emag, b1=band,
u=units, b2=beam, d=odate, r=ref, w=wvlen)
else:
print('No match.')
##########
# Account for specific case of Vieira+2003 which provides mag + colour table
# and object ID in PDS format:
##########
altIDs = [a[0] for a in Simbad.query_objectids(obj)]
if qu == 'True':
cmN = {'Vieira03' : 'J/AJ/126/2971/table2'}
cmR = {'Vieira03' : '2003AJ....126.2971V'}
cmW = {'Vieira03' : [540e-9, 442e-9, 364e-9, 647e-9, 786.5e-9]}
cmA = {'Vieira03' : [(1.22*w/0.60)*206265 for w in cmW['Vieira03']]}
cmM = {'Vieira03' : ['Vmag', 'B-V', 'U-B', 'V-Rc', 'Rc-Ic']}
cmE = {'Vieira03' : ['--', '--', '--', '--', '--']}
cmU = {'Vieira03' : ['mag', 'mag', 'mag', 'mag', 'mag']}
cmB = {'Vieira03' : ['Johnson:V','Johnson:B','Johnson:U','Cousins:Rc',
'Cousins:Ic']}
print('Retrieving photometry from Vieira et al. ('+cmR['Vieira03']+') ...')
if any('PDS' in b for b in altIDs):
indices = [i for i, s in enumerate(altIDs) if 'PDS' in s]
p_obj = altIDs[indices[0]]
# Ensure pds_obj is just numeric and has leading zeros so that len = 3
if len(p_obj.split()[1]) == 1:
pds_obj = '00'+p_obj.split()[1]
elif len(p_obj.split()[1]) == 2:
pds_obj = '0'+p_obj.split()[1]
elif len(p_obj.split()[1]) == 3:
pds_obj = p_obj.split()[1]
else:
print('Format of PDS identifier not recognised: '+p_obj)
print('Exiting...')
sys.exit()
result = Vizier.get_catalogs(cmN['Vieira03'])
ind = [i for i, s in enumerate([a for a in result[0]['PDS']]) if pds_obj in s]
if len(ind) > 1:
jvmag = result[0]['Vmag'][ind]
jbmag = result[0]['B-V'][ind] + jvmag
jumag = result[0]['U-B'][ind] + jbmag
crmag = jvmag - result[0]['V-Rc'][ind]
cimag = crmag - result[0]['Rc-Ic'][ind]
vieira_m = [jvmag, jbmag, jumag, crmag, cimag]
for m in range(0, len(vieira_m)):
addData(vieira_m[m], cmE['Vieira03'][m], cmB['Vieira03'][m],
cmW['Vieira03'][m], cmA['Vieira03'][m], cmU['Vieira03'][m],
'unknown', cmR['Vieira03'], m=mag, em=emag, b1=band,
u=units, b2=beam, d=odate, r=ref, w=wvlen)
else:
print('No match.')
else:
print('No match.')
##########
# Then deal with local data base of tables not on VizieR:
##########
suggestAlt = []
for o in ldbN:
print('Retrieving photometry from '+o+' ('+ldbR[o]+') ...')
with open(ldbN[o]) as f_in:
reader = csv.DictReader(f_in, delimiter=',')
entries = [a for a in reader]
targs = [row['Target'] for row in entries]
match = list(set(targs).intersection([' '.join(a.split()) for a in altIDs]))
# check for entries where any of [a for altIDs] match local database catalog
# entry.split(' ')[:-1] (i.e. the portion of the name up to the final space)
smatch = list(set([' '.join(t.split(' ')[:-1]) for t in targs]).intersection([' '.join(a.split()) for a in altIDs]))
if len(match) == 0 and len(smatch) == 0:
print(' - no match.')
elif len(match) == 0 and len(smatch) != 0:
# Alert the user to the fact that there are entries for individual components of
# the target they are querying.
print(' - no match for '+obj+' but individual component/blended photometry exists')
for ind in list(locate([' '.join(t.split(' ')[:-1]) for t in targs], lambda a: a == smatch[0])):
suggestAlt.append(str(targs[ind]))
else:
# Identical matches are found:
for ind in list(locate(targs, lambda a: a == match[0])):
resM = []
resE = []
resD = []
for mm in ldbM[o]:
# Retrieve each of the mag/flux measurements...
resM.append(entries[ind][mm])
resD.append(entries[ind]['ObsDate'])
for me in ldbE[o]:
# ... and their errors
resE.append(entries[targs.index(match[0])][me])
for m in range(0, len(resM)):
addData(resM[m], resE[m], ldbB[o][m], ldbW[o][m], ldbA[o][m], ldbU[o][m],
resD[m], ldbR[o], m=mag, em=emag, b1=band, u=units,
b2=beam, d=odate, r=ref, w=wvlen)
if len(smatch) != 0:
# ...AND potential individual component photometry exists in the table:
for ind in list(locate([' '.join(t.split(' ')[:-1]) for t in targs], lambda a: a == smatch[0])):
suggestAlt.append(str(targs[ind]))
if len(suggestAlt) != 0:
print('')
print('------------------------------------------------------')
print(' !!! CAUTION !!! ')
print('------------------------------------------------------')
print('Individual component or blended photometry also found!')
print(' - Data exists in local database for:')
for sA in list(set(suggestAlt)):
print(' '+str(sA))
print('')
print('Suggestion: use each of the target IDs with queryDB.py')
print('to collate all available photometry.')
print('')
print('Important note: collated photometry may contain ')
print('contributions from any/all of these components. Use ')
print('inspectSED.py to check this.')
print('------------------------------------------------------')
print('')
##############
# Write output to ascii file:
##############
resS = Simbad.query_object(obj)
Path.mkdir(Path(os.getcwd()) / Path(obj.replace(" ", "")), parents=True, exist_ok=True)
output = Path(os.getcwd()) / Path(obj.replace(" ", "")) / Path(obj.replace(" ", "")+'_phot.dat')
if output.exists() and qu == 'True':
print('File '+str(output.name)+' already exists in '+str(output.parent)+ '...')
print('Exiting...')
sys.exit()
elif output.exists() and qu != 'True':
f = open(output, mode='a')
f.write('#New photometry obtained using search radius of '+searchR+'\n')
for i in range(1, len(wvlen)):
oLINE = str(wvlen[i])+' '+str(band[i])+' '+str(mag[i])+' '+str(emag[i])+' -- '+str(units[i])+' '+str(beam[i])+' '+str(odate[i])+' '+str(ref[i])
f.write(oLINE+"\n")
else:
f = open(output, mode='w')
f.write('#Photometry obtained for '+obj)
try:
f.write(': RA='+str(resS['RA'][0])+', Dec='+str(resS['DEC'][0]))
f.write(', cone search radius='+searchR+'\n')
except:
f.write('. Sky coordinates not retrievable; cone search not used\n')
f.write("lam band mag e_mag f_mag u_mag beam obsDate ref\n")
for i in range(0, len(wvlen)):
oLINE = str(wvlen[i])+' '+str(band[i])+' '+str(mag[i])+' '+str(emag[i])+' -- '+str(units[i])+' '+str(beam[i])+' '+str(odate[i])+' '+str(ref[i])
f.write(oLINE+"\n")
f.close()
print('Collated photometry written to ',output)
print('')
if argopt.getSpect == True:
# objRA = str(65.48922), objDEC = str(28.443204)
objPos = coord.SkyCoord(resS['RA'][0]+' '+resS['DEC'][0], unit=(u.hourangle, u.deg))
RA = objPos.ra.value
DEC = objPos.dec.value
queryCASSIS(obj, str(RA), str(DEC), searchR=str(20))
queryISO(obj, str(RA), str(DEC), searchR=str(20))
| 43.977099 | 151 | 0.525025 | 2,201 | 17,283 | 4.085416 | 0.21672 | 0.004671 | 0.02202 | 0.012456 | 0.263456 | 0.213857 | 0.189502 | 0.167705 | 0.120663 | 0.104204 | 0 | 0.016578 | 0.301973 | 17,283 | 392 | 152 | 44.089286 | 0.72878 | 0.112133 | 0 | 0.266212 | 0 | 0 | 0.206198 | 0.016991 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.051195 | 0 | 0.051195 | 0.21843 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e591f8ce49d4cfdc76a7051dbb0e79d30e622d3 | 1,690 | py | Python | celescope/tcr_fl/multi_tcr_fl.py | susucy/CeleScope | a5d9501ff8bd9dc067b9718070876acb0767a6cf | [
"MIT"
] | null | null | null | celescope/tcr_fl/multi_tcr_fl.py | susucy/CeleScope | a5d9501ff8bd9dc067b9718070876acb0767a6cf | [
"MIT"
] | null | null | null | celescope/tcr_fl/multi_tcr_fl.py | susucy/CeleScope | a5d9501ff8bd9dc067b9718070876acb0767a6cf | [
"MIT"
] | null | null | null | from celescope.__init__ import __CONDA__
from celescope.tcr_fl.__init__ import __STEPS__, __ASSAY__
from celescope.tools.Multi import Multi
class Multi_tcr_fl(Multi):
def custome_args(self):
self.parser.add_argument('--thread', help='thread', default=4)
self.parser.add_argument("--nCell", help="select top N cell")
def read_custome_args(self):
self.thread = self.args.thread
self.nCell = self.args.nCell
def split_fq(self, sample):
step = 'split_fq'
fq = f'{self.outdir_dic[sample]["cutadapt"]}/{sample}_clean_2.fq.gz'
cmd = (
f'{self.__APP__} '
f'{self.__ASSAY__} '
f'{step} '
f'--outdir {self.outdir_dic[sample][step]} '
f'--sample {sample} '
f'--assay {self.__ASSAY__} '
f'--fq {fq} '
f'--nCell {self.nCell} '
f'--match_dir {self.col4_dict[sample]} '
)
self.process_cmd(cmd, step, sample, m=5, x=1)
def assemble(self, sample):
step = 'assemble'
fastq_dir = f'{self.outdir_dic[sample]["split_fq"]}/fastq'
cmd = (
f'{self.__APP__} '
f'{self.__ASSAY__} '
f'{step} '
f'--outdir {self.outdir_dic[sample][step]} '
f'--sample {sample} '
f'--assay {self.__ASSAY__} '
f'--fastq_dir {fastq_dir} '
f'--thread {self.thread} '
)
self.process_cmd(cmd, step, sample, m=4 * int(self.thread), x=self.thread)
def main():
multi = Multi_tcr_fl(__ASSAY__, __STEPS__, __CONDA__)
multi.col4_default = None
multi.run()
if __name__ == '__main__':
main()
| 30.727273 | 82 | 0.560947 | 210 | 1,690 | 4.080952 | 0.280952 | 0.035006 | 0.060677 | 0.088681 | 0.310385 | 0.263711 | 0.263711 | 0.198366 | 0.198366 | 0.198366 | 0 | 0.005828 | 0.289349 | 1,690 | 54 | 83 | 31.296296 | 0.707744 | 0 | 0 | 0.311111 | 0 | 0 | 0.311427 | 0.111901 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.066667 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e5b83ffe1a421082cfb31bd3175565e924b9fe8 | 2,121 | py | Python | recommender/rest-api/globals.py | haiphucnguyen/MachineLearning | ecdc708a7f4a1a7917fd97f3374034b0f0a013aa | [
"MIT"
] | null | null | null | recommender/rest-api/globals.py | haiphucnguyen/MachineLearning | ecdc708a7f4a1a7917fd97f3374034b0f0a013aa | [
"MIT"
] | 18 | 2020-07-17T08:24:34.000Z | 2022-03-02T04:16:18.000Z | recommender/rest-api/globals.py | haiphucnguyen/MachineLearning | ecdc708a7f4a1a7917fd97f3374034b0f0a013aa | [
"MIT"
] | null | null | null | from pyspark.sql import SparkSession
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
class Globals:
def __init__(self):
print("Init Globals")
@staticmethod
def recommendator():
spark = SparkSession.builder.appName("Recommendation ALS").getOrCreate()
        # load the movie, link, rating and tag CSV files
movies_df = spark.read.option("header", "true").csv("data/movies.csv", inferSchema=True)
links_df = spark.read.option("header", "true").csv("data/links.csv", inferSchema=True)
movies_df = movies_df.join(links_df, on=['movieid'])
ratings_df = spark.read.option("header", "true").csv("data/ratings.csv", inferSchema=True)
tags_df = spark.read.option("header", "true").csv("data/tags.csv", inferSchema=True)
(training, test) = ratings_df.randomSplit([0.8, 0.2])
# Build the recommendation model using ALS on the training data
# Note we set cold start strategy to 'drop' to ensure we don't get NaN evaluation metrics
als = ALS(maxIter=5, regParam=0.01, userCol="userId", itemCol="movieId", ratingCol="rating",
coldStartStrategy="drop")
model = als.fit(training)
# Evaluate the model by computing the RMSE on the test data
predictions = model.transform(test)
predictions.printSchema()
predictions.orderBy('prediction').show(10)
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
predictionCol="prediction")
rmse = evaluator.evaluate(predictions)
Globals.movies_df = movies_df
Globals.ratings_df = ratings_df
Globals.tags_df = tags_df
Globals.predictions = predictions
Globals.model = model
Globals.genres = ["Crime", "Romance", "Thriller", "Adventure", "Drama", "War", "Documentary", "Fantasy", "Mystery", \
"Musical", "Animation", "Film-Noir", "(no genres listed)", "IMAX", "Horror", "Western", \
"Comedy", "Children", "Action", "Sci-Fi"]
Globals.recommendator() | 48.204545 | 125 | 0.644979 | 240 | 2,121 | 5.625 | 0.483333 | 0.02963 | 0.032593 | 0.05037 | 0.100741 | 0.100741 | 0.100741 | 0.100741 | 0 | 0 | 0 | 0.00609 | 0.225837 | 2,121 | 44 | 126 | 48.204545 | 0.816078 | 0.112211 | 0 | 0 | 0 | 0 | 0.178819 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.090909 | 0 | 0.181818 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e5d69d46fbe2dcd9c5a3139cb2e51788974251b | 508 | py | Python | compiled/construct/expr_io_pos.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | [
"MIT"
] | 4 | 2017-04-08T12:55:11.000Z | 2020-12-05T21:09:31.000Z | compiled/construct/expr_io_pos.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | [
"MIT"
] | 7 | 2018-04-23T01:30:33.000Z | 2020-10-30T23:56:14.000Z | compiled/construct/expr_io_pos.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | [
"MIT"
] | 6 | 2017-04-08T11:41:14.000Z | 2020-10-30T22:47:31.000Z | from construct import *
from construct.lib import *
expr_io_pos__all_plus_number = Struct(
'my_str' / NullTerminated(GreedyString(encoding='UTF-8'), term=b'\x00', include=False, consume=True),
'body' / FixedSized(((stream_size(_io) - stream_tell(_io)) - 2), GreedyBytes),
'number' / Int16ul,
)
expr_io_pos = Struct(
'substream1' / FixedSized(16, LazyBound(lambda: expr_io_pos__all_plus_number)),
'substream2' / FixedSized(14, LazyBound(lambda: expr_io_pos__all_plus_number)),
)
_schema = expr_io_pos
| 31.75 | 102 | 0.751969 | 70 | 508 | 5.057143 | 0.571429 | 0.084746 | 0.127119 | 0.101695 | 0.271186 | 0.271186 | 0.20904 | 0.20904 | 0 | 0 | 0 | 0.026432 | 0.106299 | 508 | 15 | 103 | 33.866667 | 0.753304 | 0 | 0 | 0 | 0 | 0 | 0.088583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e5fbb0ee05033c7efad72d8169c33b9ad7669f2 | 2,463 | py | Python | books/management/commands/email_books.py | phildini/bockus | 004508166f5b1a7c3c4d8accf32578a80379b385 | [
"MIT"
] | 3 | 2015-07-15T05:29:17.000Z | 2021-06-23T21:50:25.000Z | books/management/commands/email_books.py | phildini/bockus | 004508166f5b1a7c3c4d8accf32578a80379b385 | [
"MIT"
] | 4 | 2020-02-11T22:15:04.000Z | 2021-06-10T17:41:51.000Z | books/management/commands/email_books.py | phildini/bockus | 004508166f5b1a7c3c4d8accf32578a80379b385 | [
"MIT"
] | null | null | null | import dropbox
import json
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage
from allauth.socialaccount.models import SocialApp, SocialToken
from books.forms import ImportForm
from books.models import (
Book,
BookFileVersion,
BookEmail,
Series,
)
from readers.models import Reader
from libraries.models import Library, Librarian
logger = logging.getLogger('scripts')
class Command(BaseCommand):
help = "send pending book emails"
def handle(self, *args, **options):
logger.debug('Starting book email send cronjob')
books_to_send = BookEmail.objects.filter(
status=BookEmail.PENDING)[:4]
for book_email in books_to_send:
logger.debug('Working on email job %s' % book_email.id)
book_email.status = BookEmail.PROCESSING
book_email.save()
book_file_path = book_email.book_file.path
token = None
try:
dropbox_app_creds = SocialApp.objects.filter(
provider='dropbox_oauth2'
)[0]
token = SocialToken.objects.get(
account__user=book_email.book_file.book.added_by,
app__provider='dropbox_oauth2'
).token
except:
logger.exception(
'Error getting dropbox token for email job %s' % book_email.id
)
book_email.status = BookEmail.ERROR
book_email.save()
if token:
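                # Download the book file from Dropbox and attach it to the
                # outgoing email with its original MIME type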
client = dropbox.client.DropboxClient(token)
message = EmailMessage(
subject='[Booksonas] A book for you!',
body=book_email.book_file.book.title,
from_email="books@booksonas.com",
to=[book_email.reader.email,],
)
f, metadata = client.get_file_and_metadata(book_file_path)
message.attach(
'book.{}'.format(book_email.book_file.filetype),
f.read(),
metadata.get('mime_type'),
)
message.send()
book_email.status=BookEmail.SENT
book_email.save()
logger.debug('Successfully sent %s' % book_email.id)
logger.debug('Book email cronjob finished')
| 33.283784 | 82 | 0.578157 | 256 | 2,463 | 5.402344 | 0.410156 | 0.110629 | 0.037599 | 0.049168 | 0.093999 | 0.06363 | 0.06363 | 0.06363 | 0.06363 | 0.06363 | 0 | 0.002465 | 0.341048 | 2,463 | 73 | 83 | 33.739726 | 0.849661 | 0 | 0 | 0.048387 | 0 | 0 | 0.108404 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016129 | false | 0 | 0.177419 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e6038d2a3f2553efb2ffd56b40294b3bf3b78f9 | 511 | py | Python | 2018/Insomni'hack teaser/Rule86/outputs/super_cipher_partial.py | birdsoup/CTF-Writeups | feaeda46a5afa3dabbfa81adcbe184f7bc24d5cb | [
"MIT"
] | 3 | 2019-03-03T00:08:19.000Z | 2021-06-09T14:20:41.000Z | 2018/Insomni'hack teaser/Rule86/outputs/super_cipher_partial.py | birdsoup/CTF-Writeups | feaeda46a5afa3dabbfa81adcbe184f7bc24d5cb | [
"MIT"
] | null | null | null | 2018/Insomni'hack teaser/Rule86/outputs/super_cipher_partial.py | birdsoup/CTF-Writeups | feaeda46a5afa3dabbfa81adcbe184f7bc24d5cb | [
"MIT"
] | 1 | 2021-06-18T08:56:05.000Z | 2021-06-18T08:56:05.000Z | #!/usr/bin/env python3
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("key")
args = parser.parse_args()
RULE = [86 >> i & 1 for i in range(8)]
N_BYTES = 32
N = 8 * N_BYTES
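# next() performs one step of elementary cellular automaton rule 86 over an
# N-bit state with circular boundary: the state is padded with its wrapped
# edge bits, then every 3-bit neighbourhood is mapped through the RULE table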
def next(x):
x = (x & 1) << N+1 | x << 1 | x >> N-1
y = 0
for i in range(N):
y |= RULE[(x >> i) & 7] << i
return y
# Bootstrap the PNRG
keystream = int.from_bytes(args.key.encode(),'little')
for i in range(N//2):
keystream = next(keystream)
# Encrypt / decrypt stdin to stdout
# a minimal completion sketch, assuming the cipher XORs each 32-byte block of
# stdin with the keystream and advances the PRNG after every block:
plaintext = sys.stdin.buffer.read()
out = bytearray()
for i in range(0, len(plaintext), N_BYTES):
    chunk = plaintext[i:i + N_BYTES]
    block = int.from_bytes(chunk, 'little')
    enc = (block ^ keystream) & ((1 << (8 * len(chunk))) - 1)
    out += enc.to_bytes(len(chunk), 'little')
    keystream = next(keystream)
sys.stdout.buffer.write(bytes(out))
1e6a40545d687c3ecf51841513c392eec05aec51 | 398 | py | Python | projects/NameSorter.py | SilverBlaze109/VAMPY2017 | 68197544009aebc494b640b5d266c97ad6331c00 | [
"MIT"
] | null | null | null | projects/NameSorter.py | SilverBlaze109/VAMPY2017 | 68197544009aebc494b640b5d266c97ad6331c00 | [
"MIT"
] | null | null | null | projects/NameSorter.py | SilverBlaze109/VAMPY2017 | 68197544009aebc494b640b5d266c97ad6331c00 | [
"MIT"
] | null | null | null | import Merge_Sorting
read_filename = "/home/vampy/data/test1"
write_filename = "/home/vampy/data/test2"
fp = open(read_filename, "r")
N = int(fp.readline())
names = []
for i in range(N):
names.append(fp.readline().strip())
fp.close()
Merge_Sorting.mergesort(names)
fp = open(write_filename, "w")
fp.write("{0}\n".format(N))
for i in range(N):
fp.write("{0}".format(names[i]))
fp.close()
| 18.090909 | 41 | 0.675879 | 65 | 398 | 4.046154 | 0.446154 | 0.091255 | 0.129278 | 0.159696 | 0.091255 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011396 | 0.11809 | 398 | 21 | 42 | 18.952381 | 0.737892 | 0 | 0 | 0.266667 | 0 | 0 | 0.135678 | 0.110553 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e70a8cf3463ffe9421d2adf4f0d082d0018a938 | 1,423 | py | Python | migrations/versions/efd88a3c30a8_.py | andraune/Run4IT_BackEnd | a481427a0d1189a1f08c42e7ac1b452af6bbfc8d | [
"MIT"
] | 1 | 2022-03-29T06:11:20.000Z | 2022-03-29T06:11:20.000Z | migrations/versions/efd88a3c30a8_.py | andraune/run4it_backend | a481427a0d1189a1f08c42e7ac1b452af6bbfc8d | [
"MIT"
] | null | null | null | migrations/versions/efd88a3c30a8_.py | andraune/run4it_backend | a481427a0d1189a1f08c42e7ac1b452af6bbfc8d | [
"MIT"
] | null | null | null | """empty message
Revision ID: efd88a3c30a8
Revises: edf37629d0f4
Create Date: 2020-08-23 12:34:41.656176
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'efd88a3c30a8'
down_revision = 'edf37629d0f4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('polar_users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('profile_id', sa.Integer(), nullable=False),
sa.Column('member_id', sa.String(length=24), nullable=False),
sa.Column('polar_user_id', sa.Integer(), nullable=False),
sa.Column('state', sa.String(length=16), nullable=True),
sa.Column('access_token', sa.String(length=64), nullable=True),
sa.Column('access_token_expires', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['profile_id'], ['user_profiles.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('member_id')
)
op.create_index(op.f('ix_polar_users_profile_id'), 'polar_users', ['profile_id'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_polar_users_profile_id'), table_name='polar_users')
op.drop_table('polar_users')
# ### end Alembic commands ###
| 33.093023 | 98 | 0.697119 | 188 | 1,423 | 5.106383 | 0.393617 | 0.066667 | 0.0625 | 0.0875 | 0.366667 | 0.316667 | 0.252083 | 0.152083 | 0 | 0 | 0 | 0.042588 | 0.141954 | 1,423 | 42 | 99 | 33.880952 | 0.743653 | 0.207309 | 0 | 0 | 0 | 0 | 0.225481 | 0.04583 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e7303e5e2805c01c2695e6e83bc9c5498d330bd | 2,497 | py | Python | code/model/loss/loss.py | ZJU-Fangyin/KCL | 1d1002aeee785e4eb1dfc121d6cbb9cefa4e985c | [
"MIT"
] | 24 | 2021-12-04T13:44:22.000Z | 2022-03-19T08:10:19.000Z | code/model/loss/loss.py | Fangyin1994/KCL | 004f5681b77e4e75c791c909696fdb8a208501a2 | [
"MIT"
] | 3 | 2021-12-20T08:14:06.000Z | 2022-03-28T08:03:09.000Z | code/model/loss/loss.py | Fangyin1994/KCL | 004f5681b77e4e75c791c909696fdb8a208501a2 | [
"MIT"
] | 1 | 2021-12-22T09:29:55.000Z | 2021-12-22T09:29:55.000Z |
from .loss_computer import NCESoftmaxLoss
import torch.nn.functional as F
import torch.nn as nn
import torch
import logging
import pdb
logger = logging.getLogger()
class ContrastiveLoss(nn.Module):
def __init__(self, loss_computer: str, temperature: float, args) -> None:
super().__init__()
self.device = args['device']
if loss_computer == 'nce_softmax':
self.loss_computer = NCESoftmaxLoss(self.device)
else:
raise NotImplementedError(f"Loss Computer {loss_computer} not Support!")
self.temperature = temperature
def forward(self, z_i, z_j):
# SimCSE
batch_size = z_i.size(0)
emb = F.normalize(torch.cat([z_i, z_j]))
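        # mask self-similarity with a large negative value on the diagonal and
        # scale the logits; note the 20x factor (temperature 0.05) is
        # hard-coded, so self.temperature is effectively unused here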
similarity = torch.matmul(emb, emb.t()) - torch.eye(batch_size*2).to(self.device) * 1e12
similarity = similarity * 20
loss = self.loss_computer(similarity)
return loss
class FlatNCE(nn.Module):
def __init__(self, temperature):
self.temperature = temperature
super().__init__()
def forward(self, z_i, z_j):
batch_size = z_i.size(0)
features = torch.cat([z_i, z_j], dim=0)
labels = torch.cat([torch.arange(batch_size) for i in range(2)], dim=0)
labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
features = F.normalize(features, dim=1)
similarity_matrix = torch.matmul(features, features.T)
mask = torch.eye(labels.shape[0], dtype=torch.bool)
labels = labels[~mask].view(labels.shape[0], -1)
similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)
positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)
negatives = similarity_matrix[~labels.bool()].view(labels.shape[0], -1)
# logits = torch.cat([positives, negatives], dim=1)
labels = torch.zeros(positives.shape[0], dtype=torch.long)
logits = (negatives - positives)/self.temperature
clogits = torch.logsumexp(logits, dim=1, keepdim=True)
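        # flatNCE trick: exp(v - v.detach()) always evaluates to 1, but its
        # gradient equals the gradient of the logsumexp, so the loss value is
        # constant while the gradient signal is preserved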
        loss = torch.exp(clogits - clogits.detach())
        return loss.mean()  # assumed reduction of the flatNCE surrogate to a scalar
# _, features = self.model(images)
# logits, labels = self.flat_loss(features)
# v = torch.logsumexp(logits, dim=1, keepdim=True) #(512,1)
# loss_vec = torch.exp(v-v.detach())
# assert loss_vec.shape == (len(logits),1)
# dummy_logits = torch.cat([torch.zeros(logits.size(0),1).to(self.args.device), logits],1)
# loss = loss_vec.mean()-1 + self.criterion(logits, labels).detach()
| 34.205479 | 96 | 0.64998 | 330 | 2,497 | 4.769697 | 0.281818 | 0.053367 | 0.007624 | 0.010165 | 0.192503 | 0.15756 | 0.121982 | 0.054638 | 0.054638 | 0 | 0 | 0.018228 | 0.209051 | 2,497 | 72 | 97 | 34.680556 | 0.778734 | 0.168202 | 0 | 0.186047 | 0 | 0 | 0.028585 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.139535 | 0 | 0.302326 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e73e5b2fe9aa6d77e943c822dde424875f561fa | 3,359 | py | Python | Script_fullBandCapture.py | gdavila/PyDocsisMon | 461bb05a732f9c20bfcca18624d266f7ff89f8f2 | [
"MIT"
] | 4 | 2019-06-27T13:49:09.000Z | 2021-07-26T05:30:04.000Z | Script_fullBandCapture.py | gdavila/PyDocsisMon | 461bb05a732f9c20bfcca18624d266f7ff89f8f2 | [
"MIT"
] | null | null | null | Script_fullBandCapture.py | gdavila/PyDocsisMon | 461bb05a732f9c20bfcca18624d266f7ff89f8f2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 12:16:14 2018
@author: gdavila
This is an example of how to get Full Band Capture information
On DOCSIS 3.0, Full Band Capture is a feature that allows one to get detailed info
about the power distribution of the spectrum
"""
import docsisMon.cmDevices as cmDevices
from docsisMon.snmp import SnmpError
import time
import ggplot
import sys
def asint(s):
try: return int(s), ''
except ValueError: return sys.maxsize, s
def format_fb_data(data):
spectrum_freq = []
spectrum_pot = []
if data is not None:
for key in sorted(data, key=asint):
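            # Each entry is a hex string: a 2-char prefix, four 32-bit header
            # fields (center frequency, span, sample count, resolution
            # bandwidth), then 16-bit two's-complement power samples in
            # hundredths of a dB, starting at character offset 42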
center_frec = int('0x'+data[key][2:10], 16)
span = int('0x'+data[key][10:18], 16)
samples = int('0x'+data[key][18:26], 16)
resolution_bw = int('0x'+data[key][26:34], 16)
offset = 42
for i in range(0, samples):
frec = (center_frec-span/2)+i*resolution_bw
dec_value = int('0x'+data[key][offset+i*4:offset+i*4+4], 16)
if dec_value > 32767:
                    value = (dec_value - 65536) / 100  # two's-complement 16-bit value
else:
value = dec_value/100
item = [frec, round(value, 2)]
spectrum_freq.append(item[0])
spectrum_pot.append(item[1])
return spectrum_freq, spectrum_pot
else:
return None
def main():
try:
myIP = '10.218.49.38'
myCm = cmDevices.Cm(myIP)
myCmModel = myCm.getModel()
print ("CM IP:\t", myIP)
print ("CM Model:\t", myCmModel)
print ("CM Firmware:\t", myCm.getSw_rev())
#Accesing to Docsis Interfaces
myCmDocIf = myCm.DocsIf()
#Getting the MAC address of Docsis Interfaces (CM)
myCmMac = myCmDocIf.getMac()
print ("CM Mac:\t", myCmMac)
#Gettingfull band capture information;
print ("Full Band Capture Information:")
print("Modelo \t\tTiempo Espera SET/GET(s) \tTiempo de Descarga FB data(s)\t Result\t\t Nro Muestras")
for i in range(1,2):
data = {}
fbc = myCm.fbc()
fbc.turnOff()
time.sleep(2)
fbc.inactivityTimeout = 300
fbc.firstFrequency = 50000000
fbc.lastFrequency = 1000000000
fbc.span = 10000000
fbc.binsPerSegment = 250
fbc.noisebandwidth = 150
fbc.numberOfAverages = 1
fbc.config()
timeConfig = time.time()
result = 'data OK'
timeGet = time.time()
data = fbc.get()
timeResponse = time.time()
while(data == {}):
time.sleep(1)
if (time.time() - timeConfig > 600): break
timeGet = time.time()
data = fbc.get()
timeResponse = time.time()
print(str(i)+" "+myCm.getModel() +'\t\t\t' + str(round(timeGet-timeConfig)) + \
'\t\t\t'+ str(round(timeResponse - timeGet)) + '\t\t\t'+ str(result)+'\t\t'+ str(len(format_fb_data(data)[0])))
return(format_fb_data(data))
    except SnmpError as e:
        print(e)
        sys.exit()
freq, pot = main()
ggplot.qplot(freq[0:], pot[0:], geom="line")
| 30.816514 | 130 | 0.533194 | 412 | 3,359 | 4.296117 | 0.395631 | 0.00904 | 0.025424 | 0.033898 | 0.063277 | 0.050847 | 0.050847 | 0.050847 | 0.050847 | 0 | 0 | 0.057351 | 0.345936 | 3,359 | 108 | 131 | 31.101852 | 0.748293 | 0.110747 | 0 | 0.105263 | 0 | 0.013158 | 0.074286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039474 | false | 0 | 0.065789 | 0 | 0.131579 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e76a6d7201c50230ea9c60f989c6e9e8acbee75 | 398 | py | Python | examples/crack_egg.py | mkclairhong/quail | a6d6502746c853518a670d542222eb5fc2b05542 | [
"MIT"
] | 1 | 2018-05-30T15:33:26.000Z | 2018-05-30T15:33:26.000Z | examples/crack_egg.py | mkclairhong/quail | a6d6502746c853518a670d542222eb5fc2b05542 | [
"MIT"
] | 7 | 2018-06-21T13:21:22.000Z | 2018-07-24T21:20:05.000Z | examples/crack_egg.py | mkclairhong/quail | a6d6502746c853518a670d542222eb5fc2b05542 | [
"MIT"
] | 1 | 2018-06-01T17:39:48.000Z | 2018-06-01T17:39:48.000Z | # -*- coding: utf-8 -*-
"""
=============================
Crack Egg
=============================
This an example of how to crack an egg (take a slice of subjects/lists from it)
"""
# Code source: Andrew Heusser
# License: MIT
#import
import quail
#load data
egg = quail.load_example_data()
#crack egg
cracked_egg = quail.crack_egg(egg, subjects=range(5), lists=range(4))
cracked_egg.info()
| 16.583333 | 79 | 0.59799 | 55 | 398 | 4.236364 | 0.6 | 0.103004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008824 | 0.145729 | 398 | 23 | 80 | 17.304348 | 0.676471 | 0.59799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e77ef009463c9c8b20da40c4cad47baa5105caf | 488 | py | Python | cython_wordle/setup.py | richardtiffin/wordle | 9bdca662f204673e8fdac6fe262f596a4c1a7971 | [
"MIT"
] | null | null | null | cython_wordle/setup.py | richardtiffin/wordle | 9bdca662f204673e8fdac6fe262f596a4c1a7971 | [
"MIT"
] | null | null | null | cython_wordle/setup.py | richardtiffin/wordle | 9bdca662f204673e8fdac6fe262f596a4c1a7971 | [
"MIT"
] | null | null | null | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules=[
Extension("BBox", ["BBox.pyx"]),
Extension("spirals", ["spirals.pyx"]),
Extension("wordle", ["wordle.pyx"]),
]
setup(
name = 'wordle',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules,
)
# from terminal run the following command for cythonization
# python setup.py build_ext --inplace
| 25.684211 | 60 | 0.659836 | 56 | 488 | 5.625 | 0.464286 | 0.101587 | 0.069841 | 0.114286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.219262 | 488 | 18 | 61 | 27.111111 | 0.826772 | 0.190574 | 0 | 0 | 0 | 0 | 0.163102 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e781496e1aa125be921635999ca76ed9c8a3c2b | 4,296 | py | Python | workon/models/file.py | dalou/django-workon | ef63c0a81c00ef560ed693e435cf3825f5170126 | [
"BSD-3-Clause"
] | null | null | null | workon/models/file.py | dalou/django-workon | ef63c0a81c00ef560ed693e435cf3825f5170126 | [
"BSD-3-Clause"
] | null | null | null | workon/models/file.py | dalou/django-workon | ef63c0a81c00ef560ed693e435cf3825f5170126 | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import magic, os, urllib2
from django.core.files.storage import default_storage as storage
class FileTypeTester(models.Model):
class Meta:
abstract = True
file_type_tester_fieldname = 'file'
file_type_tester_file = None
file_type_tester_url = None
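    # cached file handle and URL, so repeated lookups do not re-resolve them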
file_typemime = models.CharField(_(u"Type Mime"), max_length=254, blank=True, null=True)
def get_file_file(self, **kwargs):
if self.file_type_tester_file:
return self.file_type_tester_file
fieldname = self.file_type_tester_fieldname
if not hasattr(self, fieldname):
raise Exception("Field '%s' doesn't exists in this FileTypeTesterMixin model" % fieldname)
else:
file = getattr(self, fieldname)
self.file_type_tester_file = file.file
return file.file
def get_file_url(self, **kwargs):
if self.file_type_tester_url:
return self.file_type_tester_url
fieldname = self.file_type_tester_fieldname
if not hasattr(self, fieldname):
raise Exception("Field '%s' doesn't exists in this FileTypeTesterMixin model" % fieldname)
else:
file = getattr(self, fieldname)
self.file_type_tester_url = file.url
return file.url
#TODO: open file with S3 storage system to retrieve typemime (storage.open(file) -> and get headers)
def get_file_mimetype(self, fieldname=None, **kwargs):
if self.file_typemime:
return self.file_typemime
        if fieldname is None:
fieldname = self.file_type_tester_fieldname
if not hasattr(self, fieldname):
raise Exception("Field '%s' doesn't exists in this FileTypeTesterMixin model" % fieldname)
else:
file = getattr(self, fieldname)
#return magic.from_file(str(storage.open(file.name)), mime=True)
try:
# Normal storage condition
self.file_typemime = magic.from_file(file.path, mime=True)
self.save()
return self.file_typemime
except:
# if there is no name, stop right here
if not file.name:
self.file_typemime = None
return self.file_typemime
                # Remote storage fallback (e.g. static files on S3): fetch the
                # file over HTTP and read the MIME type from the response headers
try:
url = os.path.join(settings.MEDIA_URL, file.name)
# stop if the url is not a valid url
if any([
url.startswith('//'),
url.startswith('http://'),
url.startswith('https://')]):
file = urllib2.urlopen(url)
self.file_typemime = file.info().gettype()
self.save()
else:
self.file_typemime = None
except:
self.file_typemime = None
return self.file_typemime
def is_image(self, **kwargs):
mimetype = self.get_file_mimetype()
return mimetype in ['image/rgb', 'image/gif', 'image/pbm', 'image/pgm', 'image/ppm',
'image/tiff', 'image/rast', 'image/xbm', 'image/jpeg', 'image/bmp', 'image/png', 'image/x-icon']
def is_zip(self, **kwargs):
return self.get_file_mimetype() in ['application/zip']
def is_pdf(self, **kwargs):
return self.get_file_mimetype() in ['application/pdf']
def is_powerpoint(self, **kwargs):
return self.get_file_mimetype() in ['application/vnd.ms-powerpoint', 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'application/vnd.openxmlformats-officedocument.presentationml.slideshow']
def is_word(self, **kwargs):
return self.get_file_mimetype() in ['application/msword', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/vnd.ms-excel', 'vnd.ms-word.document']
def is_zip(self, **kwargs):
return self.get_file_mimetype() in ['application/zip'] | 41.307692 | 153 | 0.598929 | 490 | 4,296 | 5.089796 | 0.269388 | 0.060946 | 0.067362 | 0.064956 | 0.442261 | 0.369687 | 0.369687 | 0.34563 | 0.311949 | 0.25421 | 0 | 0.002681 | 0.3054 | 4,296 | 104 | 154 | 41.307692 | 0.83311 | 0.071462 | 0 | 0.417722 | 0 | 0 | 0.168508 | 0.067052 | 0 | 0 | 0 | 0.009615 | 0 | 1 | 0.113924 | false | 0 | 0.063291 | 0.063291 | 0.43038 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e7c45b57c7b1f68c58be33048ab411b59607f95 | 3,734 | py | Python | retroarch_emulationstation.py | allebb/personal-influxdb | 280be0cceb0bc00412849be4ca5ad4c7be539fc3 | [
"Apache-2.0"
] | 217 | 2020-01-07T20:25:46.000Z | 2022-03-29T06:09:58.000Z | retroarch_emulationstation.py | allebb/personal-influxdb | 280be0cceb0bc00412849be4ca5ad4c7be539fc3 | [
"Apache-2.0"
] | 16 | 2020-02-10T12:40:23.000Z | 2022-02-26T13:01:55.000Z | retroarch_emulationstation.py | allebb/personal-influxdb | 280be0cceb0bc00412849be4ca5ad4c7be539fc3 | [
"Apache-2.0"
] | 34 | 2020-01-15T15:42:20.000Z | 2022-02-22T17:29:15.000Z | #!/usr/bin/python3
# Copyright (C) 2021 Sam Steele
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, ntpath, json, pytz, urllib
import xml.etree.ElementTree as ET
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
LOCAL_TIMEZONE = pytz.timezone('America/New_York')
RETROARCH_LOGS = '/home/ark/.config/retroarch/playlists/logs/'
EMULATIONSTATION_ROMS = '/roms'
IMAGE_WEB_PREFIX = 'https://example.net/retroarch_images/'
INFLUXDB_HOST = 'localhost'
INFLUXDB_PORT = 8086
INFLUXDB_USERNAME = 'root'
INFLUXDB_PASSWORD = 'root'
INFLUXDB_DATABASE = 'gaming'
points = []
try:
client = InfluxDBClient(host=INFLUXDB_HOST, port=INFLUXDB_PORT, username=INFLUXDB_USERNAME, password=INFLUXDB_PASSWORD)
client.create_database(INFLUXDB_DATABASE)
client.switch_database(INFLUXDB_DATABASE)
except InfluxDBClientError as err:
print("InfluxDB connection failed: %s" % (err))
sys.exit()
roms = {}
for platform in os.listdir(EMULATIONSTATION_ROMS):
if os.path.exists(EMULATIONSTATION_ROMS + '/' + platform + '/gamelist.xml'):
gamelist = ET.parse(EMULATIONSTATION_ROMS + '/' + platform + '/gamelist.xml').getroot()
for game in gamelist.findall('game'):
            if gamelist.find('provider/System') is not None:
rom = {}
rom['name'] = game.find('name').text
rom['filename'] = ntpath.basename(game.find('path').text)
rom['key'] = os.path.splitext(rom['filename'])[0]
rom['path'] = platform
rom['platform'] = gamelist.find('provider/System').text
if(rom['platform'] == 'Mame'):
rom['platform'] = 'Arcade'
roms[rom['key']] = rom
for core in os.listdir(RETROARCH_LOGS):
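    # Fetch the last stored running total per game ("application_id") for this
    # core from InfluxDB; it is used below to turn cumulative runtimes into deltas.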
totals = client.query('SELECT last("total") AS "total" FROM "time" WHERE "total" > 0 AND "player_id" = \'' + core + '\' GROUP BY "application_id" ORDER BY "time" DESC')
for log in os.listdir(RETROARCH_LOGS + '/' + core):
key = os.path.splitext(log)[0]
if key in roms:
with open(RETROARCH_LOGS + '/' + core + '/' + log, 'r') as f:
playtime = json.load(f)
rom = roms[key]
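                # Convert the "HH:MM:SS" runtime string into seconds, then subtract
                # the total already recorded in InfluxDB so that "value" only holds
                # the playtime added since the previous import.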
h, m, s = playtime['runtime'].split(':')
runtime = value = int(h) * 3600 + int(m) * 60 + int(s)
total = list(totals.get_points(tags={'application_id': rom['key']}))
if len(total) == 1 and total[0]['total'] > 0:
value -= total[0]['total']
if value > 1:
time = datetime.fromisoformat(playtime['last_played'])
utc_time = LOCAL_TIMEZONE.localize(time).astimezone(pytz.utc).isoformat()
points.append({
"measurement": "time",
"time": utc_time,
"tags": {
"player_id": core,
"application_id": rom['key'],
"platform": rom['platform'],
"player_name": core,
"title": rom['name'],
},
"fields": {
"value": int(value),
"total": runtime,
"image": IMAGE_WEB_PREFIX + urllib.parse.quote(rom['path']) + '/' + urllib.parse.quote(rom['key']) + '.png',
"url": 'https://thegamesdb.net/search.php?name=' + urllib.parse.quote_plus(rom['name'])
}
})
try:
client.write_points(points)
except InfluxDBClientError as err:
print("Unable to write points to InfluxDB: %s" % (err))
sys.exit()
print("Successfully wrote %s data points to InfluxDB" % (len(points)))
| 36.970297 | 169 | 0.676754 | 492 | 3,734 | 5.052846 | 0.410569 | 0.024135 | 0.013274 | 0.012872 | 0.078842 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008715 | 0.170327 | 3,734 | 100 | 170 | 37.34 | 0.793738 | 0.15399 | 0 | 0.078947 | 0 | 0.013158 | 0.229816 | 0.013668 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.026316 | 0.065789 | 0 | 0.065789 | 0.039474 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e8225b7ada5e1ad54098e2705b2ce6cd71e8f49 | 5,254 | py | Python | rect_creator.py | jeffreyleifer/draw_rect | 80082f53b52f5af9378f2a624024c6784ec9b565 | [
"MIT"
] | null | null | null | rect_creator.py | jeffreyleifer/draw_rect | 80082f53b52f5af9378f2a624024c6784ec9b565 | [
"MIT"
] | null | null | null | rect_creator.py | jeffreyleifer/draw_rect | 80082f53b52f5af9378f2a624024c6784ec9b565 | [
"MIT"
] | null | null | null | from PySide2.QtWidgets import QWidget
from PySide2.QtCore import Qt
from PySide2.QtCore import QPoint
from PySide2.QtGui import QPainter
from PySide2.QtGui import QPixmap
from PySide2.QtGui import QPen
from draw_utils import is_adjacent, is_contained,is_intersect
from rectangle import Rectangle
from constants import RECT_A,RECT_B,PEN_WIDTH
"""
Rectangle Creator:
* GUI to create rectangles
* Extended from QWidget
"""
class RectangleCreator(QWidget):
def __init__(self):
super().__init__()
""" Setup """
self.setMouseTracking(True)
self.begin = QPoint()
self.end = QPoint()
self.coord_list = []
self.rect_list = []
self.clicked = False
"""
Paint Event
* Paints Rectangles onto a Pixmap from a list of coordinates
* Stores created rectangles in a list
* Rectangle store is cleared and rebuild each iteration
"""
def paintEvent(self, event):
"""Create pallet"""
pixmap = QPixmap()
painter = QPainter(self)
painter.drawPixmap(self.rect(), pixmap)
pen = QPen()
pen.setWidth(PEN_WIDTH)
pen.setColor(Qt.black)
painter.setPen(pen)
"""Rebuild rectangle store"""
self.rect_list.clear()
for coord in self.coord_list:
rec = Rectangle(coord[RECT_A], coord[RECT_B])
self.rect_list.append(rec)
painter.drawRect(rec)
if not self.clicked:
return
"""Create new rectangle"""
rec = Rectangle(self.begin, self.end)
self.rect_list.append(rec)
painter.drawRect(rec)
"""
mousePressEvent
* Deletes oldest rectangle from the coordinate list
* Updates begin and end values
* Tracks click for use in display of rectangles
"""
def mousePressEvent(self, event):
"""Remove oldest"""
if len(self.coord_list) > 1:
self.coord_list.pop(0)
"""Update tracking variables"""
self.begin = event.pos()
self.end = event.pos()
self.clicked = True
self.update()
"""
mouseMoveEvent
* Updates endpoint
* Updates Coordinates on display
"""
def mouseMoveEvent(self, event):
self.end = event.pos()
self.setWindowTitle('Coordinates: ( x = %d : y = %d )' % (event.x(), event.y()))
self.update()
"""
mouseReleaseEvent
* Checks for position of start and end points of rectangle
* Transforms rectangle so start is topleft and end is bottom right
* Adds rectangle coordinates to the coordinates list
* If two rectangle exist:
* Runs test for Adjacent, contained and intersection
"""
def mouseReleaseEvent(self, event):
"""Needs horizontal flip?"""
if self.begin.x() > self.end.x() and self.begin.y() < self.end.y():
if len(self.rect_list) == 1:
self.rect_list[RECT_A] = self.flip_hor(self.rect_list[RECT_A])
else:
self.rect_list[RECT_B] = self.flip_hor(self.rect_list[RECT_B])
"""Needs vertical flip?"""
if self.begin.x() < self.end.x() and self.begin.y() > self.end.y():
if len(self.rect_list) == 1:
self.rect_list[RECT_A] = self.flip_ver(self.rect_list[RECT_A])
else:
self.rect_list[RECT_B] = self.flip_ver(self.rect_list[RECT_B])
"""Needs refection?"""
if self.begin.x() > self.end.x() and self.begin.y() > self.end.y():
if len(self.rect_list) == 1:
self.rect_list[RECT_A] = self.reflect(self.rect_list[RECT_A])
else:
self.rect_list[RECT_B] = self.reflect(self.rect_list[RECT_B])
self.clicked = False
self.update()
"""Add new coordinates to the coordinates list"""
self.coord_list.append([self.begin,self.end])
"""Run Tests"""
if len(self.coord_list) == 2:
is_adjacent(self.rect_list[RECT_A],self.rect_list[RECT_B],silent=False)
contained = is_contained(self.rect_list[RECT_A],self.rect_list[RECT_B])
if not contained:
contained = is_contained(self.rect_list[RECT_B],self.rect_list[RECT_A])
if not contained:
is_intersect(self.rect_list[RECT_A],self.rect_list[RECT_B])
print('------')
"""
flip_hor
* Call rectangle flip_h function
* Flip start and end points horizontal
"""
def flip_hor(self,rect):
rect.flip_h()
self.begin = rect.topLeft()
self.end = rect.bottomRight()
return rect
"""
flip_ver
* Calls rectangle flip_v function and
* Flip start and end points vertical
"""
def flip_ver(self,rect):
rect.flip_v()
self.begin = rect.topLeft()
self.end = rect.bottomRight()
return rect
"""
reflect
* Calls flip_hor then flip_ver to produce a reflection of the start and end points
* Same as above for the input rectangle coordinates
"""
def reflect(self,rect):
rect = self.flip_hor(rect)
rect = self.flip_ver(rect)
return rect
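if __name__ == "__main__":
    # Minimal usage sketch (an addition, not part of the original module):
    # run the widget inside a standard Qt event loop. Assumes a PySide2
    # environment with a display available.
    import sys
    from PySide2.QtWidgets import QApplication

    app = QApplication(sys.argv)
    creator = RectangleCreator()
    creator.show()
    sys.exit(app.exec_())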
| 33.044025 | 88 | 0.604111 | 665 | 5,254 | 4.63609 | 0.227068 | 0.080441 | 0.105092 | 0.103795 | 0.352903 | 0.267921 | 0.251054 | 0.218618 | 0.193318 | 0.193318 | 0 | 0.003469 | 0.286639 | 5,254 | 158 | 89 | 33.253165 | 0.819104 | 0.009517 | 0 | 0.292135 | 0 | 0 | 0.010141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089888 | false | 0 | 0.11236 | 0 | 0.258427 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e8434d13fe760e8cea23fce54f739923c5f3b0f | 6,622 | py | Python | mining_shoots_discord.py | Hofei90/stfc_mining_shoots | 1eb89c0296b203719d32ae0cbf7c15ae7ca620dc | [
"MIT"
] | null | null | null | mining_shoots_discord.py | Hofei90/stfc_mining_shoots | 1eb89c0296b203719d32ae0cbf7c15ae7ca620dc | [
"MIT"
] | null | null | null | mining_shoots_discord.py | Hofei90/stfc_mining_shoots | 1eb89c0296b203719d32ae0cbf7c15ae7ca620dc | [
"MIT"
] | null | null | null | import toml
from discord.ext import commands
import discord
import pathlib
from peewee import SqliteDatabase
import db_model as db
import datetime
from dataclasses import dataclass
import matplotlib.pyplot as plt
import asyncio
SKRIPTPFAD = pathlib.Path(__file__).parent
CONFIGPFAD = SKRIPTPFAD / "config.toml"
CONFIG = toml.load(CONFIGPFAD)
bot = commands.Bot(command_prefix="!")
db.DB_PROXY.initialize(SqliteDatabase(SKRIPTPFAD / "mining_shoots.db3"))
db.create_tables()
@dataclass
class UCShoot:
date: datetime.date
allianz: str
enemy: str
player: str
def check_input_allianz(input_):
if any(character.isdigit() for character in input_):
raise ValueError(f"digit in {input_!r} not allowed")
def check_user(userid):
return db.User.get_or_none(db.User.userid == userid)
def get_or_create_unbekannter_user():
user, _ = db.User.get_or_create(userid=1, name="unbekannter_user")
user.datum = datetime.datetime.now()
user.save()
return user
def create_uc_shoot(user, daten):
if len(daten) == 2:
allianz = daten[0]
check_input_allianz(allianz)
uc_shoot = UCShoot(
date=datetime.date.today(),
allianz=allianz,
enemy=daten[1],
player=user
)
elif len(daten) == 3:
allianz = daten[1]
check_input_allianz(allianz)
uc_shoot = UCShoot(
date=datetime.datetime.strptime(daten[0], "%d.%m.%Y").date(),
allianz=daten[1],
enemy=daten[2],
player=user
)
else:
uc_shoot = None
return uc_shoot
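# Argument shapes handled above (a sketch; "!" is the bot's command prefix):
#   !uc <allianz> <enemy>                -> dated today
#   !uc <dd.mm.YYYY> <allianz> <enemy>   -> explicit date
# Anything else yields None and is reported back as an invalid format.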
def schreibe_in_datenbank(daten):
db.UCShoots.create(
date=daten.date,
allianz=daten.allianz.upper(),
enemy=daten.enemy.lower(),
player=daten.player
)
def load_players():
query = db.UCShoots.raw("select enemy, allianz, count(enemy) as cenemy from ucshoots group by enemy")
return query
def load_allys():
query = db.UCShoots.raw("select allianz, count(enemy) as cenemy from ucshoots group by allianz")
return query
@bot.command(name="reg",
help=CONFIG["texte"]["reg"]["help"],
brief=CONFIG["texte"]["reg"]["brief"])
async def user_registrieren(ctx):
channel = ctx.channel
author = ctx.author
await ctx.channel.send(CONFIG["texte"]["reg"]["zustimmung_text"])
def check(m):
return m.content.lower() == CONFIG["texte"]["reg"][
"zustimmung_antwort"].lower() and m.channel == channel and m.author == author
try:
msg = await bot.wait_for('message', timeout=60*2, check=check)
except asyncio.TimeoutError:
await channel.send('👎 - Timeout')
else:
if msg.content.lower() == CONFIG["texte"]["reg"]["zustimmung_antwort"].lower():
db.User.get_or_create(userid=ctx.author.id, name=ctx.author.name, datum=datetime.datetime.now())
            await ctx.channel.send('User registered, the bot can now be used')
@bot.command(name="del",
help=CONFIG["texte"]["del"]["help"],
brief=CONFIG["texte"]["del"]["brief"])
async def user_loeschen(ctx):
user = check_user(ctx.author.id)
if user is not None:
channel = ctx.channel
author = ctx.author
await ctx.channel.send(CONFIG["texte"]["del"]["text"])
def check(m):
return m.content.lower() in CONFIG["texte"]["del"][
"antworten"] and m.channel == channel and m.author == author
try:
msg = await bot.wait_for('message', timeout=60 * 2, check=check)
except asyncio.TimeoutError:
await channel.send('👎 - Timeout')
else:
if msg.content.lower() == "ja":
user = check_user(ctx.author.id)
unbekannter_user = get_or_create_unbekannter_user()
db.UCShoots.update(player=unbekannter_user).where(db.UCShoots.player == user).execute()
db.User.delete().where(db.User.userid == user.userid).execute()
                await ctx.channel.send('User deleted')
else:
                await ctx.channel.send('Deletion cancelled')
else:
await ctx.channel.send("Benutzer unbekannt")
@bot.command(name="dia",
help=CONFIG["texte"]["dia"]["help"],
brief=CONFIG["texte"]["dia"]["brief"])
async def kuchen_backen(ctx):
user = check_user(ctx.author.id)
if user is not None:
labels = []
values = []
query = load_allys()
for datum in query:
labels.append(datum.allianz)
values.append(datum.cenemy)
fig1, ax1 = plt.subplots()
ax1.pie(values, labels=labels, autopct='%1.1f%%')
ax1.axis('equal')
picpfad = pathlib.Path(SKRIPTPFAD / CONFIG["pic"])
        plt.savefig(picpfad)
        plt.close(fig1)  # free the figure; each call creates a new one via plt.subplots()
await ctx.send(file=discord.File(picpfad))
else:
await ctx.send(CONFIG["texte"]["nicht_registiert"])
@bot.command(name="uca",
help=CONFIG["texte"]["uca"]["help"],
brief=CONFIG["texte"]["uca"]["brief"])
async def show_ally_stat(ctx):
    user = check_user(ctx.author.id)
    if user is not None:
        ally_stat = load_allys()
        await ctx.send(
            "\n".join(
                f"{datum.allianz}: {datum.cenemy}"
                for datum in ally_stat
            )
        )
else:
await ctx.send(CONFIG["texte"]["nicht_registiert"])
@bot.command(name="ucp",
help=CONFIG["texte"]["ucp"]["help"],
brief=CONFIG["texte"]["ucp"]["brief"])
async def show_player_stat(ctx):
user = check_user(ctx.author.id)
if user is not None:
player_stat = load_players()
await ctx.send(
"\n".join(
f"[{datum.allianz}]{datum.enemy}: {datum.cenemy}"
for datum in player_stat
)
)
else:
await ctx.send(CONFIG["texte"]["nicht_registiert"])
@bot.command(name="uc",
help=CONFIG["texte"]["uc"]["help"],
brief=CONFIG["texte"]["uc"]["brief"]
)
async def add_uc_shoot(ctx, *args):
user = check_user(ctx.author.id)
if user is not None:
try:
uc_shoot = create_uc_shoot(user, args)
except ValueError:
uc_shoot = None
if uc_shoot is not None:
schreibe_in_datenbank(uc_shoot)
await ctx.send(f"UC Abschuss gespeichert")
else:
await ctx.send("Ungültiges Format")
else:
await ctx.send(CONFIG["texte"]["nicht_registiert"])
bot.run(CONFIG["token"])
| 30.1 | 108 | 0.598762 | 815 | 6,622 | 4.761963 | 0.218405 | 0.059521 | 0.027828 | 0.03092 | 0.427467 | 0.373873 | 0.355836 | 0.355836 | 0.320794 | 0.244009 | 0 | 0.004509 | 0.263214 | 6,622 | 219 | 109 | 30.237443 | 0.790531 | 0 | 0 | 0.320442 | 0 | 0 | 0.130645 | 0.004682 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049724 | false | 0 | 0.055249 | 0.016575 | 0.171271 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e8540b88c0120bfb5afdddeb8fe99456df3e245 | 3,115 | py | Python | test_utils/test_utils.py | Sawrz/yeti | 1dd0653e570dc80663cd23aab1ebf61a54e44001 | [
"MIT"
] | null | null | null | test_utils/test_utils.py | Sawrz/yeti | 1dd0653e570dc80663cd23aab1ebf61a54e44001 | [
"MIT"
] | null | null | null | test_utils/test_utils.py | Sawrz/yeti | 1dd0653e570dc80663cd23aab1ebf61a54e44001 | [
"MIT"
] | null | null | null | import string
import numpy as np
from yeti.get_features.hydrogen_bonds import Triplet
from yeti.systems.building_blocks import Atom
def create_data_type_exception_messages(parameter_name, data_type_name):
return 'Wrong data type for parameter "{name}". Desired type is {data_type}'.format(name=parameter_name,
data_type=data_type_name)
def create_array_shape_exception_messages(parameter_name, desired_shape):
return 'Wrong shape for parameter "{name}". Desired shape: {des_shape}.'.format(name=parameter_name,
des_shape=desired_shape)
def create_array_dtype_exception_messages(parameter_name, dtype_name):
return 'Wrong dtype for ndarray "{name}". Desired dtype is {data_type}'.format(name=parameter_name,
data_type=dtype_name)
def build_unit_cell_angles_and_vectors(number_of_frames):
angles = []
vectors = []
for i in range(number_of_frames):
angles.append([90, 90, 90])
vectors.append([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
angles = np.array(angles, dtype=np.float32)
vectors = np.array(vectors, dtype=np.float32)
return angles, vectors
def build_atom_triplet():
# first frame is h-bond
# second frame is not because of distance
# third frame is not because of angle
donor = Atom(structure_file_index=0, subsystem_index=0, name='A',
xyz_trajectory=np.array([[0.1, 0.4, 0.3], [0.1, 0.4, 0.3], [0.1, 0.4, 0.3]]))
donor_atom = Atom(structure_file_index=1, subsystem_index=1, name='B',
xyz_trajectory=np.array([[0.1, 0.5, 0.2], [0.1, 0.5, 0.2], [0.5, 0.5, 0.2]]))
acceptor = Atom(structure_file_index=2, subsystem_index=2, name='C',
xyz_trajectory=np.array([[0.1, 0.6, 0.4], [0.1, 0.7, 0.4], [0.1, 0.6, 0.4]]))
donor.add_covalent_bond(atom=donor_atom)
donor_atom.update_donor_state(is_donor_atom=True, donor_slots=1)
acceptor.update_acceptor_state(is_acceptor=True, acceptor_slots=2)
return donor, donor_atom, acceptor
def build_multi_atom_triplets(amount=2):
all_atoms = []
names = list(string.ascii_uppercase)
for triplet_number in range(amount):
atoms = build_atom_triplet()
starting_index = triplet_number * len(atoms)
for atom_number, atom in enumerate(atoms):
atom.subsystem_index = starting_index + atom_number
atom.name = names[atom.subsystem_index]
atom.structure_file_index = atom.subsystem_index + 2
all_atoms.append(atom)
return tuple(all_atoms)
def build_triplet():
donor, donor_atom, acceptor = build_atom_triplet()
unit_cell_angles, unit_cell_vectors = build_unit_cell_angles_and_vectors(number_of_frames=3)
return Triplet(donor_atom=donor_atom, acceptor=acceptor, periodic=True, unit_cell_angles=unit_cell_angles,
unit_cell_vectors=unit_cell_vectors)
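if __name__ == "__main__":
    # Smoke-test sketch (an addition, not part of the original module): the
    # helpers above compose into ready-made fixtures for the test suite.
    angles, vectors = build_unit_cell_angles_and_vectors(number_of_frames=3)
    atoms = build_multi_atom_triplets(amount=2)
    print(angles.shape, vectors.shape, len(atoms))  # (3, 3) (3, 3, 3) 6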
| 38.45679 | 113 | 0.646228 | 433 | 3,115 | 4.376443 | 0.228637 | 0.010554 | 0.014248 | 0.046438 | 0.208443 | 0.173615 | 0.138259 | 0.098153 | 0.098153 | 0.009499 | 0 | 0.035897 | 0.248796 | 3,115 | 80 | 114 | 38.9375 | 0.773932 | 0.03114 | 0 | 0 | 0 | 0 | 0.064698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.14 | false | 0 | 0.08 | 0.06 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e865f0471666eee6566c8792893e373522a1f73 | 72,247 | py | Python | vaccinate/core/models.py | MoralCode/vial | cdaaab053a9cf1cef40104a2cdf480b7932d58f7 | [
"MIT"
] | null | null | null | vaccinate/core/models.py | MoralCode/vial | cdaaab053a9cf1cef40104a2cdf480b7932d58f7 | [
"MIT"
] | null | null | null | vaccinate/core/models.py | MoralCode/vial | cdaaab053a9cf1cef40104a2cdf480b7932d58f7 | [
"MIT"
] | null | null | null | from __future__ import annotations
import uuid
from datetime import datetime, timedelta
from functools import reduce
from operator import or_
from typing import Any, List, NamedTuple, Optional
import beeline
import pytz
import sentry_sdk
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.gis.db import models as gis_models
from django.contrib.gis.geos import Point
from django.db import IntegrityError, models, transaction
from django.db.models import Min, Q
from django.db.models.query import QuerySet
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from django.utils import dateformat, timezone
from social_django.models import UserSocialAuth
from .baseconverter import pid
from .fields import CharTextField
class LocationType(models.Model):
"""
Represents a type of location, such as "Pharmacy" or "Hospital/Clinic"
"""
name = CharTextField(unique=True)
def __str__(self):
return self.name
class Meta:
db_table = "location_type"
class ProviderType(models.Model):
"""
Represents a type of provider, such as "Pharmacy" for CVS or "Health Plan" for Kaiser.
"""
name = CharTextField(unique=True)
def __str__(self):
return self.name
class Meta:
db_table = "provider_type"
class ProviderPhase(models.Model):
"Current phase, e.g. 'Not currently vaccinating'"
name = CharTextField(unique=True)
def __str__(self):
return self.name
class Meta:
db_table = "provider_phase"
class Provider(models.Model):
"""
A provider is a larger entity that encompasses several vaccination sites. A provider will generally have its own
vaccination policies, which at least nominally apply to all locations.
Examples include:
- The pharmacy chain CVS
- The Kaiser HMO
- LA County Fire Department-operated Super Sites in LA County
"""
name = CharTextField(unique=True)
contact_phone_number = CharTextField(null=True, blank=True)
main_url = CharTextField(null=True, blank=True)
vaccine_info_url = CharTextField(null=True, blank=True)
vaccine_locations_url = CharTextField(null=True, blank=True)
public_notes = models.TextField(null=True, blank=True)
appointments_url = CharTextField(null=True, blank=True)
provider_type = models.ForeignKey(
ProviderType, related_name="providers", on_delete=models.PROTECT
)
internal_contact_instructions = models.TextField(null=True, blank=True)
last_updated = models.DateField(null=True, blank=True)
airtable_id = models.CharField(
max_length=20,
null=True,
blank=True,
help_text="Airtable record ID, if this has one",
)
public_id = models.SlugField(
unique=True,
help_text="ID that we expose outside of the application",
)
import_json = models.JSONField(
null=True,
blank=True,
help_text="Original JSON if this record was imported from elsewhere",
)
phases = models.ManyToManyField(
ProviderPhase,
blank=True,
related_name="providers",
db_table="provider_provider_phase",
)
def __str__(self):
return self.name
class Meta:
db_table = "provider"
@property
def pid(self):
return "p" + pid.from_int(self.pk)
def save(self, *args, **kwargs):
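        # public_id strategy: reuse the Airtable record ID when one exists;
        # otherwise save once with a temporary UUID (the pk is needed to
        # compute the final pid) and then swap in the pk-based public ID.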
set_public_id_later = False
if (not self.public_id) and self.airtable_id:
self.public_id = self.airtable_id
elif not self.public_id:
set_public_id_later = True
self.public_id = "tmp:{}".format(uuid.uuid4())
super().save(*args, **kwargs)
if set_public_id_later:
self.public_id = self.pid
Provider.objects.filter(pk=self.pk).update(public_id=self.pid)
class State(models.Model):
"""
Information about a US state or territory
"""
abbreviation = models.CharField(max_length=2, unique=True)
name = CharTextField(unique=True)
fips_code = models.CharField(unique=True, blank=True, null=True, max_length=2)
def __str__(self):
return self.name
class Meta:
db_table = "state"
@classmethod
def __get_validators__(cls):
yield cls.pydantic_convert
@classmethod
def pydantic_convert(cls, abbreviation: str) -> State:
try:
return cls.objects.get(abbreviation=abbreviation)
except cls.DoesNotExist:
raise ValueError("State '{}' does not exist".format(abbreviation))
class County(models.Model):
"""
Every part of California is in one of the state's 58 counties, which are also the primary unit that coordinates
vaccinations and sets vaccination policies. A county's policies may not apply to every location in the county if the
    location's vaccines are sourced directly from the state or federal government.
"""
fips_code = models.CharField(unique=True, max_length=5)
name = CharTextField()
state = models.ForeignKey(State, related_name="counties", on_delete=models.PROTECT)
hotline_phone_number = CharTextField(null=True, blank=True)
vaccine_info_url = CharTextField(null=True, blank=True)
vaccine_locations_url = CharTextField(null=True, blank=True)
official_volunteering_url = CharTextField(null=True, blank=True)
public_notes = models.TextField(null=True, blank=True)
internal_notes = models.TextField(null=True, blank=True)
facebook_page = CharTextField(null=True, blank=True)
twitter_page = CharTextField(null=True, blank=True)
vaccine_reservations_url = CharTextField(null=True, blank=True)
population = models.IntegerField(null=True, blank=True)
vaccine_dashboard_url = CharTextField(null=True, blank=True)
vaccine_data_url = CharTextField(null=True, blank=True)
vaccine_arcgis_url = CharTextField(null=True, blank=True)
age_floor_without_restrictions = models.IntegerField(
null=True, blank=True, verbose_name="Age Floor"
)
airtable_id = models.CharField(
max_length=20,
null=True,
unique=True,
help_text="Airtable record ID, if this has one",
)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "counties"
db_table = "county"
class ImportRun(models.Model):
created_at = models.DateTimeField(default=timezone.now)
api_key = models.ForeignKey(
"api.ApiKey", blank=True, null=True, on_delete=models.SET_NULL
)
def __str__(self):
return str(self.created_at)
class Meta:
db_table = "import_run"
class DeriveAvailabilityAndInventoryResults(NamedTuple):
vaccines_offered: Optional[list[str]]
vaccines_offered_provenance_report: Optional[Report]
vaccines_offered_provenance_source_location: Optional[SourceLocation]
vaccines_offered_last_updated_at: Optional[datetime]
accepts_appointments: Optional[bool]
accepts_walkins: Optional[bool]
appointments_walkins_provenance_report: Optional[Report]
appointments_walkins_provenance_source_location: Optional[SourceLocation]
appointments_walkins_last_updated_at: Optional[datetime]
# Additional debugging info:
most_recent_report_on_vaccines_offered: Optional[Report]
most_recent_source_location_on_vaccines_offered: Optional[SourceLocation]
most_recent_report_on_availability: Optional[Report]
most_recent_source_location_on_availability: Optional[SourceLocation]
class Location(gis_models.Model):
"A location is a distinct place where one can receive a COVID vaccine."
name = CharTextField()
phone_number = CharTextField(null=True, blank=True)
full_address = models.TextField(
null=True,
blank=True,
help_text="the entire address, including city and zip code",
)
street_address = CharTextField(
null=True, blank=True, help_text="the first line of the address"
)
city = CharTextField(null=True, blank=True)
state = models.ForeignKey(State, related_name="locations", on_delete=models.PROTECT)
zip_code = models.CharField(
max_length=10,
blank=True,
null=True,
help_text="can accomodate ZIP+4 in standard formatting if needed",
)
hours = models.TextField(
blank=True,
null=True,
help_text="Do not enter hours here for mobile clinics! File a report and put mobile clinic hours in the public notes.",
)
website = CharTextField(blank=True, null=True)
location_type = models.ForeignKey(
LocationType, related_name="locations", on_delete=models.PROTECT
)
vaccines_offered = models.JSONField(
null=True,
blank=True,
help_text="JSON array of strings representing vaccines on offer here - enter 'null' if we do not know",
)
vaccines_offered_provenance_report = models.ForeignKey(
"Report",
null=True,
blank=True,
related_name="+",
help_text="The report that last populated vaccines_offered",
on_delete=models.PROTECT,
)
vaccines_offered_provenance_source_location = models.ForeignKey(
"SourceLocation",
null=True,
blank=True,
related_name="+",
help_text="The source location that last populated vaccines_offered",
on_delete=models.PROTECT,
)
vaccines_offered_last_updated_at = models.DateTimeField(
help_text="When vaccines_offered was last updated",
blank=True,
null=True,
)
accepts_appointments = models.BooleanField(
null=True, blank=True, help_text="Does this location accept appointments"
)
accepts_walkins = models.BooleanField(
null=True, blank=True, help_text="Does this location accept walkins"
)
appointments_walkins_provenance_report = models.ForeignKey(
"Report",
null=True,
blank=True,
related_name="+",
help_text="The report that last populated accepts_walkins and accepts_appointments",
on_delete=models.PROTECT,
)
appointments_walkins_provenance_source_location = models.ForeignKey(
"SourceLocation",
null=True,
blank=True,
related_name="+",
help_text="The source location that last populated accepts_walkins and accepts_appointments",
on_delete=models.PROTECT,
)
appointments_walkins_last_updated_at = models.DateTimeField(
help_text="When accepts_walkins and accepts_appointments were last updated",
blank=True,
null=True,
)
public_notes = models.TextField(blank=True, null=True)
google_places_id = CharTextField(
null=True,
blank=True,
help_text="an ID that associates a location with a unique entry in the Google Places ontology",
)
vaccinespotter_location_id = CharTextField(
null=True,
blank=True,
help_text="This location's ID on vaccinespotter.org",
)
vaccinefinder_location_id = CharTextField(
null=True,
blank=True,
help_text="This location's ID on vaccinefinder.org",
)
provider = models.ForeignKey(
Provider,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="locations",
help_text="If you're certain that this location is part of a chain or network of providers -- like CVS, Costco, or Kroger -- add the right provider network.",
)
county = models.ForeignKey(
County,
null=True,
blank=True,
related_name="locations",
on_delete=models.PROTECT,
help_text="Use the 🔍 lookup tool or enter the county number.",
)
# This was originally specified as a 'coordinate point' but Django doesn't easily
    # expose the 'point' type - we could adopt GeoDjango later, though it's a heavy dependency
latitude = models.DecimalField(max_digits=9, decimal_places=5)
longitude = models.DecimalField(
max_digits=9,
decimal_places=5,
help_text="Enter coordinates up to 5 decimal places, or use search box and pin below to ‘pin’ the location",
)
point = gis_models.PointField(
geography=True, blank=True, null=True, spatial_index=True
)
soft_deleted = models.BooleanField(
default=False,
help_text="we never delete rows from this table; all deletes are soft",
)
soft_deleted_because = CharTextField(null=True, blank=True)
duplicate_of = models.ForeignKey(
"self",
null=True,
blank=True,
related_name="duplicate_locations",
on_delete=models.PROTECT,
help_text="duplicate locations are associated with a canonical location",
)
import_run = models.ForeignKey(
ImportRun,
null=True,
blank=True,
related_name="created_locations",
on_delete=models.PROTECT,
help_text="the import run that created this location, if any",
)
provenance = CharTextField(null=True, blank=True)
internal_notes = models.TextField(null=True, blank=True)
do_not_call = models.BooleanField(default=False)
do_not_call_reason = models.TextField(null=True, blank=True)
created_at = models.DateTimeField(default=timezone.now)
created_by = models.ForeignKey(
"auth.User",
blank=True,
null=True,
related_name="created_locations",
on_delete=models.PROTECT,
)
airtable_id = models.CharField(
max_length=20,
null=True,
unique=True,
help_text="Airtable record ID, if this has one",
)
public_id = models.SlugField(
unique=True, help_text="ID that we expose outside of the application"
)
import_json = models.JSONField(
null=True,
blank=True,
help_text="Original JSON if this record was imported from elsewhere",
)
import_ref = models.CharField(
max_length=100,
db_index=True,
null=True,
blank=True,
help_text="If imported, unique identifier in the system it was imported from",
)
preferred_contact_method = models.CharField(
max_length=32,
choices=(
("online_only", "online_only"),
("online_preferred", "online_preferred"),
("call_preferred", "call_preferred"),
("call_only", "call_only"),
),
blank=True,
null=True,
help_text="Preferred method of collecting status about this location",
)
# Denormalized foreign keys for efficient "latest yes report" style queries
# https://github.com/CAVaccineInventory/vial/issues/193
# Latest report, NOT including is_pending_review reports:
dn_latest_report = models.ForeignKey(
"Report", related_name="+", on_delete=models.SET_NULL, null=True, blank=True
)
# Latest report including is_pending_review reports:
dn_latest_report_including_pending = models.ForeignKey(
"Report", related_name="+", on_delete=models.SET_NULL, null=True, blank=True
)
# Latest with at least one YES availability tag, NOT including is_pending_review:
dn_latest_yes_report = models.ForeignKey(
"Report", related_name="+", on_delete=models.SET_NULL, null=True, blank=True
)
# Latest with at least one SKIP availability tag, NOT including is_pending_review:
dn_latest_skip_report = models.ForeignKey(
"Report", related_name="+", on_delete=models.SET_NULL, null=True, blank=True
)
# Latest report that is NOT is_pending_review and does NOT have a skip tag:
dn_latest_non_skip_report = models.ForeignKey(
"Report", related_name="+", on_delete=models.SET_NULL, null=True, blank=True
)
# Denormalized counts for non is_pending_review reports:
dn_skip_report_count = models.IntegerField(default=0)
dn_yes_report_count = models.IntegerField(default=0)
is_pending_review = models.BooleanField(
default=False, help_text="Locations that are pending review by our QA team"
)
claimed_by = models.ForeignKey(
"auth.User",
related_name="claimed_locations",
on_delete=models.PROTECT,
blank=True,
null=True,
help_text="QA reviewer who has claimed this location",
)
claimed_at = models.DateTimeField(
help_text="When the QA reviewer claimed this location",
blank=True,
null=True,
)
def __str__(self):
return self.name
@classmethod
def __get_validators__(cls):
yield cls.pydantic_convert
@classmethod
def pydantic_convert(cls, id: str) -> Location:
if str(id).isdigit():
kwargs = {"pk": id}
else:
kwargs = {"public_id": id}
try:
obj = cls.objects.get(**kwargs)
except cls.DoesNotExist:
raise ValueError("Location '{}' does not exist".format(id))
return obj
class Meta:
db_table = "location"
permissions = [
("merge_locations", "Can merge two locations"),
]
@property
def pid(self):
return "l" + pid.from_int(self.pk)
@classmethod
def valid_for_call(cls) -> QuerySet[Location]:
return (
cls.objects.filter(soft_deleted=False, do_not_call=False)
.exclude(phone_number__isnull=True)
.exclude(phone_number="")
.exclude(
preferred_contact_method="research_online",
)
)
def derive_availability_and_inventory(
self, save=False
) -> DeriveAvailabilityAndInventoryResults:
"""
Use recent reports and matched source_locations to derive inventory/availability
This populates self.vaccines_offered, .accepts_appointments and .accepts_walkins
plus the columns that track when and why they were updated based on finding the
reports or source locations with the most recent opinions on these.
        Returns a namedtuple of the changes it would make; pass save=True to persist them.
"""
SOURCE_NAMES_TO_CONSIDER = (
"vaccinefinder_org",
"vaccinespotter_org",
"getmyvax_org",
)
vaccines_offered = None
vaccines_offered_provenance_report = None
vaccines_offered_provenance_source_location = None
vaccines_offered_last_updated_at = None
accepts_appointments = None
accepts_walkins = None
appointments_walkins_provenance_report = None
appointments_walkins_provenance_source_location = None
appointments_walkins_last_updated_at = None
most_recent_report_on_vaccines_offered = None
most_recent_source_location_on_vaccines_offered = None
most_recent_report_on_availability = None
most_recent_source_location_on_availability = None
most_recent_report_on_vaccines_offered = (
self.reports.all()
.exclude(soft_deleted=True)
.prefetch_related("availability_tags")
.exclude(availability_tags__group="skip")
.exclude(vaccines_offered__isnull=True)
.order_by("-created_at")
.first()
)
most_recent_source_location_on_vaccines_offered = (
self.matched_source_locations.all()
.filter(source_name__in=SOURCE_NAMES_TO_CONSIDER)
.exclude(import_json__inventory=None)
.order_by("-last_imported_at")
.first()
)
report_to_use_for_vaccines_offered = most_recent_report_on_vaccines_offered
source_location_to_use_for_vaccines_offered = (
most_recent_source_location_on_vaccines_offered
)
if (
report_to_use_for_vaccines_offered
and source_location_to_use_for_vaccines_offered
):
# Should we go with the report or the source location? Depends which is most recent
if (
source_location_to_use_for_vaccines_offered.last_imported_at
and source_location_to_use_for_vaccines_offered.last_imported_at
> report_to_use_for_vaccines_offered.created_at
):
# Use the source_location, ignore the report
report_to_use_for_vaccines_offered = None
else:
# Use the report, ignore the source location
source_location_to_use_for_vaccines_offered = None
if source_location_to_use_for_vaccines_offered:
vaccines_offered = (
source_location_to_use_for_vaccines_offered.vaccines_offered
)
vaccines_offered_provenance_source_location = (
source_location_to_use_for_vaccines_offered
)
vaccines_offered_last_updated_at = (
source_location_to_use_for_vaccines_offered.last_imported_at
)
elif report_to_use_for_vaccines_offered:
vaccines_offered = report_to_use_for_vaccines_offered.vaccines_offered
vaccines_offered_provenance_report = report_to_use_for_vaccines_offered
vaccines_offered_last_updated_at = (
report_to_use_for_vaccines_offered.created_at
)
# Now do accepts_appointments and accepts_walkins based on most recent report
# or source_location that provides useful data on those
most_recent_report_on_availability = (
self.reports.all()
.exclude(soft_deleted=True)
.prefetch_related("availability_tags")
.exclude(availability_tags__group="skip")
.order_by("-created_at")
.first()
)
most_recent_source_location_on_availability = (
self.matched_source_locations.all()
.filter(source_name__in=SOURCE_NAMES_TO_CONSIDER)
.exclude(import_json__availability=None)
.order_by("-last_imported_at")
.first()
)
report_to_use_for_availability = most_recent_report_on_availability
source_location_to_use_for_availability = (
most_recent_source_location_on_availability
)
if report_to_use_for_availability and source_location_to_use_for_availability:
# Should we go with the report or the source location? Depends which is most recent
if source_location_to_use_for_availability.last_imported_at and (
source_location_to_use_for_availability.last_imported_at
> report_to_use_for_availability.created_at
):
# Use the source_location, ignore the report
report_to_use_for_availability = None
else:
# Use the report, ignore the source location
source_location_to_use_for_availability = None
if source_location_to_use_for_availability:
availability = source_location_to_use_for_availability.import_json[
"availability"
]
accepts_appointments = bool(availability.get("appointments"))
accepts_walkins = bool(availability.get("drop_in"))
appointments_walkins_provenance_source_location = (
source_location_to_use_for_availability
)
appointments_walkins_last_updated_at = (
source_location_to_use_for_availability.last_imported_at
)
elif report_to_use_for_availability:
# Use the availability tags
tags = {
t.slug for t in report_to_use_for_availability.availability_tags.all()
}
accepts_appointments = any(
tag in tags
for tag in (
"appointment_calendar_currently_full",
"appointment_required",
"appointments_available",
"appointments_or_walkins",
)
)
accepts_walkins = any(
tag in tags for tag in ("walk_ins_only", "appointments_or_walkins")
)
appointments_walkins_provenance_report = report_to_use_for_availability
appointments_walkins_last_updated_at = (
report_to_use_for_availability.created_at
)
derived = DeriveAvailabilityAndInventoryResults(
vaccines_offered=vaccines_offered,
vaccines_offered_provenance_report=vaccines_offered_provenance_report,
vaccines_offered_provenance_source_location=vaccines_offered_provenance_source_location,
vaccines_offered_last_updated_at=vaccines_offered_last_updated_at,
accepts_appointments=accepts_appointments,
accepts_walkins=accepts_walkins,
appointments_walkins_provenance_report=appointments_walkins_provenance_report,
appointments_walkins_provenance_source_location=appointments_walkins_provenance_source_location,
appointments_walkins_last_updated_at=appointments_walkins_last_updated_at,
most_recent_report_on_vaccines_offered=most_recent_report_on_vaccines_offered,
most_recent_source_location_on_vaccines_offered=most_recent_source_location_on_vaccines_offered,
most_recent_report_on_availability=most_recent_report_on_availability,
most_recent_source_location_on_availability=most_recent_source_location_on_availability,
)
if save:
self.vaccines_offered = derived.vaccines_offered
self.vaccines_offered_provenance_report = (
derived.vaccines_offered_provenance_report
)
self.vaccines_offered_provenance_source_location = (
derived.vaccines_offered_provenance_source_location
)
self.vaccines_offered_last_updated_at = (
derived.vaccines_offered_last_updated_at
)
self.accepts_appointments = derived.accepts_appointments
self.accepts_walkins = derived.accepts_walkins
self.appointments_walkins_provenance_report = (
derived.appointments_walkins_provenance_report
)
self.appointments_walkins_provenance_source_location = (
derived.appointments_walkins_provenance_source_location
)
self.appointments_walkins_last_updated_at = (
derived.appointments_walkins_last_updated_at
)
self.save(
update_fields=[
"vaccines_offered",
"vaccines_offered_provenance_report",
"vaccines_offered_provenance_source_location",
"vaccines_offered_last_updated_at",
"accepts_appointments",
"accepts_walkins",
"appointments_walkins_provenance_report",
"appointments_walkins_provenance_source_location",
"appointments_walkins_last_updated_at",
]
)
return derived
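    # Usage sketch (illustrative, not from the original source): run once as a
    # dry run to inspect the derived values, then persist them.
    #
    #     derived = location.derive_availability_and_inventory()
    #     if derived.vaccines_offered != location.vaccines_offered:
    #         location.derive_availability_and_inventory(save=True)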
@beeline.traced("update_denormalizations")
def update_denormalizations(self):
reports = (
self.reports.all()
.exclude(soft_deleted=True)
.prefetch_related("availability_tags")
.order_by("-created_at")
)
try:
dn_latest_report = [r for r in reports if not r.is_pending_review][0]
except IndexError:
dn_latest_report = None
try:
dn_latest_report_including_pending = reports[0]
except IndexError:
dn_latest_report_including_pending = None
dn_latest_yes_reports = [
r
for r in reports
if not r.is_pending_review
and any(t for t in r.availability_tags.all() if t.group == "yes")
]
dn_yes_report_count = len(dn_latest_yes_reports)
if dn_latest_yes_reports:
dn_latest_yes_report = dn_latest_yes_reports[0]
else:
dn_latest_yes_report = None
dn_latest_skip_reports = [
r
for r in reports
if not r.is_pending_review
and any(t for t in r.availability_tags.all() if t.group == "skip")
]
dn_skip_report_count = len(dn_latest_skip_reports)
if dn_latest_skip_reports:
dn_latest_skip_report = dn_latest_skip_reports[0]
else:
dn_latest_skip_report = None
dn_latest_non_skip_reports = [
r
for r in reports
if not r.is_pending_review
and not any(t for t in r.availability_tags.all() if t.group == "skip")
]
if dn_latest_non_skip_reports:
dn_latest_non_skip_report = dn_latest_non_skip_reports[0]
else:
dn_latest_non_skip_report = None
# Has anything changed?
def pk_or_none(record):
if record is None:
return None
return record.pk
if (
self.dn_latest_report_id != pk_or_none(dn_latest_report)
or self.dn_latest_report_including_pending_id
!= pk_or_none(dn_latest_report_including_pending)
or self.dn_latest_yes_report_id != pk_or_none(dn_latest_yes_report)
or self.dn_latest_skip_report_id != pk_or_none(dn_latest_skip_report)
or self.dn_latest_non_skip_report_id
!= pk_or_none(dn_latest_non_skip_report)
or self.dn_skip_report_count != dn_skip_report_count
or self.dn_yes_report_count != dn_yes_report_count
):
beeline.add_context({"updates": True})
self.dn_latest_report = dn_latest_report
self.dn_latest_report_including_pending = dn_latest_report_including_pending
self.dn_latest_yes_report = dn_latest_yes_report
self.dn_latest_skip_report = dn_latest_skip_report
self.dn_latest_non_skip_report = dn_latest_non_skip_report
self.dn_skip_report_count = dn_skip_report_count
self.dn_yes_report_count = dn_yes_report_count
self.save(
update_fields=(
"dn_latest_report",
"dn_latest_report_including_pending",
"dn_latest_yes_report",
"dn_latest_skip_report",
"dn_latest_non_skip_report",
"dn_skip_report_count",
"dn_yes_report_count",
)
)
else:
beeline.add_context({"updates": False})
def save(self, *args, **kwargs):
# Point is derived from latitude/longitude
if self.longitude and self.latitude:
self.point = Point(float(self.longitude), float(self.latitude), srid=4326)
else:
self.point = None
set_public_id_later = False
if (not self.public_id) and self.airtable_id:
self.public_id = self.airtable_id
elif not self.public_id:
set_public_id_later = True
self.public_id = "tmp:{}".format(uuid.uuid4())
super().save(*args, **kwargs)
if set_public_id_later:
self.public_id = self.pid
Location.objects.filter(pk=self.pk).update(public_id=self.pid)
# If we don't belong in the callable locations anymore, remove
# from the call request queue
if Location.valid_for_call().filter(pk=self.pk).count() == 0:
CallRequest.objects.filter(location_id=self.id, completed=False).delete()
class LocationReviewTag(models.Model):
tag = models.CharField(unique=True, max_length=64)
description = models.TextField(blank=True)
def __str__(self):
return self.tag
class LocationReviewNote(models.Model):
location = models.ForeignKey(
Location, related_name="location_review_notes", on_delete=models.PROTECT
)
author = models.ForeignKey(
"auth.User", related_name="location_review_notes", on_delete=models.PROTECT
)
created_at = models.DateTimeField(default=timezone.now)
note = models.TextField(blank=True)
tags = models.ManyToManyField(
LocationReviewTag,
related_name="location_review_notes",
blank=True,
)
def __str__(self):
return f"{self.author} review note on {self.location}"
class Reporter(models.Model):
"""
A reporter is a user.
There are two types of reporters:
- Auth0 users: these include reports made through our reporting apps, and SQL users who are authenticated through Auth0
- Airtable users: these are users who are authenticated through Airtable rather than Auth0.
"""
external_id = models.SlugField(unique=True, max_length=400)
name = CharTextField(null=True, blank=True)
display_name = CharTextField(
null=True,
blank=True,
help_text="If set this is displayed within VIAL in place of the Auth0 name",
)
email = CharTextField(null=True, blank=True)
auth0_role_names = CharTextField(null=True, blank=True)
user = models.ForeignKey(
"auth.User",
blank=True,
null=True,
related_name="reporters",
help_text="Corresponding user record for this reporter",
on_delete=models.PROTECT,
)
def __str__(self):
return self.display_name or self.name or self.external_id
class Meta:
db_table = "reporter"
def get_user(self):
# Populates self.user if it does not yet have a value, then returns it
if self.user:
return self.user
# A user may exist based on a `UserSocialAuth` record
assert self.external_id.startswith(
"auth0:"
), "Only auth0 reporters can be associated with Django users, not {}".format(
self.external_id
)
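        # external_id has the form "auth0:<uid>" (enforced by the assert
        # above); the bare uid is what UserSocialAuth stores.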
identifier = self.external_id[len("auth0:") :]
user_social_auth = UserSocialAuth.objects.filter(uid=identifier).first()
if not user_social_auth:
# Create user, associate it and return
username = "r{}".format(self.pk)
# Some users have their email address as their name
email = self.email
if not email and self.name and "@" in self.name:
email = self.name
if email and "@" in email:
username += "-" + email.split("@")[0]
user = User.objects.create(
username=username,
email=email or "",
first_name=self.name or "",
)
UserSocialAuth.objects.create(uid=identifier, provider="auth0", user=user)
self.user = user
else:
self.user = user_social_auth.user
self.save()
return self.user
@classmethod
def for_user(cls, user):
return user.reporters.first()
class AvailabilityTag(models.Model):
"""
A tag indicating the nature of availability at a vaccination site.
This might be:
- a restriction on general availability (no inventory available)
- a restriction on who may be vaccinated (65+ only)
- an expansion of availability (vaccinating essential workers)
This free-form tagging interface is meant to make it easy to add new entries to our ontology as we (frequently)
encounter new rules.
This is modelled as a separate table so that metadata can be easily added to the tags.
For example, the 'disabled' boolean is used to determine which tags should no longer be used, even as they exist in
historical data.
"""
name = CharTextField(unique=True)
slug = models.SlugField(null=True)
group = models.CharField(
max_length=10,
choices=(("yes", "yes"), ("no", "no"), ("skip", "skip"), ("other", "other")),
null=True,
)
notes = CharTextField(null=True, blank=True)
disabled = models.BooleanField(default=False)
previous_names = models.JSONField(
default=list,
help_text="Any previous names used for this tag, used for keeping import scripts working",
blank=True,
)
def __str__(self):
return self.name
class Meta:
db_table = "availability_tag"
ordering = ["-group", "name"]
class AppointmentTag(models.Model):
"""
A tag indicating whether an appointment is needed and, if so, how it should be scheduled (e.g., by phone, online, other).
This is modelled as a separate table so that metadata can be easily added to the tags.
For example, has_details indicates whether the appointment_details on the report should contain more information,
such as a URL.
"""
slug = models.SlugField(unique=True)
name = models.CharField(max_length=30, unique=True)
has_details = models.BooleanField(
default=False,
help_text="should the report refer to the appointment details. Unfortunately we can't enforce constraints across joins.",
)
def __str__(self):
return self.name
class Meta:
db_table = "appointment_tag"
class Report(models.Model):
"""
A report on the availability of the vaccine. Could be from a phone call, or a site visit, or reading a website.
"""
class ReportSource(models.TextChoices):
CALLER_APP = "ca", "Caller app"
DATA_CORRECTIONS = "dc", "Data corrections"
WEB_BANK = "wb", "Web banking"
location = models.ForeignKey(
Location,
related_name="reports",
on_delete=models.PROTECT,
help_text="a report must have a location",
)
is_pending_review = models.BooleanField(
default=False, help_text="Reports that are pending review by our QA team"
)
originally_pending_review = models.BooleanField(
null=True,
help_text="Reports that were originally flagged as pending review",
)
pending_review_because = CharTextField(
null=True, blank=True, help_text="Reason this was originally flagged for review"
)
claimed_by = models.ForeignKey(
"auth.User",
related_name="claimed_reports",
on_delete=models.PROTECT,
blank=True,
null=True,
help_text="QA reviewer who has claimed this report",
)
claimed_at = models.DateTimeField(
help_text="When the QA reviewer claimed this report",
blank=True,
null=True,
)
soft_deleted = models.BooleanField(
default=False,
help_text="we never delete rows from this table; all deletes are soft",
)
soft_deleted_because = CharTextField(null=True, blank=True)
report_source = models.CharField(
max_length=2,
choices=ReportSource.choices,
default=ReportSource.CALLER_APP,
)
appointment_tag = models.ForeignKey(
AppointmentTag,
related_name="reports",
on_delete=models.PROTECT,
help_text="a single appointment tag, indicating how appointments are made",
)
appointment_details = CharTextField(
null=True,
blank=True,
help_text="appointment details (e.g., a URL). Should not be used if the appointment_tag's has_details is false.",
)
public_notes = models.TextField(null=True, blank=True)
internal_notes = models.TextField(
null=True, blank=True, verbose_name="Private notes"
)
restriction_notes = models.TextField(null=True, blank=True)
vaccines_offered = models.JSONField(
null=True,
blank=True,
help_text="JSON array of strings representing vaccines on offer here",
)
website = CharTextField(
null=True, blank=True, help_text="Update for website information"
)
full_address = models.TextField(
null=True,
blank=True,
help_text="Update for the entire address, including city and zip code",
)
hours = models.TextField(
blank=True,
null=True,
help_text="Update for hours information",
)
planned_closure = models.DateField(
blank=True,
null=True,
        help_text='Date this site plans to stop operating, "planned_closure" in our API',
verbose_name="Last known event date",
)
reported_by = models.ForeignKey(
Reporter, related_name="reports", on_delete=models.PROTECT
)
created_at = models.DateTimeField(
default=timezone.now,
help_text="the time when the report was submitted. We will interpret this as a validity time",
)
call_request = models.ForeignKey(
"CallRequest",
null=True,
blank=True,
related_name="reports",
on_delete=models.SET_NULL,
help_text="the call request that this report was based on, if any.",
)
availability_tags = models.ManyToManyField(
AvailabilityTag,
related_name="reports",
db_table="call_report_availability_tag",
)
airtable_id = models.CharField(
max_length=20,
null=True,
unique=True,
help_text="Airtable record ID, if this has one",
)
airtable_json = models.JSONField(null=True, blank=True)
public_id = models.SlugField(
unique=True, help_text="ID that we expose outside of the application"
)
def created_at_utc(self):
tz = pytz.UTC
created_at_utc = timezone.localtime(self.created_at, tz)
return dateformat.format(created_at_utc, "jS M Y fA e")
def availability(self):
# Used by the admin list view
return ", ".join(t.name for t in self.availability_tags.all())
def based_on_call_request(self):
return self.call_request is not None
def full_appointment_details(self, location: Optional[Location] = None):
# We often call this from contexts where the report was
# prefetched off of a location, and fetching self.location
# would be another DB query within a tight loop; support
# passing it in as an extra arg.
if location is not None:
assert location.id == self.location_id
else:
location = self.location
# Do not access self.location below; use location instead.
if self.appointment_details:
return self.appointment_details
elif location.county and self.appointment_tag.slug == "county_website":
return location.county.vaccine_reservations_url
elif self.appointment_tag.slug == "myturn_ca_gov":
return "https://myturn.ca.gov/"
elif location.website:
return location.website
elif location.provider and location.provider.appointments_url:
return location.provider.appointments_url
return None
class Meta:
db_table = "report"
def __str__(self):
return "Call to {} by {} at {}".format(
self.location, self.reported_by, self.created_at
)
@property
def pid(self):
return "r" + pid.from_int(self.pk)
def save(self, *args, **kwargs):
set_public_id_later = False
if (not self.public_id) and self.airtable_id:
self.public_id = self.airtable_id
elif not self.public_id:
set_public_id_later = True
self.public_id = "tmp:{}".format(uuid.uuid4())
super().save(*args, **kwargs)
if set_public_id_later:
self.public_id = self.pid
Report.objects.filter(pk=self.pk).update(public_id=self.pid)
location = self.location
location.update_denormalizations()
# location.derive_availability_and_inventory(save=True)
# will not work here because the availability tags have not yet been saved
def delete(self, *args, **kwargs):
location = self.location
super().delete(*args, **kwargs)
location.update_denormalizations()
location.derive_availability_and_inventory(save=True)
class ReportReviewTag(models.Model):
tag = models.CharField(unique=True, max_length=64)
description = models.TextField(blank=True)
def __str__(self):
return self.tag
class ReportReviewNote(models.Model):
report = models.ForeignKey(
Report, related_name="review_notes", on_delete=models.PROTECT
)
author = models.ForeignKey(
"auth.User", related_name="review_notes", on_delete=models.PROTECT
)
created_at = models.DateTimeField(default=timezone.now)
note = models.TextField(blank=True)
tags = models.ManyToManyField(
ReportReviewTag,
related_name="review_notes",
blank=True,
)
def __str__(self):
return "{} review note on {}".format(self.author, self.report)
class EvaReport(models.Model):
"""
A report obtained by our robotic assistant Eva. Eva only gathers a subset of the data that we would normally gather.
"""
location = models.ForeignKey(
Location, related_name="eva_reports", on_delete=models.PROTECT
)
name_from_import = CharTextField(null=True, blank=True)
phone_number_from_import = CharTextField(null=True, blank=True)
has_vaccines = models.BooleanField()
hung_up = models.BooleanField()
valid_at = models.DateTimeField(
help_text="the time when Eva's report was made (or our best estimate"
)
uploaded_at = models.DateTimeField(
help_text="this is the time when we uploaded Eva's report. It might not even be on the same day that the report was filed"
)
airtable_id = models.CharField(
max_length=20,
null=True,
unique=True,
help_text="Airtable record ID, if this has one",
)
def __str__(self):
return "Eva call to {} at {}".format(self.location, self.valid_at)
class Meta:
db_table = "eva_report"
class CallRequestReason(models.Model):
short_reason = CharTextField(unique=True)
long_reason = models.TextField(null=True, blank=True)
def __str__(self):
return self.short_reason
class Meta:
db_table = "call_request_reason"
class CallRequest(models.Model):
"""
A request to make a phone call (i.e., an entry in the call queue).
This reifies the notion of "requesting a call" so that all of the call attempts can be tracked with full history.
For example, if a bug in an app has us call a location repeatedly, we have the full record of why those calls were made.
"""
class PriorityGroup(models.IntegerChoices):
CRITICAL_1 = 1, "1-critical"
IMPORTANT_2 = 2, "2-important"
NORMAL_3 = 3, "3-normal"
LOW_4 = 4, "4-low"
NOT_PRIORITIZED_99 = 99, "99-not_prioritized"
class TipType(models.TextChoices):
EVA = "eva_report", "Eva report"
SCOOBY = "scooby_report", "Scooby report"
DATA_CORRECTIONS = "data_corrections_report", "Data corrections report"
location = models.ForeignKey(
Location, related_name="call_requests", on_delete=models.PROTECT
)
created_at = models.DateTimeField(
help_text="the time the call request entered the queue.",
null=True,
blank=True,
default=timezone.now,
)
vesting_at = models.DateTimeField(
help_text="the time at which this call request is considered 'active'. For example, a call request made by a skip will have a future vesting time."
)
claimed_by = models.ForeignKey(
Reporter,
blank=True,
null=True,
related_name="call_requests_claimed",
on_delete=models.PROTECT,
help_text="if non-null, the reporter who has currently 'claimed' this request",
)
claimed_until = models.DateTimeField(
blank=True,
null=True,
help_text="if non-null, the time until which the report is considered claimed",
)
call_request_reason = models.ForeignKey(
CallRequestReason,
related_name="call_requests",
on_delete=models.PROTECT,
help_text="a tag indicating why the call was added to the queue",
)
completed = models.BooleanField(
default=False, help_text="Has this call been completed"
)
completed_at = models.DateTimeField(
blank=True, null=True, help_text="When this call was marked as completed"
)
priority_group = models.IntegerField(
choices=PriorityGroup.choices,
default=PriorityGroup.NOT_PRIORITIZED_99,
)
priority = models.IntegerField(
default=0,
db_index=True,
help_text="Priority within this priority group - higher number means higher priority",
)
tip_type = CharTextField(
choices=TipType.choices,
blank=True,
null=True,
help_text=" the type of tip that prompted this call request, if any",
)
tip_report = models.ForeignKey(
Report,
blank=True,
null=True,
related_name="prompted_call_requests",
on_delete=models.PROTECT,
help_text="the id of the report, if any that prompted this call request",
)
def __str__(self):
return "Call request to {} vesting at {}".format(self.location, self.vesting_at)
class Meta:
db_table = "call_request"
        # Group 1 comes before group 2 comes before group 3
        # Within those groups, higher priority scores come before lower
        # Finally we tie-break on ID, optimizing for most recently created first
        ordering = ("priority_group", "-priority", "-id")
constraints = [
models.UniqueConstraint(
name="unique_locations_in_queue",
fields=["location"],
condition=Q(completed=False),
)
]
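        # Illustration of the ordering above (derived from the fields, no new
        # logic): a group-1 request with priority 0 sorts ahead of a group-2
        # request with priority 100; within a group, priority 10 sorts ahead
        # of priority 0.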
@classmethod
def available_requests(
cls, qs: Optional[QuerySet[CallRequest]] = None
) -> QuerySet[CallRequest]:
if qs is None:
qs = cls.objects
now = timezone.now()
return qs.filter(
# Unclaimed
Q(claimed_until__isnull=True)
| Q(claimed_until__lte=now)
).filter(completed=False, vesting_at__lte=now)
@classmethod
@beeline.traced("insert")
def insert(
cls,
locations: QuerySet[Location],
reason: str,
limit: Optional[int] = 0,
**kwargs: Any,
) -> List[CallRequest]:
now = timezone.now()
reason_obj = CallRequestReason.objects.get_or_create(short_reason=reason)[0]
with transaction.atomic():
            # Lock the locations we want to insert, so that whether
            # they're valid to be in the queue doesn't change while
            # we insert them.
locations = (locations & Location.valid_for_call()).select_for_update(
of=["self"]
)
            # Now that we have a lock on them, we know any other
            # inserts of them (though not others) will block behind
            # that. Estimate how many duplicates we may have. We lock
            # them so our estimate is more accurate.
existing_call_requests = CallRequest.objects.filter(
location__in=locations, completed=False
).select_for_update()
            # Filter the duplicates out of the insert. Note that this
            # is mostly advisory, so we get the right-ish objects
            # from the bulk_create -- the `ignore_conflicts` on it
            # will enforce the uniqueness.
locations = locations.exclude(
id__in=existing_call_requests.values("location_id")
)
if limit:
locations = locations[0:limit]
args = {
"vesting_at": now,
"call_request_reason": reason_obj,
}
args.update(**kwargs)
# Do the insert, ignoring duplicates. bulk_create returns
# all rows, even ones whose insert failed because of
# conflicts; this _may_, on races, contain too many rows
# in the return value, so the returned list of "new"
# values is advisory.
return cls.objects.bulk_create(
[cls(location=location, **args) for location in locations],
ignore_conflicts=True,
)
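    # A hypothetical usage sketch of `insert` (the filter and values are
    # illustrative only):
    #
    #   CallRequest.insert(
    #       locations=Location.objects.filter(state__abbreviation="CA"),
    #       reason="Manual refill",
    #       limit=100,
    #   )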
@classmethod
@beeline.traced("get_call_request")
def get_call_request(
cls,
claim_for: Optional[Reporter] = None,
state: Optional[str] = None,
) -> Optional[CallRequest]:
        # First, ensure there are some items in the queue, in case it
        # has run dry. We backfill according to the state we're
        # looking for, which may affect which locations are in the
        # queue for people who are _not_ asking for a specific state.
cls.backfill_queue(state=state)
now = timezone.now()
available_requests = cls.available_requests()
if state is not None:
available_requests = available_requests.filter(
location__state__abbreviation=state
)
# We need to lock the record we select so we can update
# it marking that we have claimed it
with transaction.atomic():
call_requests = available_requests.select_for_update()[:1]
try:
call_request: Optional[CallRequest] = call_requests[0]
except IndexError:
call_request = None
if call_request is not None and claim_for:
call_request.claimed_by = claim_for
call_request.claimed_until = now + timedelta(
minutes=settings.CLAIM_LOCK_MINUTES
)
call_request.save()
return call_request
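    # A hypothetical usage sketch of `get_call_request` (the reporter is
    # illustrative only):
    #
    #   call_request = CallRequest.get_call_request(claim_for=reporter, state="CA")
    #   if call_request is not None:
    #       ...  # claimed for the next settings.CLAIM_LOCK_MINUTES minutes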
@classmethod
@beeline.traced("mark_completed_by")
def mark_completed_by(
cls, report: Report, enqueue_again_at: Optional[datetime] = None
) -> None:
# Make sure the call request doesn't go away (e.g. from a bulk
# load) while we update it
with transaction.atomic():
# There can only be _one_ incomplete report for a
# location; find it and lock it.
existing_call_request = (
report.location.call_requests.filter(completed=False)
.select_for_update()
.first()
)
            # The call request may no longer exist -- either it never
            # did, because this was web-banked, or someone else also
            # fulfilled it, or a queue update happened between when we
            # took it and completed it, removing it.
if existing_call_request is not None:
# If this was based on a call request, mark it as
# completed and associate it with the report
existing_call_request.completed = True
existing_call_request.completed_at = timezone.now()
existing_call_request.save()
report.call_request = existing_call_request
report.save()
# If we're not re-scheduling, we're done!
if enqueue_again_at is None:
return
priority_in_group = 0
priority_group = 99
if existing_call_request:
# Priority group should match that of the original call
# request, BUT we use the separate priority integer to
# drop them to the very end of the queue within that
# priority group. "end" here means one less than the
# _smallest_ priority within the group, since we take from
# high to low priority within a group.
priority_group = existing_call_request.priority_group
priority_in_group = (
cls.objects.filter(
priority_group=existing_call_request.priority_group
).aggregate(min=Min("priority"))["min"]
- 1
)
cls.insert(
locations=Location.objects.filter(id=report.location.id),
reason="Previously skipped",
vesting_at=enqueue_again_at,
tip_type=cls.TipType.SCOOBY,
tip_report=report,
priority_group=priority_group,
priority=priority_in_group,
)
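    # Worked example of the re-enqueue priority above: if the original request
    # was in a priority group whose existing priorities are {5, 0, -2}, the
    # re-inserted request gets priority -3, i.e. the back of that group.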
@classmethod
@beeline.traced("backfill_queue")
def backfill_queue(
cls, minimum: Optional[int] = None, state: Optional[str] = None
) -> None:
"""This is a last-resort refill of the queue.
It should only happen when we have exhausted all things
explicitly placed in the queue.
"""
if minimum is None:
minimum = settings.MIN_CALL_REQUEST_QUEUE_ITEMS
num_to_create = max(0, minimum - cls.available_requests().count())
beeline.add_context({"count": num_to_create})
if num_to_create == 0:
return
# num_to_create may be stale by now, but worst case if we race
# we'll insert more locations than necessary.
try:
with transaction.atomic():
# Only consider existing locations that are valid for
# calling that are not currently queued in _any_ form
# (even if that's claimed or not-yet-vested)
location_options = Location.valid_for_call().exclude(
id__in=cls.objects.filter(completed=False).values("location_id")
)
if state is not None:
location_options = location_options.filter(
state__abbreviation=state
)
# Add any locations that have never been called
created_call_requests = cls.insert(
location_options.filter(
dn_latest_report_including_pending__isnull=True
),
reason="Automatic backfill",
limit=num_to_create,
)
num_to_create -= len(created_call_requests)
if num_to_create <= 0:
return
# Then add locations by longest-ago
cls.insert(
location_options.order_by(
"dn_latest_report_including_pending__created_at"
),
reason="Automatic backfill",
limit=num_to_create,
)
except IntegrityError:
# We tried to add a location that was already in the
# queue, probably via a race condition! Just log, and
# carry on.
sentry_sdk.capture_exception()
class PublishedReport(models.Model):
"""
NOT CURRENTLY USED
See https://github.com/CAVaccineInventory/vial/issues/179#issuecomment-815353624
A report that should be published to our website and API feed.
This report is generally derived from one or more other report types, and might be created automatically or manually.
If a report is edited for publication, the published_report should be edited to maintain the integrity of our records.
This report represents the (possibly implicit) editorial aspects of our data pipeline.
The relationship between published reports and the various report types may be many-to-many:
a single report may trigger many published reports, and each published report may draw on several data sources.
"""
location = models.ForeignKey(
Location, related_name="published_reports", on_delete=models.PROTECT
)
appointment_tag = models.ForeignKey(
AppointmentTag,
related_name="published_reports",
on_delete=models.PROTECT,
help_text="a single appointment tag, indicating how appointments are made",
)
appointment_details = models.TextField(
blank=True,
null=True,
help_text="appointment details (e.g., a URL). Should not be used if the appointment_tag's has_details is false.",
)
public_notes = models.TextField(blank=True, null=True)
reported_by = models.ForeignKey(
Reporter, related_name="published_reports", on_delete=models.PROTECT
)
valid_at = models.DateTimeField(
help_text='the time that determines this report\'s time priority. Generally, only the latest report is displayed. This determines the "freshness" of the published report.'
)
created_at = models.DateTimeField(
help_text="the time at which this report is created (which may be different from the time at which it is valid)"
)
availability_tags = models.ManyToManyField(
AvailabilityTag,
related_name="published_reports",
db_table="published_report_availability_tag",
)
reports = models.ManyToManyField(
Report,
related_name="published_reports",
db_table="published_report_reports",
)
eva_reports = models.ManyToManyField(
EvaReport,
related_name="published_reports",
db_table="published_report_eva_report",
)
def __str__(self):
return "Published report for {} valid at {}".format(
self.location, self.valid_at
)
class Meta:
db_table = "published_report"
class SourceLocation(gis_models.Model):
"Source locations are unmodified records imported from other sources"
import_run = models.ForeignKey(
ImportRun,
blank=True,
null=True,
related_name="imported_source_locations",
on_delete=models.SET_NULL,
)
source_uid = CharTextField(
unique=True,
help_text="The ID within that other source, UUID etc or whatever they have - globally unique because it includes a prefix which is a copy of the source_name",
)
source_name = CharTextField(help_text="e.g. vaccinespotter")
content_hash = CharTextField(
blank=True,
null=True,
help_text="Hash of the content JSON, to allow our importer clients to avoid sending data we already have",
)
name = CharTextField(null=True, blank=True)
latitude = models.DecimalField(
max_digits=9, decimal_places=5, null=True, blank=True
)
longitude = models.DecimalField(
max_digits=9, decimal_places=5, null=True, blank=True
)
point = gis_models.GeometryField(
geography=True, blank=True, null=True, spatial_index=True
)
import_json = models.JSONField(
null=True,
blank=True,
help_text="Big bag of JSON with original data",
)
matched_location = models.ForeignKey(
Location,
blank=True,
null=True,
related_name="matched_source_locations",
on_delete=models.SET_NULL,
)
created_at = models.DateTimeField(default=timezone.now)
last_imported_at = models.DateTimeField(
blank=True, null=True, help_text="When this source location was last imported"
)
def save(self, *args, **kwargs):
if self.longitude and self.latitude:
self.point = Point(float(self.longitude), float(self.latitude), srid=4326)
else:
self.point = None
super().save(*args, **kwargs)
def __str__(self):
bits = [self.source_uid]
if self.name:
bits.extend((" - ", self.name))
return "".join(bits)
@classmethod
def __get_validators__(cls):
yield cls.pydantic_convert
@classmethod
def pydantic_convert(cls, id: str) -> SourceLocation:
if str(id).isdigit():
kwargs = {"pk": id}
else:
kwargs = {"source_uid": id}
try:
obj = cls.objects.get(**kwargs)
except cls.DoesNotExist:
raise ValueError("SourceLocation '{}' does not exist".format(id))
return obj
@property
def vaccines_offered(self):
try:
inventory = self.import_json["inventory"]
except KeyError:
return None
inventory_mapping = {
"moderna": "Moderna",
"pfizer_biontech": "Pfizer",
"johnson_johnson_janssen": "Johnson & Johnson",
"oxford_astrazeneca": "Astrazeneca",
}
in_stock = [
stock["vaccine"]
for stock in inventory
if stock.get("supply_level") != "out_of_stock"
]
        return sorted(inventory_mapping[v] for v in in_stock)
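    # Example (derived from the mapping above): an import_json inventory of
    # [{"vaccine": "moderna", "supply_level": "in_stock"},
    #  {"vaccine": "pfizer_biontech", "supply_level": "out_of_stock"}]
    # yields ["Moderna"].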
class Meta:
db_table = "source_location"
indexes = [models.Index(fields=["matched_location"])]
class SourceLocationMatchHistory(models.Model):
created_at = models.DateTimeField(default=timezone.now)
api_key = models.ForeignKey(
"api.ApiKey",
null=True,
blank=True,
related_name="source_location_match_history",
on_delete=models.SET_NULL,
)
reporter = models.ForeignKey(
Reporter,
null=True,
blank=True,
related_name="source_location_match_history",
on_delete=models.PROTECT,
)
source_location = models.ForeignKey(
SourceLocation,
related_name="source_location_match_history",
on_delete=models.PROTECT,
)
old_match_location = models.ForeignKey(
Location,
blank=True,
null=True,
related_name="+",
on_delete=models.PROTECT,
)
new_match_location = models.ForeignKey(
Location,
blank=True,
null=True,
related_name="source_location_match_history",
on_delete=models.PROTECT,
)
def __str__(self):
return "{} set source_location {} to match {} on {}".format(
self.reporter or self.api_key,
self.source_location,
self.new_match_location,
self.created_at,
)
class Meta:
db_table = "source_location_match_history"
verbose_name_plural = "Source location match history"
class ConcordanceIdentifier(models.Model):
created_at = models.DateTimeField(default=timezone.now)
authority = models.CharField(max_length=32)
identifier = models.CharField(max_length=128)
locations = models.ManyToManyField(
Location,
related_name="concordances",
blank=True,
db_table="concordance_location",
)
source_locations = models.ManyToManyField(
SourceLocation,
related_name="concordances",
blank=True,
db_table="concordance_source_location",
)
class Meta:
unique_together = ("authority", "identifier")
db_table = "concordance_identifier"
def __str__(self):
return "{}:{}".format(self.authority, self.identifier)
@classmethod
def for_idref(cls, idref):
authority, identifier = idref.split(":", 1)
return cls.objects.get_or_create(authority=authority, identifier=identifier)[0]
@classmethod
def filter_for_idrefs(cls, idrefs):
        # Returns a Q() object for use with .filter(), e.g.
        # Q(authority="cvs", identifier="11344") | Q(authority="cvs", identifier="11345")
pairs = [idref.split(":", 1) for idref in idrefs]
return reduce(or_, (Q(authority=p[0], identifier=p[1]) for p in pairs))
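    # A usage sketch, following the example in the comment above:
    #
    #   q = ConcordanceIdentifier.filter_for_idrefs(["cvs:11344", "cvs:11345"])
    #   ConcordanceIdentifier.objects.filter(q)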
ConcordanceIdentifier.locations.through.__str__ = lambda self: "{} on {}".format( # type: ignore[assignment]
self.concordanceidentifier, self.location.public_id # type: ignore[attr-defined]
)
ConcordanceIdentifier.source_locations.through.__str__ = lambda self: "{} on source location {}".format( # type: ignore[assignment]
self.concordanceidentifier, self.sourcelocation_id # type: ignore[attr-defined]
)
class TaskType(models.Model):
"Types of task that we present to our volunteers"
name = CharTextField(unique=True)
def __str__(self):
return self.name
class Meta:
db_table = "task_type"
@classmethod
def __get_validators__(cls):
yield cls.pydantic_convert
@classmethod
def pydantic_convert(cls, name: str) -> TaskType:
try:
return cls.objects.get(name=name)
except cls.DoesNotExist:
raise ValueError("TaskType '{}' does not exist".format(name))
class Task(models.Model):
"A task for our volunteers"
created_at = models.DateTimeField(default=timezone.now)
created_by = models.ForeignKey(
"auth.User",
related_name="created_tasks",
on_delete=models.PROTECT,
blank=True,
null=True,
)
location = models.ForeignKey(
Location, related_name="tasks", on_delete=models.PROTECT
)
other_location = models.ForeignKey(
Location, related_name="+", blank=True, null=True, on_delete=models.SET_NULL
)
task_type = models.ForeignKey(
TaskType, related_name="tasks", on_delete=models.PROTECT
)
details = models.JSONField(
null=True,
blank=True,
help_text="Task details",
)
resolved_at = models.DateTimeField(blank=True, null=True)
resolved_by = models.ForeignKey(
"auth.User",
blank=True,
null=True,
related_name="resolved_tasks",
on_delete=models.PROTECT,
)
resolution = models.JSONField(
null=True,
blank=True,
help_text="Details from when this task was resolved",
)
def __str__(self):
return "{} task against {}{}".format(
self.task_type, self.location, " - resolved" if self.resolved_at else ""
)
class Meta:
db_table = "task"
@classmethod
def __get_validators__(cls):
yield cls.pydantic_convert
@classmethod
def pydantic_convert(cls, id: str) -> Task:
try:
return cls.objects.get(pk=id)
except cls.DoesNotExist:
raise ValueError("Task {} does not exist".format(id))
class CompletedLocationMerge(models.Model):
winner_location = models.ForeignKey(
Location, related_name="+", on_delete=models.PROTECT
)
loser_location = models.ForeignKey(
Location, related_name="+", on_delete=models.PROTECT
)
created_at = models.DateTimeField(default=timezone.now)
created_by = models.ForeignKey(
"auth.User", related_name="completed_location_merges", on_delete=models.PROTECT
)
task = models.ForeignKey(
Task,
null=True,
blank=True,
related_name="completed_location_merges",
on_delete=models.PROTECT,
)
details = models.JSONField(
null=True,
blank=True,
help_text="Detailed information about the merge",
)
def __str__(self):
return "winner={}, loser={}, merged by {} at {}".format(
self.winner_location, self.loser_location, self.created_by, self.created_at
)
class Meta:
db_table = "completed_location_merge"
# Signals
@receiver(m2m_changed, sender=Report.availability_tags.through)
def denormalize_location(sender, instance, action, **kwargs):
if action in ("post_add", "post_remove", "post_clear"):
instance.location.update_denormalizations()
@receiver(m2m_changed, sender=ReportReviewNote.tags.through)
def approval_review_report_denormalize_location(sender, instance, action, **kwargs):
if action == "post_add" and len(instance.tags.filter(tag="Approved")):
instance.report.is_pending_review = False
instance.report.save()
# We don't _un-approve_ if the tag is removed because the flag can
# _also_ be just generally unset manually. Imagine:
# - report is flagged on creation
# - is_pending_review unset by unchecking the box
# - approval is made
# - approval is deleted
@receiver(m2m_changed, sender=LocationReviewNote.tags.through)
def approval_review_location_denormalize_location(sender, instance, action, **kwargs):
if action == "post_add" and len(instance.tags.filter(tag="Approved")):
instance.location.is_pending_review = False
instance.location.save()
# We don't _un-approve_ if the tag is removed because the flag can
# _also_ be just generally unset manually. Imagine:
# - location is flagged on creation
# - is_pending_review unset by unchecking the box
# - approval is made
# - approval is deleted
| 36.654997 | 179 | 0.648428 | 8,496 | 72,247 | 5.278131 | 0.110993 | 0.02589 | 0.024931 | 0.031465 | 0.53277 | 0.46054 | 0.408737 | 0.352273 | 0.306558 | 0.264456 | 0 | 0.002853 | 0.272247 | 72,247 | 1,970 | 180 | 36.673604 | 0.850013 | 0.128684 | 0 | 0.381646 | 0 | 0.007595 | 0.14085 | 0.017716 | 0 | 0 | 0 | 0 | 0.001266 | 1 | 0.041139 | false | 0 | 0.036076 | 0.020253 | 0.294937 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e88662b9dcdea887377483da9b99fbeb0a1e2ce | 1,650 | py | Python | src/train.py | SunbirdAI/SunBERT | b73ccf5115a0303b59b27df602bfe064c8ebd6fa | [
"MIT"
] | null | null | null | src/train.py | SunbirdAI/SunBERT | b73ccf5115a0303b59b27df602bfe064c8ebd6fa | [
"MIT"
] | 1 | 2021-02-24T17:11:26.000Z | 2021-02-24T17:11:31.000Z | src/train.py | SunbirdAI/SunBERT | b73ccf5115a0303b59b27df602bfe064c8ebd6fa | [
"MIT"
] | 1 | 2021-12-10T12:27:17.000Z | 2021-12-10T12:27:17.000Z | import pandas as pd
from dataset import create_dataloader
from engine import train_fn
from model import SunBERT
from transformers import AdamW
from torch import nn
def run():
MAX_LEN = 66
BATCH_SIZE = 32
EPOCHS = 5
device = torch.device("cuda:0" if torch.cuda.is_available() else "Couldn't find GPU device")
# BERT Specific Pre-processing
bert_cased = 'bert-base-cased'
tokenizer = BertTokenizer.from_pretrained(bert_cased)
cls_model = SunBERT(3) # replace hard-coded number of categories
cls_model = cls_model.to(device)
# Put the above lines in a config file
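    # A minimal sketch of what such a config could look like (names here are
    # illustrative, not part of the original code):
    #
    #   config = {
    #       "max_len": 66,
    #       "batch_size": 32,
    #       "epochs": 5,
    #       "model_name": "bert-base-cased",
    #       "num_classes": 3,
    #   }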
dfx = pd.read_csv("dataset.csv")
dfx.category = dfx.category.apply(lambda x: 0 if x=="Organic" else 1 if x=="Editorial" else 2) # replace hard-coded categories
random_seed = 42
df_train, df_test = train_test_split(dfx, test_size=0.15, random_state = random_seed)
df_val, df_test = train_test_split(df_test, test_size=0.5, random_state = random_seed)
dataloader_train = create_dataloader(df_train, tokenizer, MAX_LEN, BATCH_SIZE)
dataloader_test = create_dataloader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)
dataloader_val = create_dataloader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)
optimizer = AdamW(cls_model.parameters(), lr=2e-5, correct_bias=False)
total_steps = len(dataloader_train) * EPOCHS
ln_scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
loss_func = nn.CrossEntropyLoss().to(device)
train_fn(cls_model, dataloader_train, loss_func, optimizer, device, ln_scheduler, len(df_train))
if __name__ == '__main__':
run()
| 38.372093 | 130 | 0.746667 | 247 | 1,650 | 4.696356 | 0.433198 | 0.034483 | 0.046552 | 0.051724 | 0.113793 | 0.058621 | 0 | 0 | 0 | 0 | 0 | 0.014493 | 0.163636 | 1,650 | 42 | 131 | 39.285714 | 0.826087 | 0.081818 | 0 | 0 | 0 | 0 | 0.05298 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.2 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e89e5918f3ccc62e2c9d037ec970ff4354a9c3b | 4,430 | py | Python | COVID-19_Old.py | JustHarsh/Drug_Discovery-COVID-19 | 0dd941797e6a928f70f2ea706f584259ca24478f | [
"MIT"
] | 5 | 2020-05-23T13:10:51.000Z | 2021-09-07T13:56:38.000Z | COVID-19_Old.py | JustHarsh/Drug_Discovery-COVID-19 | 0dd941797e6a928f70f2ea706f584259ca24478f | [
"MIT"
] | null | null | null | COVID-19_Old.py | JustHarsh/Drug_Discovery-COVID-19 | 0dd941797e6a928f70f2ea706f584259ca24478f | [
"MIT"
] | null | null | null | # Importing all essential libraries
import os
import random
import sys
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
from keras.callbacks import LambdaCallback
from keras.layers import Flatten
#-------------------------------------------------------------------#
# Change working directory
# os.chdir(desired_working_directory)
# print(os.getcwd())
#-------------------------------------------------------------------#
# Reading dataset
dataset = pd.read_csv('dataset_for_training.csv', sep=',')
data = open('dataset.csv', 'r').read()
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('data has %d, %d unique' % (data_size, vocab_size)) # Identifying all unique characters
#-------------------------------------------------------------------#
# Giving a key for all the unique characters
char_indices = {ch:i for i, ch in enumerate(chars)}
indices_char = {i: ch for i, ch in enumerate(chars)}
print(char_indices)
#-------------------------------------------------------------------#
# Vectorizing inputs to pass it in the model
maxlen = 60
step = 3
smiles = []
corona_smiles = []
for i in range(0, len(data) - maxlen, step):
smiles.append(data[i: i + maxlen])
corona_smiles.append(data[i + maxlen])
print('nb sequences:', len(smiles))
print('Vectorization...')
x = np.zeros((len(smiles), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(smiles), len(chars)), dtype=np.bool)
for i, smile in enumerate(smiles):
    for t, char in enumerate(smile):
        x[i, t, char_indices[char]] = 1
y[i, char_indices[corona_smiles[i]]] = 1
#-------------------------------------------------------------------#
# BUILDING THE MODEL
model = Sequential()
model.add(LSTM(units = 128, return_sequences = True, input_shape=(maxlen, len(chars))))
model.add(Dropout(0.3))
model.add(LSTM(units = 128, return_sequences = True, input_shape=(maxlen, len(chars))))
model.add(Dropout(0.3))
model.add(LSTM(units = 128, return_sequences = True, input_shape=(maxlen, len(chars))))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(len(chars), activation='softmax'))
optimizer = RMSprop(learning_rate=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
model.summary()
#-------------------------------------------------------------------#
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
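# For intuition: temperature < 1 sharpens the distribution and > 1 flattens it.
# e.g. preds = [0.6, 0.3, 0.1] at temperature 0.5 renormalizes p ** (1 / T)
# to roughly [0.78, 0.20, 0.02].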
def on_epoch_end(epoch, _):
# Function invoked at end of each epoch. Prints generated text.
print()
print('----- Generating text after Epoch: %d' % epoch)
    start_index = random.randint(0, len(data) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print('----- diversity:', diversity)
generated = ''
        sentence = data[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x_pred = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.
preds = model.predict(x_pred, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
#-------------------------------------------------------------------#
model.fit(x, y,
batch_size=256,
epochs=100,
callbacks=[print_callback])
#-------------------------------------------------------------------#
model.save_weights("COVID_19.hdf5")
print('Saved model')
#-------------------------------------------------------------------#
# load weights into new model
# loaded_model.load_weights("COVID_19.hdf5")
# print("Loaded model")
| 29.731544 | 93 | 0.584876 | 545 | 4,430 | 4.645872 | 0.321101 | 0.028436 | 0.027646 | 0.033175 | 0.14139 | 0.097946 | 0.097946 | 0.097946 | 0.097946 | 0.097946 | 0 | 0.015646 | 0.163205 | 4,430 | 148 | 94 | 29.932432 | 0.667386 | 0.245598 | 0 | 0.117647 | 0 | 0 | 0.066485 | 0.014506 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023529 | false | 0 | 0.164706 | 0 | 0.2 | 0.141176 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e8c90529784c32fc97eee269bf1f19fcbbdd280 | 1,494 | py | Python | tests/service/test_base.py | flyinactor91/AVWX-Engine | 0d3ce2c6e962d2a3ec9db711caf9d1c94658fa80 | [
"MIT"
] | 30 | 2015-09-08T20:38:41.000Z | 2019-03-10T07:10:47.000Z | tests/service/test_base.py | sthagen/avwx-engine | af235b9d26e5495f04076ed5499cf8cd131d4efc | [
"MIT"
] | 5 | 2015-08-12T15:50:07.000Z | 2019-04-16T00:42:12.000Z | tests/service/test_base.py | sthagen/avwx-engine | af235b9d26e5495f04076ed5499cf8cd131d4efc | [
"MIT"
] | 11 | 2016-01-17T10:10:29.000Z | 2019-01-13T17:55:36.000Z | """
Service API Tests
"""
# pylint: disable=missing-class-docstring
# stdlib
from typing import Tuple
import unittest
# module
from avwx import service
BASE_ATTRS = ("url", "report_type", "_valid_types")
class BaseTestService(unittest.IsolatedAsyncioTestCase):
serv: service.Service
service_class = service.Service
report_type: str = ""
    stations: Tuple[str, ...] = tuple()
    required_attrs: Tuple[str, ...] = tuple()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.serv = self.service_class(self.report_type)
self.required_attrs = BASE_ATTRS + self.required_attrs
def test_init(self):
"""Tests that the Service class is initialized properly"""
for attr in self.required_attrs:
self.assertTrue(hasattr(self.serv, attr))
self.assertEqual(self.serv.report_type, self.report_type)
def test_fetch(self):
"""Tests that reports are fetched from service"""
try:
station = self.stations[0]
except IndexError:
return
report = self.serv.fetch(station)
self.assertIsInstance(report, str)
self.assertTrue(station in report)
async def test_async_fetch(self):
"""Tests that reports are fetched from async service"""
for station in self.stations:
report = await self.serv.async_fetch(station)
self.assertIsInstance(report, str)
self.assertTrue(station in report)
| 28.730769 | 66 | 0.660643 | 176 | 1,494 | 5.448864 | 0.340909 | 0.052138 | 0.05318 | 0.037539 | 0.22732 | 0.22732 | 0.22732 | 0.22732 | 0.145985 | 0.145985 | 0 | 0.00088 | 0.238956 | 1,494 | 51 | 67 | 29.294118 | 0.842568 | 0.113119 | 0 | 0.129032 | 0 | 0 | 0.0208 | 0 | 0 | 0 | 0 | 0 | 0.193548 | 1 | 0.096774 | false | 0 | 0.096774 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e8c94240a87d0206591689e63849ac4c1ef22b0 | 1,267 | py | Python | src/main.py | DanielWieczorek/FancyReadmeBuilder | a0a91ec60a673be97d7a942ded5e1da659dda885 | [
"MIT"
] | null | null | null | src/main.py | DanielWieczorek/FancyReadmeBuilder | a0a91ec60a673be97d7a942ded5e1da659dda885 | [
"MIT"
] | null | null | null | src/main.py | DanielWieczorek/FancyReadmeBuilder | a0a91ec60a673be97d7a942ded5e1da659dda885 | [
"MIT"
] | null | null | null | """
This is a nifty little tool to create really cool looking Readme files.
Its essentially a templating tool for such. The initial steps to create a template are the following:
- create a super cool ANSI Art design for your readme.
- create a template that uses this image and defines fields where text can be inserted.
After that you can create fill this template with life with another file defining the actions, e.g. inserting text.
Parameters:
templatedir:
desc: this parameter defines where to load the templates from
type: str
is_required: True
actions:
desc: path to the file containing the actions
type: str
is_required: True
template:
desc: name of the template to use
type: str
is_required: True
"""
from src.business.FancyReadmeBuilder import FancyReadmeBuilder
from src.ui.TerminalParser import TerminalParser
__author__ = 'DWI'
def main():
parser = TerminalParser()
args = parser.get_values(__doc__)
readme_builder = FancyReadmeBuilder.get_instance()
print(args.templatedir)
readme_builder.load_templates(args.templatedir)
print(args.actions)
print(args.template)
rendered = readme_builder.apply_actions_and_render(args.actions, args.template)
print(rendered)
if __name__ == "__main__":
main()
| 27.543478 | 115 | 0.767956 | 179 | 1,267 | 5.27933 | 0.502793 | 0.022222 | 0.028571 | 0.053968 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.168903 | 1,267 | 45 | 116 | 28.155556 | 0.897436 | 0.562747 | 0 | 0 | 0 | 0 | 0.020221 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.2 | 0.266667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e91bffc137dacb98dd65fdc54a68e7adb4518df | 3,080 | py | Python | bin/soundboard_scripts/soundboard-preprocess.py | andrewmichaud/scripts | 0e79a3454ec0116a0e3ed2902b79494939050937 | [
"0BSD"
] | 1 | 2016-10-13T18:36:59.000Z | 2016-10-13T18:36:59.000Z | bin/soundboard_scripts/soundboard-preprocess.py | andrewmichaud/scripts | 0e79a3454ec0116a0e3ed2902b79494939050937 | [
"0BSD"
] | null | null | null | bin/soundboard_scripts/soundboard-preprocess.py | andrewmichaud/scripts | 0e79a3454ec0116a0e3ed2902b79494939050937 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python3
import json
# If you change this be sure to change it everywhere in the main ahk file too.
bank_size = 8
input_file = "soundboard_data.json"
output_file = "soundboard_data.ahk"
def indent(indent_level):
return (indent_level * 4) * " "
def main():
with open(input_file) as f:
data = json.load(f)
default = data["default"]
if str(data["version"]) == "1.0.0":
flat_banks = data["sounds"]
flat_descriptions = data["bank_descriptions"]
# 2.0.0+
else:
# split multi-list data into single list for easier processing.
flat_banks = []
flat_descriptions = []
for bank in data["banks"]:
# fix slot ids as well.
fixed_slots = []
bi = bank["bank_id"]
for slot in bank["slots"]:
rel_id = slot["slot"]
slot["slot"] = (bank_size * (bi-1)) + rel_id
fixed_slots.append(slot)
flat_banks.extend(fixed_slots)
flat_descriptions.append(bank["description"])
# format for function injection into autohotkey.
output = []
indent_level = 0
output.append("PlaySound(idx) {")
indent_level += 1
output.append(f"{indent(indent_level)} Switch (idx) " + "{")
indent_level += 1
# do bank 1 description first and then do others on mod rollover.
old_bank_idx = 0
bank_idx = 0
output.append("")
output.append(f"{indent(indent_level)}; {flat_descriptions[bank_idx]}")
# I want to permit out-of-order definitions in the JSON,
# as well as leaving gaps in the soundboard for later ideas.
# No promises if you add multiple items with the same slot key.
for sound in sorted(flat_banks, key=lambda x: x["slot"]):
slot = sound["slot"]
old_bank_idx = bank_idx
bank_idx = slot // bank_size
if (old_bank_idx != bank_idx) and (bank_idx < len(flat_descriptions)):
output.append("")
output.append(f"{indent(indent_level)}; {flat_descriptions[bank_idx]}")
output.append("")
output.append(f"{indent(indent_level)}; {sound['description']}")
output.append(f"{indent(indent_level)}case {slot}:")
indent_level += 1
output.append(f"{indent(indent_level)}SoundPlay, {sound['filename']}")
output.append(f"{indent(indent_level)}return")
indent_level -= 1
output.append("")
output.append(f"{indent(indent_level)}; Default!")
output.append(f"{indent(indent_level)}default:")
indent_level += 1
output.append(f"{indent(indent_level)}SoundPlay, {default['filename']}")
output.append(f"{indent(indent_level)}return")
indent_level -= 1
output.append("")
output.append(f"{indent(indent_level)}" + "}")
output.append(f"{indent(indent_level)}return")
indent_level -= 1
output.append("")
output.append(f"{indent(indent_level)}" + "}")
out = "\n".join(output) + "\n"
with open(output_file, "w") as f:
f.write(out)
if __name__ == "__main__":
main()
| 29.615385 | 87 | 0.612662 | 403 | 3,080 | 4.506203 | 0.287841 | 0.145374 | 0.140419 | 0.146476 | 0.409692 | 0.390969 | 0.355727 | 0.331498 | 0.30837 | 0.285242 | 0 | 0.009063 | 0.247727 | 3,080 | 103 | 88 | 29.902913 | 0.774709 | 0.154545 | 0 | 0.298507 | 0 | 0 | 0.260601 | 0.178489 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.014925 | 0.014925 | 0.059701 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e93915ecdb2cdb619894439c631119916b71978 | 1,822 | py | Python | 1047.remove-all-adjacent-duplicates-in-string.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 1047.remove-all-adjacent-duplicates-in-string.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 1047.remove-all-adjacent-duplicates-in-string.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | # coding=utf-8
#
# @lc app=leetcode id=1047 lang=python
#
# [1047] Remove All Adjacent Duplicates In String
#
# https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string/description/
#
# algorithms
# Easy (63.77%)
# Likes: 184
# Dislikes: 18
# Total Accepted: 21.7K
# Total Submissions: 34K
# Testcase Example: '"abbaca"'
#
# Given a string S of lowercase letters, a duplicate removal consists of
# choosing two adjacent and equal letters, and removing them.
#
# We repeatedly make duplicate removals on S until we no longer can.
#
# Return the final string after all such duplicate removals have been made. It
# is guaranteed the answer is unique.
#
#
#
# Example 1:
#
#
# Input: "abbaca"
# Output: "ca"
# Explanation:
# For example, in "abbaca" we could remove "bb" since the letters are adjacent
# and equal, and this is the only possible move. The result of this move is
# that the string is "aaca", of which only "aa" is possible, so the final
# string is "ca".
#
#
#
#
# Note:
#
#
# 1 <= S.length <= 20000
# S consists only of English lowercase letters.
#
#
class Solution(object):
def removeDuplicates(self, S):
"""
:type S: str
:rtype: str
"""
while True:
index = 0
current = ""
flag = False
while index < len(S):
if index < len(S) - 1:
if S[index] == S[index+1]:
index += 2
flag = True
continue
current += S[index]
index += 1
if not flag:
return current
S = current
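    # A commonly used O(n) stack-based alternative (a sketch, not part of the
    # original submission): push characters and pop on a match.
    #
    # def removeDuplicates(self, S):
    #     stack = []
    #     for ch in S:
    #         if stack and stack[-1] == ch:
    #             stack.pop()
    #         else:
    #             stack.append(ch)
    #     return "".join(stack)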
# if __name__ == "__main__":
# s = Solution()
# print s.removeDuplicates("abbaca")
# print s.removeDuplicates("azxxzy")
| 23.063291 | 85 | 0.57629 | 226 | 1,822 | 4.610619 | 0.544248 | 0.017274 | 0.03263 | 0.051823 | 0.067179 | 0.067179 | 0 | 0 | 0 | 0 | 0 | 0.02818 | 0.318332 | 1,822 | 78 | 86 | 23.358974 | 0.810789 | 0.634468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e99fc63ab0f76d9f24c67f04cff7cdb44f433f8 | 3,444 | py | Python | v1_fastecdsa/bsgs_algo_modified.py | Chesterfield007/bsgs | dc3a7ab73ca7b385d25f7b486e8efa293c4e3bf8 | [
"MIT"
] | 55 | 2021-01-15T17:01:10.000Z | 2022-03-26T20:15:33.000Z | v1_fastecdsa/bsgs_algo_modified.py | Chesterfield007/bsgs | dc3a7ab73ca7b385d25f7b486e8efa293c4e3bf8 | [
"MIT"
] | 53 | 2021-02-14T22:17:44.000Z | 2022-01-28T17:43:59.000Z | v1_fastecdsa/bsgs_algo_modified.py | Chesterfield007/bsgs | dc3a7ab73ca7b385d25f7b486e8efa293c4e3bf8 | [
"MIT"
] | 33 | 2021-02-19T13:30:49.000Z | 2022-01-12T06:58:07.000Z | # -*- coding: utf-8 -*-
"""
@author: iceland
"""
import bit
import time
import random
import os
from fastecdsa import curve
from fastecdsa.point import Point
bs_file = 'baby_steps_table.txt'
def Pub2Point(public_key):
x = int(public_key[2:66],16)
if len(public_key) < 70:
y = bit.format.x_to_y(x, int(public_key[:2],16)%2)
else:
y = int(public_key[66:],16)
return Point(x, y, curve=curve.secp256k1)
###############################################################################
# Puzzle 46 : Privatekey Key 0x2ec18388d544
public_key = '04fd5487722d2576cb6d7081426b66a3e2986c1ce8358d479063fb5f2bb6dd5849a004626dffa0fb7b934118ea84bacc3b030332eee083010efa60025e4fde7297'
Q = Pub2Point(public_key)
G = curve.secp256k1.G
###############################################################################
def create_table(m):
    # create a table: baby_steps[x] holds the x-coordinate of (x + 1) * G
P = G
baby_steps = []
for x in range(m):
baby_steps.append(P.x)
P = P + G
return baby_steps
###############################################################################
m = 10000000 # default value
valid = os.path.isfile(bs_file)
if valid:
print('\nFound the Baby Steps Table file: '+bs_file+'. Will be used directly')
baby_steps = {int(line.split()[0],10):k for k, line in enumerate(open(bs_file,'r'))}
if m != len(baby_steps) and not len(baby_steps) == 0:
m = len(baby_steps)
print('Taken from table. m is adjusted to = ', m)
    if len(baby_steps) == 0:
        print('Size of the file was 0. It will be created and overwritten')
        valid = False
if not valid:
print('\nNot Found '+bs_file+'. Will Create This File Now. \
\nIt will save to this file in the First Run. Next run will directly read from this file.')
out = open(bs_file,'w')
baby_steps = create_table(m)
for line in baby_steps: out.write(str(line) + '\n')
out.close()
baby_steps = {line:k for k, line in enumerate(baby_steps)}
# We have to solve P = k.G, we know that k lies in the range ]k1,k2]
# k1 = random.randint(1, curve.secp256k1.q//2) # if you want to start from a random key
k1 = 1 # if you want to start from 1
k2 = k1 + m*m
print('Checking {0} keys from {1}'.format(m*m, hex(k1)))
# m = math.floor(math.sqrt(k2-k1))
# start time
st = time.time()
###############################################################################
k1G = k1 * G
mG = m * G
def findkey(onePoint):
S = onePoint - k1G
if S == Point.IDENTITY_ELEMENT: return k1 # Point at Infinity
found = False
step = 0
    while found is False and step < (1 + k2 - k1):
if S.x in baby_steps:
# b = baby_steps.index(S.x) # if using list
b = baby_steps.get(S.x)
found = True
break
else:
# Giant step
S = S - mG
step = step + m
    if found:
final_key = k1 + step + b + 1
else:
final_key = -1
return final_key
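# Note on the arithmetic above (derived from the code, no new logic):
# baby_steps[b] holds the x-coordinate of (b + 1) * G, and after `step` giant
# steps S = Q - (k1 + step) * G, so a hit means Q = (k1 + step + b + 1) * G,
# which is the key returned. Because the match is on x-coordinates only, a
# point and its negation collide, so in rare cases the true key could instead
# be k1 + step - (b + 1).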
###############################################################################
final_key = findkey(Q)
if final_key > 0:
print("BSGS FOUND PrivateKey : {0}".format(hex(final_key)))
else:
print('PrivateKey Not Found')
print("Time Spent : {0:.2f} seconds".format(time.time()-st))
| 32.8 | 146 | 0.525842 | 455 | 3,444 | 3.89011 | 0.323077 | 0.086441 | 0.027119 | 0.014689 | 0.061017 | 0.045198 | 0 | 0 | 0 | 0 | 0 | 0.066693 | 0.255517 | 3,444 | 104 | 147 | 33.115385 | 0.623635 | 0.126597 | 0 | 0.054795 | 0 | 0.013699 | 0.16928 | 0.052272 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041096 | false | 0 | 0.082192 | 0 | 0.164384 | 0.109589 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1e9d25884d2e5c0101e720dedd1422a914d3a51f | 6,921 | py | Python | utils/search_methods/improved_genetic_algorithm.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
] | null | null | null | utils/search_methods/improved_genetic_algorithm.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
] | null | null | null | utils/search_methods/improved_genetic_algorithm.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# coding=UTF-8
"""
@Author: WEN Hao
@LastEditors: WEN Hao
@Description:
@Date: 2021-09-09
@LastEditTime: 2022-04-17
The genetic algorithm proposed in "Natural Language Adversarial Attack and Defense in Word Level".
Note: for Chinese input, word segmentation means the number of words may change after a
crossover operation; for now the search range is truncated to the word count of the
sentence with fewer words.
"""
from typing import NoReturn, List, Tuple
import numpy as np
from .genetic_algorithm_base import GeneticAlgorithmBase
from .population_based_search import PopulationMember
from ..attacked_text import AttackedText
from ..goal_functions import GoalFunctionResult
from ..misc import DEFAULTS
__all__ = [
"ImprovedGeneticAlgorithm",
]
class ImprovedGeneticAlgorithm(GeneticAlgorithmBase):
""" """
__name__ = "ImprovedGeneticAlgorithm"
def __init__(
self,
pop_size: int = 60,
max_iters: int = 20,
temp: float = 0.3,
give_up_if_no_improvement: bool = False,
post_crossover_check: bool = True,
max_crossover_retries: int = 20,
max_replace_times_per_index: int = 5,
) -> NoReturn:
"""
Args:
            pop_size: The population size. Defaults to 60.
            max_iters: The maximum number of iterations to use. Defaults to 20.
temp: Temperature for softmax function used to normalize probability dist when sampling parents.
Higher temperature increases the sensitivity to lower probability candidates.
give_up_if_no_improvement: If True, stop the search early if no candidate that improves the score is found.
post_crossover_check: If True, check if child produced from crossover step passes the constraints.
max_crossover_retries: Maximum number of crossover retries if resulting child fails to pass the constraints.
Applied only when `post_crossover_check` is set to `True`.
Setting it to 0 means we immediately take one of the parents at random as the child upon failure.
max_replace_times_per_index: Maximum times words at the same index can be replaced in this algorithm.
"""
super().__init__(
pop_size=pop_size,
max_iters=max_iters,
temp=temp,
give_up_if_no_improvement=give_up_if_no_improvement,
post_crossover_check=post_crossover_check,
max_crossover_retries=max_crossover_retries,
)
self.max_replace_times_per_index = max_replace_times_per_index
def _modify_population_member(
self,
pop_member: PopulationMember,
new_text: AttackedText,
new_result: GoalFunctionResult,
word_idx: int,
) -> PopulationMember:
"""Modify `pop_member` by returning a new copy with `new_text`,
`new_result`, and `num_replacements_left` altered appropriately for given `word_idx`"""
num_replacements_left = np.copy(pop_member.attributes["num_replacements_left"])
num_replacements_left[word_idx] -= 1
return PopulationMember(
new_text,
result=new_result,
attributes={"num_replacements_left": num_replacements_left},
)
    def _get_word_select_prob_weights(self, pop_member: PopulationMember) -> np.ndarray:
"""Get the attribute of `pop_member` that is used for determining
probability of each word being selected for perturbation."""
return pop_member.attributes["num_replacements_left"]
def _crossover_operation(
self,
pop_member1: PopulationMember,
pop_member2: PopulationMember,
) -> Tuple[AttackedText, dict]:
"""Actual operation that takes `pop_member1` text and `pop_member2`
text and mixes the two to generate crossover between `pop_member1` and `pop_member2`.
Args:
pop_member1: The first population member.
pop_member2: The second population member.
Returns:
Tuple of `AttackedText` and a dictionary of attributes.
"""
indices_to_replace = []
words_to_replace = []
num_replacements_left = np.copy(pop_member1.attributes["num_replacements_left"])
# print("num_replacements_left:", num_replacements_left)
# print("num_replacements_left.shape:", num_replacements_left.shape)
# To better simulate the reproduction and biological crossover,
# IGA randomly cut the text from two parents and concat two fragments into a new text
# rather than randomly choose a word of each position from the two parents.
end_point = min(
pop_member1.num_words, pop_member2.num_words, len(num_replacements_left)
)
crossover_point = DEFAULTS.RNG.integers(0, end_point)
# print(f"crossover_point, pop_member1.num_words, pop_member2.num_words: {crossover_point, pop_member1.num_words, pop_member2.num_words}")
# if pop_member1.num_words != pop_member2.num_words:
# print(f"pop_member1: {pop_member1.attacked_text.text}")
# print(f"pop_member2: {pop_member2.attacked_text.text}")
end_point = max(crossover_point, end_point)
for i in range(crossover_point, end_point):
indices_to_replace.append(i)
words_to_replace.append(pop_member2.words[i])
num_replacements_left[i] = pop_member2.attributes["num_replacements_left"][
i
]
new_text = pop_member1.attacked_text.replace_words_at_indices(
indices_to_replace, words_to_replace
)
return new_text, {"num_replacements_left": num_replacements_left}
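    # Toy example of the cut-and-concat crossover above: with parent words
    # [w0, w1, w2, w3, w4] and [v0, v1, v2, v3, v4] and crossover_point == 2,
    # the child becomes [w0, w1, v2, v3, v4].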
def _initialize_population(
self,
initial_result: GoalFunctionResult,
pop_size: int,
) -> List[PopulationMember]:
"""
Initialize a population of size `pop_size` with `initial_result`
Args:
initial_result: Original text
pop_size: size of population
Returns:
population as `list[PopulationMember]`
"""
words = initial_result.attacked_text.words
# For IGA, `num_replacements_left` represents the number of times the word at each index can be modified
num_replacements_left = np.array(
[self.max_replace_times_per_index] * len(words)
)
population = []
# IGA initializes the first population by replacing each word by its optimal synonym
for idx in range(len(words)):
pop_member = PopulationMember(
initial_result.attacked_text,
initial_result,
attributes={"num_replacements_left": np.copy(num_replacements_left)},
)
pop_member = self._perturb(pop_member, initial_result, index=idx)
population.append(pop_member)
return population[:pop_size]
def extra_repr_keys(self) -> List[str]:
return super().extra_repr_keys() + ["max_replace_times_per_index"]
| 40.238372 | 146 | 0.675625 | 828 | 6,921 | 5.353865 | 0.292271 | 0.074442 | 0.094293 | 0.024363 | 0.18836 | 0.138055 | 0.089781 | 0.0388 | 0.022558 | 0.022558 | 0 | 0.011036 | 0.253721 | 6,921 | 171 | 147 | 40.473684 | 0.847241 | 0.4063 | 0 | 0.043956 | 0 | 0 | 0.058009 | 0.058009 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065934 | false | 0 | 0.076923 | 0.010989 | 0.21978 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ea33dcbed661949c9cfc5e0d749a5bbbbaa4357 | 1,595 | py | Python | tests/operations/test_set_op.py | andypalmer9669/74_series_computer | 0b8a4776b49a2380a51863634b48bcc441bf74ec | [
"MIT"
] | null | null | null | tests/operations/test_set_op.py | andypalmer9669/74_series_computer | 0b8a4776b49a2380a51863634b48bcc441bf74ec | [
"MIT"
] | 46 | 2019-02-22T16:46:02.000Z | 2020-03-08T20:26:37.000Z | tests/operations/test_set_op.py | andypalmer9669/74_series_computer | 0b8a4776b49a2380a51863634b48bcc441bf74ec | [
"MIT"
] | null | null | null | import pytest
from eight_bit_computer.operations import set_op
from eight_bit_computer.data_structures import get_machine_code_byte_template
from eight_bit_computer.exceptions import OperationParsingError
def generate_parse_line_test_data():
ret = []
test_input = ""
expected = []
ret.append((test_input, expected))
test_input = " \t"
expected = []
ret.append((test_input, expected))
test_input = "LOAD [#123] A"
expected = []
ret.append((test_input, expected))
test_input = "SET A #123"
mc_0 = get_machine_code_byte_template()
mc_0["byte_type"] = "instruction"
mc_0["bitstring"] = "00111001"
mc_1 = get_machine_code_byte_template()
mc_1["byte_type"] = "constant"
mc_1["constant"] = "#123"
ret.append((test_input, [mc_0, mc_1]))
test_input = " SET B $monkey "
mc_0 = get_machine_code_byte_template()
mc_0["byte_type"] = "instruction"
mc_0["bitstring"] = "00111010"
mc_1 = get_machine_code_byte_template()
mc_1["byte_type"] = "constant"
mc_1["constant"] = "$monkey"
ret.append((test_input, [mc_0, mc_1]))
return ret
@pytest.mark.parametrize(
"test_input,expected", generate_parse_line_test_data()
)
def test_parse_line(test_input, expected):
assert set_op.parse_line(test_input) == expected
@pytest.mark.parametrize("test_input", [
"SET",
"SET A",
"SET A B",
"SET #123",
"SET BLAH #123",
"SET A #123 FOO",
])
def test_parse_line_raises(test_input):
with pytest.raises(OperationParsingError):
set_op.parse_line(test_input)
| 25.725806 | 77 | 0.676489 | 219 | 1,595 | 4.557078 | 0.228311 | 0.144289 | 0.119238 | 0.09018 | 0.638277 | 0.467936 | 0.421844 | 0.421844 | 0.244489 | 0.244489 | 0 | 0.038911 | 0.194357 | 1,595 | 61 | 78 | 26.147541 | 0.737743 | 0 | 0 | 0.333333 | 0 | 0 | 0.165517 | 0 | 0 | 0 | 0 | 0 | 0.020833 | 1 | 0.0625 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ea3c749e4408cf0fa0ebae421190ee713d5bf51 | 10,098 | py | Python | src/models/ToothModel.py | arminnh/ma2-computer-vision | 9e931c87e097cd444ea292cdfff727636f003c36 | [
"MIT"
] | null | null | null | src/models/ToothModel.py | arminnh/ma2-computer-vision | 9e931c87e097cd444ea292cdfff727636f003c36 | [
"MIT"
] | null | null | null | src/models/ToothModel.py | arminnh/ma2-computer-vision | 9e931c87e097cd444ea292cdfff727636f003c36 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.spatial.distance
from scipy import linalg
import procrustes_analysis
import util
from Landmark import Landmark
from models.CenterInitializationModel import CenterInitializationModel
class ToothModel:
def __init__(self, name, landmarks, pcaComponents, sampleAmount):
self.name = name
self.landmarks = landmarks
self.preprocessedLandmarks = []
self.meanLandmark = None # type: Landmark
self.meanTheta = None
self.meanScale = None
self.pcaComponents = pcaComponents
self.eigenvalues = np.array([])
self.eigenvectors = np.array([])
self.sampleAmount = sampleAmount
self.meanProfilesForLandmarkPoints = {}
self.grayLevelModelCovarianceMatrices = {}
self.initializationModel = CenterInitializationModel(landmarks, 28) # TODO
def doProcrustesAnalysis(self):
# procrustes_analysis.drawLandmarks(self.landmarks, "before")
self.preprocessedLandmarks, self.meanLandmark, self.meanScale, self.meanTheta \
= procrustes_analysis.performProcrustesAnalysis(self.landmarks)
# procrustes_analysis.drawLandmarks(self.preprocessedLandmarks, "after")
return self
def getTranslatedMean(self, x, y):
""" Returns the mean landmark translated to x and y. """
return self.meanLandmark.translate(x, y)
def getTranslatedAndInverseScaledMean(self, x, y):
""" Returns the mean landmark rescaled back from unit variance (after procrustes) and translated to x and y. """
return self.meanLandmark.scale(self.meanScale * 0.75).translate(x, y)
def doPCA(self):
""" Perform PCA on the landmarks after procrustes analysis and store the eigenvalues and eigenvectors. """
data = [l.points for l in self.preprocessedLandmarks]
data.append(data[0])
S = np.cov(np.transpose(data))
eigenvalues, eigenvectors = np.linalg.eig(S)
sorted_values = np.flip(eigenvalues.argsort(), 0)[:self.pcaComponents]
self.eigenvalues = eigenvalues[sorted_values]
self.eigenvectors = eigenvectors[:, sorted_values]
# print(self.eigenvalues)
return self
def getShapeParametersForLandmark(self, landmark):
b = self.eigenvectors.T @ (landmark.points - self.meanLandmark.points)
return b.reshape((self.pcaComponents, -1))
def reconstructLandmarkForShapeParameters(self, b):
return Landmark(self.meanLandmark.points + (self.eigenvectors @ b).flatten())
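    # This is the standard Active Shape Model reconstruction x = x_mean + P @ b
    # (Cootes et al.); getShapeParametersForLandmark computes its inverse,
    # b = P.T @ (x - x_mean).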
def buildGrayLevelModels(self):
"""
        Builds a gray level model for each of the tooth's landmark points by
        averaging the normalized gray level profiles of that point across all
        training landmarks.
"""
self.grayLevelModelCovarianceMatrices = {}
self.meanProfilesForLandmarkPoints = {}
# Build gray level model for each landmark
for i, landmark in enumerate(self.landmarks):
# Get the gray level profiles for each of the 40 landmark points
normalizedGrayLevelProfiles = landmark.normalizedGrayLevelProfilesForLandmarkPoints(
img=landmark.getCorrectRadiographPart(),
grayLevelModelSize=self.sampleAmount
)
for j, normalizedProfile in normalizedGrayLevelProfiles.items():
if j not in self.meanProfilesForLandmarkPoints:
self.meanProfilesForLandmarkPoints[j] = []
self.meanProfilesForLandmarkPoints[j].append(normalizedProfile)
for pointIdx in range(len(self.meanProfilesForLandmarkPoints)):
cov = np.cov(np.transpose(self.meanProfilesForLandmarkPoints[pointIdx]))
self.grayLevelModelCovarianceMatrices[pointIdx] = linalg.pinv(cov)
# Replace each point's list of gray level profiles by their means
self.meanProfilesForLandmarkPoints[pointIdx] = np.mean(self.meanProfilesForLandmarkPoints[pointIdx], axis=0)
return self
def mahalanobisDistance(self, normalizedGrayLevelProfile, landmarkPointIndex):
"""
Returns the squared Mahalanobis distance of a new gray level profile from the built gray level model with index
landmarkPointIndex.
"""
Sp = self.grayLevelModelCovarianceMatrices[landmarkPointIndex]
pMinusMeanTrans = (normalizedGrayLevelProfile - self.meanProfilesForLandmarkPoints[landmarkPointIndex])
return pMinusMeanTrans.T @ Sp @ pMinusMeanTrans
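        # i.e. (g - g_mean).T @ S_inv @ (g - g_mean), with S_inv the
        # (pseudo-)inverted covariance of the training profiles for this point.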
def findBetterFittingLandmark(self, img, landmark):
"""
Active Shape Model Algorithm: An iterative approach to improving the fit of an instance X.
        Returns a landmark that fits the image better than the given one, judged by the
        gray level profiles of the landmark's points and the Mahalanobis distance.
"""
# Examine a region of the image around each point X_i to find the best nearby match for the point X_i'
# Get the gray level pointProfiles of points on normal lines of the landmark's points
profilesForLandmarkPoints = landmark.getGrayLevelProfilesForNormalPoints(
img=img,
grayLevelModelSize=self.sampleAmount,
sampleAmount=self.sampleAmount,
derive=True
)
bestPoints = []
# landmarkPointIdx = the points 0 to 39 on the landmark
for landmarkPointIdx in range(len(profilesForLandmarkPoints)):
# landmarkPointProfiles = list of {grayLevelProfile, normalPoint, grayLevelProfilePoints}
landmarkPointProfiles = profilesForLandmarkPoints[landmarkPointIdx]
distances = []
for profileContainer in landmarkPointProfiles:
grayLevelProfile = profileContainer["grayLevelProfile"]
normalPoint = profileContainer["normalPoint"]
d = self.mahalanobisDistance(grayLevelProfile, landmarkPointIdx)
distances.append((abs(d), normalPoint))
# print("Mahalanobis dist: {:.2f}, p: {}".format(abs(d), normalPoint))
            # min over reversed(distances) resolves ties to the last candidate
            # in sampling order
            bestPoints.append(min(reversed(distances), key=lambda x: x[0])[1])
landmark = landmark.copy(np.asarray(bestPoints).flatten())
# Find the pose parameters that best fit the new found points X
landmark, (translateX, translateY), scale, theta = landmark.superimpose(self.meanLandmark)
# Apply constraints to the parameters b to ensure plausible shapes
b = self.getShapeParametersForLandmark(landmark)
# Constrain the shape parameters to lie within certain limits
for i in range(len(b)):
limit = 2 * np.sqrt(abs(self.eigenvalues[i]))
b[i] = np.clip(b[i], -limit, limit)
return self.reconstructLandmarkForShapeParameters(b) \
.rotate(-theta).scale(scale).translate(-translateX, -translateY)
def matchModelPointsToTargetPoints(self, landmarkY):
"""
A simple iterative approach towards finding the best pose and shape parameters to match a model instance X to a
new set of image points Y.
"""
b = np.zeros((self.pcaComponents, 1))
diff = float("inf")
translateX = 0
translateY = 0
theta = 0
scale = 0
while diff > 1e-9:
# Generate model points using x = x' + Pb
x = self.reconstructLandmarkForShapeParameters(b)
# Project Y into the model coordinate frame by superimposition
# and get the parameters of the transformation
y, (translateX, translateY), scale, theta = landmarkY.superimpose(x)
# Update the model parameters b
newB = self.getShapeParametersForLandmark(y)
            # np.linalg.norm handles the (pcaComponents, 1) column vectors directly,
            # whereas scipy.spatial.distance.euclidean requires 1-D input
            diff = np.linalg.norm(b - newB)
b = newB
return self.reconstructLandmarkForShapeParameters(b) \
.rotate(-theta).scale(scale).translate(-translateX, -translateY)
def reconstruct(self, landmark):
"""
        Reconstructs a landmark from its shape parameters and plots the
        intermediate steps. Note: b must be computed from a preprocessed
        (superimposed) landmark, since PCA was performed on preprocessed landmarks.
"""
procrustes_analysis.plotLandmarks([self.meanLandmark], "self.meanLandmark")
superimposed, (translateX, translateY), scale, theta = landmark.superimpose(self.meanLandmark)
procrustes_analysis.plotLandmarks([superimposed], "superimposed landmark")
b = self.getShapeParametersForLandmark(superimposed)
reconstructed = self.reconstructLandmarkForShapeParameters(b)
procrustes_analysis.plotLandmarks([reconstructed], "reconstructed landmark")
reconstructed = reconstructed.rotate(-theta).scale(scale).translate(-translateX, -translateY)
procrustes_analysis.plotLandmarks([landmark, reconstructed],
"original + reconstructed and inverse transformed landmark")
print("shape b = {}, shape eigenvectors = {}".format(b.shape, self.eigenvectors.shape))
return reconstructed
def buildModels(radiographs, PCAComponents, sampleAmount):
# 1.1 Load the provided landmarks into your program
landmarks = []
for radiograph in radiographs:
landmarks += list(radiograph.landmarks.values())
# 1.2 Pre-process the landmarks to normalize translation, rotation, and scale differences
models = []
for t in util.TEETH:
models.append(
ToothModel(
name=t,
landmarks=[l for l in landmarks if l.toothNumber == t],
pcaComponents=PCAComponents,
sampleAmount=sampleAmount,
)
.buildGrayLevelModels()
.doProcrustesAnalysis()
)
# 1.3 Analyze the data using a Principal Component Analysis (PCA), exposing shape class variations
for model in models:
model.doPCA()
# model.reconstruct()
# Build gray level model for each point of the mean landmarks of the models
return models
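# Illustrative usage (a sketch; assumes `radiographs` were loaded elsewhere with
# their per-tooth landmarks attached, as in the rest of this project):
#
#   models = buildModels(radiographs, PCAComponents=20, sampleAmount=5)
#   betterFit = models[0].findBetterFittingLandmark(img, initialLandmark)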
| 42.970213 | 120 | 0.672509 | 997 | 10,098 | 6.794383 | 0.271815 | 0.014615 | 0.007086 | 0.005314 | 0.109536 | 0.109536 | 0.084145 | 0.060526 | 0.02982 | 0.02982 | 0 | 0.004095 | 0.250248 | 10,098 | 234 | 121 | 43.153846 | 0.890635 | 0.25005 | 0 | 0.080882 | 0 | 0 | 0.025003 | 0 | 0 | 0 | 0 | 0.004274 | 0 | 1 | 0.095588 | false | 0 | 0.051471 | 0.007353 | 0.242647 | 0.007353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ea6959c71f84b4fbb20cdee1843fe3bbf5d0e59 | 4,169 | py | Python | ImageProcessingScripts/Automated Pixel Sorting/automated_pixel_sorting.py | rafaelapcruz/Awesome_Python_Scripts | 6493ce989c1bcd707a10836f09462916388476a1 | [
"MIT"
] | 1 | 2021-12-07T17:37:56.000Z | 2021-12-07T17:37:56.000Z | ImageProcessingScripts/Automated Pixel Sorting/automated_pixel_sorting.py | rafaelapcruz/Awesome_Python_Scripts | 6493ce989c1bcd707a10836f09462916388476a1 | [
"MIT"
] | null | null | null | ImageProcessingScripts/Automated Pixel Sorting/automated_pixel_sorting.py | rafaelapcruz/Awesome_Python_Scripts | 6493ce989c1bcd707a10836f09462916388476a1 | [
"MIT"
] | null | null | null | import os
import sys
import getopt
import random as r
from PIL import Image
from pixelsort import pixelsort
def randomize_params():
angle = r.randint(90, 359)
# --- DEFINE ALL PARAMETERS ---
params = {
1: 'interval_function',
2: 'randomness',
3: 'lower_threshold',
4: 'upper_threshold',
5: 'sorting_function',
}
# --- RANDOMIZE COUNT AND CHOICE OF PARAMS ---
param_count = r.randint(1, 5)
selected_params = []
for _ in range(param_count):
param_choice = r.randint(1, 5)
if params[param_choice] not in selected_params:
selected_params.append(params[param_choice])
# --- SET DEFAULTS FOR PARAMS ---
args = {}
args['angle'] = angle
args['interval_function'] = 'threshold'
args['lower_threshold'] = 0.5
args['upper_threshold'] = 0.8
args['randomness'] = 0.5
args['sorting_function'] = 'lightness'
# --- UPDATE WITH RANDOMIZED VALUES ---
for param in selected_params:
if param == 'interval_function':
interval_fns = ['random', 'threshold', 'waves']
args['interval_function'] = r.choice(interval_fns)
elif param == 'randomness':
args['randomness'] = r.uniform(0.5, 1)
elif param == 'sorting_function':
sorting_fns = ['lightness', 'hue', 'saturation', 'intensity', 'minimum']
args['sorting_function'] = r.choice(sorting_fns)
elif param == 'lower_threshold':
args['lower_threshold'] = r.uniform(0.5, 1)
elif param == 'upper_threshold':
up_thresh = r.uniform(0.6, 1)
if up_thresh <= args['lower_threshold']:
up_thresh += r.uniform(0.1, 1 - args['lower_threshold'])
args['upper_threshold'] = up_thresh
elif args['upper_threshold'] - args['lower_threshold'] < 0.25:
args['lower_threshold'] -= 0.25
return args
def perform_sorting(args, img):
# --- PERFORM PIXELSORT WITH RANDOMIZED PARAMS ---
new_img = pixelsort(
image = img,
angle = args['angle'],
interval_function = args['interval_function'],
lower_threshold = args['lower_threshold'],
upper_threshold = args['upper_threshold'],
randomness = args['randomness'],
sorting_function = args['sorting_function']
)
return new_img
def Main():
# --- DEFINE ARGS AND SET DEFAULTS ---
count = 0
in_path = 'images/'
out_path = 'generated/'
argument_list = sys.argv[1:]
options = 'hi:n:'
# --- DEFINE TERMINAL ARG OPERATIONS ---
try:
args, _ = getopt.getopt(argument_list, options)
for current_argument, current_value in args:
            # note: ('-h') is just the string '-h', not a tuple, so compare with ==
            if current_argument == '-h':
print('-'*30)
print('-h : args description')
print('-i : pass location of input img-file')
print('-n : number of outputs required')
print('-'*30)
            if current_argument == '-i':
print('-'*30)
in_path += current_value
print(f'[+] Input-file: {in_path}')
            if current_argument == '-n':
count = int(current_value)
print(f'[+] Output-Count: {current_value}')
print('-'*30)
except getopt.error as error:
print(str(error))
# --- DELETE PREVIOUS RESULTS ---
for file in os.listdir(out_path):
os.remove(os.path.join(out_path, file))
# --- GENERATE 'N=COUNT' INSTANCES ---
for index in range(count):
# --- CALL PARAMETER FUNCTION ---
args = randomize_params()
# --- PRINT RANDOMIZED CHOICES ---
for arg in args.items():
print(arg[0], ':', arg[1])
# --- DEFINE LOCATIONS FOR LOAD AND SAVE ---
in_file = in_path
out_file = out_path + f'result-0{index + 1}.png'
img = Image.open(in_file)
# --- CALL SORT FUNCTION ---
new_img = perform_sorting(args, img)
# --- SAVE NEW FILE ---
new_img.save(out_file)
print('-'*30)
if __name__ == "__main__":
Main()
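# Example invocation (a sketch; expects the input image inside an 'images/'
# folder and an existing 'generated/' output folder next to this script):
#   python automated_pixel_sorting.py -i photo.png -n 3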
| 32.317829 | 84 | 0.562725 | 474 | 4,169 | 4.767932 | 0.267932 | 0.061947 | 0.055752 | 0.047788 | 0.103982 | 0.040708 | 0.017699 | 0 | 0 | 0 | 0 | 0.018512 | 0.300312 | 4,169 | 129 | 85 | 32.317829 | 0.756256 | 0.118734 | 0 | 0.051546 | 0 | 0 | 0.201366 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030928 | false | 0.010309 | 0.061856 | 0 | 0.113402 | 0.123711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ea73bdb618160f1e19273aa3d6ae48a8b2062e6 | 1,434 | py | Python | curl-monitor.py | geekvikas/hello-kubernetes | cdb94ef83adcb9ab4169f67137638aadc3d7509c | [
"MIT"
] | null | null | null | curl-monitor.py | geekvikas/hello-kubernetes | cdb94ef83adcb9ab4169f67137638aadc3d7509c | [
"MIT"
] | null | null | null | curl-monitor.py | geekvikas/hello-kubernetes | cdb94ef83adcb9ab4169f67137638aadc3d7509c | [
"MIT"
] | null | null | null | import requests
import time
import subprocess as sp
start_time = 0
running_instances = {}
errCount = 0
reqCount = 0
def printStatus(hostname,version):
global running_instances
global start_time, errCount, reqCount
elapsed_time = int(time.time() - start_time)
# if (elapsed_time % 2) == 0 :
string_to_print = "\n"
for x in running_instances:
string_to_print += "\tVersion: " + x + ", Instances: " + str(len(running_instances[x])) + "\n"
sp.call('clear',shell=True)
print(f"Instances seen in past {elapsed_time} seconds ", string_to_print + "Requests: " + str(reqCount) + ", Errors: " + str(errCount))
if elapsed_time >=30:
start_time = time.time()
running_instances = {}
errCount = 0
reqCount = 0
if running_instances and version in running_instances:
        if hostname not in running_instances[version]:
running_instances[version].append(hostname)
else:
running_instances[version] = [hostname]
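# printStatus() assumes the polled endpoint returns JSON shaped at least like
# {"version": ..., "os": {"name": ...}} -- see the fields read in main() below.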
def main():
URL = "http://10.104.84.249"
global start_time, errCount, reqCount
start_time = time.time()
while True:
try:
res = requests.get(url = URL)
data = res.json()
printStatus(data["os"]["name"],data["version"])
reqCount+=1
        except Exception:  # count any request/JSON failure as an error
errCount+=1
time.sleep(0.005)
if __name__ == '__main__':
main() | 26.072727 | 139 | 0.608787 | 171 | 1,434 | 4.906433 | 0.374269 | 0.190703 | 0.046484 | 0.059595 | 0.154946 | 0.081049 | 0 | 0 | 0 | 0 | 0 | 0.023969 | 0.272664 | 1,434 | 55 | 140 | 26.072727 | 0.780441 | 0.019526 | 0 | 0.243902 | 0 | 0 | 0.099644 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.073171 | 0 | 0.121951 | 0.121951 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ea7b89e8805b8150979e44b372cf9c7862d376d | 4,352 | py | Python | utils/test_utils.py | kedartatwawadi/stanford_compression_library | a472a81c71aeb81e181421e42aed75971a1cf4be | [
"MIT"
] | 2 | 2022-01-10T06:18:47.000Z | 2022-01-10T06:18:55.000Z | utils/test_utils.py | kedartatwawadi/stanford_compression_library | a472a81c71aeb81e181421e42aed75971a1cf4be | [
"MIT"
] | 20 | 2021-10-17T23:40:52.000Z | 2022-03-08T04:35:42.000Z | utils/test_utils.py | kedartatwawadi/stanford_compression_library | a472a81c71aeb81e181421e42aed75971a1cf4be | [
"MIT"
] | null | null | null | """
Utility functions useful for testing
"""
import filecmp
from typing import Tuple
from core.data_block import DataBlock
from core.data_stream import TextFileDataStream
from core.data_encoder_decoder import DataDecoder, DataEncoder
from core.prob_dist import ProbabilityDist
from utils.bitarray_utils import BitArray, get_random_bitarray
import tempfile
import os
import numpy as np
def get_random_data_block(prob_dist: ProbabilityDist, size: int, seed: int = None):
"""generates i.i.d random data from the given prob distribution
Args:
prob_dist (ProbabilityDist): input probability distribution
size (int): size of the block to be returned
seed (int): random seed used to generate the data
"""
rng = np.random.default_rng(seed)
data = rng.choice(prob_dist.alphabet, size=size, p=prob_dist.prob_list)
return DataBlock(data)
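# Minimal sketch of generating a random block (assumes ProbabilityDist accepts a
# symbol -> probability mapping, as its alphabet/prob_list attributes suggest):
#
#   prob_dist = ProbabilityDist({"A": 0.5, "B": 0.3, "C": 0.2})
#   block = get_random_data_block(prob_dist, size=1000, seed=0)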
def create_random_text_file(file_path: str, file_size: int, prob_dist: ProbabilityDist):
"""creates a random text file at the given path
Args:
file_path (str): file path to which random data needs to be written
file_size (int): The size of the random file to be generated
prob_dist (ProbabilityDist): the distribution to use to generate the random data
"""
data_block = get_random_data_block(prob_dist, file_size)
with TextFileDataStream(file_path, "w") as fds:
fds.write_block(data_block)
def are_blocks_equal(data_block_1: DataBlock, data_block_2: DataBlock):
"""
return True is the blocks are equal
"""
if data_block_1.size != data_block_2.size:
return False
# check if the encoding/decoding was lossless
for inp_symbol, out_symbol in zip(data_block_1.data_list, data_block_2.data_list):
if inp_symbol != out_symbol:
return False
return True
def try_lossless_compression(
data_block: DataBlock,
encoder: DataEncoder,
decoder: DataDecoder,
add_extra_bits_to_encoder_output: bool = False,
) -> Tuple[bool, int, BitArray]:
"""Encodes the data_block using data_compressor and returns True if the compression was lossless
Args:
data_block (DataBlock): input data_block to encode
encoder (DataEncoder): Encoder obj
decoder (DataDecoder): Decoder obj to test with
        add_extra_bits_to_encoder_output (bool, optional): This flag adds a random number of slack bits at the end of encoder output.
This is to test the scenario where we are concatenating multiple encoder outputs in the same bitstream.
Defaults to False.
Returns:
        Tuple[bool, int, BitArray]: whether the round trip was lossless, number of bits consumed by the decoder, encoded bitarray
"""
# test encode
encoded_bitarray = encoder.encode_block(data_block)
# if True, add some random bits to the encoder output
encoded_bitarray_extra = BitArray(encoded_bitarray) # make a copy
if add_extra_bits_to_encoder_output:
num_extra_bits = int(np.random.randint(100))
encoded_bitarray_extra += get_random_bitarray(num_extra_bits)
# test decode
decoded_block, num_bits_consumed = decoder.decode_block(encoded_bitarray_extra)
assert num_bits_consumed == len(encoded_bitarray)
# compare blocks
return are_blocks_equal(data_block, decoded_block), num_bits_consumed, encoded_bitarray
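# Typical use in a codec test (a sketch; `encoder`/`decoder` are any matching
# DataEncoder/DataDecoder pair under test):
#
#   is_lossless, num_bits, _ = try_lossless_compression(
#       block, encoder, decoder, add_extra_bits_to_encoder_output=True)
#   assert is_lossless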
def try_file_lossless_compression(
input_file_path: str, encoder: DataEncoder, decoder: DataDecoder, encode_block_size=1000
):
"""try encoding the input file and check if it is lossless
Args:
input_file_path (str): input file path
encoder (DataEncoder): encoder object
decoder (DataDecoder): decoder object
"""
with tempfile.TemporaryDirectory() as tmpdirname:
encoded_file_path = os.path.join(tmpdirname, "encoded_file.bin")
reconst_file_path = os.path.join(tmpdirname, "reconst_file.txt")
# encode data using the FixedBitWidthEncoder and write to the binary file
encoder.encode_file(input_file_path, encoded_file_path, block_size=encode_block_size)
# decode data using th eFixedBitWidthDecoder and write output to a text file
decoder.decode_file(encoded_file_path, reconst_file_path)
# check if the reconst file and input match
return filecmp.cmp(input_file_path, reconst_file_path)
| 36.881356 | 136 | 0.735064 | 607 | 4,352 | 5.041186 | 0.257002 | 0.05 | 0.021242 | 0.017647 | 0.111111 | 0.063399 | 0 | 0 | 0 | 0 | 0 | 0.003741 | 0.201517 | 4,352 | 117 | 137 | 37.196581 | 0.876835 | 0.398438 | 0 | 0.041667 | 0 | 0 | 0.013398 | 0 | 0 | 0 | 0 | 0 | 0.020833 | 1 | 0.104167 | false | 0 | 0.208333 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eab2801fa3a5a8256f44b1bd99c1e958db0f033 | 3,094 | py | Python | objects/classical_storage.py | h-oll/QuNetSim | e3647e512182eda2a303085ded30c7d1e55a4e01 | [
"MIT"
] | null | null | null | objects/classical_storage.py | h-oll/QuNetSim | e3647e512182eda2a303085ded30c7d1e55a4e01 | [
"MIT"
] | 4 | 2021-02-02T23:10:37.000Z | 2022-02-11T03:47:21.000Z | objects/classical_storage.py | h-oll/QuNetSim | e3647e512182eda2a303085ded30c7d1e55a4e01 | [
"MIT"
] | null | null | null | import components.protocols as protocols
class ClassicalStorage(object):
"""
A classical storage for messages.
"""
def __init__(self):
self._host_to_msg_dict = {}
self._host_to_read_index = {}
def _add_new_host_id(self, host_id):
self._host_to_msg_dict[host_id] = []
self._host_to_read_index[host_id] = 0
def remove_all_ack(self, from_sender=None):
"""
    Removes all stored ACK messages. If `from_sender` is given, only ACKs
    from this sender are removed.
    Args:
        from_sender (String): Host id of the sender whose ACKs should be deleted.
"""
        def delete_all_ack_for_sender(sender_id):
            # rebuild the list instead of deleting entries while iterating,
            # which would skip the element right after each removed ACK
            self._host_to_msg_dict[sender_id] = [
                msg for msg in self._host_to_msg_dict[sender_id]
                if msg.content != protocols.ACK
            ]
if from_sender is None:
for sender in self._host_to_msg_dict.keys():
delete_all_ack_for_sender(sender)
elif from_sender in self._host_to_msg_dict.keys():
delete_all_ack_for_sender(from_sender)
else:
return
def add_msg_to_storage(self, message):
"""
Adds a message to the storage.
"""
sender_id = message.sender
if sender_id not in self._host_to_msg_dict.keys():
self._add_new_host_id(sender_id)
self._host_to_msg_dict[sender_id].append(message)
def get_all_from_sender(self, sender_id, delete=False):
"""
Get all stored messages from a sender. If delete option is set,
the returned messages are removed from the storage.
Args:
sender_id (String): The host id of the host.
delete (bool): optional, True if returned messages should be removed from storage.
Returns:
            List of messages of the sender. If there are none, an empty list is
returned.
"""
if delete:
raise ValueError("delete option not implemented yet!")
if sender_id in self._host_to_msg_dict:
return self._host_to_msg_dict[sender_id]
return []
def get_next_from_sender(self, sender_id):
"""
        Gets the next unread message from the sender. If there isn't one,
None is returned.
Args:
sender_id (String): The sender id of the message to get.
Returns:
Message object, if such a message exists, or none.
"""
if sender_id not in self._host_to_msg_dict.keys():
return None
if len(self._host_to_msg_dict[sender_id]) <= self._host_to_read_index[sender_id]:
return None
msg = self._host_to_msg_dict[sender_id][self._host_to_read_index[sender_id]]
self._host_to_read_index[sender_id] += 1
return msg
def get_all(self):
"""
Returns all Messages as a list.
"""
ret = []
for host_id in self._host_to_msg_dict.keys():
ret += self._host_to_msg_dict[host_id]
return ret
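# Minimal usage sketch (assumes a message object exposing .sender and .content,
# as the methods above require):
#
#   storage = ClassicalStorage()
#   storage.add_msg_to_storage(msg)
#   next_msg = storage.get_next_from_sender(msg.sender)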
| 33.268817 | 94 | 0.616031 | 433 | 3,094 | 4.066975 | 0.212471 | 0.0954 | 0.113572 | 0.110733 | 0.387848 | 0.31289 | 0.28109 | 0.172061 | 0.172061 | 0.156729 | 0 | 0.000932 | 0.306723 | 3,094 | 92 | 95 | 33.630435 | 0.820047 | 0.275048 | 0 | 0.090909 | 0 | 0 | 0.017068 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.022727 | 0 | 0.386364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eabccfd77a86f3f295b6a683297245028bed3b7 | 4,331 | py | Python | build.py | sug0/libgoimg | 46545f2a9a68a1063579e8b15c09fa8600128986 | [
"MIT"
] | 3 | 2019-02-26T13:41:46.000Z | 2019-05-02T16:43:13.000Z | build.py | sugoiuguu/libgoimg | 46545f2a9a68a1063579e8b15c09fa8600128986 | [
"MIT"
] | 1 | 2019-02-04T22:07:56.000Z | 2019-02-04T22:07:56.000Z | build.py | sugoiuguu/libgoimg | 46545f2a9a68a1063579e8b15c09fa8600128986 | [
"MIT"
] | null | null | null | import re
import os
from sys import exit, argv
from platform import machine
from functools import reduce
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
if os.name == 'nt':
program += '.exe'
fpath, fname = os.path.split(program)
if fpath and is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
def find_cc():
return os.getenv('CC') or which('cc') or which('gcc') or which('clang')
def transform_fmt(fmt):
return '-DGOIMG_COMPILE_FMT_%s' % fmt.upper()
def build_fmt_opts(files):
if len(argv) < 2:
return ''
opt = []
for fmt in argv[1].split('+'):
opt.append(transform_fmt(fmt))
files.append('fmt_'+fmt)
return ' '.join(opt)
def sys(*args):
cmd = ' '.join(args)
print(cmd)
code = os.system(cmd)
if code != 0:
exit(code)
def ppath(prefix, *args):
return '%s%s%s' % (prefix, os.sep, os.sep.join(args))
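# e.g. on POSIX: ppath('/usr/local', 'include', 'goimg') -> '/usr/local/include/goimg'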
def arm_opts():
try:
with open('/proc/device-tree/model', 'r') as f:
model = f.read()
if model.find('Raspberry Pi 3') != -1:
return '-mcpu=cortex-a53 -mtune=cortex-a53 '
elif model.find('Raspberry Pi 2') != -1:
return '-mcpu=cortex-a7 -mfloat-abi=hard -mfpu=neon-vfpv4 '
elif model.find('Raspberry Pi ') != -1:
return '-mcpu=arm1176jzf-s -mfloat-abi=hard -mfpu=vfp '
elif model.find('Xunlong Orange Pi PC') != -1:
return '-mcpu=cortex-a7 -mtune=cortex-a7 -mfloat-abi=hard -mfpu=neon-vfpv4 '
else:
return ''
except FileNotFoundError:
return ''
def x86_opts():
try:
f = open('/proc/cpuinfo', 'r')
line = f.readline()
while line:
if line.find('flags') == 0:
break
line = f.readline()
f.close()
if not line:
return
opts = [
r'mmx\s',
r'avx\s',
r'avx2\s',
r'sse\s',
r'sse2\s',
r'sse3\s',
r'ssse3\s',
r'sse4\s',
r'sse4a\s',
r'sse4_1\s',
r'sse4_2\s',
]
for opt in opts:
            if re.search(opt, line) is not None:
yield '-m'+opt[:-2].replace('_', '.')
if opt == r'sse\s':
yield '-mfpmath=sse'
except FileNotFoundError:
return
# https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
# https://stackoverflow.com/questions/661338/sse-sse2-and-sse3-for-gnu-c/662250
# https://gist.github.com/fm4dd/c663217935dc17f0fc73c9c81b0aa845
# https://en.wikipedia.org/wiki/Uname
def optimized():
if os.getenv('UNOPTIMIZED'):
return ''
m = machine().upper()
either = lambda *args: reduce(lambda x,y: x or y, map(lambda x: m == x, map(str.upper, args)))
if either('aarch64', 'armv7l', 'armv6l'):
return arm_opts()
elif either('i686', 'i386', 'x86', 'x86_64', 'amd64'):
return ' '.join(x86_opts()) + ' '
else:
return ''
def build(install=None):
# change to source dir
os.chdir('src')
files = ['goio', 'allocator', 'color', 'image', 'util']
ccopt = '-std=c99 -pedantic -fPIC -Wall -O3 ' + optimized() + build_fmt_opts(files)
outlib = 'libgoimg.a'
objs = [f+'.o' for f in files]
cfiles = [f+'.c' for f in files]
# build .o files
cc = find_cc()
for f in cfiles:
sys(cc, ccopt, '-c', f)
# pack libgoimg.a
sys('ar rcs', outlib, *objs)
sys('ranlib', outlib)
# cleanup .o files
sys('rm -f *.o')
# install
if install:
hfiles = ' '.join(f+'.h' for f in files if f != 'util' and f[:4] != 'fmt_') + ' goimg.h'
sys('mkdir -p', ppath(install, 'include', 'goimg'))
sys('mkdir -p', ppath(install, 'lib'))
sys('cp libgoimg.a', ppath(install, 'lib'))
sys('cp', hfiles, ppath(install, 'include', 'goimg'))
if __name__ == '__main__':
if len(argv) > 1 and argv[1][:2] == '-i':
argv = argv[1:]
build(install=argv[0][3:])
else:
build()
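# Example invocations (a sketch; the format names are illustrative):
#   python build.py png+jpeg                 # build libgoimg.a with those format modules
#   python build.py -i=/usr/local png+jpeg   # same, then install headers and the lib under /usr/local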
| 27.585987 | 98 | 0.529439 | 581 | 4,331 | 3.884682 | 0.342513 | 0.008861 | 0.019495 | 0.026584 | 0.095702 | 0.030128 | 0.030128 | 0.030128 | 0 | 0 | 0 | 0.034563 | 0.305241 | 4,331 | 156 | 99 | 27.762821 | 0.71552 | 0.070423 | 0 | 0.140496 | 0 | 0.008264 | 0.173058 | 0.011205 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.041322 | 0.033058 | 0.297521 | 0.008264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eac22c7615b08f822c855110b5fa481a819bff8 | 273 | py | Python | sisbp_cat.py | naotohori/cafysis | 9d8534121c01ea75ae965cf39a1e307052ff8523 | [
"MIT"
] | 2 | 2022-02-25T17:32:41.000Z | 2022-03-31T14:38:55.000Z | sisbp_cat.py | naotohori/cafysis | 9d8534121c01ea75ae965cf39a1e307052ff8523 | [
"MIT"
] | 2 | 2020-05-03T08:36:10.000Z | 2021-01-27T12:40:50.000Z | sisbp_cat.py | naotohori/life-of-py | 9d8534121c01ea75ae965cf39a1e307052ff8523 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from cafysis.file_io.sisbp import SisbpFile
import sys
f = SisbpFile(sys.argv[1])
f.open_to_read()
while f.has_more_data():
pairs = f.read_onestep()
for p in pairs:
print(f" ({p[0]:d}, {p[1]:d})", end='')
print("")
f.close()
| 15.166667 | 47 | 0.611722 | 47 | 273 | 3.425532 | 0.680851 | 0.074534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013636 | 0.194139 | 273 | 17 | 48 | 16.058824 | 0.718182 | 0.07326 | 0 | 0 | 0 | 0 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eac32e0121b7c0cf9d2b404b91b0f2a824ee7a2 | 535 | py | Python | sendemail.py | kenelite/pypractise | ef79ca760533096e6da89846e2974fa47a060440 | [
"MIT"
] | null | null | null | sendemail.py | kenelite/pypractise | ef79ca760533096e6da89846e2974fa47a060440 | [
"MIT"
] | null | null | null | sendemail.py | kenelite/pypractise | ef79ca760533096e6da89846e2974fa47a060440 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import smtplib
mail_server = 'localhost'
mail_server_port = 25
from_addr = 'sender@devcentos.example.com'
to_addr = 'root@devcentos.example.com'
from_header = 'From: %s\r\n' % from_addr
to_header = 'To: %s\r\n\r\n' % to_addr
subject_header = 'Subject: nothing interesting'
body = 'This is a not-very-interesting email.'
email_message = '%s\n%s\n%s\n\n%s' %(from_header, to_header, subject_header, body)
s = smtplib.SMTP(mail_server, mail_server_port)
s.sendmail(from_addr, to_addr, email_message)
s.quit()
| 22.291667 | 84 | 0.73271 | 90 | 535 | 4.133333 | 0.411111 | 0.107527 | 0.075269 | 0.021505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004255 | 0.121495 | 535 | 23 | 85 | 23.26087 | 0.787234 | 0.035514 | 0 | 0 | 0 | 0 | 0.324272 | 0.104854 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ead0b0255a3ba3da34a217fdcc1371caa46d76d | 717 | py | Python | factioncli/commands/log.py | s3b4stian/CLI | 057e4e9e2bff7bdce0f60106a0c61938386f7d01 | [
"BSD-3-Clause"
] | null | null | null | factioncli/commands/log.py | s3b4stian/CLI | 057e4e9e2bff7bdce0f60106a0c61938386f7d01 | [
"BSD-3-Clause"
] | null | null | null | factioncli/commands/log.py | s3b4stian/CLI | 057e4e9e2bff7bdce0f60106a0c61938386f7d01 | [
"BSD-3-Clause"
] | null | null | null | import logging
from cliff.command import Command
from factioncli.processing.docker.logs import get_logs
class Log(Command):
"Handles Log Command"
def get_parser(self, prog_name):
parser = super(Log, self).get_parser(prog_name)
parser.add_argument('-f','--follow',
help="Enable log following",
action="store_true")
parser.add_argument('--container',
help="Target container name",
action="store",
nargs=1)
return parser
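    # Example CLI usage (a sketch; the exact executable name depends on how the
    # Faction CLI registers this cliff command):
    #   faction log --container <container-name> -f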
def take_action(self, parsed_args):
get_logs(parsed_args.container, parsed_args.follow) | 34.142857 | 63 | 0.559275 | 75 | 717 | 5.173333 | 0.493333 | 0.07732 | 0.072165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002146 | 0.35007 | 717 | 21 | 63 | 34.142857 | 0.830472 | 0.026499 | 0 | 0 | 0 | 0 | 0.133705 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eaeeb70f4d3820cedf81ea0c740713a9b0641c3 | 10,097 | py | Python | delfick_project/option_merge/path.py | delfick/delfick_project | 4b49209639dd10cff6f48b4f5617cb7874481aaa | [
"MIT"
] | 1 | 2020-12-01T21:11:24.000Z | 2020-12-01T21:11:24.000Z | delfick_project/option_merge/path.py | delfick/delfick_project | 4b49209639dd10cff6f48b4f5617cb7874481aaa | [
"MIT"
] | 2 | 2019-11-18T05:28:19.000Z | 2019-11-18T07:12:24.000Z | delfick_project/option_merge/path.py | delfick/delfick_project | 4b49209639dd10cff6f48b4f5617cb7874481aaa | [
"MIT"
] | 2 | 2019-11-18T05:35:39.000Z | 2021-01-06T18:48:37.000Z | """
Option_merge uses this Path object to encapsulate the idea of a path and the
converters that are available to use.
We are able to use this to store a reference to the root of the configuration
as well as whether the converters should be ignored or not.
It's purpose is to behave like a string regardless of whether it is a string
or a list of strings.
"""
from .joiner import dot_joiner, join
from .not_found import NotFound
class Path(object):
"""
Encapsulate a path; a root configuration; a list of converters; and whether
the converters should be used or not
A path may be just a string or a list of strings.
"""
@classmethod
def convert(
kls, path, configuration=None, converters=None, ignore_converters=None, joined=None
):
"""
Get us a Path object from this path
If path is already a Path instance, it is returned as is.
Otherwise, a joined version of the string is created and used,
along with the other kwargs to this function, to produce a new Path instance
"""
path_type = type(path)
if path_type is Path:
return path
else:
joined = dot_joiner(path, item_type=path_type)
return Path(path, configuration, converters, ignore_converters, joined=joined)
def __init__(
self,
path,
configuration=None,
converters=None,
ignore_converters=False,
joined=None,
joined_function=None,
):
self.path = path
self.path_type = type(self.path)
self.path_is_string = self.path_type is str
self._joined = joined
self._joined_function = joined_function
self.converters = converters
self.configuration = configuration
self.ignore_converters = ignore_converters
def __unicode__(self):
"""alias for self.joined"""
return self.joined()
def __str__(self):
"""alias for self.joined"""
return self.joined()
def __nonzero__(self):
"""Whether we have any path or not"""
return any(self.path)
def __len__(self):
"""
The length of our path
* If we have no path, then 0
* if path is a string, then 1
* if path is an array, then the length of the array
"""
if self.path_is_string:
if self.path:
return 1
else:
return 0
else:
if self.path_type in (list, tuple):
if not any(item for item in self.path):
return 0
return len(self.path)
def __iter__(self):
"""Iterate through the parts of our path"""
if self.path_is_string:
if self.path:
yield self.path
else:
for part in self.path:
yield part
def __repr__(self):
return f"<Path({str(self)})>"
def __eq__(self, other):
"""
Compare the joined version of this path
and the joined version of the other path
"""
joined = self.joined()
if not other and not joined:
return True
if other and joined:
return dot_joiner(other) == self.joined()
return False
def __ne__(self, other):
"""Negation of whether other is equal to this path"""
return not self.__eq__(other)
def __add__(self, other):
"""Create a copy of this path joined with other"""
if not other:
return self.clone()
else:
return self.using(join(self, other))
def __hash__(self):
"""The hash of the joined version of this path"""
return hash(self.joined())
def __getitem__(self, key):
"""
If the path is a string, treat it as a list of that one string,
otherwise, treat path as it is
and get the index of the path as specified by key
"""
path = self.path
if self.path_is_string:
path = [path]
return path[key]
def without(self, base):
"""Return a clone of this path without the base"""
base_type = type(base)
if base_type is not str:
base = dot_joiner(base, base_type)
if not self.startswith(base):
raise NotFound()
if self.path_is_string:
path = self.path[len(base) :]
while path and path[0] == ".":
path = path[1:]
return self.using(path, joined=path)
else:
if not base:
res = [part for part in self.path]
else:
res = []
for part in self.path:
if not base:
res.append(part)
continue
part_type = type(part)
if part_type is str:
joined_part = part
else:
joined_part = dot_joiner(part, part_type)
if base.startswith(joined_part):
base = base[len(joined_part) :]
while base and base[0] == ".":
base = base[1:]
elif joined_part.startswith(base):
res.append(joined_part[len(base) :])
base = ""
return self.using(res, joined=dot_joiner(res, list))
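    # e.g. (sketch): Path.convert("a.b.c").without("a") behaves like Path("b.c"),
    # while without() raises NotFound when the path does not start with the base.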
def prefixed(self, prefix):
"""Return a clone with this prefix to the path"""
if not prefix:
return self.clone()
else:
return self.using(join(prefix, self))
def first_part_is(self, key):
"""Return whether the first part of this path is this string"""
if self.path_is_string:
return self.path.startswith(str(key) + ".")
if not self.path:
return not bool(key)
if self.path_type is list:
return self.path[0] == key
if self.path_type is Path:
return self.path.first_part_is(key)
return self.joined().startswith(str(key) + ".")
def startswith(self, base):
"""Does the path start with this string?"""
if self.path_is_string:
return self.path.startswith(base)
if not self.path:
return not bool(base)
if self.path_type is list and len(self.path) == 1:
return self.path[0].startswith(base)
return self.joined().startswith(base)
def endswith(self, suffix):
"""Does the path end with this string?"""
return self.joined().endswith(suffix)
def using(
self, path, configuration=None, converters=None, ignore_converters=False, joined=None
):
"""Return a clone of this path and override with provided values"""
if configuration is None:
configuration = self.configuration
if converters is None:
converters = self.converters
if (
path == self.path
and self.configuration is configuration
and self.converters is converters
and self.ignore_converters is ignore_converters
):
return self
joined_function = None
if joined is None:
if type(path) is Path:
joined_function = lambda: dot_joiner(path.path, path.path_type)
else:
joined_function = lambda: dot_joiner(path)
return self.__class__(
path,
configuration,
converters,
ignore_converters=ignore_converters,
joined_function=joined_function,
)
def clone(self):
"""Return a clone of this path with all the same values"""
joined_function = lambda: dot_joiner(self.path, self.path_type)
return self.__class__(
self.path,
self.configuration,
self.converters,
self.ignore_converters,
joined_function=joined_function,
)
def ignoring_converters(self, ignore_converters=True):
"""Return a clone of this path with ignore_converters set to True"""
if self.ignore_converters == ignore_converters:
return self
return self.using(self.path, ignore_converters=ignore_converters, joined=self.joined())
def do_conversion(self, value):
"""
Do the conversion on some path if any conversion exists
Return (converted, did_conversion)
Where ``did_conversion`` is a boolean indicating whether a conversion
took place.
"""
converter, found = self.find_converter()
if not found:
return value, False
else:
converted = converter(self, value)
self.converters.done(self, converted)
if hasattr(converted, "post_setup"):
converted.post_setup()
return converted, True
def find_converter(self):
"""Find appropriate converter for this path"""
if self.ignore_converters:
return None, False
return self.converters.matches(self)
def converted(self):
"""Determine if this path has been converted"""
if self.converters:
return self.converters.converted(self)
return False
def converted_val(self):
"""Return the converted value for this path"""
return self.converters.converted_val(self)
def waiting(self):
"""Return whether we're waiting for this value"""
return self.converters.waiting(self)
def joined(self):
"""Return the dot_join of of the path"""
joined = self._joined
if self._joined is None and self._joined_function is not None:
joined = self._joined = self._joined_function()
if joined is None:
if self.path_is_string:
joined = self._joined = self.path
else:
joined = self._joined = dot_joiner(self.path, self.path_type)
return joined
| 31.751572 | 95 | 0.571754 | 1,229 | 10,097 | 4.560618 | 0.147274 | 0.064228 | 0.023194 | 0.022837 | 0.275468 | 0.196075 | 0.146833 | 0.094202 | 0.058162 | 0.043533 | 0 | 0.001826 | 0.349015 | 10,097 | 317 | 96 | 31.851735 | 0.850905 | 0.212142 | 0 | 0.236715 | 0 | 0 | 0.004335 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.009662 | 0.004831 | 0.36715 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eaf0d7cba39f6baabe80e6ccc1b07ab55100c68 | 7,468 | py | Python | test/helpers/acmeair_utils.py | cloudant-labs/spark-cloudant | 07f772eb21ec268f2e013a65dc2f21e01232f82b | [
"Apache-2.0"
] | 25 | 2015-11-02T16:50:47.000Z | 2020-11-07T13:41:36.000Z | test/helpers/acmeair_utils.py | cloudant-labs/spark-cloudant | 07f772eb21ec268f2e013a65dc2f21e01232f82b | [
"Apache-2.0"
] | 80 | 2015-10-14T11:08:00.000Z | 2018-03-26T01:30:20.000Z | test/helpers/acmeair_utils.py | cloudant-labs/spark-cloudant | 07f772eb21ec268f2e013a65dc2f21e01232f82b | [
"Apache-2.0"
] | 30 | 2015-10-13T19:14:13.000Z | 2020-11-08T07:23:30.000Z | #*******************************************************************************
# Copyright (c) 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import os
import sys
import requests
import subprocess
import signal
import time
import conftest
from helpers.dbutils import CloudantDbUtils
class AcmeAirUtils:
"""
Test AcmeAir app related functions
"""
_api_context_root = "/rest/api"
_app_host = "http://localhost:9080"
_test_properties = None
FORM_CONTENT_TYPE = "application/x-www-form-urlencoded; charset=UTF-8"
test_properties = conftest.test_properties()
def __init__(self):
try:
acmeair_path = os.environ["ACMEAIR_HOME"]
if os.path.exists(acmeair_path):
print ("\nAcmeAir Nodejs app home: ", acmeair_path)
self.acmehome = acmeair_path
else:
raise RuntimeError("Invalid AcmeAir Nodejs app home:", jarpath)
except KeyError:
raise RuntimeError("Environment variable ACMEAIR_HOME not set")
if not all(x in ["cloudantusername", "cloudantpassword", "cloudanthost"] for x in self.test_properties):
raise RuntimeError("test_properties does not contain all required cloudant properties")
def start_acmeair(self):
"""
Set the required env vars for cloudant and start the AcmeAir app locally
If app is already running, check if it's functioning
"""
        app_status = self.is_acmeair_running()
if app_status == -1:
raise RuntimeError("AcmeAir is already running but malfunctioning. Please shut it down.")
elif app_status == 1:
print ("AcmeAir app is already running, will not attempt to start\n")
return
cloudantUtils = CloudantDbUtils(self.test_properties)
cloudantUtils.check_databases()
# set the env vars required to start the app
new_env = os.environ.copy()
new_env["dbtype"] = "cloudant"
new_env["CLOUDANT_URL"] = "https://{}:{}@{}".format(
self.test_properties["cloudantusername"],
self.test_properties["cloudantpassword"],
self.test_properties["cloudanthost"])
# start the acmeair app
os.chdir(self.acmehome)
command = ["node", "app.js"]
self.proc = subprocess.Popen(command, env=new_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# wait at most 10 sec for app to come up
timeout = time.time() + 10
while (True):
if not self.is_acmeair_running() == 1:
time.sleep(1)
else:
print ("\nAcemAir started!")
break
if time.time() > timeout:
raise RuntimeError("Cannot connect to AcmeAir!")
def stop_acmeair(self):
"""
Stop the AcmeAir App that was started by this util
"""
if hasattr(self, "proc"):
os.kill(self.proc.pid, signal.SIGTERM)
print ("AcmeAir is shutdown")
def is_acmeair_running(self):
"""
Check if AcmeAir app is running
Return 0: app is not running
Return 1: app is running
Return -1: app is running but malfunctioning, possibly because db were rebuilt
when app was running
"""
r = requests.Session()
url = "{}{}/{}".format(
self._app_host, self._api_context_root, "config/countCustomers")
try:
response = r.get(url)
if response.status_code == 200:
status = 1
else:
# happens when db were rebuilt while app is running
status = -1
except:
# happens when app is not running
status = 0
return status
def load_data(self, num_of_customers):
"""
Call the AcmeAir app REST API to populate with the given # of users
"""
cloudantUtils = CloudantDbUtils(self.test_properties)
if cloudantUtils.get_doc_count("n_customer") > 0:
            raise RuntimeError("Test databases already contain unknown data so AcmeAir data loader will not run!")
r = requests.Session()
url = "{}{}/{}".format(
self._app_host, self._api_context_root, "loader/load")
if isinstance(num_of_customers, int):
num_of_customers = str(num_of_customers)
param = {"numCustomers" : num_of_customers}
headers = { "Content-Type" : self.FORM_CONTENT_TYPE}
print ("Start AcmeAir database loader with num_of_customers = ", num_of_customers)
start_time = time.time()
try:
r.get(url, headers=headers, params=param)
except requests.exceptions.ConnectionError:
# the request aborts after 2 mins due to server timeout, wait until expected # of rows is reached
cloudantUtils.wait_for_doc_count("n_customer", num_of_customers, int(num_of_customers) / 500)
cloudantUtils.wait_for_doc_count("n_airportcodemapping", 14, 5)
cloudantUtils.wait_for_doc_count("n_flightsegment", 395, 5)
cloudantUtils.wait_for_doc_count("n_flight", 1971, 20)
print ("Database load completed after {} mins\n".format(int(round((time.time() - start_time) / 60))))
def login(self, user):
"""
Create a user session for the given user. It's needed in order to book a flight.
@return session ID
"""
r = requests.Session()
url = "{}{}/{}".format(
self._app_host, self._api_context_root, "login")
payload = {"login" : user, "password" : "password"}
headers = { "Content-Type" : self.FORM_CONTENT_TYPE}
print ("Login as user: ", user)
response = r.post(url, headers=headers, data=payload)
if response.status_code == 200:
# get session ID
return response.cookies["sessionid"]
else:
raise RuntimeError(response.text)
def book_flights(self, user, toFlightId, retFlightId):
"""
Login as the given user and booking flights.
Set retFlightId=None if booking one way.
"""
r = requests.Session()
url = "{}{}/{}".format(
self._app_host, self._api_context_root, "bookings/bookflights")
headers = { "Content-Type" : self.FORM_CONTENT_TYPE }
payload = {"userid" : user, "toFlightId" : toFlightId}
# see if it's round trip
if retFlightId is None:
payload["oneWayFlight"] = "true"
else:
payload["oneWayFlight"] = "false"
payload["retFlightId"] = retFlightId
# the request must include the cookie retrieved from login
sessionId = self.login(user)
cookies = { "sessionid" : sessionId,
"loggedinuser" : user }
print ("Book flight(s) " + str(payload))
response = r.post(url, headers=headers, data=payload, cookies=cookies)
if response.status_code == 200:
print ("\nFlight(s) booked: {}\n".format(response.text))
else:
raise RuntimeError(response.text)
def get_flightId_by_number(self, flightNum):
"""
Get the generated flight ID for the given flight number
"""
cloudantUtils = CloudantDbUtils(self.test_properties)
url = "https://{}/{}".format(
self.test_properties["cloudanthost"], "n_flight/_design/view/_search/n_flights?q=flightSegmentId:" + flightNum)
param = {"q" : "flightSegmentId:" + flightNum}
response = cloudantUtils.r.get(url, params=param)
data = response.json()
if int(data["total_rows"]) > 0:
# just get one from the dict
return data["rows"][0]["id"]
else:
raise RuntimeError("n_flights has no data for ", flightNum)
| 32.611354 | 115 | 0.684922 | 978 | 7,468 | 5.101227 | 0.309816 | 0.033674 | 0.025256 | 0.015234 | 0.197835 | 0.156344 | 0.108439 | 0.076568 | 0.043295 | 0.043295 | 0 | 0.009508 | 0.183182 | 7,468 | 229 | 116 | 32.611354 | 0.808361 | 0.243706 | 0 | 0.216418 | 0 | 0 | 0.229223 | 0.02046 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0.022388 | 0.059701 | 0 | 0.19403 | 0.067164 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eaff492508ad7fb7bb92e8d684b7499f0632a98 | 596 | py | Python | hackerrank-python/py-collections-namedtuple.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | 2 | 2021-09-06T22:13:12.000Z | 2021-11-22T08:50:04.000Z | hackerrank-python/py-collections-namedtuple.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | null | null | null | hackerrank-python/py-collections-namedtuple.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | null | null | null |
# https://www.hackerrank.com/challenges/py-collections-namedtuple/problem
from collections import namedtuple
if __name__ == '__main__':
test_cases = int(input())
length = test_cases
sum_marks = 0
colums = input().split()
while(test_cases > 0):
attributes = input().split()
Student = namedtuple('Student', colums)
s = Student(attributes[0], attributes[1], attributes[2], attributes[3])
sum_marks += int(s.MARKS)
test_cases -= 1
print("{:.02f}".format(sum_marks / length))
| 14.9 | 79 | 0.580537 | 64 | 596 | 5.171875 | 0.53125 | 0.108761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021226 | 0.288591 | 596 | 39 | 80 | 15.282051 | 0.759434 | 0.119128 | 0 | 0 | 0 | 0 | 0.042146 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eb6aa4e2227819f8ee6cee396c7d5b44ecaa7f9 | 2,147 | py | Python | 0.5.12/ui_tweak/dashcamd.py | eFiniLan/openpilot-patches | ec8f1552651c17e6e0d1821d958412b6fa049228 | [
"MIT"
] | 9 | 2019-03-02T14:54:48.000Z | 2021-09-06T13:48:08.000Z | 0.5.12/ui_tweak/dashcamd.py | eFiniLan/openpilot-patches | ec8f1552651c17e6e0d1821d958412b6fa049228 | [
"MIT"
] | null | null | null | 0.5.12/ui_tweak/dashcamd.py | eFiniLan/openpilot-patches | ec8f1552651c17e6e0d1821d958412b6fa049228 | [
"MIT"
] | 9 | 2019-02-28T18:48:08.000Z | 2021-11-07T20:12:54.000Z | #!/usr/bin/env python2.7
#
# courtesy of pjlao307 (https://github.com/pjlao307/)
# this is just his original implementation but
# in openpilot service form so it's always on
#
# with the highest bit rates, the video is approx. 0.5MB per second
# the default value is set to 2.56Mbps = 0.32MB per second
#
import os
import time
import datetime
dashcam_videos = '/sdcard/dashcam/'
duration = 180 # max is 180
bit_rates = 2560000 # max is 4000000
max_size_per_file = bit_rates/8*duration # 2.56Mbps / 8 * 180 = 57.6MB per 180 seconds
max_storage = max_size_per_file/duration*60*60*6 # 6 hours worth of footage in bytes (around 6.5gb)
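# Worked numbers: 2,560,000 bps / 8 = 320,000 bytes/s; * 180 s = 57.6 MB per clip;
# 320,000 bytes/s * 6 h * 3,600 s/h = ~6.9 GB of retained footage.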
def dashcamd_thread():
if not os.path.exists(dashcam_videos):
os.makedirs(dashcam_videos)
while 1:
now = datetime.datetime.now()
file_name = now.strftime("%Y-%m-%d_%H-%M-%S")
os.system("screenrecord --bit-rate %s --time-limit %s %s%s.mp4 &" % (bit_rates, duration, dashcam_videos, file_name))
# we should clean up files here if use too much spaces
used_spaces = get_used_spaces()
#print("used spaces: %s" % used_spaces)
last_used_spaces = used_spaces
# when used spaces greater than max available storage
if used_spaces >= max_storage:
# get all the files in the dashcam_videos path
files = [f for f in sorted(os.listdir(dashcam_videos)) if os.path.isfile(dashcam_videos + f)]
for file in files:
# delete file one by one and once it has enough space for 1 video, we skip deleting
if used_spaces - last_used_spaces < max_size_per_file:
os.system("rm -fr %s" % (dashcam_videos + file))
#print("Cleaning")
last_used_spaces = get_used_spaces()
#print("last used spaces: %s" % last_used_spaces)
else:
break
# we start the process 1 second before screenrecord ended
# so we can make sure there are no missing footage
time.sleep(duration-1)
def get_used_spaces():
return sum(os.path.getsize(dashcam_videos + f) for f in os.listdir(dashcam_videos) if os.path.isfile(dashcam_videos + f))
def main(gctx=None):
dashcamd_thread()
if __name__ == "__main__":
main()
| 35.783333 | 123 | 0.695855 | 342 | 2,147 | 4.201754 | 0.44152 | 0.104384 | 0.048713 | 0.029228 | 0.141962 | 0.10856 | 0.069589 | 0.069589 | 0.069589 | 0.069589 | 0 | 0.041496 | 0.203074 | 2,147 | 59 | 124 | 36.389831 | 0.798364 | 0.389846 | 0 | 0 | 0 | 0 | 0.079969 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.09375 | 0.03125 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eb775eefae8b7ad81e9c1edc49fc135b3659d88 | 1,196 | py | Python | Problems/Study Plans/Data Structure/Data Structure II/66_find_the_winner_of_the_circular_game.py | andor2718/LeetCode | 59874f49085818e6da751f1cc26867b31079d35d | [
"BSD-3-Clause"
] | 1 | 2022-01-17T19:51:15.000Z | 2022-01-17T19:51:15.000Z | Problems/Study Plans/Data Structure/Data Structure II/66_find_the_winner_of_the_circular_game.py | andor2718/LeetCode | 59874f49085818e6da751f1cc26867b31079d35d | [
"BSD-3-Clause"
] | null | null | null | Problems/Study Plans/Data Structure/Data Structure II/66_find_the_winner_of_the_circular_game.py | andor2718/LeetCode | 59874f49085818e6da751f1cc26867b31079d35d | [
"BSD-3-Clause"
] | null | null | null | # https://leetcode.com/problems/find-the-winner-of-the-circular-game/
class ListNode:
def __init__(self, val=0, next=None, prev=None):
self.val = val
self.next = next
self.prev = prev
class Solution:
def findTheWinner(self, n: int, k: int) -> int:
if n == 1:
return 1
# Model friends
first_node = ListNode(1)
last_node = first_node
for i in range(2, n + 1):
curr_node = ListNode(i, next=first_node, prev=last_node)
last_node.next = curr_node
last_node = curr_node
first_node.prev = last_node
# Simulate game
curr_node = first_node
nr_of_players = n
k -= 1 # In every iteration, we are supposed to make k - 1 steps.
while nr_of_players != 1:
steps_to_make = k % nr_of_players # No need to run in circles.
for _ in range(steps_to_make):
curr_node = curr_node.next
curr_node.prev.next, curr_node.next.prev, curr_node = (
curr_node.next, curr_node.prev, curr_node.next)
nr_of_players -= 1
# We have the winner
return curr_node.val
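# Example (LeetCode sample 1): with n = 5 friends and k = 2, friends 2, 4, 1 and 5
# are eliminated in that order, so:
#   assert Solution().findTheWinner(5, 2) == 3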
| 34.171429 | 75 | 0.579431 | 170 | 1,196 | 3.841176 | 0.329412 | 0.159265 | 0.073507 | 0.073507 | 0.162328 | 0.098009 | 0.098009 | 0.098009 | 0 | 0 | 0 | 0.012547 | 0.333612 | 1,196 | 34 | 76 | 35.176471 | 0.806775 | 0.165552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eb7c03290fb08e39908cee05b1cbf3161290e22 | 1,237 | py | Python | gmql/dataset/parsers/__init__.py | DEIB-GECO/PyGMQL | c7582b916c985a1521276a3e7eaba010b7902687 | [
"Apache-2.0"
] | 12 | 2017-06-10T13:18:32.000Z | 2021-05-06T12:19:07.000Z | gmql/dataset/parsers/__init__.py | DEIB-GECO/PyGMQL | c7582b916c985a1521276a3e7eaba010b7902687 | [
"Apache-2.0"
] | 28 | 2017-05-09T13:38:17.000Z | 2021-09-29T11:54:23.000Z | gmql/dataset/parsers/__init__.py | DEIB-GECO/PyGMQL | c7582b916c985a1521276a3e7eaba010b7902687 | [
"Apache-2.0"
] | 5 | 2017-10-11T13:53:57.000Z | 2020-02-28T06:21:21.000Z | # -*- coding: utf-8 -*-
# strings to recognize as NaN
NULL = "null"
INF = "∞"
UNDEFINED = "�"
null_values = [NULL, INF, UNDEFINED]
GTF = "gtf"
TAB = "tab"
VCF = "vcf"
string_aliases = ['string', 'char']
int_aliases = ['long', 'int', 'integer']
float_aliases = ['double', 'float']
bool_aliases = ['bool', 'boolean']
allowed_types = ['bed', 'tab']
COORDS_ZERO_BASED = '0-based'
COORDS_ONE_BASED = '1-based'
COORDS_DEFAULT = 'default'
coordinate_systems = {COORDS_ZERO_BASED, COORDS_ONE_BASED, COORDS_DEFAULT}
def get_parsing_function(type_string):
if type_string in string_aliases:
return str
elif type_string in int_aliases:
return int
elif type_string in float_aliases:
return float
elif type_string in bool_aliases:
return bool
else:
raise ValueError("This type is not supported")
def get_type_name(type_class):
if type_class == str:
return "string"
elif type_class == int:
return "integer"
elif type_class == float:
return "double"
elif type_class == bool:
return 'bool'
else:
raise ValueError("This type is not supported")
from .RegionParser import *
from .Parsers import *
from .MetadataParser import * | 22.907407 | 74 | 0.662894 | 162 | 1,237 | 4.858025 | 0.388889 | 0.060991 | 0.060991 | 0.060991 | 0.129606 | 0.129606 | 0.129606 | 0.129606 | 0.129606 | 0.129606 | 0.000808 | 0.003125 | 0.223929 | 1,237 | 54 | 75 | 22.907407 | 0.814583 | 0.039612 | 0 | 0.097561 | 0 | 0 | 0.137437 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.073171 | 0 | 0.317073 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1eb85d0b0b100f5bb627dcd79ecd47d9d0502716 | 15,321 | py | Python | src/atomicfileio/__init__.py | sammck/python-atomicfileio | 23ec410815cf5f825e49cf18cb822b352943b758 | [
"MIT"
] | null | null | null | src/atomicfileio/__init__.py | sammck/python-atomicfileio | 23ec410815cf5f825e49cf18cb822b352943b758 | [
"MIT"
] | null | null | null | src/atomicfileio/__init__.py | sammck/python-atomicfileio | 23ec410815cf5f825e49cf18cb822b352943b758 | [
"MIT"
] | null | null | null | """Support for safely atomically updating files
This module provides tools for overwriting files in such a way that a reader will never see a partially written file.
Portions borrowed from https://code.activestate.com/recipes/579097-safely-and-atomically-write-to-a-file/,
also under the MIT license.
"""
__version__ = "1.0.1"
from typing import Optional, Union, TextIO, BinaryIO, ContextManager
import os
import sys
import stat
import sys
import tempfile
import contextlib
import subprocess
from pwd import getpwnam
from grp import getgrnam
# Import os.replace if possible; otherwise emulate it as well as possible
try:
from os import replace # Python 3.3 and better.
except ImportError:
if sys.platform == 'win32':
# This is definitely not atomic!
# But it's how (for example) Mercurial does it, as of 2016-03-23
# https://selenic.com/repo/hg/file/tip/mercurial/windows.py
def replace(source, destination):
assert sys.platform == 'win32'
try:
                os.rename(source, destination)
            except OSError as err:
                # winerror 183: "Cannot create a file when that file already exists"
                if err.winerror != 183:
                    raise
                os.remove(destination)
                os.rename(source, destination)
else:
# Atomic on POSIX. Not sure about Cygwin, OS/2 or others.
from os import rename as replace
def current_umask(thread_safe: bool=True) -> int:
"""Makes a best attempt to determine the current umask value of the calling process in a safe way.
Unfortunately, os.umask() is not threadsafe and poses a security risk, since there is no way to read
the current umask without temporarily setting it to a new value, then restoring it, which will affect
permissions on files created by other threads in this process during the time the umask is changed, or
    by any child processes that were forked during this time.
On recent linux kernels (>= 4.1), the current umask can be read from /proc/self/status.
On older systems, the simplest safe way is to spawn a shell and execute the 'umask' command. The shell will
inherit the current process's umask, and will use the unsafe call, but it does so in a separate,
single-threaded process, which makes it safe.
Args:
thread_safe: If False, allows the current umask to be determined in a potentially unsafe, but more
efficient way. Should only be set to False if the caller can guarantee that there
are no other threads running in the current process that might read or set the umask,
create files, or spawn child processes. Default is True.
Returns:
int: The current process's umask value
"""
if not thread_safe:
mask = os.umask(0o066) # 0o066 is arbitrary but poses the least security risk if there is a race.
# WARNING: At this point, and other threads that create files, or spawn subprocesses that create files,
# will be using an incorrect umask of 0o066, which denies all access to anyone but the owner.
os.umask(mask)
else:
mask: Optional[int] = None
try:
with open('/proc/self/status') as fd:
for line in fd:
if line.startswith('Umask:'):
mask = int(line[6:].strip(), 8)
break
except FileNotFoundError:
pass
except ValueError:
pass
if mask is None:
# As a last resort, do the dangerous call under a forked, single-threaded subprocess.
mask = int(subprocess.check_output('umask', shell=True).decode('utf-8').strip(), 8)
return mask
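
# Hedged example: current_umask() returns e.g. 0o022 on a typical system,
# without the race of calling os.umask() directly in a threaded process.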
def normalize_uid(uid: Optional[Union[int, str]]) -> Optional[int]:
"""
Normalizes a posix user ID that may be expressed as:
1. An integer UID
2. A string containing a decimal UID
3. A string containing a valid posix username
4. None, which causes None to be returned
Args:
uid: An integer UID, a decimal string UID, a posix username, or None.
Returns:
Optional[int]: The integer UID corresponding to `uid`, or None if `uid` is None
Raises:
KeyError: A nondecimal string was provided and the posix username does not exist
"""
    if uid is not None:
try:
uid = int(uid)
return uid
except ValueError:
uid = getpwnam(uid).pw_uid
return uid
def normalize_gid(gid: Optional[Union[int, str]]) -> Optional[int]:
"""
Normalizes a posix group ID that may be expressed as:
1. An integer GID
2. A string containing a decimal GID
3. A string containing a valid posix group name
4. None, which causes None to be returned
Args:
        gid: An integer GID, a decimal string GID, a posix group name, or None.
Returns:
Optional[int]: The integer GID corresponding to `gid`, or None if `gid` is None
Raises:
KeyError: A nondecimal string was provided and the posix group name does not exist
"""
    if gid is not None:
try:
gid = int(gid)
return gid
except ValueError:
gid = getgrnam(gid).gr_gid
return gid
@contextlib.contextmanager
def atomic_open(
filename: str,
mode: str='w',
replace_perms: bool=False,
effective_umask: Optional[int]=None,
uid: Optional[Union[int, str]]=None,
gid: Optional[Union[int, str]]=None,
perms: Optional[int]=None,
temp_file_base_name: Optional[str]=None,
temp_file_suffix: str='.tmp',
keep_temp_file_on_error: bool=False,
buffering: int=-1,
encoding: Optional[str]=None,
errors: Optional[str]=None,
newline: Optional[str]=None,
) -> ContextManager[Union[TextIO, BinaryIO]]:
"""Open a file for atomic create or overwrite, using a temporary file managed by a context.
Args:
filename: The final filename to create or overwrite.
mode: The open mode, as defined for `open()`. Only 'w' and 'wb' are allowed. Default is 'w'.
replace_perms: True if the file's UID, GID, and perms should be replaced even
if the file already exists. Default is False.
effective_umask: Optionally, a umask value to use for creation of a new file. If None, the current process's umask is
                     used. If 0, none of the bits in `perms` will be masked. Any bits set to 1 will be masked off in the
final file permissions if a new file is created. Ignored if `replace_perms` is False and a file
already exists. Default is None.
uid: If the file does not exist or `replace_perms` is True, the owner UID to use for the
new file, or None to use the default UID. For convenience, a string containing the decimal UID or a
username may be provided. Ignored if the file exists and `replace_perms`
is False. Default is None.
gid: If the file does not exist or `replace_perms` is True, the group GID to use for the
new file, or None to use the default GID. For convenience, a string containing the decimal GID or
a group name may be provided. Ignored if the file exists and `replace_perms`
is False. Default is None.
perms: If the file does not exist or `replace_perms` is True, the permission mode bits to use for the
new file, or None to use the default mode bits (typically 0o664). Ignored if the file exists
and `replace_perms` is False. Default is None.
        temp_file_base_name: The name to use for the temporary file (a '.', a random string, and `temp_file_suffix` are appended), or
None to use `filename` as the base name. Default is None.
temp_file_suffix: A string to put at the end of the temp file name. Defaults to '.tmp'
keep_temp_file_on_error: True if the temporary file should be retained if a failure occurs before
it is fully written and atomically moved to the final `filename`. Default
is False
buffering: As defined for open()
encoding: As defined for open()
errors: How to handle encoding errors, as defined for open()
newline: As defined for open()
Returns:
A `ContextManager` that will provide an open, writeable stream to a temporary file. On context exit without any exception raised,
the temporary file will be renamed to atomically replace any previous file.
Example:
Update the linux hostname file atomically (must be run as root):
```
with atomic_open("/etc/hostname", 'w', perms=0o644) as f:
f.write("myhostname")
```
The behavior of this function mimics `open(filename, 'w')` as nearly as possible, except that
    the target file will appear, to readers, to be atomically replaced at with-block exit time, and the update
will be cancelled if an exception is raised within the with-block.
The context manager opens a temporary file for writing in the same
directory as `filename`. On cleanly exiting the with-block, the temporary
file is renamed to the given filename. If the original file already
exists, it will be overwritten and any existing contents replaced.
On POSIX systems, the rename is atomic. Other operating systems may
not support atomic renames, in which case a best effort is made.
The temporary file will be created in the same directory as `filename`, and will
have the name `f"{base_name}.{random_8_chars}{temp_file_suffix}"`, where
`base_name` is `temp_file_base_name` if provided, or `filename` otherwise.
`random_8_chars` is a random 8-character string as generated by `tempfile.mkstemp()`.
    The temporary file naturally ceases to exist upon successful
completion of the with-block. If an uncaught exception occurs inside the with-block,
the original file is left untouched. If `keep_temp_file_on_error`
is True, the temporary file is also preserved, for diagnosis or data
recovery. If False (the default), the temporary file is deleted on any catchable
exception. In this case, any errors in deleting the temporary file are ignored.
Of course, uncatchable exceptions or system failures may result
in a leaked temporary file; it is the caller's responsibility to
periodically clean up orphaned temporary files.
By default, the temporary file is opened in text mode. To use binary mode,
pass `mode='wb'` as an argument. On some operating systems, this makes
no difference.
Applications that use this function generally will want to set the final value
of UID, GID, and permissions mode bits on the temporary file before it is renamed
to the target filename. For this reason, additional optional arguments can be included
to make this simple and seamless. If these arguments are omitted, the UID, GID, and
permission mode bits of the target file will be as defined for `open()`.
If `filename` exists and `replace_perms` is False, then the existing UID, GID,
and permission mode bits of `filename` will be applied to the
temporary file before it is written, and preserved when the temporary
file replaces the original. If `filename` does not exist, or `replace_perms`
is True, then `uid`, `gid`, and `perms` are used--if one or more of
these is None, then defaults are used as with open(). Note that in the case of
the perms bits, the defaults are as constrained by the current umask or parameter
    `effective_umask`--this is different from tempfile.mkstemp() which limits
permissions to 0o600, but is consistent with the behavior of `open()`.
In any case, the final UID, GID, and permissions mode bits have already been
set appropriately on entry to the with-block, so the caller is free to make
further adjustments to them before exiting the with-block, and their adjustments
will not be overwritten.
"""
    if mode not in ('w', 'wt', 'wb'):
raise ValueError(f"atomic_open() does not support open mode \"{mode}\"")
is_text = mode != 'wb'
if temp_file_base_name is None:
        temp_file_base_name = os.path.basename(filename)  # avoid duplicating the directory in mkstemp's prefix
pathname = os.path.realpath(filename)
uid = normalize_uid(uid)
gid = normalize_gid(gid)
if not replace_perms:
try:
st = os.stat(pathname)
uid = st.st_uid
gid = st.st_gid
perms = stat.S_IMODE(st.st_mode)
# since we are using an existing file's perms, we never want to mask off permission bits.
effective_umask = 0
except FileNotFoundError:
pass
if perms is None:
perms = 0o666 # By default, newly created files will get all permissions (except execute) not excluded by umask
if effective_umask is None:
effective_umask = current_umask()
perms = perms & (~effective_umask)
dirpath = os.path.dirname(pathname)
fd: Optional[int] = None
temp_pathname: Optional[str] = None
need_close = True
need_delete = not keep_temp_file_on_error
fd, temp_pathname = tempfile.mkstemp(suffix=temp_file_suffix, prefix=temp_file_base_name + '.', dir=dirpath, text=is_text)
# Note that at this point the temporary file is owned by the calling user, with permission bits 600 as defined by `mkstemp`.
    # This is different from the default behavior for open() which uses default umask permissions, typically 664 for users and
# 644 for root. Since we want to mimic open(), we will need to compensate for that.
try:
fctx = os.fdopen(fd, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline)
need_close = False # fd is now owned by fctx and will be closed on exit from the with block
with fctx as f:
# We update the owner, group, and permission mode bits before returning the context manager; this allows
# the caller to make additional changes to these properties if desired before exiting the context. To avoid potential
# permission errors, only make changes if they are necessary.
st = os.stat(fd)
current_perms = stat.S_IMODE(st.st_mode)
current_uid = st.st_uid
current_gid = st.st_gid
if not (uid is None or uid == current_uid) or not (gid is None or gid == current_gid):
os.fchown(fd, uid=(-1 if (uid is None or uid == current_uid) else uid), gid=(-1 if (gid is None or gid == current_gid) else gid))
if perms != current_perms:
os.fchmod(fd, perms)
yield f # return context manager to the caller, and wait until the context is closed
# At this point the context returned to the caller has been closed
# If we get here, the caller has cleanly closed the context without raising an exception, and the temporary file is complete and closed.
# Perform an atomic rename (if possible). This will be atomic on POSIX systems, and Windows for Python 3.3 or higher.
replace(temp_pathname, pathname)
# The rename was successful, so there is no need to try to delete the temporary file.
need_delete = False
finally:
try:
if need_close:
os.close(fd)
finally:
if need_delete:
# Silently delete the temporary file. Suppress any errors (original exceptions will propagate), while passing signals, etc.
try:
os.unlink(temp_pathname)
except Exception:
pass
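

# Hedged usage sketch (not from the original module; the path and perms are
# illustrative): atomically replace a binary file, forcing fresh permissions.
#
#   with atomic_open('/tmp/example.bin', 'wb', replace_perms=True,
#                    perms=0o640) as f:
#       f.write(b'\x00' * 1024)
#
# Readers of the target only ever see either the old contents or the complete
# new contents, never a partial write.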
| 46.427273 | 140 | 0.685595 | 2,316 | 15,321 | 4.48532 | 0.213731 | 0.02628 | 0.026184 | 0.009241 | 0.188487 | 0.149981 | 0.118598 | 0.079322 | 0.076242 | 0.054293 | 0 | 0.007757 | 0.251159 | 15,321 | 329 | 141 | 46.568389 | 0.897673 | 0.713204 | 0 | 0.229008 | 0 | 0 | 0.025177 | 0 | 0 | 0 | 0 | 0 | 0.007634 | 1 | 0.038168 | false | 0.030534 | 0.099237 | 0 | 0.175573 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ebaa882aa44892e3a1918f92d6f8a028a36933a | 2,259 | py | Python | iv_plan/examples/hironxsys_hand_ar.py | ryhanai/iv-plan-hironx | 2f89293a55df4608cb35e6a9676db97b9e486e7d | [
"BSD-3-Clause"
] | null | null | null | iv_plan/examples/hironxsys_hand_ar.py | ryhanai/iv-plan-hironx | 2f89293a55df4608cb35e6a9676db97b9e486e7d | [
"BSD-3-Clause"
] | null | null | null | iv_plan/examples/hironxsys_hand_ar.py | ryhanai/iv-plan-hironx | 2f89293a55df4608cb35e6a9676db97b9e486e7d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import roslib; roslib.load_manifest('iv_plan')
from ivutils import *
from hironxsys import *
import rospy
from ar_pose.msg import ARMarkers
from tf.transformations import *
import operator
def encode_FRAME(f):
return reduce(operator.__add__, f.mat) + f.vec
def decode_FRAME(ds):
return FRAME(mat=array(ds[0:9]).reshape(3,3).tolist(), vec=ds[9:])
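
# Hedged round-trip sketch (assumes FRAME carries a 3x3 `mat` and a
# 3-element `vec`, as used below):
#   ds = encode_FRAME(f)   # 12 floats: row-major rotation, then translation
#   f2 = decode_FRAME(ds)  # reconstructs an equivalent FRAME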
from setup_rtchandle import *
class MyRobotInterface(HIROController):
def __init__(self, nameserver):
HIROController.__init__(self, nameserver)
self.nameserver = nameserver
self.lhand_markers = []
def connect(self):
HIROController.connect(self)
rospy.init_node('pick_piece')
rospy.Subscriber('/hiro/lhand/ar_pose_marker', ARMarkers, self.lhand_callback)
rospy.Subscriber('/hiro/rhand/ar_pose_marker', ARMarkers, self.rhand_callback)
self.ns = setup_rtchandle(self.nameserver)
self.h_ctrans = self.ns.rtc_handles['CoordTrans0.rtc']
self.h_ctrans.activate()
        self.ctsvc = self.h_ctrans.services['CoordTransService'].provided['CoordTransService0']
def rhand_callback(self, msg):
if len(msg.markers) > 0:
self.rhand_markers = msg.markers
def lhand_callback(self, msg):
if len(msg.markers) > 0:
self.lhand_markers = msg.markers
def recognize(self, camera='lhand', thre=1.5):
def parse_marker(marker):
if rospy.Time.now().to_sec() - marker.header.stamp.to_sec() > thre:
return None
else:
return [marker.id, marker.pose.pose]
if camera == 'lhand':
return filter(None, [parse_marker(m) for m in self.lhand_markers])
else:
return filter(None, [parse_marker(m) for m in self.rhand_markers])
robotframe = [1,0,0,-150, 0,1,0,0, 0,0,1,0, 0,0,0,1]
def pose2mat(pose):
f = quaternion_matrix([pose.orientation.x,pose.orientation.y,pose.orientation.z,pose.orientation.w])
scale = 1000.0
f[0:3,3] = array([pose.position.x,pose.position.y,pose.position.z]) * scale
return f.reshape(16).tolist()
# rr.connect()
# pose = rr.recognize()[0][1]
# ctsvc.ref.Query('lhandcam', pose2mat(pose), robotframe, rr.get_joint_angles())
| 32.271429 | 104 | 0.660912 | 308 | 2,259 | 4.698052 | 0.357143 | 0.009675 | 0.008293 | 0.029026 | 0.143746 | 0.109191 | 0.109191 | 0.100898 | 0.100898 | 0.052522 | 0 | 0.024444 | 0.203187 | 2,259 | 69 | 105 | 32.73913 | 0.779444 | 0.062417 | 0 | 0.083333 | 0 | 0 | 0.061051 | 0.02461 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.166667 | 0.041667 | 0.520833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ebb910855fd59083774495a20a52d1eeadfcf75 | 1,513 | py | Python | tests/test_randomvariablelist.py | ralfrost/probnum | 6b0988009a9dd7ecda87ba28c9d5c0b8019981b6 | [
"MIT"
] | null | null | null | tests/test_randomvariablelist.py | ralfrost/probnum | 6b0988009a9dd7ecda87ba28c9d5c0b8019981b6 | [
"MIT"
] | null | null | null | tests/test_randomvariablelist.py | ralfrost/probnum | 6b0988009a9dd7ecda87ba28c9d5c0b8019981b6 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from probnum._randomvariablelist import _RandomVariableList
from probnum.random_variables import Dirac
class TestRandomVariableList(unittest.TestCase):
def setUp(self):
self.rv_list = _RandomVariableList([Dirac(0.1), Dirac(0.2)])
def test_inputs(self):
"""Inputs rejected or accepted according to expected types."""
numpy_array = np.ones(3) * Dirac(0.1)
dirac_list = [Dirac(0.1), Dirac(0.4)]
number_list = [0.5, 0.41]
inputs = [numpy_array, dirac_list, number_list]
inputs_acceptable = [False, True, False]
        for inp, is_acceptable in zip(inputs, inputs_acceptable):
            with self.subTest(input=inp, is_acceptable=is_acceptable):
                if is_acceptable:
                    _RandomVariableList(inp)
                else:
                    with self.assertRaises(TypeError):
                        _RandomVariableList(inp)
def test_mean(self):
mean = self.rv_list.mean
self.assertEqual(mean.shape, (2,))
def test_cov(self):
cov = self.rv_list.cov
self.assertEqual(cov.shape, (2,))
def test_var(self):
var = self.rv_list.var
self.assertEqual(var.shape, (2,))
def test_std(self):
std = self.rv_list.std
self.assertEqual(std.shape, (2,))
def test_getitem(self):
item = self.rv_list[0]
self.assertIsInstance(item, Dirac)
if __name__ == "__main__":
unittest.main()
| 29.666667 | 73 | 0.623265 | 184 | 1,513 | 4.918478 | 0.342391 | 0.039779 | 0.066298 | 0.057459 | 0.028729 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018987 | 0.269002 | 1,513 | 50 | 74 | 30.26 | 0.799277 | 0.037013 | 0 | 0.054054 | 0 | 0 | 0.005513 | 0 | 0 | 0 | 0 | 0 | 0.162162 | 1 | 0.189189 | false | 0 | 0.108108 | 0 | 0.324324 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ebbf6764706c37fc954f30837690895994c3e65 | 1,759 | py | Python | example_code/python/plot_curve_fit.py | NicoJG/PraktikumPhysikRepoVorlage | b29a4302958edc6205e2b107f7253f614cea0181 | [
"MIT"
] | 1 | 2021-08-21T17:08:39.000Z | 2021-08-21T17:08:39.000Z | example_code/python/plot_curve_fit.py | NicoJG/PraktikumPhysikRepoVorlage | b29a4302958edc6205e2b107f7253f614cea0181 | [
"MIT"
] | null | null | null | example_code/python/plot_curve_fit.py | NicoJG/PraktikumPhysikRepoVorlage | b29a4302958edc6205e2b107f7253f614cea0181 | [
"MIT"
] | null | null | null | # Erstelle aus gegebnen Daten eine Ausgleichskurve
# Und Plotte diese Kurve + die Daten
# wechsle die Working Directory zum Versuchsordner, damit das Python-Script von überall ausgeführt werden kann
import os,pathlib
project_path = pathlib.Path(__file__).absolute().parent.parent
os.chdir(project_path)
# use the matplotlibrc and header-matplotlib.tex files from the default folder
os.environ['MATPLOTLIBRC'] = str(project_path.parent/'default'/'python'/'matplotlibrc')
os.environ['TEXINPUTS'] = str(project_path.parent/'default'/'python')+':'
# Imports
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
# read in the data
x,y,z = np.genfromtxt('data/NAME.csv',delimiter=',',skip_header=1,unpack=True)
# fit function (here: a straight line)
def f(x,a,b):
return a*x + b
# or as a lambda function
f = lambda x,a,b: a*x + b
# compute the fit
params,pcov = curve_fit(f,x,y)
# parameters
a = params[0]
b = params[1]
# uncertainties
a_err = np.absolute(pcov[0][0])**0.5
b_err = np.absolute(pcov[1][1])**0.5
# output the values somehow
# e.g. with print, though saving them as a JSON file is better (see the sketch below)
print(f'{a = }+-{a_err}')
print(f'{b = :.2f}+-{b_err:.2f}')
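
# Hedged sketch of the JSON variant mentioned above (the file name is an
# example choice; assumes the build/ directory exists, as the PDF export below does)
import json
with open('build/fit_params.json', 'w') as fp:
    json.dump({'a': float(a), 'a_err': float(a_err),
               'b': float(b), 'b_err': float(b_err)}, fp, indent=2)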
# plot the fitted curve
x_linspace = np.linspace(np.min(x), np.max(x), 100)
plt.plot(x_linspace, f(x_linspace,*params), 'k-', label='Fit')
# plot the data
plt.plot(x, y, 'ro', label='Data')
# axis labels with LaTeX (only if the matplotlibrc is used)
plt.xlabel(r'$\alpha \:/\: \si{\ohm}$')
plt.ylabel(r'$y \:/\: \si{\micro\joule}$')
# unfortunately not (yet) possible via matplotlibrc
plt.legend()
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# save the plot as a PDF
plt.savefig('build/plot_NAME.pdf') | 30.859649 | 110 | 0.728823 | 278 | 1,759 | 4.532374 | 0.507194 | 0.034921 | 0.022222 | 0.031746 | 0.052381 | 0.052381 | 0 | 0 | 0 | 0 | 0 | 0.014829 | 0.118249 | 1,759 | 57 | 111 | 30.859649 | 0.79755 | 0.388289 | 0 | 0 | 0 | 0 | 0.194707 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.148148 | 0.037037 | 0.222222 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ebc01631e08d866baf1828fd8f7817027107805 | 2,547 | py | Python | pachicounter/util.py | yukkeorg/PachiCounter | 764526874e9ec03e06e7b61b136b20100e32d471 | [
"MIT"
] | null | null | null | pachicounter/util.py | yukkeorg/PachiCounter | 764526874e9ec03e06e7b61b136b20100e32d471 | [
"MIT"
] | null | null | null | pachicounter/util.py | yukkeorg/PachiCounter | 764526874e9ec03e06e7b61b136b20100e32d471 | [
"MIT"
] | null | null | null | # vim: ts=4 sts=4 sw=4 et
#####################################################################
# Pachi Counter
# Copyright (c) 2011-2020, Yusuke Ohshima All rights reserved.
#
# License: MIT.
# For details, please see LICENSE file.
#####################################################################
def bit_is_enable(val, bit):
return bool(val & (1 << bit))
def ordering(n):
less100 = n % 100
less10 = less100 % 10
if less10 in (1, 2, 3) and not (10 <= less100 < 20):
t = str(n) + ('st', 'nd', 'rd')[less10 - 1]
else:
t = str(n) + 'th'
return t
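
# e.g. ordering(1) -> '1st', ordering(2) -> '2nd', ordering(11) -> '11th',
# ordering(23) -> '23rd', ordering(112) -> '112th'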
def bulk_set_color(d, color):
for k in d:
d[k]['color'] = color
def rgb(r, g, b, a=0xff):
return (a << 24) + (r << 16) + (g << 8) + b
def decolate_number(num, min_disp_digit, num_color=None, zero_color=None):
if zero_color is None:
zero_color = '#888888'
last_zero_pos = max(min_disp_digit - len(str(num)), 0)
raw_num_str = '{{0:0{0}}}'.format(min_disp_digit).format(num)
if num_color is None:
num_fmt = str(num)
else:
num_fmt = ('<span color="{0}">{1}</span>'
.format(num_color, raw_num_str[last_zero_pos:]))
if last_zero_pos == 0:
zero_fmt = ''
else:
zero_fmt = ('<span color="{0}">{1}</span>'
.format(zero_color, raw_num_str[0:last_zero_pos]))
return ''.join((zero_fmt, num_fmt))
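
# e.g. decolate_number(42, 5, num_color='#ffffff') yields
# '<span color="#888888">000</span><span color="#ffffff">42</span>'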
def bonusrate(total, now, ndec=1):
nd = 10 ** ndec
try:
p = int((float(total) / now) * nd)
d = p // nd
f = p % nd
ratestr = '1/{{0}}.{{1:0{}}}'.format(ndec)
bonus_rate = ratestr.format(d, f)
except ZeroDivisionError:
bonus_rate = '1/-.{0}'.format("-"*ndec)
return bonus_rate
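
# e.g. bonusrate(1000, 3) -> '1/333.3'; bonusrate(1000, 0) -> '1/-.-'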
def gen_chain(n_chain, suffix=None):
suffix = suffix or "Chain(s)"
chain = ""
if n_chain > 0:
chain = ('\n<span size="xx-large">{0}</span>{1}'
.format(decolate_number(n_chain, 3), suffix))
return chain
def gen_history(history, n, sep=" ", isfill=False):
a = []
if history:
n = min(n, 5)
for h in list(reversed(history))[0:n]:
if h[0] is None:
a.append(str(h[1]))
else:
a.append('{1}<span size="small">({0})</span>'.format(*h))
if isfill:
for i in range(5):
a.append('')
return sep.join(a[:n])
def calcLpsOnNorm(bc, r):
return 1.6667 * (-1.0 + ((bc * r) / (250.0 + bc * r)))
def calcLpsOnChance(base):
return 1.6667 * (-1.0 + base)
| 26.53125 | 74 | 0.505693 | 366 | 2,547 | 3.39071 | 0.34153 | 0.008058 | 0.035455 | 0.016116 | 0.059629 | 0.038678 | 0.038678 | 0 | 0 | 0 | 0 | 0.054083 | 0.274048 | 2,547 | 95 | 75 | 26.810526 | 0.61709 | 0.058893 | 0 | 0.060606 | 0 | 0 | 0.084814 | 0.043517 | 0 | 0 | 0.001776 | 0 | 0 | 1 | 0.151515 | false | 0 | 0 | 0.060606 | 0.287879 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ebea48b67d5622f6da1e6d92ac82373b246ae19 | 309 | py | Python | Prediction.py | neerajgupta2407/Fake-News-Foe | 394c83be8ea98caa60af7fdca2c8ec34af67c33f | [
"Apache-2.0"
] | 9 | 2020-03-26T12:22:57.000Z | 2021-05-01T20:07:55.000Z | Prediction.py | neerajgupta2407/Fake-News-Foe | 394c83be8ea98caa60af7fdca2c8ec34af67c33f | [
"Apache-2.0"
] | 4 | 2020-10-20T15:13:44.000Z | 2022-02-10T01:46:36.000Z | Prediction.py | neerajgupta2407/Fake-News-Foe | 394c83be8ea98caa60af7fdca2c8ec34af67c33f | [
"Apache-2.0"
] | 5 | 2020-03-25T15:26:14.000Z | 2020-12-03T20:07:23.000Z | import pickle
# function to run for prediction
def detecting_fake_news(var):
    # retrieve the best saved model for the prediction call
    with open('model/final_model.sav', 'rb') as fp:
        load_model = pickle.load(fp)
prediction = load_model.predict([var])
prob = load_model.predict_proba([var])
return prediction, prob
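
# Hedged usage sketch (assumes model/final_model.sav holds a fitted
# text-classification pipeline that accepts raw strings):
#   label, prob = detecting_fake_news("Some headline or article text")
#   print(label, prob)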
| 28.090909 | 65 | 0.734628 | 43 | 309 | 5.116279 | 0.604651 | 0.122727 | 0.145455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.165049 | 309 | 10 | 66 | 30.9 | 0.852713 | 0.242718 | 0 | 0 | 0 | 0 | 0.099138 | 0.090517 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ec140a711eadb53834529c9948538cfabdd7acf | 9,058 | py | Python | chapter01/tic_tac_toe.py | TomaszGolan/rl-intro | 1676bfaea0c1661f931c160ed9ae104ee0b7b968 | [
"MIT"
] | null | null | null | chapter01/tic_tac_toe.py | TomaszGolan/rl-intro | 1676bfaea0c1661f931c160ed9ae104ee0b7b968 | [
"MIT"
] | null | null | null | chapter01/tic_tac_toe.py | TomaszGolan/rl-intro | 1676bfaea0c1661f931c160ed9ae104ee0b7b968 | [
"MIT"
] | null | null | null | import re
import random
import numpy as np
from itertools import product
from enum import Enum, unique
from abc import ABC, abstractmethod
from typing import Iterator, Dict, List
SIZE = 3 # board size (represented by numpy.array SIZExSIZE)
# for human player
KEYS = {'q': (0, 0), 'w': (0, 1), 'e': (0, 2),
'a': (1, 0), 's': (1, 1), 'd': (1, 2),
'z': (2, 0), 'x': (2, 1), 'c': (2, 2)}
# first player must be 1 and the second one -1
SYMBOLS = {"player1": 1, "player2": -1}
# first player: X, second player: O, empty: . (+ remove array brackets)
GUI = {'0': 'O', '1': '.', '2': 'X', '[': None, ']': None, ' ': None}
@unique
class Status(Enum):
INVALID, NOTEND, PLAYER1, PLAYER2, DRAW = range(-1, 4)
def get_rcd_sums(board: np.array) -> tuple:
"""Return a tuple of sums of all rows, columns and diagonals."""
return (*(sum(x) for x in board), # rows
*(sum(x) for x in board.T), # columns
np.trace(board), np.trace(np.flip(board, 0))) # diagonals
def get_status(board: np.array) -> Status:
"""Return a status of a game for given board."""
total = np.sum(board) # sum of all fields
if total not in (0, 1): # because 1 - first player, -1 - second player
return Status.INVALID
rcd_sums = get_rcd_sums(board) # sum of rows, cols, and diags
# do players have 3 in row
player1_has3, player2_has3 = 3 in rcd_sums, -3 in rcd_sums
# player1 won, unless player2 moved afterwards (invalid)
# or winning move was done after player2 won already (invalid)
if player1_has3:
return Status.INVALID if total == 0 or player2_has3 else Status.PLAYER1
# player2 won, unless player1 moved afterwards (invalid)
if player2_has3:
return Status.INVALID if total == 1 else Status.PLAYER2
# draw or the game is not done yet
return Status.NOTEND if (board == 0).any() else Status.DRAW
def get_id(board: np.array) -> int:
"""Get unique id for a board."""
board_id = 0
# 3 possible state of a field -> the factor 3 ensure uniqueness
# -1 -> 2 to avoid negative ids
for field in board.flatten():
board_id = 3 * board_id + (field if field >= 0 else 2)
return board_id
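
# e.g. the empty board encodes to 0; a board whose only mark is player1's
# in the top-left corner encodes to 3**8 = 6561.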
def get_free_cells(board: np.array) -> tuple:
"""Return a tuple with (i, j) coordinates of available cells."""
return np.argwhere(board == 0)
# all possible combinations of -1, 0, 1 on a SIZExSIZE board
# (including impossible tic tac toe states)
BOARD_GENERATOR = (np.array(state).reshape(SIZE, SIZE)
for state in product(range(-1, 2), repeat=SIZE**2))
# (board, state) generator
STATE_GENERATOR = ((board, get_status(board)) for board in BOARD_GENERATOR)
# dictionary of valid tic tac toe states -> id: board, status, free cells
STATES = {get_id(board):
{'board': board, 'status': status, 'free': get_free_cells(board)}
for board, status in STATE_GENERATOR if status != Status.INVALID}
def show_board(board: np.array):
"""Print given board."""
# board + 1: 0 (player2), 1 (empty), 2 (player1) instead of -1, 0, 1
print(np.array_str(board + 1).translate(str.maketrans(GUI)), end='\n\n')
def show_result(status: Status):
"""Print the game status."""
print(status.name, "won!"
if status == Status.PLAYER1 or status == Status.PLAYER2 else '')
class Player(ABC):
"""Abstract player class."""
def __init__(self, symbol: str) -> None:
"""Assign a symbol - player1: 1, player2: -1"""
self.symbol = SYMBOLS[symbol]
@abstractmethod
def make_move(self, state_id: int) -> tuple:
"""Return (i, j) coordinates for the next move."""
pass
def reset(self):
pass
def save(self, state_id: int):
pass
def update_estimates(self):
pass
class Human_player(Player):
def make_move(self, state_id: int) -> tuple:
"""Take input from keyboard until valid move is provided."""
while True:
key = input()
if key not in KEYS.keys():
print("Use qweasdzxc.\n")
continue
if KEYS[key] not in ((i, j) for i, j in STATES[state_id]['free']):
print("Choose empty cell.\n")
continue
return KEYS[key]
class Agent(Player):
"""RL agent."""
def __init__(self, symbol: str, step_size: float=0.1,
epsilon: float=0.1)-> None:
"""
step_size -- the step size used in the temporal-difference rule
epsilon -- the probability of exploration
"""
Player.__init__(self, symbol)
self.step_size = step_size
self.epsilon = epsilon
self.V: Dict[int, float] = dict() # estimates of state-value
self.init_estimations() # arbitrary initialized
self.history: List[int] = [] # all "visited" states
self.explore_ids: List[int] = [] # the indices of exploratory moves
def reset(self) -> None:
"""Clear history etc."""
self.history.clear()
self.explore_ids.clear()
def save(self, state_id: int) -> None:
"""Remember all visited state in current episode."""
self.history.append(state_id)
def init_estimations(self) -> None:
"""Initilize estimate V."""
# symbol == 1 for player1
win, lose = ((Status.PLAYER1, Status.PLAYER2) if self.symbol == 1
else (Status.PLAYER2, Status.PLAYER1))
# generate estimates for all possible states
for state_id, state in STATES.items():
# win -> 1, lose -> 0, draw or game in progress -> 0.5
reward = (1.0 if state['status'] == win else
0.0 if state['status'] == lose else 0.5)
self.V[state_id] = reward
def make_move(self, state_id: int) -> tuple:
"""Exploratory (random) move or based on current est. Q."""
if random.random() < self.epsilon:
# exploratory move
self.explore_ids.append(len(self.history))
return tuple(random.choice(STATES[state_id]['free']))
values = [] # the list of tuple(estimation, (i, j) - next move)
for i, j in STATES[state_id]['free']:
# get board and add player's symbol on next free cell
board = STATES[state_id]['board'].copy()
board[i][j] = self.symbol
# value + (i, j)
values.append((self.V[get_id(board)], (i, j)))
        # return (i, j) corresponding to the highest current value
        return max(values, key=lambda x: x[0])[1]
def update_estimates(self) -> None:
"""Update estimates according to last episode."""
for i in reversed(range(len(self.history) - 1)):
# skip exploratory moves
if i in self.explore_ids:
continue
temp_diff = self.V[self.history[i+1]] - self.V[self.history[i]]
self.V[self.history[i]] += self.step_size * temp_diff
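
    # The loop above applies the temporal-difference rule
    #   V(S_t) <- V(S_t) + step_size * (V(S_{t+1}) - V(S_t))
    # backwards through the episode, skipping exploratory moves.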
def stop_exploring(self) -> None:
"""Only greedy moves will be performed."""
self.epsilon = 0.0
class Game:
"""A single tic tac toe game."""
def __init__(self, player1: Player, player2: Player) -> None:
"""Note, that it is hardcoded that player1 = 1 and player2 = -1."""
self.player1 = player1
self.player2 = player2
def queue(self) -> Iterator[Player]:
"""Next player generator."""
while True:
yield self.player1
yield self.player2
def play(self, show=False) -> Status:
_queue = self.queue()
self.player1.reset()
self.player2.reset()
self.state_id = 0 # empty board
# play until state != NOTEND
while STATES[self.state_id]['status'] == Status.NOTEND:
player = next(_queue) # current player
# get current board and update according to player move
board = STATES[self.state_id]['board'].copy()
board[player.make_move(self.state_id)] = player.symbol
            if show:
                show_board(board)
self.state_id = get_id(board) # update board state
# save current state in agent's history
self.player1.save(self.state_id)
self.player2.save(self.state_id)
        if show:
            show_result(STATES[self.state_id]['status'])
# after an episode it is time to update V
self.player1.update_estimates()
self.player2.update_estimates()
return STATES[self.state_id]['status']
if __name__ == "__main__":
agent = Agent("player1")
game = Game(agent, Agent("player2"))
nof_episodes = 10000
for i in range(nof_episodes):
game.play()
print("Agent's training... [{:>7.2%}]\r".format(i/nof_episodes), end='')
print("\n\nPlay with the agent using qweasdzxc:\n\n")
agent.stop_exploring()
game = Game(agent, Human_player("player2"))
while True:
game.play(True)
| 31.894366 | 80 | 0.593729 | 1,234 | 9,058 | 4.269044 | 0.210697 | 0.027904 | 0.029233 | 0.013288 | 0.104973 | 0.069476 | 0.037206 | 0.026196 | 0 | 0 | 0 | 0.022633 | 0.278097 | 9,058 | 283 | 81 | 32.007067 | 0.782994 | 0.261758 | 0 | 0.087248 | 0 | 0 | 0.038072 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147651 | false | 0.026846 | 0.04698 | 0 | 0.302013 | 0.040268 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ec1f348503a6ddff54e195931bc4015e263f684 | 2,034 | py | Python | run/make_cal_sinograms.py | TysonReimer/itDAS | d4b4313afe253186b12e1e8eda5ef55bada0c764 | [
"Apache-2.0"
] | 8 | 2020-04-27T05:43:33.000Z | 2021-12-05T14:48:09.000Z | run/make_cal_sinograms.py | TysonReimer/itDAS | d4b4313afe253186b12e1e8eda5ef55bada0c764 | [
"Apache-2.0"
] | null | null | null | run/make_cal_sinograms.py | TysonReimer/itDAS | d4b4313afe253186b12e1e8eda5ef55bada0c764 | [
"Apache-2.0"
] | 2 | 2021-02-16T11:07:07.000Z | 2021-12-05T14:48:27.000Z | """
Tyson Reimer
University of Manitoba
January 28th, 2020
"""
import os
from umbms import get_proj_path, get_script_logger
from umbms.loadsave import load_pickle, save_pickle
from umbms.beamform.sigproc import iczt
###############################################################################
__DATA_DIR = os.path.join(get_proj_path(), 'data/')
# Define the parameters for the ICZT, used to convert from the
# frequency domain to the time domain
__INI_T = 0
__FIN_T = 6e-9
__N_TIME_PTS = 700
# Define the initial and final frequencies used in the phantom scans
__INI_F = 1e9
__FIN_F = 8e9
###############################################################################
if __name__ == "__main__":
logger = get_script_logger(__file__) # Init logger
logger.info("Beginning...Making Calibrated Sinograms...")
# Import the frequency-domain, uncalibrated data
fd_data = load_pickle(os.path.join(__DATA_DIR, 'fd_data.pickle'))
cal_td_data = dict() # Init dict to save
for expt_id in fd_data: # For each expt
# If the experiment was not an adipose-only reference scan
if 'adi' not in expt_id:
logger.info('\tCalibrating expt:\t%s...' % expt_id)
# Get the target (tumour-containing) and reference
# (adipose-only) scan data
tar_fd_data = fd_data[expt_id]
ref_fd_data = fd_data['%sadi' % expt_id[:-3]]
# Perform ideal air-tissue reflection subtraction
cal_fd_data = tar_fd_data - ref_fd_data
# Convert to the time-domain via ICZT
td_cal_data = iczt(fd_data=cal_fd_data,
ini_t=__INI_T, fin_t=__FIN_T,
n_time_pts=__N_TIME_PTS,
ini_f=__INI_F, fin_f=__FIN_F)
cal_td_data[expt_id] = td_cal_data # Store this
save_pickle(cal_td_data,
os.path.join(__DATA_DIR, 'td_cal_data.pickle'))
| 30.358209 | 80 | 0.576696 | 266 | 2,034 | 4.007519 | 0.379699 | 0.067542 | 0.028143 | 0.028143 | 0.031895 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011494 | 0.272861 | 2,034 | 66 | 81 | 30.818182 | 0.709263 | 0.26352 | 0 | 0 | 0 | 0 | 0.096491 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ec58569c44771c682f58f8e09b37bae0d7d95c2 | 5,477 | py | Python | waveform_analysis/tests/test_ITU_R_468_weighting.py | pirun/waveform_analysis | 66809614b1fc985e694af1720341035316a5ac8e | [
"MIT"
] | 125 | 2017-08-27T01:48:02.000Z | 2022-01-20T10:47:13.000Z | waveform_analysis/tests/test_ITU_R_468_weighting.py | pirun/waveform_analysis | 66809614b1fc985e694af1720341035316a5ac8e | [
"MIT"
] | 13 | 2017-06-25T14:57:43.000Z | 2022-03-18T19:54:19.000Z | waveform_analysis/tests/test_ITU_R_468_weighting.py | pirun/waveform_analysis | 66809614b1fc985e694af1720341035316a5ac8e | [
"MIT"
] | 48 | 2017-06-25T10:42:10.000Z | 2022-03-09T18:13:55.000Z | import pytest
from scipy import signal
from scipy.interpolate import interp1d
import numpy as np
from numpy import pi
# This package must first be installed with `pip install -e .` or similar
from waveform_analysis import (ITU_R_468_weighting_analog,
ITU_R_468_weighting, ITU_R_468_weight)
# It will plot things for sanity-checking if MPL is installed
try:
import matplotlib.pyplot as plt
mpl = True
except ImportError:
mpl = False
# Rec. ITU-R BS.468-4 Measurement of audio-frequency noise voltage
# level in sound broadcasting Table 1
frequencies = np.array((
31.5, 63, 100, 200, 400, 800, 1000, 2000, 3150, 4000, 5000,
6300,
7100, 8000, 9000, 10000, 12500, 14000, 16000, 20000, 31500
))
responses = np.array((
-29.9, -23.9, -19.8, -13.8, -7.8, -1.9, 0, +5.6, +9.0, +10.5, +11.7,
+12.2,
+12.0, +11.4, +10.1, +8.1, 0, -5.3, -11.7, -22.2, -42.7
))
upper_limits = np.array((
+2.0, +1.4, +1.0, +0.85, +0.7, +0.55, +0.5, +0.5, +0.5, +0.5, +0.5,
+0.01, # Actually 0 tolerance, but specified with 1 significant figure
+0.2, +0.4, +0.6, +0.8, +1.2, +1.4, +1.6, +2.0, +2.8
))
lower_limits = np.array((
-2.0, -1.4, -1.0, -0.85, -0.7, -0.55, -0.5, -0.5, -0.5, -0.5, -0.5,
-0.01, # Actually 0 tolerance, but specified with 1 significant figure
-0.2, -0.4, -0.6, -0.8, -1.2, -1.4, -1.6, -2.0, -float('inf')
))
class TestITU468WeightingAnalog(object):
def test_invalid_params(self):
with pytest.raises(TypeError):
ITU_R_468_weighting_analog('eels')
def test_freq_resp(self):
# Test that frequency response meets tolerance from ITU-R BS.468-4
upper = responses + upper_limits
lower = responses + lower_limits
z, p, k = ITU_R_468_weighting_analog()
w, h = signal.freqs_zpk(z, p, k, 2*pi*frequencies)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('468')
plt.title('ITU 468 weighting limits')
plt.semilogx(frequencies, levels, alpha=0.7, label='analog')
plt.semilogx(frequencies, upper, 'r:', alpha=0.7)
plt.semilogx(frequencies, lower, 'r:', alpha=0.7)
plt.grid(True, color='0.7', linestyle='-', which='major')
plt.grid(True, color='0.9', linestyle='-', which='minor')
plt.legend()
assert all(np.less_equal(levels, upper))
assert all(np.greater_equal(levels, lower))
class TestITU468Weighting(object):
def test_invalid_params(self):
with pytest.raises(TypeError):
ITU_R_468_weighting(fs='spam')
with pytest.raises(ValueError):
ITU_R_468_weighting(fs=10000, output='eggs')
def test_freq_resp_ba(self):
# Test that frequency response meets tolerance from ITU-R BS.468-4
fs = 300000
b, a = ITU_R_468_weighting(fs)
w, h = signal.freqz(b, a, 2*pi*frequencies/fs)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('468')
plt.semilogx(frequencies, levels, alpha=0.7, label='ba')
plt.legend()
assert all(np.less_equal(levels, responses + upper_limits))
assert all(np.greater_equal(levels, responses + lower_limits))
def test_freq_resp_zpk(self):
# Test that frequency response meets tolerance from ITU-R BS.468-4
fs = 270000
z, p, k = ITU_R_468_weighting(fs, 'zpk')
w, h = signal.freqz_zpk(z, p, k, 2*pi*frequencies/fs)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('468')
plt.semilogx(frequencies, levels, alpha=0.7, label='zpk')
plt.legend()
assert all(np.less_equal(levels, responses + upper_limits))
assert all(np.greater_equal(levels, responses + lower_limits))
def test_freq_resp_sos(self):
# Test that frequency response meets tolerance from ITU-R BS.468-4
fs = 400000
sos = ITU_R_468_weighting(fs, output='sos')
w, h = signal.sosfreqz(sos, 2*pi*frequencies/fs)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('468')
plt.semilogx(frequencies, levels, alpha=0.7, label='sos')
plt.legend()
assert all(np.less_equal(levels, responses + upper_limits))
assert all(np.greater_equal(levels, responses + lower_limits))
class TestITU468Weight(object):
def test_invalid_params(self):
with pytest.raises(TypeError):
ITU_R_468_weight('change this')
def test_freq_resp(self):
# Test that frequency response meets tolerance from ITU-R BS.468-4
N = 12000
fs = 300000
impulse = signal.unit_impulse(N)
out = ITU_R_468_weight(impulse, fs)
freq = np.fft.rfftfreq(N, 1/fs)
levels = 20 * np.log10(abs(np.fft.rfft(out)))
if mpl:
plt.figure('468')
plt.semilogx(freq, levels, alpha=0.7, label='fft')
plt.legend()
plt.axis([20, 45000, -50, +15])
# Interpolate FFT points to measure response at spec's frequencies
func = interp1d(freq, levels)
levels = func(frequencies)
assert all(np.less_equal(levels, responses + upper_limits))
assert all(np.greater_equal(levels, responses + lower_limits))
if __name__ == '__main__':
# Without capture sys it doesn't work sometimes, I'm not sure why.
pytest.main([__file__, "--capture=sys"])
| 34.88535 | 75 | 0.606171 | 815 | 5,477 | 3.955828 | 0.26135 | 0.022333 | 0.026055 | 0.044665 | 0.583747 | 0.530087 | 0.514888 | 0.486352 | 0.463089 | 0.463089 | 0 | 0.099926 | 0.258171 | 5,477 | 156 | 76 | 35.108974 | 0.693576 | 0.148074 | 0 | 0.359649 | 0 | 0 | 0.02816 | 0 | 0 | 0 | 0 | 0 | 0.087719 | 1 | 0.070175 | false | 0 | 0.070175 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ec6ee8435e7b21f5ea4fab4d3b576dbdb55a35f | 14,154 | py | Python | src/ride.py | yuanmingqi/RISE | 95577ac03c91937e5ddc22a8596270ad3e9d5e49 | [
"MIT"
] | null | null | null | src/ride.py | yuanmingqi/RISE | 95577ac03c91937e5ddc22a8596270ad3e9d5e49 | [
"MIT"
] | null | null | null | src/ride.py | yuanmingqi/RISE | 95577ac03c91937e5ddc22a8596270ad3e9d5e49 | [
"MIT"
] | null | null | null | import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils import data
import os
import time
from collections import deque
import numpy as np
import torch
from a2c_ppo_acktr import algo
from a2c_ppo_acktr import utils
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.storage import RolloutStorage
from a2c_ppo_acktr.model import Policy
class CNNEmbeddingNetwork(nn.Module):
def __init__(self, kwargs):
super(CNNEmbeddingNetwork, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(kwargs['in_channels'], 32, (8, 8), stride=(4, 4)), nn.ReLU(),
nn.Conv2d(32, 64, (4, 4), stride=(2, 2)), nn.ReLU(),
nn.Conv2d(64, 32, (3, 3), stride=(1, 1)), nn.ReLU(), nn.Flatten(),
nn.Linear(32 * 7 * 7, kwargs['embedding_size']))
def forward(self, ob):
x = self.main(ob)
return x
class MLPEmbeddingNetwork(nn.Module):
def __init__(self, kwargs):
super(MLPEmbeddingNetwork, self).__init__()
self.main = nn.Sequential(
nn.Linear(kwargs['input_dim'], 32), nn.ReLU(),
nn.Linear(32, 64), nn.ReLU(),
nn.Linear(64, kwargs['embedding_size'])
)
def forward(self, ob):
x = self.main(ob)
return x
class DisInverseDynamicModel(nn.Module):
def __init__(self, kwargs):
super(DisInverseDynamicModel, self).__init__()
self.main = nn.Sequential(
nn.Linear(kwargs['embedding_size'] * 2, 32), nn.ReLU(),
            nn.Linear(32, 64), nn.ReLU(),
nn.Linear(64, kwargs['action_shape'])
)
def forward(self, ob_emb, next_ob_emb):
        # return raw logits; F.cross_entropy in update() applies log-softmax itself
        logits = self.main(torch.cat([ob_emb, next_ob_emb], dim=1))
        return logits
class DisForwardDynamicModel(nn.Module):
def __init__(self, kwargs):
super(DisForwardDynamicModel, self).__init__()
        self.nA = kwargs['action_shape']  # number of discrete actions
self.main = nn.Sequential(
nn.Linear(kwargs['embedding_size'] + kwargs['action_shape'], 32), nn.ReLU(),
nn.Linear(32, 64), nn.ReLU(),
nn.Linear(64, kwargs['embedding_size'])
)
def forward(self, ob_emb, true_action):
        onehot_action = F.one_hot(true_action.squeeze(-1), num_classes=self.nA).float()
pred_next_ob_emb = self.main(torch.cat([ob_emb, onehot_action], dim=1))
return pred_next_ob_emb
class ConInverseDynamicModel(nn.Module):
def __init__(self, kwargs):
super(ConInverseDynamicModel, self).__init__()
self.main = nn.Sequential(
nn.Linear(kwargs['embedding_size'] * 2, 32), nn.ReLU(),
nn.Linear(32, 64), nn.ReLU(),
nn.Linear(64, kwargs['action_shape'])
)
def forward(self, ob_emb, next_ob_emb):
pred_action = self.main(torch.cat([ob_emb, next_ob_emb], dim=1))
return pred_action
class ConForwardDynamicModel(nn.Module):
def __init__(self, kwargs):
super(ConForwardDynamicModel, self).__init__()
self.main = nn.Sequential(
nn.Linear(kwargs['embedding_size'] + kwargs['action_shape'], 32), nn.ReLU(),
nn.Linear(32, 64), nn.ReLU(),
nn.Linear(64, kwargs['embedding_size'])
)
def forward(self, ob_emb, true_action):
pred_next_ob_emb = self.main(torch.cat([ob_emb, true_action], dim=1))
return pred_next_ob_emb
class RIDE:
def __init__(
self,
ob_shape,
action_shape,
device
):
self.ob_shape = ob_shape
self.action_shape = action_shape
self.device = device
if len(ob_shape) == 3:
self.embedding_network = CNNEmbeddingNetwork(kwargs={'in_channels': ob_shape[0], 'embedding_size': 128})
self.idm = DisInverseDynamicModel(kwargs={'embedding_size': 128, 'action_shape':action_shape})
self.fdm = DisForwardDynamicModel(kwargs={'embedding_size': 128, 'action_shape':action_shape})
else:
self.embedding_network = MLPEmbeddingNetwork(kwargs={'input_dim': ob_shape[0], 'embedding_size': 64})
self.idm = ConInverseDynamicModel(kwargs={'embedding_size': 64, 'action_shape': action_shape[0]})
self.fdm = ConForwardDynamicModel(kwargs={'embedding_size': 64, 'action_shape': action_shape[0]})
self.embedding_network.to(self.device)
self.idm.to(self.device)
self.fdm.to(self.device)
self.optimzer_en = optim.Adam(self.embedding_network.parameters(), lr=5e-4)
self.optimzer_idm = optim.Adam(self.idm.parameters(), lr=5e-4)
self.optimzer_fdm = optim.Adam(self.fdm.parameters(), lr=5e-4)
def compute_rewards(self, obs_buffer):
size = obs_buffer.size()
obs = obs_buffer[:-1].view(-1, *obs_buffer.size()[2:])
next_obs = obs_buffer[1:].view(-1, *obs_buffer.size()[2:])
obs_emb = self.embedding_network(obs.to(self.device))
next_obs_emb = self.embedding_network(next_obs.to(self.device))
rewards = torch.norm(obs_emb - next_obs_emb, p=2, dim=1)
return rewards.view(size[0] - 1, size[1], 1)
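
    # RIDE's impact-driven reward: r_t = ||phi(s_{t+1}) - phi(s_t)||_2,
    # the L2 distance between the embeddings of consecutive observations.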
def pseudo_counts(
self,
step,
episodic_memory,
current_c_ob,
k=10,
kernel_cluster_distance=0.008,
kernel_epsilon=0.0001,
c=0.001,
sm=8,
):
counts = torch.zeros(size=(episodic_memory.size()[1], 1))
for process in range(episodic_memory.size()[1]):
process_episodic_memory = episodic_memory[:step + 1, process, :]
            ob_dist = [(c_ob, torch.dist(c_ob, current_c_ob[process])) for c_ob in process_episodic_memory]
# ob_dist = [(c_ob, torch.dist(c_ob, current_c_ob)) for c_ob in episodic_memory]
ob_dist.sort(key=lambda x: x[1])
ob_dist = ob_dist[:k]
dist = [d[1].item() for d in ob_dist]
dist = np.array(dist)
# TODO: moving average
dist = dist / np.mean(dist)
            dist = np.maximum(dist - kernel_cluster_distance, 0)  # element-wise clip at zero
kernel = kernel_epsilon / (dist + kernel_epsilon)
s = np.sqrt(np.sum(kernel)) + c
if np.isnan(s) or s > sm:
counts[process] = 0.
else:
counts[process] = 1 / s
return counts
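
    # Episodic discounting in the spirit of Never Give Up: per process, take
    # the k nearest embeddings in this episode's memory, apply the inverse
    # kernel eps / (d + eps) to their mean-normalized distances, and return
    # 1/s with s = sqrt(sum of kernels) + c, or 0 when s is NaN or exceeds `sm`.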
def update(self, obs_buffer, actions_buffer):
obs = obs_buffer[:-1].view(-1, *obs_buffer.size()[2:])
next_obs = obs_buffer[1:].view(-1, *obs_buffer.size()[2:])
if len(self.ob_shape) == 3:
actions = actions_buffer.view(-1, 1)
else:
actions = actions_buffer.view(-1, *actions_buffer.size()[2:])
dataset = data.TensorDataset(obs, actions, next_obs)
loader = data.DataLoader(
dataset=dataset,
batch_size=64,
shuffle=True,
drop_last=True
)
for idx, batch_data in enumerate(loader):
self.optimzer_en.zero_grad()
self.optimzer_idm.zero_grad()
self.optimzer_fdm.zero_grad()
batch_obs, batch_actions, batch_next_obs = batch_data
batch_obs_emb = self.embedding_network(batch_obs.to(self.device))
batch_next_obs_emb = self.embedding_network(batch_next_obs.to(self.device))
batch_actions = batch_actions.to(self.device)
if len(self.ob_shape) == 3:
pred_actions_logits = self.idm(batch_obs_emb, batch_next_obs_emb)
inverse_loss = F.cross_entropy(pred_actions_logits, batch_actions.squeeze(1))
else:
pred_actions = self.idm(batch_obs_emb, batch_next_obs_emb)
inverse_loss = F.mse_loss(pred_actions, batch_actions)
pred_next_obs_emb = self.fdm(batch_obs_emb, batch_actions)
forward_loss = F.mse_loss(pred_next_obs_emb, batch_next_obs_emb)
total_loss = inverse_loss + forward_loss
total_loss.backward()
self.optimzer_en.step()
self.optimzer_idm.step()
self.optimzer_fdm.step()
# device = torch.device('cuda:0')
# ride = RIDE(ob_shape=[4, 84, 84], action_shape=7, device=device)
# obs_buffer = torch.rand(size=[129, 8, 4, 84, 84])
# actions_buffer = torch.randint(low=0, high=6, size=(128, 8, 1))
# ride.update(obs_buffer, actions_buffer)
# rewards = ride.compute_rewards(obs_buffer)
# print(rewards, rewards.size())
def train(args):
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
log_dir = os.path.expanduser(args.log_dir)
# eval_log_dir = log_dir + "_eval"
utils.cleanup_log_dir(log_dir)
# utils.cleanup_log_dir(eval_log_dir)
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, device, False)
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space,
base_kwargs={'recurrent': args.recurrent_policy})
actor_critic.to(device)
agent = algo.PPO(
actor_critic,
args.clip_param,
args.ppo_epoch,
args.num_mini_batch,
args.value_loss_coef,
args.entropy_coef,
lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
rollouts = RolloutStorage(args.num_steps, args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
obs = envs.reset()
rollouts.obs[0].copy_(obs)
rollouts.to(device)
''' ride initialization '''
    if envs.action_space.__class__.__name__ == 'Discrete':
ride = RIDE(
ob_shape=envs.observation_space.shape,
action_shape=envs.action_space.n,
device=device
)
episodic_emb_memory = torch.zeros(size=(args.num_steps + 1, args.num_processes, 128)).to(device)
    elif envs.action_space.__class__.__name__ == 'Box':
ride = RIDE(
ob_shape=envs.observation_space.shape,
action_shape=envs.action_space.shape,
device=device
)
episodic_emb_memory = torch.zeros(size=(args.num_steps + 1, args.num_processes, 64)).to(device)
else:
raise NotImplementedError
episode_rewards = deque(maxlen=10)
start = time.time()
num_updates = int(
args.num_env_steps) // args.num_steps // args.num_processes
for j in range(num_updates):
if args.use_linear_lr_decay:
# decrease learning rate linearly
utils.update_linear_schedule(agent.optimizer, j, num_updates, args.lr)
        with torch.no_grad():
            episodic_emb_memory[0, :, :] = ride.embedding_network(rollouts.obs[0].to(device))
pseudo_counts = torch.zeros_like(rollouts.rewards).to(device)
for step in range(args.num_steps):
# Sample actions
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_obs = actor_critic.act(
rollouts.obs[step], rollouts.recurrent_hidden_states[step],
rollouts.masks[step])
# Obser reward and next obs
obs, reward, done, infos = envs.step(action)
''' pseudo-count '''
            with torch.no_grad():
                next_obs_emb = ride.embedding_network(obs.to(device))
pseudo_counts[step, :, :] = ride.pseudo_counts(step, episodic_emb_memory, next_obs_emb)
episodic_emb_memory[step + 1, :, :] = next_obs_emb
for info in infos:
if 'episode' in info.keys():
episode_rewards.append(info['episode']['r'])
# If done then clean the history of observations.
masks = torch.FloatTensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if 'bad_transition' in info.keys() else [1.0]
for info in infos])
rollouts.insert(obs, recurrent_hidden_obs, action,
action_log_prob, value, reward, masks, bad_masks)
with torch.no_grad():
next_value = actor_critic.get_value(
rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
''' compute intrinsic rewards '''
        with torch.no_grad():
            intrinsic_rewards = ride.compute_rewards(rollouts.obs)
        rollouts.rewards += intrinsic_rewards * pseudo_counts
rollouts.compute_returns(next_value, args.use_gae, args.gamma,
args.gae_lambda, args.use_proper_time_limits)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
''' update ride '''
ride.update(rollouts.obs, rollouts.actions)
# save for every interval-th episode or for the last epoch
if (j % args.save_interval == 0
or j == num_updates - 1) and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
torch.save([
actor_critic,
getattr(utils.get_vec_normalize(envs), 'obs_rms', None)
], os.path.join(save_path, args.env_name + ".pt"))
if j % args.log_interval == 0 and len(episode_rewards) > 1:
total_num_steps = (j + 1) * args.num_processes * args.num_steps
end = time.time()
print(
'ALGO {}, ENV {}, EPISODE {}, TIME STEPS {}, FPS {} \n MEAN/MEDIAN REWARD {:.3f}|{:.3f}, MIN|MAX REWARDS {:.3f}|{:.3f}\n'.format(
args.algo, args.env_name, j, total_num_steps, int(total_num_steps / (end - start)),
np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards),
np.max(episode_rewards)
)) | 37.84492 | 145 | 0.608026 | 1,814 | 14,154 | 4.483462 | 0.162624 | 0.015492 | 0.012787 | 0.017214 | 0.344399 | 0.29325 | 0.249354 | 0.213697 | 0.202877 | 0.189352 | 0 | 0.02109 | 0.273068 | 14,154 | 374 | 146 | 37.84492 | 0.769365 | 0.046701 | 0 | 0.231579 | 0 | 0.003509 | 0.038945 | 0 | 0 | 0 | 0 | 0.002674 | 0 | 1 | 0.059649 | false | 0.003509 | 0.049123 | 0 | 0.161404 | 0.003509 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ec73c33d161f5986058bec7dbdd80972b054c47 | 957 | py | Python | contents/monte_carlo_integration/code/python/monte_carlo.py | hybrideagle/algorithm-archive | f94e22021dce5bda08b74dbc42d19da0b530e068 | [
"MIT"
] | null | null | null | contents/monte_carlo_integration/code/python/monte_carlo.py | hybrideagle/algorithm-archive | f94e22021dce5bda08b74dbc42d19da0b530e068 | [
"MIT"
] | null | null | null | contents/monte_carlo_integration/code/python/monte_carlo.py | hybrideagle/algorithm-archive | f94e22021dce5bda08b74dbc42d19da0b530e068 | [
"MIT"
] | null | null | null | # submitted by hybrideagle
from random import random
from math import pi
def in_circle(x_pos, y_pos):
radius = 1
    # Compare the squared Euclidean distance from the origin with the squared radius
return (x_pos * x_pos + y_pos * y_pos) < (radius * radius)
def monte_carlo(n):
"""
Computes PI using the monte carlo method using `n` points
"""
pi_count = 0
for i in range(n):
x = random()
y = random()
if in_circle(x, y):
pi_count += 1
    # This is using a quarter of the unit circle in a 1x1 box.
# The formula is pi = (boxLength^2 / radius^2) * (piCount / n), but we
# are only using the upper quadrant and the unit circle, so we can use
# 4*piCount/n instead
# piEstimate = 4*piCount/n
pi_estimate = 4 * pi_count / n
print('Pi is {0:} ({1:.4f}% error)'.format(
pi_estimate, (pi - pi_estimate) / pi * 100))
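
    # The statistical error of this estimate shrinks like 1/sqrt(n), so each
    # additional digit of accuracy costs roughly 100x more samples.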
# If this file was run directly
if __name__ == "__main__":
monte_carlo(100000)
| 25.864865 | 74 | 0.61442 | 150 | 957 | 3.76 | 0.493333 | 0.021277 | 0.037234 | 0.028369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031977 | 0.281087 | 957 | 36 | 75 | 26.583333 | 0.787791 | 0.410658 | 0 | 0 | 0 | 0 | 0.064695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.117647 | 0 | 0.294118 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ec8250d4bb968d047bd4b22e1b37d8786d787f0 | 4,703 | py | Python | crawler/crawlerhelpers/efa_beta.py | jenerous/vvs-delay | cb02813552422392b88fe64dd57a880e1a726025 | [
"MIT"
] | null | null | null | crawler/crawlerhelpers/efa_beta.py | jenerous/vvs-delay | cb02813552422392b88fe64dd57a880e1a726025 | [
"MIT"
] | null | null | null | crawler/crawlerhelpers/efa_beta.py | jenerous/vvs-delay | cb02813552422392b88fe64dd57a880e1a726025 | [
"MIT"
] | null | null | null | from time import strftime
class API_efaBeta(object):
def __init__( self ):
self.name = 'efaBeta'
self.baseurl = 'https://www3.vvs.de/mngvvs/XML_DM_REQUEST'
def convert_station_id( self, station_id ):
"""
convert station id that is given to the api specific
representation if necessary
@param station_id: id in general representation
@return id in api specific representation
"""
return station_id
def get_params( self, current_time_raw, station ):
"""
@param current_time_raw: time as gmtime object
@param station: station id in general representation
@return dict with key value pairs for api parameters
"""
itdDate = strftime("%Y%m%d", current_time_raw)
itdTime = strftime("%H%M", current_time_raw)
return {
'SpEncId' : 0,
'coordOutputFormat' : "EPSG:4326",
'deleteAssignedStops' : 1,
'itdDate' : itdDate,
'itdTime' : itdTime,
'limit' : 50,
'mode' : "direct",
'name_dm' : "de:8111:{}".format(self.convert_station_id(station)),
'outputFormat' : "rapidJSON",
'serverInfo' : "1",
'type_dm' : "any",
'useRealtime' : "1",
'version' : "10.2.2.48"
}
def function_to_call( self, results ):
"""
function that gets called on an api response
@param results: queue object of the api that contains result dicts from
the api call.
{
'timestamp': gmtime object -> when was the api call made
'name': api's name (id),
'station': station id,
'results': crawl results -> what came back from api
}
"""
results.put(None)
converted_results = []
for r in iter(results.get, None):
station = {}
current_dict = {}
station[r['station']] = [current_dict]
            current_dict['timestamp'] = strftime('%Y-%m-%dT%H:%M:%SZ', r['timestamp'])  # e.g. "2017-04-14T12:30:00Z"
current_dict['lines'] = {}
if not 'results' in r or not 'stopEvents' in r['results']:
continue
stop_events = filter(lambda elem:
elem['transportation']['product']['name']
== 'S-Bahn', r['results']['stopEvents'])
for st_event in stop_events:
departure_dict = {}
# print st_event
if 'isRealtimeControlled' in st_event:
departure_dict['isRealtimeControlled'] = st_event['isRealtimeControlled']
else:
departure_dict['isRealtimeControlled'] = False
if 'isRealtimeControlled' in departure_dict and 'departureTimeEstimated' in st_event:
departure_dict['departureTimeEstimated'] = st_event['departureTimeEstimated']
# else:
# departure_dict['departureTimeEstimated'] = None
departure_dict['departureTimePlanned'] = st_event['departureTimePlanned']
if 'infos' in st_event:
departure_dict['infos'] = []
for i in range(len(st_event['infos'])):
info = {}
if 'content' in st_event['infos'][i]:
info['content'] = st_event['infos'][i]['content']
else:
info['content'] = ""
info['title'] = st_event['infos'][i]['title']
info['subtitle'] = st_event['infos'][i]['subtitle']
info['properties'] = st_event['infos'][i]['properties']
departure_dict['infos'].append(info)
line = st_event['transportation']['number']
departure_dict['name'] = st_event['transportation']['product']['name']
departure_dict['class'] = st_event['transportation']['product']['class']
if line in current_dict['lines']:
current_dict['lines'][line].append(departure_dict)
else:
current_dict['lines'][line] = [departure_dict]
converted_results.append(station)
# print "Results: "
# with open("results.json", 'w') as output:
# json.dump(converted_results, output, indent=4)
# pprint(converted_results)
return converted_results
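

# Editor's sketch, not part of the original module: a minimal end-to-end run
# against a stubbed queue. The station id 6118 and the empty stopEvents list
# are assumptions chosen purely for illustration.
if __name__ == '__main__':
    from queue import Queue
    from time import gmtime

    api = API_efaBeta()
    q = Queue()
    q.put({'timestamp': gmtime(), 'name': api.name, 'station': 6118,
           'results': {'stopEvents': []}})
    print(api.get_params(gmtime(), 6118)['name_dm'])
    print(api.function_to_call(q))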
| 41.991071 | 107 | 0.512439 | 447 | 4,703 | 5.232662 | 0.346756 | 0.050876 | 0.030782 | 0.02779 | 0.054724 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010166 | 0.372528 | 4,703 | 111 | 108 | 42.369369 | 0.782447 | 0.182862 | 0 | 0.043478 | 0 | 0 | 0.212896 | 0.018344 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0 | 0.014493 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ecae2e0cdf0ce7ac01b4920249b41f67edaa21f | 1,075 | py | Python | 2019/d22/d22.py | pravin/advent-2016 | ecb0f72b9152c13e9c05d3ed2510bf7b8aa0907c | [
"Apache-2.0"
] | null | null | null | 2019/d22/d22.py | pravin/advent-2016 | ecb0f72b9152c13e9c05d3ed2510bf7b8aa0907c | [
"Apache-2.0"
] | null | null | null | 2019/d22/d22.py | pravin/advent-2016 | ecb0f72b9152c13e9c05d3ed2510bf7b8aa0907c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3


class Day22:
    deck = list(range(0, 10007))

    def dealIntoNewStack(self):
        self.deck = self.deck[::-1]

    def cutNCards(self, n):
        self.deck = self.deck[n:] + self.deck[:n]

    def incrementN(self, n):
        deck_copy = [None] * len(self.deck)
        pointer = 0
        while len(self.deck) > 0:
            deck_copy[pointer] = self.deck.pop(0)
            # Wrap unconditionally: the original `if pointer > len(deck_copy)`
            # check missed the pointer == len(deck_copy) case, which would
            # index out of range on the next iteration.
            pointer = (pointer + n) % len(deck_copy)
        self.deck = deck_copy

    def partA(self):
        with open('input.txt') as fp:
            for line in fp:
                words = line.strip().split()
                if words[0] == 'deal':
                    if words[1] == 'with':
                        self.incrementN(int(words[3]))
                    else:
                        self.dealIntoNewStack()
                elif words[0] == 'cut':
                    self.cutNCards(int(words[1]))


if __name__ == '__main__':
    d = Day22()
    d.partA()
    print(d.deck.index(2019))
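

# Editor's sketch, not part of the original solution: a self-check of the
# trickiest primitive against the worked "deal with increment 3" example
# from the Advent of Code 2019 day 22 puzzle text. Call it manually.
def _self_check():
    d = Day22()
    d.deck = list(range(10))
    d.incrementN(3)
    assert d.deck == [0, 7, 4, 1, 8, 5, 2, 9, 6, 3]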
| 28.289474 | 54 | 0.476279 | 125 | 1,075 | 3.992 | 0.4 | 0.144289 | 0.048096 | 0.064128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036309 | 0.385116 | 1,075 | 37 | 55 | 29.054054 | 0.718608 | 0.019535 | 0 | 0 | 0 | 0 | 0.026591 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.2 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ed18e3a5bc3e56a30a5c177a3d61fd5dbe77a14 | 3,373 | py | Python | scripts/utils/MatlabUtils.py | Vants/stampsreplacer | a61d5174e1ee1d840a327ce6ec059a3a9f84a13b | [
"MIT"
] | 18 | 2019-02-04T11:22:25.000Z | 2022-02-15T10:02:14.000Z | scripts/utils/MatlabUtils.py | Vants/stampsreplacer | a61d5174e1ee1d840a327ce6ec059a3a9f84a13b | [
"MIT"
] | 1 | 2019-02-12T11:19:41.000Z | 2019-06-01T14:46:48.000Z | scripts/utils/MatlabUtils.py | Vants/stampsreplacer | a61d5174e1ee1d840a327ce6ec059a3a9f84a13b | [
"MIT"
] | 5 | 2019-11-27T16:22:12.000Z | 2021-04-14T11:10:44.000Z | import scipy.interpolate
import scipy.signal

import numpy as np


class MatlabUtils:

    @staticmethod
    def max(array: np.ndarray):
        if len(array) > 1:
            return np.amax(array, axis=0)
        else:
            return np.amax(array)

    @staticmethod
    def min(array: np.ndarray):
        if len(array) > 1:
            return np.amin(array, axis=0)
        else:
            return np.amin(array)

    @staticmethod
    def sum(array: np.ndarray):
        if len(array.shape) > 1:
            return np.sum(array, axis=0)
        else:
            return np.sum(array)

    @staticmethod
    def gausswin(M: int, alpha=2.5):
        """
        Works like Matlab's gausswin. SciPy's equivalent
        (scipy.signal.gaussian) is parameterized slightly differently,
        hence this port.
        Idea: https://github.com/openworm/open-worm-analysis-toolbox/blob/master/open_worm_analysis_toolbox/utils.py
        """
        N = M - 1
        n = np.arange(start=0, stop=M) - N / 2
        w = np.exp(-0.5 * np.power((alpha * n / (N / 2)), 2))
        return w

    @staticmethod
    def hist(a: np.ndarray, bins: np.ndarray, density=False):
        """Matlab's hist takes bin centers, while NumPy's histogram takes bin
        edges. Converting the centers to midpoint edges and padding with
        +-inf makes the NumPy result equal to Matlab's. `density` helps when
        decimal values are wanted in the response."""
        new_bins = np.r_[-np.inf, 0.5 * (bins[:-1] + bins[1:]), np.inf]
        return np.histogram(a, new_bins, density=density)

    @staticmethod
    def interp(vector: np.ndarray, interp_factor: int, kind: str = 'cubic'):
        vector_len = len(vector)
        arange = np.linspace(0, .1, vector_len)
        interp_fun = scipy.interpolate.interp1d(arange, vector, kind=kind)
        xnew = np.linspace(0, .1, vector_len * interp_factor)
        return interp_fun(xnew)

    @staticmethod
    def std(array: np.ndarray, axis=None):
        """Matlab's std normalizes by N - 1, so ddof=1 is needed here.
        https://stackoverflow.com/questions/27600207/why-does-numpy-std-give-a-different-result-to-matlab-std"""
        return np.std(array, axis, ddof=1)

    @staticmethod
    def polyfit_polyval(x: np.ndarray, y: np.ndarray, deg: int, max_density_or_percent_rand: float):
        """
        Works like Matlab's polyfit followed by polyval, where polyfit is
        called with three output arguments (coefficients plus centering and
        scaling values).
        https://stackoverflow.com/questions/45338872/matlab-polyval-function-with-three-outputs-equivalent-in-python-numpy/45339206#45339206
        """
        # Matlab's three-output polyfit centers and scales by the mean and
        # std of x (the original code scaled by std(y), which diverges from
        # Matlab's behaviour and from the cited answer).
        mu = np.mean(x)
        std = MatlabUtils.std(x)
        c_scaled = np.polyfit((x - mu) / std, y, deg)
        p_scaled = np.poly1d(c_scaled)
        polyval = p_scaled((max_density_or_percent_rand - mu) / std)
        return polyval

    @staticmethod
    def filter2(h, x, mode='same'):
        """https://stackoverflow.com/questions/43270274/equivalent-of-matlab-filter2filter-image-valid-in-python"""
        return scipy.signal.convolve2d(x, np.rot90(h, 2), mode)

    @staticmethod
    def lscov(A: np.ndarray, B: np.ndarray, weights: np.ndarray):
        """Least-squares solution in the presence of known covariance.
        https://stackoverflow.com/questions/27128688/how-to-use-least-squares-with-weight-matrix-in-python"""
        W_col_array = weights[:, np.newaxis]
        Aw = A * np.sqrt(W_col_array)
        Bw = B * np.sqrt(weights)[:, np.newaxis]
        return np.linalg.lstsq(Aw, Bw, rcond=None)[0]
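

# Editor's sketch, not part of the original module: a quick demonstration of
# the Matlab-compatible behaviours. The sample values are assumptions chosen
# purely for illustration.
if __name__ == '__main__':
    data = np.array([1.0, 2.0, 3.0, 4.0])
    # Matlab's std normalizes by N - 1 (ddof=1), unlike NumPy's default of N:
    print(MatlabUtils.std(data))    # ~1.2910, whereas np.std(data) is ~1.1180
    print(MatlabUtils.gausswin(4))  # mirrors Matlab's gausswin(4), alpha=2.5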
| 33.068627 | 140 | 0.631782 | 460 | 3,373 | 4.567391 | 0.365217 | 0.051404 | 0.026654 | 0.057116 | 0.121847 | 0.099952 | 0.057116 | 0.031414 | 0.031414 | 0 | 0 | 0.031715 | 0.242811 | 3,373 | 101 | 141 | 33.39604 | 0.790916 | 0.27394 | 0 | 0.245902 | 0 | 0 | 0.003843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163934 | false | 0 | 0.081967 | 0 | 0.47541 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |